author | Ingo Molnar <mingo@kernel.org> | 2014-05-07 13:15:46 +0200
committer | Ingo Molnar <mingo@kernel.org> | 2014-05-07 13:15:46 +0200
commit | 2fe5de9ce7d57498abc14b375cad2fcf8c3ee6cc (patch)
tree | 9478e8cf470c1d5bdb2d89b57a7e35919ab95e72 /drivers/char
parent | 08f8aeb55d7727d644dbbbbfb798fe937d47751d (diff)
parent | 2b4cfe64dee0d84506b951d81bf55d9891744d25 (diff)
Merge branch 'sched/urgent' into sched/core, to avoid conflicts
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'drivers/char')
-rw-r--r-- | drivers/char/Kconfig | 4
-rw-r--r-- | drivers/char/hw_random/Kconfig | 6
-rw-r--r-- | drivers/char/hw_random/atmel-rng.c | 23
-rw-r--r-- | drivers/char/hw_random/bcm2835-rng.c | 10
-rw-r--r-- | drivers/char/hw_random/core.c | 17
-rw-r--r-- | drivers/char/hw_random/nomadik-rng.c | 13
-rw-r--r-- | drivers/char/hw_random/omap3-rom-rng.c | 3
-rw-r--r-- | drivers/char/hw_random/picoxcell-rng.c | 27
-rw-r--r-- | drivers/char/hw_random/timeriomem-rng.c | 40
-rw-r--r-- | drivers/char/hw_random/virtio-rng.c | 3
-rw-r--r-- | drivers/char/ipmi/Kconfig | 12
-rw-r--r-- | drivers/char/ipmi/ipmi_bt_sm.c | 2
-rw-r--r-- | drivers/char/ipmi/ipmi_kcs_sm.c | 5
-rw-r--r-- | drivers/char/ipmi/ipmi_msghandler.c | 239
-rw-r--r-- | drivers/char/ipmi/ipmi_si_intf.c | 145
-rw-r--r-- | drivers/char/pcmcia/Kconfig | 2
-rw-r--r-- | drivers/char/random.c | 244
-rw-r--r-- | drivers/char/tpm/Kconfig | 2
-rw-r--r-- | drivers/char/ttyprintk.c | 15
-rw-r--r-- | drivers/char/virtio_console.c | 4
20 files changed, 450 insertions, 366 deletions
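A recurring theme in the hw_random hunks below is conversion to managed (devm_*) resources: devm_kzalloc(), devm_ioremap_resource() and devm_clk_get() replace open-coded request_mem_region()/ioremap()/clk_get() calls and their matching cleanup in error and remove paths. As an illustration only (not part of the commit, "foo" names are hypothetical), a minimal platform-driver probe()/remove() pair using that pattern looks roughly like this:

/*
 * Illustrative sketch, not taken from the patch: a probe() written with the
 * managed helpers the drivers below are converted to.
 */
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/err.h>

struct foo_priv {
	void __iomem *base;
	struct clk *clk;
};

static int foo_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct foo_priv *priv;
	int ret;

	/* devm_kzalloc(): freed automatically when the device is unbound */
	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/*
	 * devm_ioremap_resource() replaces the platform_get_resource() NULL
	 * check, request_mem_region() and ioremap() sequence and returns an
	 * ERR_PTR() on failure, so no unwinding is needed here.
	 */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(priv->base))
		return PTR_ERR(priv->base);

	/* devm_clk_get() removes the clk_put() from error and remove paths */
	priv->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(priv->clk))
		return PTR_ERR(priv->clk);

	ret = clk_prepare_enable(priv->clk);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, priv);
	return 0;
}

static int foo_remove(struct platform_device *pdev)
{
	struct foo_priv *priv = platform_get_drvdata(pdev);

	/* Only the clock disable remains; no iounmap(), kfree() or clk_put() */
	clk_disable_unprepare(priv->clk);
	return 0;
}

This is why the atmel, nomadik, picoxcell and timeriomem hunks below are mostly deletions: the explicit err_enable/out_unmap/out_free labels disappear once the resources are device-managed.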
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig index 1386749b48f..6e9f74a5c09 100644 --- a/drivers/char/Kconfig +++ b/drivers/char/Kconfig @@ -40,7 +40,7 @@ config SGI_MBCS source "drivers/tty/serial/Kconfig" config TTY_PRINTK - bool "TTY driver to output user messages via printk" + tristate "TTY driver to output user messages via printk" depends on EXPERT && TTY default n ---help--- @@ -408,7 +408,7 @@ config APPLICOM config SONYPI tristate "Sony Vaio Programmable I/O Control Device support" - depends on X86 && PCI && INPUT && !64BIT + depends on X86_32 && PCI && INPUT ---help--- This driver enables access to the Sony Programmable I/O Control Device which can be found in many (all ?) Sony Vaio laptops. diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig index 2f2b08457c6..244759bbd7b 100644 --- a/drivers/char/hw_random/Kconfig +++ b/drivers/char/hw_random/Kconfig @@ -342,11 +342,11 @@ config HW_RANDOM_TPM If unsure, say Y. config HW_RANDOM_MSM - tristate "Qualcomm MSM Random Number Generator support" - depends on HW_RANDOM && ARCH_MSM + tristate "Qualcomm SoCs Random Number Generator support" + depends on HW_RANDOM && ARCH_QCOM ---help--- This driver provides kernel-side support for the Random Number - Generator hardware found on Qualcomm MSM SoCs. + Generator hardware found on Qualcomm SoCs. To compile this driver as a module, choose M here. the module will be called msm-rng. diff --git a/drivers/char/hw_random/atmel-rng.c b/drivers/char/hw_random/atmel-rng.c index bf9fc6b7932..851bc7e20ad 100644 --- a/drivers/char/hw_random/atmel-rng.c +++ b/drivers/char/hw_random/atmel-rng.c @@ -54,29 +54,22 @@ static int atmel_trng_probe(struct platform_device *pdev) struct resource *res; int ret; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) - return -EINVAL; - trng = devm_kzalloc(&pdev->dev, sizeof(*trng), GFP_KERNEL); if (!trng) return -ENOMEM; - if (!devm_request_mem_region(&pdev->dev, res->start, - resource_size(res), pdev->name)) - return -EBUSY; - - trng->base = devm_ioremap(&pdev->dev, res->start, resource_size(res)); - if (!trng->base) - return -EBUSY; + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + trng->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(trng->base)) + return PTR_ERR(trng->base); - trng->clk = clk_get(&pdev->dev, NULL); + trng->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(trng->clk)) return PTR_ERR(trng->clk); ret = clk_enable(trng->clk); if (ret) - goto err_enable; + return ret; writel(TRNG_KEY | 1, trng->base + TRNG_CR); trng->rng.name = pdev->name; @@ -92,9 +85,6 @@ static int atmel_trng_probe(struct platform_device *pdev) err_register: clk_disable(trng->clk); -err_enable: - clk_put(trng->clk); - return ret; } @@ -106,7 +96,6 @@ static int atmel_trng_remove(struct platform_device *pdev) writel(TRNG_KEY, trng->base + TRNG_CR); clk_disable(trng->clk); - clk_put(trng->clk); return 0; } diff --git a/drivers/char/hw_random/bcm2835-rng.c b/drivers/char/hw_random/bcm2835-rng.c index 8c3b255e629..e900961cdd2 100644 --- a/drivers/char/hw_random/bcm2835-rng.c +++ b/drivers/char/hw_random/bcm2835-rng.c @@ -61,18 +61,18 @@ static int bcm2835_rng_probe(struct platform_device *pdev) } bcm2835_rng_ops.priv = (unsigned long)rng_base; + /* set warm-up count & enable */ + __raw_writel(RNG_WARMUP_COUNT, rng_base + RNG_STATUS); + __raw_writel(RNG_RBGEN, rng_base + RNG_CTRL); + /* register driver */ err = hwrng_register(&bcm2835_rng_ops); if (err) { dev_err(dev, "hwrng registration failed\n"); iounmap(rng_base); - 
} else { + } else dev_info(dev, "hwrng registered\n"); - /* set warm-up count & enable */ - __raw_writel(RNG_WARMUP_COUNT, rng_base + RNG_STATUS); - __raw_writel(RNG_RBGEN, rng_base + RNG_CTRL); - } return err; } diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c index b9495a8c05c..334601cc81c 100644 --- a/drivers/char/hw_random/core.c +++ b/drivers/char/hw_random/core.c @@ -40,6 +40,7 @@ #include <linux/miscdevice.h> #include <linux/delay.h> #include <linux/slab.h> +#include <linux/random.h> #include <asm/uaccess.h> @@ -301,9 +302,10 @@ err_misc_dereg: int hwrng_register(struct hwrng *rng) { - int must_register_misc; int err = -EINVAL; struct hwrng *old_rng, *tmp; + unsigned char bytes[16]; + int bytes_read; if (rng->name == NULL || (rng->data_read == NULL && rng->read == NULL)) @@ -326,7 +328,6 @@ int hwrng_register(struct hwrng *rng) goto out_unlock; } - must_register_misc = (current_rng == NULL); old_rng = current_rng; if (!old_rng) { err = hwrng_init(rng); @@ -335,18 +336,20 @@ int hwrng_register(struct hwrng *rng) current_rng = rng; } err = 0; - if (must_register_misc) { + if (!old_rng) { err = register_miscdev(); if (err) { - if (!old_rng) { - hwrng_cleanup(rng); - current_rng = NULL; - } + hwrng_cleanup(rng); + current_rng = NULL; goto out_unlock; } } INIT_LIST_HEAD(&rng->list); list_add_tail(&rng->list, &rng_list); + + bytes_read = rng_get_data(rng, bytes, sizeof(bytes), 1); + if (bytes_read > 0) + add_device_randomness(bytes, bytes_read); out_unlock: mutex_unlock(&rng_mutex); out: diff --git a/drivers/char/hw_random/nomadik-rng.c b/drivers/char/hw_random/nomadik-rng.c index 00e9d2d4663..9c858157724 100644 --- a/drivers/char/hw_random/nomadik-rng.c +++ b/drivers/char/hw_random/nomadik-rng.c @@ -43,7 +43,7 @@ static int nmk_rng_probe(struct amba_device *dev, const struct amba_id *id) void __iomem *base; int ret; - rng_clk = clk_get(&dev->dev, NULL); + rng_clk = devm_clk_get(&dev->dev, NULL); if (IS_ERR(rng_clk)) { dev_err(&dev->dev, "could not get rng clock\n"); ret = PTR_ERR(rng_clk); @@ -56,33 +56,28 @@ static int nmk_rng_probe(struct amba_device *dev, const struct amba_id *id) if (ret) goto out_clk; ret = -ENOMEM; - base = ioremap(dev->res.start, resource_size(&dev->res)); + base = devm_ioremap(&dev->dev, dev->res.start, + resource_size(&dev->res)); if (!base) goto out_release; nmk_rng.priv = (unsigned long)base; ret = hwrng_register(&nmk_rng); if (ret) - goto out_unmap; + goto out_release; return 0; -out_unmap: - iounmap(base); out_release: amba_release_regions(dev); out_clk: clk_disable(rng_clk); - clk_put(rng_clk); return ret; } static int nmk_rng_remove(struct amba_device *dev) { - void __iomem *base = (void __iomem *)nmk_rng.priv; hwrng_unregister(&nmk_rng); - iounmap(base); amba_release_regions(dev); clk_disable(rng_clk); - clk_put(rng_clk); return 0; } diff --git a/drivers/char/hw_random/omap3-rom-rng.c b/drivers/char/hw_random/omap3-rom-rng.c index c853e9e6857..6f2eaffed62 100644 --- a/drivers/char/hw_random/omap3-rom-rng.c +++ b/drivers/char/hw_random/omap3-rom-rng.c @@ -103,7 +103,7 @@ static int omap3_rom_rng_probe(struct platform_device *pdev) } setup_timer(&idle_timer, omap3_rom_rng_idle, 0); - rng_clk = clk_get(&pdev->dev, "ick"); + rng_clk = devm_clk_get(&pdev->dev, "ick"); if (IS_ERR(rng_clk)) { pr_err("unable to get RNG clock\n"); return PTR_ERR(rng_clk); @@ -120,7 +120,6 @@ static int omap3_rom_rng_remove(struct platform_device *pdev) { hwrng_unregister(&omap3_rom_rng_ops); clk_disable_unprepare(rng_clk); - clk_put(rng_clk); return 0; } 
diff --git a/drivers/char/hw_random/picoxcell-rng.c b/drivers/char/hw_random/picoxcell-rng.c index 3d4c2293c6f..eab5448ad56 100644 --- a/drivers/char/hw_random/picoxcell-rng.c +++ b/drivers/char/hw_random/picoxcell-rng.c @@ -104,24 +104,11 @@ static int picoxcell_trng_probe(struct platform_device *pdev) int ret; struct resource *mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!mem) { - dev_warn(&pdev->dev, "no memory resource\n"); - return -ENOMEM; - } - - if (!devm_request_mem_region(&pdev->dev, mem->start, resource_size(mem), - "picoxcell_trng")) { - dev_warn(&pdev->dev, "unable to request io mem\n"); - return -EBUSY; - } + rng_base = devm_ioremap_resource(&pdev->dev, mem); + if (IS_ERR(rng_base)) + return PTR_ERR(rng_base); - rng_base = devm_ioremap(&pdev->dev, mem->start, resource_size(mem)); - if (!rng_base) { - dev_warn(&pdev->dev, "unable to remap io mem\n"); - return -ENOMEM; - } - - rng_clk = clk_get(&pdev->dev, NULL); + rng_clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(rng_clk)) { dev_warn(&pdev->dev, "no clk\n"); return PTR_ERR(rng_clk); @@ -130,7 +117,7 @@ static int picoxcell_trng_probe(struct platform_device *pdev) ret = clk_enable(rng_clk); if (ret) { dev_warn(&pdev->dev, "unable to enable clk\n"); - goto err_enable; + return ret; } picoxcell_trng_start(); @@ -145,9 +132,6 @@ static int picoxcell_trng_probe(struct platform_device *pdev) err_register: clk_disable(rng_clk); -err_enable: - clk_put(rng_clk); - return ret; } @@ -155,7 +139,6 @@ static int picoxcell_trng_remove(struct platform_device *pdev) { hwrng_unregister(&picoxcell_trng); clk_disable(rng_clk); - clk_put(rng_clk); return 0; } diff --git a/drivers/char/hw_random/timeriomem-rng.c b/drivers/char/hw_random/timeriomem-rng.c index 73ce739f8e1..439ff8b28c4 100644 --- a/drivers/char/hw_random/timeriomem-rng.c +++ b/drivers/char/hw_random/timeriomem-rng.c @@ -118,7 +118,8 @@ static int timeriomem_rng_probe(struct platform_device *pdev) } /* Allocate memory for the device structure (and zero it) */ - priv = kzalloc(sizeof(struct timeriomem_rng_private_data), GFP_KERNEL); + priv = devm_kzalloc(&pdev->dev, + sizeof(struct timeriomem_rng_private_data), GFP_KERNEL); if (!priv) { dev_err(&pdev->dev, "failed to allocate device structure.\n"); return -ENOMEM; @@ -134,17 +135,16 @@ static int timeriomem_rng_probe(struct platform_device *pdev) period = i; else { dev_err(&pdev->dev, "missing period\n"); - err = -EINVAL; - goto out_free; + return -EINVAL; } - } else + } else { period = pdata->period; + } priv->period = usecs_to_jiffies(period); if (priv->period < 1) { dev_err(&pdev->dev, "period is less than one jiffy\n"); - err = -EINVAL; - goto out_free; + return -EINVAL; } priv->expires = jiffies; @@ -160,24 +160,16 @@ static int timeriomem_rng_probe(struct platform_device *pdev) priv->timeriomem_rng_ops.data_read = timeriomem_rng_data_read; priv->timeriomem_rng_ops.priv = (unsigned long)priv; - if (!request_mem_region(res->start, resource_size(res), - dev_name(&pdev->dev))) { - dev_err(&pdev->dev, "request_mem_region failed\n"); - err = -EBUSY; + priv->io_base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(priv->io_base)) { + err = PTR_ERR(priv->io_base); goto out_timer; } - priv->io_base = ioremap(res->start, resource_size(res)); - if (priv->io_base == NULL) { - dev_err(&pdev->dev, "ioremap failed\n"); - err = -EIO; - goto out_release_io; - } - err = hwrng_register(&priv->timeriomem_rng_ops); if (err) { dev_err(&pdev->dev, "problem registering\n"); - goto out; + goto out_timer; } dev_info(&pdev->dev, 
"32bits from 0x%p @ %dus\n", @@ -185,30 +177,18 @@ static int timeriomem_rng_probe(struct platform_device *pdev) return 0; -out: - iounmap(priv->io_base); -out_release_io: - release_mem_region(res->start, resource_size(res)); out_timer: del_timer_sync(&priv->timer); -out_free: - kfree(priv); return err; } static int timeriomem_rng_remove(struct platform_device *pdev) { struct timeriomem_rng_private_data *priv = platform_get_drvdata(pdev); - struct resource *res; - - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); hwrng_unregister(&priv->timeriomem_rng_ops); del_timer_sync(&priv->timer); - iounmap(priv->io_base); - release_mem_region(res->start, resource_size(res)); - kfree(priv); return 0; } diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c index c12398d1517..2ce0e225e58 100644 --- a/drivers/char/hw_random/virtio-rng.c +++ b/drivers/char/hw_random/virtio-rng.c @@ -47,8 +47,7 @@ static void register_buffer(u8 *buf, size_t size) sg_init_one(&sg, buf, size); /* There should always be room for one buffer. */ - if (virtqueue_add_inbuf(vq, &sg, 1, buf, GFP_KERNEL) < 0) - BUG(); + virtqueue_add_inbuf(vq, &sg, 1, buf, GFP_KERNEL); virtqueue_kick(vq); } diff --git a/drivers/char/ipmi/Kconfig b/drivers/char/ipmi/Kconfig index 0baa8fab4ea..db1c9b7adaa 100644 --- a/drivers/char/ipmi/Kconfig +++ b/drivers/char/ipmi/Kconfig @@ -50,6 +50,18 @@ config IPMI_SI Currently, only KCS and SMIC are supported. If you are using IPMI, you should probably say "y" here. +config IPMI_SI_PROBE_DEFAULTS + bool 'Probe for all possible IPMI system interfaces by default' + default n + depends on IPMI_SI + help + Modern systems will usually expose IPMI interfaces via a discoverable + firmware mechanism such as ACPI or DMI. Older systems do not, and so + the driver is forced to probe hardware manually. This may cause boot + delays. Say "n" here to disable this manual probing. IPMI will then + only be available on older systems if the "ipmi_si_intf.trydefaults=1" + boot argument is passed. + config IPMI_WATCHDOG tristate 'IPMI Watchdog Timer' help diff --git a/drivers/char/ipmi/ipmi_bt_sm.c b/drivers/char/ipmi/ipmi_bt_sm.c index f5e4cd7617f..61e71616689 100644 --- a/drivers/char/ipmi/ipmi_bt_sm.c +++ b/drivers/char/ipmi/ipmi_bt_sm.c @@ -352,7 +352,7 @@ static inline void write_all_bytes(struct si_sm_data *bt) static inline int read_all_bytes(struct si_sm_data *bt) { - unsigned char i; + unsigned int i; /* * length is "framing info", minimum = 4: NetFn, Seq, Cmd, cCode. 
diff --git a/drivers/char/ipmi/ipmi_kcs_sm.c b/drivers/char/ipmi/ipmi_kcs_sm.c index 6a4bdc18955..8c25f596808 100644 --- a/drivers/char/ipmi/ipmi_kcs_sm.c +++ b/drivers/char/ipmi/ipmi_kcs_sm.c @@ -251,8 +251,9 @@ static inline int check_obf(struct si_sm_data *kcs, unsigned char status, if (!GET_STATUS_OBF(status)) { kcs->obf_timeout -= time; if (kcs->obf_timeout < 0) { - start_error_recovery(kcs, "OBF not ready in time"); - return 1; + kcs->obf_timeout = OBF_RETRY_TIMEOUT; + start_error_recovery(kcs, "OBF not ready in time"); + return 1; } return 0; } diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c index ec4e10fcf1a..e6db9381b2c 100644 --- a/drivers/char/ipmi/ipmi_msghandler.c +++ b/drivers/char/ipmi/ipmi_msghandler.c @@ -55,6 +55,7 @@ static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void); static int ipmi_init_msghandler(void); static void smi_recv_tasklet(unsigned long); static void handle_new_recv_msgs(ipmi_smi_t intf); +static void need_waiter(ipmi_smi_t intf); static int initialized; @@ -73,14 +74,28 @@ static struct proc_dir_entry *proc_ipmi_root; */ #define MAX_MSG_TIMEOUT 60000 +/* Call every ~1000 ms. */ +#define IPMI_TIMEOUT_TIME 1000 + +/* How many jiffies does it take to get to the timeout time. */ +#define IPMI_TIMEOUT_JIFFIES ((IPMI_TIMEOUT_TIME * HZ) / 1000) + +/* + * Request events from the queue every second (this is the number of + * IPMI_TIMEOUT_TIMES between event requests). Hopefully, in the + * future, IPMI will add a way to know immediately if an event is in + * the queue and this silliness can go away. + */ +#define IPMI_REQUEST_EV_TIME (1000 / (IPMI_TIMEOUT_TIME)) + /* * The main "user" data structure. */ struct ipmi_user { struct list_head link; - /* Set to "0" when the user is destroyed. */ - int valid; + /* Set to false when the user is destroyed. */ + bool valid; struct kref refcount; @@ -92,7 +107,7 @@ struct ipmi_user { ipmi_smi_t intf; /* Does this interface receive IPMI events? */ - int gets_events; + bool gets_events; }; struct cmd_rcvr { @@ -383,6 +398,9 @@ struct ipmi_smi { unsigned int waiting_events_count; /* How many events in queue? */ char delivering_events; char event_msg_printed; + atomic_t event_waiters; + unsigned int ticks_to_req_ev; + int last_needs_timer; /* * The event receiver for my BMC, only really used at panic @@ -395,7 +413,7 @@ struct ipmi_smi { /* For handling of maintenance mode. */ int maintenance_mode; - int maintenance_mode_enable; + bool maintenance_mode_enable; int auto_maintenance_timeout; spinlock_t maintenance_mode_lock; /* Used in a timer... 
*/ @@ -451,7 +469,6 @@ static DEFINE_MUTEX(ipmi_interfaces_mutex); static LIST_HEAD(smi_watchers); static DEFINE_MUTEX(smi_watchers_mutex); - #define ipmi_inc_stat(intf, stat) \ atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat]) #define ipmi_get_stat(intf, stat) \ @@ -772,6 +789,7 @@ static int intf_next_seq(ipmi_smi_t intf, *seq = i; *seqid = intf->seq_table[i].seqid; intf->curr_seq = (i+1)%IPMI_IPMB_NUM_SEQ; + need_waiter(intf); } else { rv = -EAGAIN; } @@ -941,7 +959,7 @@ int ipmi_create_user(unsigned int if_num, new_user->handler = handler; new_user->handler_data = handler_data; new_user->intf = intf; - new_user->gets_events = 0; + new_user->gets_events = false; if (!try_module_get(intf->handlers->owner)) { rv = -ENODEV; @@ -962,10 +980,15 @@ int ipmi_create_user(unsigned int if_num, */ mutex_unlock(&ipmi_interfaces_mutex); - new_user->valid = 1; + new_user->valid = true; spin_lock_irqsave(&intf->seq_lock, flags); list_add_rcu(&new_user->link, &intf->users); spin_unlock_irqrestore(&intf->seq_lock, flags); + if (handler->ipmi_watchdog_pretimeout) { + /* User wants pretimeouts, so make sure to watch for them. */ + if (atomic_inc_return(&intf->event_waiters) == 1) + need_waiter(intf); + } *user = new_user; return 0; @@ -1019,7 +1042,13 @@ int ipmi_destroy_user(ipmi_user_t user) struct cmd_rcvr *rcvr; struct cmd_rcvr *rcvrs = NULL; - user->valid = 0; + user->valid = false; + + if (user->handler->ipmi_watchdog_pretimeout) + atomic_dec(&intf->event_waiters); + + if (user->gets_events) + atomic_dec(&intf->event_waiters); /* Remove the user from the interface's sequence table. */ spin_lock_irqsave(&intf->seq_lock, flags); @@ -1155,25 +1184,23 @@ int ipmi_set_maintenance_mode(ipmi_user_t user, int mode) if (intf->maintenance_mode != mode) { switch (mode) { case IPMI_MAINTENANCE_MODE_AUTO: - intf->maintenance_mode = mode; intf->maintenance_mode_enable = (intf->auto_maintenance_timeout > 0); break; case IPMI_MAINTENANCE_MODE_OFF: - intf->maintenance_mode = mode; - intf->maintenance_mode_enable = 0; + intf->maintenance_mode_enable = false; break; case IPMI_MAINTENANCE_MODE_ON: - intf->maintenance_mode = mode; - intf->maintenance_mode_enable = 1; + intf->maintenance_mode_enable = true; break; default: rv = -EINVAL; goto out_unlock; } + intf->maintenance_mode = mode; maintenance_mode_update(intf); } @@ -1184,7 +1211,7 @@ int ipmi_set_maintenance_mode(ipmi_user_t user, int mode) } EXPORT_SYMBOL(ipmi_set_maintenance_mode); -int ipmi_set_gets_events(ipmi_user_t user, int val) +int ipmi_set_gets_events(ipmi_user_t user, bool val) { unsigned long flags; ipmi_smi_t intf = user->intf; @@ -1194,8 +1221,18 @@ int ipmi_set_gets_events(ipmi_user_t user, int val) INIT_LIST_HEAD(&msgs); spin_lock_irqsave(&intf->events_lock, flags); + if (user->gets_events == val) + goto out; + user->gets_events = val; + if (val) { + if (atomic_inc_return(&intf->event_waiters) == 1) + need_waiter(intf); + } else { + atomic_dec(&intf->event_waiters); + } + if (intf->delivering_events) /* * Another thread is delivering events for this, so @@ -1289,6 +1326,9 @@ int ipmi_register_for_cmd(ipmi_user_t user, goto out_unlock; } + if (atomic_inc_return(&intf->event_waiters) == 1) + need_waiter(intf); + list_add_rcu(&rcvr->link, &intf->cmd_rcvrs); out_unlock: @@ -1330,6 +1370,7 @@ int ipmi_unregister_for_cmd(ipmi_user_t user, mutex_unlock(&intf->cmd_rcvrs_mutex); synchronize_rcu(); while (rcvrs) { + atomic_dec(&intf->event_waiters); rcvr = rcvrs; rcvrs = rcvr->next; kfree(rcvr); @@ -1535,7 +1576,7 @@ static int i_ipmi_request(ipmi_user_t user, 
= IPMI_MAINTENANCE_MODE_TIMEOUT; if (!intf->maintenance_mode && !intf->maintenance_mode_enable) { - intf->maintenance_mode_enable = 1; + intf->maintenance_mode_enable = true; maintenance_mode_update(intf); } spin_unlock_irqrestore(&intf->maintenance_mode_lock, @@ -2876,6 +2917,8 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers, (unsigned long) intf); atomic_set(&intf->watchdog_pretimeouts_to_deliver, 0); spin_lock_init(&intf->events_lock); + atomic_set(&intf->event_waiters, 0); + intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME; INIT_LIST_HEAD(&intf->waiting_events); intf->waiting_events_count = 0; mutex_init(&intf->cmd_rcvrs_mutex); @@ -3965,7 +4008,8 @@ smi_from_recv_msg(ipmi_smi_t intf, struct ipmi_recv_msg *recv_msg, static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent, struct list_head *timeouts, long timeout_period, - int slot, unsigned long *flags) + int slot, unsigned long *flags, + unsigned int *waiting_msgs) { struct ipmi_recv_msg *msg; struct ipmi_smi_handlers *handlers; @@ -3977,8 +4021,10 @@ static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent, return; ent->timeout -= timeout_period; - if (ent->timeout > 0) + if (ent->timeout > 0) { + (*waiting_msgs)++; return; + } if (ent->retries_left == 0) { /* The message has used all its retries. */ @@ -3995,6 +4041,8 @@ static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent, struct ipmi_smi_msg *smi_msg; /* More retries, send again. */ + (*waiting_msgs)++; + /* * Start with the max timer, set to normal timer after * the message is sent. @@ -4040,117 +4088,118 @@ static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent, } } -static void ipmi_timeout_handler(long timeout_period) +static unsigned int ipmi_timeout_handler(ipmi_smi_t intf, long timeout_period) { - ipmi_smi_t intf; struct list_head timeouts; struct ipmi_recv_msg *msg, *msg2; unsigned long flags; int i; + unsigned int waiting_msgs = 0; - rcu_read_lock(); - list_for_each_entry_rcu(intf, &ipmi_interfaces, link) { - tasklet_schedule(&intf->recv_tasklet); - - /* - * Go through the seq table and find any messages that - * have timed out, putting them in the timeouts - * list. - */ - INIT_LIST_HEAD(&timeouts); - spin_lock_irqsave(&intf->seq_lock, flags); - for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) - check_msg_timeout(intf, &(intf->seq_table[i]), - &timeouts, timeout_period, i, - &flags); - spin_unlock_irqrestore(&intf->seq_lock, flags); + /* + * Go through the seq table and find any messages that + * have timed out, putting them in the timeouts + * list. + */ + INIT_LIST_HEAD(&timeouts); + spin_lock_irqsave(&intf->seq_lock, flags); + for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) + check_msg_timeout(intf, &(intf->seq_table[i]), + &timeouts, timeout_period, i, + &flags, &waiting_msgs); + spin_unlock_irqrestore(&intf->seq_lock, flags); - list_for_each_entry_safe(msg, msg2, &timeouts, link) - deliver_err_response(msg, IPMI_TIMEOUT_COMPLETION_CODE); + list_for_each_entry_safe(msg, msg2, &timeouts, link) + deliver_err_response(msg, IPMI_TIMEOUT_COMPLETION_CODE); - /* - * Maintenance mode handling. Check the timeout - * optimistically before we claim the lock. It may - * mean a timeout gets missed occasionally, but that - * only means the timeout gets extended by one period - * in that case. No big deal, and it avoids the lock - * most of the time. - */ + /* + * Maintenance mode handling. Check the timeout + * optimistically before we claim the lock. 
It may + * mean a timeout gets missed occasionally, but that + * only means the timeout gets extended by one period + * in that case. No big deal, and it avoids the lock + * most of the time. + */ + if (intf->auto_maintenance_timeout > 0) { + spin_lock_irqsave(&intf->maintenance_mode_lock, flags); if (intf->auto_maintenance_timeout > 0) { - spin_lock_irqsave(&intf->maintenance_mode_lock, flags); - if (intf->auto_maintenance_timeout > 0) { - intf->auto_maintenance_timeout - -= timeout_period; - if (!intf->maintenance_mode - && (intf->auto_maintenance_timeout <= 0)) { - intf->maintenance_mode_enable = 0; - maintenance_mode_update(intf); - } + intf->auto_maintenance_timeout + -= timeout_period; + if (!intf->maintenance_mode + && (intf->auto_maintenance_timeout <= 0)) { + intf->maintenance_mode_enable = false; + maintenance_mode_update(intf); } - spin_unlock_irqrestore(&intf->maintenance_mode_lock, - flags); } + spin_unlock_irqrestore(&intf->maintenance_mode_lock, + flags); } - rcu_read_unlock(); + + tasklet_schedule(&intf->recv_tasklet); + + return waiting_msgs; } -static void ipmi_request_event(void) +static void ipmi_request_event(ipmi_smi_t intf) { - ipmi_smi_t intf; struct ipmi_smi_handlers *handlers; - rcu_read_lock(); - /* - * Called from the timer, no need to check if handlers is - * valid. - */ - list_for_each_entry_rcu(intf, &ipmi_interfaces, link) { - /* No event requests when in maintenance mode. */ - if (intf->maintenance_mode_enable) - continue; + /* No event requests when in maintenance mode. */ + if (intf->maintenance_mode_enable) + return; - handlers = intf->handlers; - if (handlers) - handlers->request_events(intf->send_info); - } - rcu_read_unlock(); + handlers = intf->handlers; + if (handlers) + handlers->request_events(intf->send_info); } static struct timer_list ipmi_timer; -/* Call every ~1000 ms. */ -#define IPMI_TIMEOUT_TIME 1000 - -/* How many jiffies does it take to get to the timeout time. */ -#define IPMI_TIMEOUT_JIFFIES ((IPMI_TIMEOUT_TIME * HZ) / 1000) - -/* - * Request events from the queue every second (this is the number of - * IPMI_TIMEOUT_TIMES between event requests). Hopefully, in the - * future, IPMI will add a way to know immediately if an event is in - * the queue and this silliness can go away. - */ -#define IPMI_REQUEST_EV_TIME (1000 / (IPMI_TIMEOUT_TIME)) - static atomic_t stop_operation; -static unsigned int ticks_to_req_ev = IPMI_REQUEST_EV_TIME; static void ipmi_timeout(unsigned long data) { + ipmi_smi_t intf; + int nt = 0; + if (atomic_read(&stop_operation)) return; - ticks_to_req_ev--; - if (ticks_to_req_ev == 0) { - ipmi_request_event(); - ticks_to_req_ev = IPMI_REQUEST_EV_TIME; - } + rcu_read_lock(); + list_for_each_entry_rcu(intf, &ipmi_interfaces, link) { + int lnt = 0; + + if (atomic_read( |