 Documentation/block/queue-sysfs.txt | 63
 block/blk-core.c                    |  6
 block/blk.h                         |  8
 drivers/usb/host/whci/asl.c         |  9
 drivers/usb/host/whci/hw.c          | 15
 drivers/usb/host/whci/pzl.c         |  9
 drivers/usb/host/whci/whcd.h        |  1
 drivers/usb/wusbcore/devconnect.c   |  1
 drivers/usb/wusbcore/rh.c           |  3
 drivers/uwb/allocator.c             |  1
 drivers/uwb/drp.c                   |  4
 drivers/uwb/rsv.c                   | 21
 drivers/virtio/virtio_pci.c         |  2
 include/linux/bio.h                 | 10
 include/linux/blkdev.h              |  2
 include/linux/module.h              | 25
 kernel/module.c                     | 35
 17 files changed, 174 insertions, 41 deletions
diff --git a/Documentation/block/queue-sysfs.txt b/Documentation/block/queue-sysfs.txt
new file mode 100644
index 00000000000..e164403f60e
--- /dev/null
+++ b/Documentation/block/queue-sysfs.txt
@@ -0,0 +1,63 @@
+Queue sysfs files
+=================
+
+This text file will detail the queue files that are located in the sysfs tree
+for each block device. Note that stacked devices typically do not export
+any settings, since their queue merely functions as a remapping target.
+These files are the ones found in the /sys/block/xxx/queue/ directory.
+
+Files denoted with a RO postfix are readonly and the RW postfix means
+read-write.
+
+hw_sector_size (RO)
+-------------------
+This is the hardware sector size of the device, in bytes.
+
+max_hw_sectors_kb (RO)
+----------------------
+This is the maximum number of kilobytes supported in a single data transfer.
+
+max_sectors_kb (RW)
+-------------------
+This is the maximum number of kilobytes that the block layer will allow
+for a filesystem request. Must be smaller than or equal to the maximum
+size allowed by the hardware.
+
+nomerges (RW)
+-------------
+This enables the user to disable the lookup logic involved with IO merging
+requests in the block layer. Merging may still occur through a direct
+1-hit cache, since that comes for (almost) free. The IO scheduler will not
+waste cycles doing tree/hash lookups for merges if nomerges is 1. Defaults
+to 0, enabling all merges.
+
+nr_requests (RW)
+----------------
+This controls how many requests may be allocated in the block layer for
+read or write requests. Note that the total allocated number may be twice
+this amount, since it applies only to reads or writes (not the accumulated
+sum).
+
+read_ahead_kb (RW)
+------------------
+Maximum number of kilobytes to read-ahead for filesystems on this block
+device.
+
+rq_affinity (RW)
+----------------
+If this option is enabled, the block layer will migrate request completions
+to the CPU that originally submitted the request. For some workloads
+this provides a significant reduction in CPU cycles due to caching effects.
+
+scheduler (RW)
+--------------
+When read, this file will display the current and available IO schedulers
+for this block device. The currently active IO scheduler will be enclosed
+in [] brackets. Writing an IO scheduler name to this file will switch
+control of this block device to that new IO scheduler. Note that writing
+an IO scheduler name to this file will attempt to load that IO scheduler
+module, if it isn't already present in the system.
+
+
+
+Jens Axboe <jens.axboe@oracle.com>, February 2009
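The attributes documented above are ordinary text files under sysfs, so they can be
exercised from user space with plain reads and writes. The short program below is a
minimal sketch, not part of this patch: it prints the current scheduler for an assumed
/sys/block/sda/queue/ directory and, if given an argument, writes a new scheduler name
back. The device path, the program name and the minimal error handling are illustrative
assumptions only.

/* Minimal sketch: query or set an I/O scheduler through queue sysfs.
 * The /sys/block/sda path is an assumption; writing usually needs root. */
#include <stdio.h>

#define SCHED_ATTR "/sys/block/sda/queue/scheduler"

int main(int argc, char **argv)
{
	char buf[256];
	FILE *f = fopen(SCHED_ATTR, "r");

	if (!f) {
		perror(SCHED_ATTR);
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("current: %s", buf);	/* active scheduler shown in [] */
	fclose(f);

	if (argc > 1) {
		f = fopen(SCHED_ATTR, "w");
		if (!f) {
			perror(SCHED_ATTR);
			return 1;
		}
		/* Writing a name switches schedulers and, as described above,
		 * tries to load the corresponding module if it is missing. */
		fprintf(f, "%s\n", argv[1]);
		fclose(f);
	}
	return 0;
}

Running it with, for example, "deadline" as the argument would switch the queue over,
loading the scheduler module first if necessary.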
diff --git a/block/blk-core.c b/block/blk-core.c
index ca69f3d9410..29bcfac6c68 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -69,7 +69,7 @@ static void drive_stat_acct(struct request *rq, int new_io)
 	int rw = rq_data_dir(rq);
 	int cpu;
 
-	if (!blk_fs_request(rq) || !disk || !blk_queue_io_stat(disk->queue))
+	if (!blk_fs_request(rq) || !disk || !blk_do_io_stat(disk->queue))
 		return;
 
 	cpu = part_stat_lock();
@@ -1667,7 +1667,7 @@ static void blk_account_io_completion(struct request *req, unsigned int bytes)
 {
 	struct gendisk *disk = req->rq_disk;
 
-	if (!disk || !blk_queue_io_stat(disk->queue))
+	if (!disk || !blk_do_io_stat(disk->queue))
 		return;
 
 	if (blk_fs_request(req)) {
@@ -1686,7 +1686,7 @@ static void blk_account_io_done(struct request *req)
 {
 	struct gendisk *disk = req->rq_disk;
 
-	if (!disk || !blk_queue_io_stat(disk->queue))
+	if (!disk || !blk_do_io_stat(disk->queue))
 		return;
 
 	/*
diff --git a/block/blk.h b/block/blk.h
index 6e1ed40534e..0dce92c3749 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -108,4 +108,12 @@ static inline int blk_cpu_to_group(int cpu)
 #endif
 }
 
+static inline int blk_do_io_stat(struct request_queue *q)
+{
+	if (q)
+		return blk_queue_io_stat(q);
+
+	return 0;
+}
+
 #endif
diff --git a/drivers/usb/host/whci/asl.c b/drivers/usb/host/whci/asl.c
index 577c0d29849..2291c5f5af5 100644
--- a/drivers/usb/host/whci/asl.c
+++ b/drivers/usb/host/whci/asl.c
@@ -170,12 +170,17 @@ void asl_stop(struct whc *whc)
 void asl_update(struct whc *whc, uint32_t wusbcmd)
 {
 	struct wusbhc *wusbhc = &whc->wusbhc;
+	long t;
 
 	mutex_lock(&wusbhc->mutex);
 	if (wusbhc->active) {
 		whc_write_wusbcmd(whc, wusbcmd, wusbcmd);
-		wait_event(whc->async_list_wq,
-			(le_readl(whc->base + WUSBCMD) & WUSBCMD_ASYNC_UPDATED) == 0);
+		t = wait_event_timeout(
+			whc->async_list_wq,
+			(le_readl(whc->base + WUSBCMD) & WUSBCMD_ASYNC_UPDATED) == 0,
+			msecs_to_jiffies(1000));
+		if (t == 0)
+			whc_hw_error(whc, "ASL update timeout");
 	}
 	mutex_unlock(&wusbhc->mutex);
 }
diff --git a/drivers/usb/host/whci/hw.c b/drivers/usb/host/whci/hw.c
index d498e720321..6afa2e37916 100644
--- a/drivers/usb/host/whci/hw.c
+++ b/drivers/usb/host/whci/hw.c
@@ -87,3 +87,18 @@ out:
 
 	return ret;
 }
+
+/**
+ * whc_hw_error - recover from a hardware error
+ * @whc:    the WHCI HC that broke.
+ * @reason: a description of the failure.
+ *
+ * Recover from broken hardware with a full reset.
+ */
+void whc_hw_error(struct whc *whc, const char *reason)
+{
+	struct wusbhc *wusbhc = &whc->wusbhc;
+
+	dev_err(&whc->umc->dev, "hardware error: %s\n", reason);
+	wusbhc_reset_all(wusbhc);
+}
diff --git a/drivers/usb/host/whci/pzl.c b/drivers/usb/host/whci/pzl.c
index 2ae5abf69a6..7dc85a0bee7 100644
--- a/drivers/usb/host/whci/pzl.c
+++ b/drivers/usb/host/whci/pzl.c
@@ -183,12 +183,17 @@ void pzl_stop(struct whc *whc)
 void pzl_update(struct whc *whc, uint32_t wusbcmd)
 {
 	struct wusbhc *wusbhc = &whc->wusbhc;
+	long t;
 
 	mutex_lock(&wusbhc->mutex);
 	if (wusbhc->active) {
 		whc_write_wusbcmd(whc, wusbcmd, wusbcmd);
-		wait_event(whc->periodic_list_wq,
-			(le_readl(whc->base + WUSBCMD) & WUSBCMD_PERIODIC_UPDATED) == 0);
+		t = wait_event_timeout(
+			whc->periodic_list_wq,
+			(le_readl(whc->base + WUSBCMD) & WUSBCMD_PERIODIC_UPDATED) == 0,
+			msecs_to_jiffies(1000));
+		if (t == 0)
+			whc_hw_error(whc, "PZL update timeout");
 	}
 	mutex_unlock(&wusbhc->mutex);
 }
diff --git a/drivers/usb/host/whci/whcd.h b/drivers/usb/host/whci/whcd.h
index 0f3540f04f5..d3543a181dc 100644
--- a/drivers/usb/host/whci/whcd.h
+++ b/drivers/usb/host/whci/whcd.h
@@ -137,6 +137,7 @@ void whc_clean_up(struct whc *whc);
 /* hw.c */
 void whc_write_wusbcmd(struct whc *whc, u32 mask, u32 val);
 int whc_do_gencmd(struct whc *whc, u32 cmd, u32 params, void *addr, size_t len);
+void whc_hw_error(struct whc *whc, const char *reason);
 
 /* wusb.c */
 int whc_wusbhc_start(struct wusbhc *wusbhc);
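The asl_update() and pzl_update() changes above share one pattern: wait for the
controller to acknowledge a command, but only for a bounded time, and fall back to a
recovery path (whc_hw_error()) when the deadline passes. wait_event_timeout() only
exists inside the kernel, so the sketch below is a user-space analog using POSIX
condition variables; the names (update_done, recover()) and the one-second budget are
assumptions chosen to mirror the patch, not WHCI code.

/* User-space analog of the bounded wait in asl_update()/pzl_update():
 * wait up to one second for an acknowledgement, then run a recovery path.
 * Build with: cc -o timedwait timedwait.c -lpthread (name is illustrative). */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int update_done;			/* stands in for the WUSBCMD bit clearing */

static void recover(const char *reason)
{
	/* Analog of whc_hw_error(): report and reset whatever is stuck. */
	fprintf(stderr, "hardware error: %s\n", reason);
}

static void *worker(void *arg)
{
	sleep(2);				/* pretend the "hardware" is too slow */
	pthread_mutex_lock(&lock);
	update_done = 1;
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t tid;
	struct timespec deadline;
	int err = 0;

	pthread_create(&tid, NULL, worker, NULL);

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += 1;			/* analog of msecs_to_jiffies(1000) */

	pthread_mutex_lock(&lock);
	while (!update_done && err != ETIMEDOUT)
		err = pthread_cond_timedwait(&cond, &lock, &deadline);
	pthread_mutex_unlock(&lock);

	if (err == ETIMEDOUT)
		recover("ASL update timeout");

	pthread_join(tid, NULL);
	return 0;
}

The design point is the same as in the patch: a missing acknowledgement no longer
blocks the caller forever; it is turned into an explicit error and a reset.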
diff --git a/drivers/usb/wusbcore/devconnect.c b/drivers/usb/wusbcore/devconnect.c
index e2e7e4bc846..8e18141bb2e 100644
--- a/drivers/usb/wusbcore/devconnect.c
+++ b/drivers/usb/wusbcore/devconnect.c
@@ -386,6 +386,7 @@ static void __wusbhc_dev_disconnect(struct wusbhc *wusbhc,
 			| USB_PORT_STAT_LOW_SPEED | USB_PORT_STAT_HIGH_SPEED);
 	port->change |= USB_PORT_STAT_C_CONNECTION | USB_PORT_STAT_C_ENABLE;
 	if (wusb_dev) {
+		dev_dbg(wusbhc->dev, "disconnecting device from port %d\n", wusb_dev->port_idx);
 		if (!list_empty(&wusb_dev->cack_node))
 			list_del_init(&wusb_dev->cack_node);
 		/* For the one in cack_add() */
diff --git a/drivers/usb/wusbcore/rh.c b/drivers/usb/wusbcore/rh.c
index 3937bf6f8ce..9fe4246cecb 100644
--- a/drivers/usb/wusbcore/rh.c
+++ b/drivers/usb/wusbcore/rh.c
@@ -100,6 +100,9 @@ static int wusbhc_rh_port_reset(struct wusbhc *wusbhc, u8 port_idx)
 	struct wusb_port *port = wusb_port_by_idx(wusbhc, port_idx);
 	struct wusb_dev *wusb_dev = port->wusb_dev;
 
+	if (wusb_dev == NULL)
+		return -ENOTCONN;
+
 	port->status |= USB_PORT_STAT_RESET;
 	port->change |= USB_PORT_STAT_C_RESET;
 
diff --git a/drivers/uwb/allocator.c b/drivers/uwb/allocator.c
index c8185e6b0cd..c13cec7dcbc 100644
--- a/drivers/uwb/allocator.c
+++ b/drivers/uwb/allocator.c
@@ -15,7 +15,6 @@
  * You should have received a copy of the GNU General Public License
  * along with this program. If not, see <http://www.gnu.org/licenses/>.
  */
-#include <linux/version.h>
 #include <linux/kernel.h>
 #include <linux/uwb.h>
 
diff --git a/drivers/uwb/drp.c b/drivers/uwb/drp.c
index 2b4f9406789..4f5ca99a04b 100644
--- a/drivers/uwb/drp.c
+++ b/drivers/uwb/drp.c
@@ -66,14 +66,14 @@ static void uwb_rc_set_drp_cmd_done(struct uwb_rc *rc, void *arg,
 	} else
 		dev_err(&rc->uwb_dev.dev, "SET-DRP-IE: timeout\n");
 
-	spin_lock(&rc->rsvs_lock);
+	spin_lock_bh(&rc->rsvs_lock);
 	if (rc->set_drp_ie_pending > 1) {
 		rc->set_drp_ie_pending = 0;
 		uwb_rsv_queue_update(rc);
 	} else {
 		rc->set_drp_ie_pending = 0;
 	}
-	spin_unlock(&rc->rsvs_lock);
+	spin_unlock_bh(&rc->rsvs_lock);
 }
 
 /**
diff --git a/drivers/uwb/rsv.c b/drivers/uwb/rsv.c
index ec6eecb32f3..6b76f4bb4cc 100644
--- a/drivers/uwb/rsv.c
+++ b/drivers/uwb/rsv.c
@@ -114,7 +114,8 @@ void uwb_rsv_dump(char *text, struct uwb_rsv *rsv)
 		devaddr = rsv->target.devaddr;
 	uwb_dev_addr_print(target, sizeof(target), &devaddr);
 
-	dev_dbg(dev, "rsv %s -> %s: %s\n", owner, target, uwb_rsv_state_str(rsv->state));
+	dev_dbg(dev, "rsv %s %s -> %s: %s\n",
+		text, owner, target, uwb_rsv_state_str(rsv->state));
 }
 
 static void uwb_rsv_release(struct kref *kref)
@@ -511,8 +512,7 @@ void uwb_rsv_remove(struct uwb_rsv *rsv)
 
 	if (uwb_rsv_is_owner(rsv))
 		uwb_rsv_put_stream(rsv);
-
-	del_timer_sync(&rsv->timer);
+	uwb_dev_put(rsv->owner);
 
 	if (rsv->target.type == UWB_RSV_TARGET_DEV)
 		uwb_dev_put(rsv->target.dev);
@@ -870,7 +870,7 @@ void uwb_rsv_queue_update(struct uwb_rc *rc)
  */
 void uwb_rsv_sched_update(struct uwb_rc *rc)
 {
-	spin_lock(&rc->rsvs_lock);
+	spin_lock_bh(&rc->rsvs_lock);
 	if (!delayed_work_pending(&rc->rsv_update_work)) {
 		if (rc->set_drp_ie_pending > 0) {
 			rc->set_drp_ie_pending++;
@@ -879,7 +879,7 @@ void uwb_rsv_sched_update(struct uwb_rc *rc)
 			uwb_rsv_queue_update(rc);
 	}
 unlock:
-	spin_unlock(&rc->rsvs_lock);
+	spin_unlock_bh(&rc->rsvs_lock);
 }
 
 /*
@@ -943,13 +943,22 @@ void uwb_rsv_remove_all(struct uwb_rc *rc)
 
 	mutex_lock(&rc->rsvs_mutex);
 	list_for_each_entry_safe(rsv, t, &rc->reservations, rc_node) {
-		uwb_rsv_remove(rsv);
+		if (rsv->state != UWB_RSV_STATE_NONE)
+			uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE);
+		del_timer_sync(&rsv->timer);
 	}
 	/* Cancel any postponed update. */
 	rc->set_drp_ie_pending = 0;
 	mutex_unlock(&rc->rsvs_mutex);
 
 	cancel_delayed_work_sync(&rc->rsv_update_work);
+	flush_workqueue(rc->rsv_workq);
+
+	mutex_lock(&rc->rsvs_mutex);
+	list_for_each_entry_safe(rsv, t, &rc->reservations, rc_node) {
+		uwb_rsv_remove(rsv);
+	}
+	mutex_unlock(&rc->rsvs_mutex);
 }
 
 void uwb_rsv_init(struct uwb_rc *rc)
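The reworked uwb_rsv_remove_all() above is a two-phase teardown: the first pass only
quiesces each reservation (state set to NONE, timer stopped), pending deferred work is
cancelled and flushed, and only a second pass actually removes and frees the entries.
The sketch below is a user-space analog of that ordering, not UWB code; the names
(struct rsv, update_worker, remove_all) and the single worker thread standing in for the
reservation workqueue are assumptions.

/* User-space analog of the reworked uwb_rsv_remove_all(): quiesce every entry
 * and the deferred work that may still reference it, then free in a second pass. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct rsv {
	struct rsv *next;
	int state;			/* 0 stands in for UWB_RSV_STATE_NONE */
};

static struct rsv *reservations;
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

static void *update_worker(void *arg)
{
	/* Stands in for rsv_update_work: may look at entries, never frees them. */
	pthread_mutex_lock(&list_lock);
	for (struct rsv *r = reservations; r; r = r->next)
		if (r->state)
			printf("updating reservation in state %d\n", r->state);
	pthread_mutex_unlock(&list_lock);
	return NULL;
}

static void remove_all(pthread_t worker)
{
	struct rsv *r, *next;

	/* Pass 1: quiesce. Entries stay on the list so the worker stays safe. */
	pthread_mutex_lock(&list_lock);
	for (r = reservations; r; r = r->next)
		r->state = 0;
	pthread_mutex_unlock(&list_lock);

	/* "Flush" the deferred work before anything is freed. */
	pthread_join(worker, NULL);

	/* Pass 2: nothing else can reach the entries any more; free them. */
	pthread_mutex_lock(&list_lock);
	for (r = reservations; r; r = next) {
		next = r->next;
		free(r);
	}
	reservations = NULL;
	pthread_mutex_unlock(&list_lock);
}

int main(void)
{
	pthread_t worker;

	for (int i = 0; i < 3; i++) {
		struct rsv *r = calloc(1, sizeof(*r));
		r->state = i + 1;
		r->next = reservations;
		reservations = r;
	}
	pthread_create(&worker, NULL, update_worker, NULL);
	remove_all(worker);
	return 0;
}

Freeing only after the flush is what removes the use-after-free window that freeing
inside the first loop would leave open.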
diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c
index bef6b45e8a5..330aacbdec1 100644
--- a/drivers/virtio/virtio_pci.c
+++ b/drivers/virtio/virtio_pci.c
@@ -192,7 +192,7 @@ static irqreturn_t vp_interrupt(int irq, void *opaque)
 		drv = container_of(vp_dev->vdev.dev.driver,
 				   struct virtio_driver, driver);
 
-		if (drv->config_changed)
+		if (drv && drv->config_changed)
 			drv->config_changed(&vp_dev->vdev);
 	}
 
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 0942765cf8c..2aa283ab062 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -451,12 +451,13 @@ extern struct biovec_slab bvec_slabs[BIOVEC_NR_POOLS] __read_mostly;
 
 #ifdef CONFIG_HIGHMEM
 /*
- * remember to add offset! and never ever reenable interrupts between a
- * bvec_kmap_irq and bvec_kunmap_irq!!
+ * remember never ever reenable interrupts between a bvec_kmap_irq and
+ * bvec_kunmap_irq!
  *
  * This function MUST be inlined - it plays with the CPU interrupt flags.
  */
-static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
+static __always_inline char *bvec_kmap_irq(struct bio_vec *bvec,
+					   unsigned long *flags)
 {
 	unsigned long addr;
 
@@ -472,7 +473,8 @@ static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
 	return (char *) addr + bvec->bv_offset;
 }
 
-static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
+static __always_inline void bvec_kunmap_irq(char *buffer,
+					    unsigned long *flags)
 {
 	unsigned long ptr = (unsigned long) buffer & PAGE_MASK;
 
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index d08c4b8219a..dcaa0fd84b0 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -455,7 +455,7 @@ struct request_queue
 
 #define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |	\
 				 (1 << QUEUE_FLAG_CLUSTER) |	\
-				  1 << QUEUE_FLAG_STACKABLE)
+				 (1 << QUEUE_FLAG_STACKABLE))
 
 static inline int queue_is_locked(struct request_queue *q)
 {
diff --git a/include/linux/module.h b/include/linux/module.h
index 4f7ea12463d..f3b8329eb5b 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -219,11 +219,6 @@ void *__symbol_get_gpl(const char *symbol);
 
 #endif
 
-struct module_ref
-{
-	local_t count;
-} ____cacheline_aligned;
-
 enum module_state
 {
 	MODULE_STATE_LIVE,
@@ -344,8 +339,11 @@ struct module
 	/* Destruction function. */
 	void (*exit)(void);
 
-	/* Reference counts */
-	struct module_ref ref[NR_CPUS];
+#ifdef CONFIG_SMP
+	char *refptr;
+#else
+	local_t ref;
+#endif
 #endif
 };
 
 #ifndef MODULE_ARCH_INIT
@@ -395,13 +393,22 @@ void __symbol_put(const char *symbol);
 #define symbol_put(x) __symbol_put(MODULE_SYMBOL_PREFIX #x)
 void symbol_put_addr(void *addr);
 
+static inline local_t *__module_ref_addr(struct module *mod, int cpu)
+{
+#ifdef CONFIG_SMP
+	return (local_t *) (mod->refptr + per_cpu_offset(cpu));
+#else
+	return &mod->ref;
+#endif
+}
+
 /* Sometimes we know we already have a refcount, and it's easier not
    to handle the error case (which only happens with rmmod --wait). */
 static inline void __module_get(struct module *module)
 {
 	if (module) {
 		BUG_ON(module_refcount(module) == 0);
-		local_inc(&module->ref[get_cpu()].count);
+		local_inc(__module_ref_addr(module, get_cpu()));
 		put_cpu();
 	}
 }
@@ -413,7 +420,7 @@ static inline int try_module_get(struct module *module)
 	if (module) {
 		unsigned int cpu = get_cpu();
 		if (likely(module_is_live(module)))
-			local_inc(&module->ref[cpu].count);
+			local_inc(__module_ref_addr(module, cpu));
 		else
 			ret = 0;
 		put_cpu();
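The module.h change above keeps the split-counter idea that __module_ref_addr()
expresses: each CPU only ever increments its own counter, and module_refcount() (updated
in kernel/module.c below) sums them when a total is needed. The per-CPU storage itself
(percpu_modalloc()/per_cpu_offset()) has no user-space equivalent, so in the sketch
below a plain array indexed by a thread slot stands in for it; struct obj, obj_get() and
NTHREADS are illustrative names, not kernel API.

/* User-space analog of the split module refcount: per-"CPU" counters on the
 * fast path, a summing loop only when the total is actually asked for. */
#include <pthread.h>
#include <stdio.h>

#define NTHREADS 4

struct obj {
	long ref[NTHREADS];		/* analog of the per-CPU local_t counters */
};

static struct obj module_obj;

static void obj_get(struct obj *o, int cpu)
{
	/* Analog of local_inc(__module_ref_addr(mod, cpu)): each slot is
	 * touched by one thread only, so there is no contended counter. */
	__atomic_fetch_add(&o->ref[cpu], 1, __ATOMIC_RELAXED);
}

static long obj_refcount(struct obj *o)
{
	long total = 0;

	for (int cpu = 0; cpu < NTHREADS; cpu++)
		total += __atomic_load_n(&o->ref[cpu], __ATOMIC_RELAXED);
	return total;
}

static void *user(void *arg)
{
	int cpu = (int)(long)arg;

	for (int i = 0; i < 1000; i++)
		obj_get(&module_obj, cpu);
	return NULL;
}

int main(void)
{
	pthread_t tid[NTHREADS];

	for (long i = 0; i < NTHREADS; i++)
		pthread_create(&tid[i], NULL, user, (void *)i);
	for (int i = 0; i < NTHREADS; i++)
		pthread_join(tid[i], NULL);

	printf("refcount = %ld\n", obj_refcount(&module_obj));
	return 0;
}

What the patch changes is only where those counters live: dynamically allocated per-CPU
memory sized for the possible CPUs instead of a fixed NR_CPUS array inside struct module.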
diff --git a/kernel/module.c b/kernel/module.c
index e8b51d41dd7..ba22484a987 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -573,13 +573,13 @@ static char last_unloaded_module[MODULE_NAME_LEN+1];
 /* Init the unload section of the module. */
 static void module_unload_init(struct module *mod)
 {
-	unsigned int i;
+	int cpu;
 
 	INIT_LIST_HEAD(&mod->modules_which_use_me);
-	for (i = 0; i < NR_CPUS; i++)
-		local_set(&mod->ref[i].count, 0);
+	for_each_possible_cpu(cpu)
+		local_set(__module_ref_addr(mod, cpu), 0);
 	/* Hold reference count during initialization. */
-	local_set(&mod->ref[raw_smp_processor_id()].count, 1);
+	local_set(__module_ref_addr(mod, raw_smp_processor_id()), 1);
 	/* Backwards compatibility macros put refcount during init. */
 	mod->waiter = current;
 }
@@ -717,10 +717,11 @@ static int try_stop_module(struct module *mod, int flags, int *forced)
 
 unsigned int module_refcount(struct module *mod)
 {
-	unsigned int i, total = 0;
+	unsigned int total = 0;
+	int cpu;
 
-	for (i = 0; i < NR_CPUS; i++)
-		total += local_read(&mod->ref[i].count);
+	for_each_possible_cpu(cpu)
+		total += local_read(__module_ref_addr(mod, cpu));
 	return total;
 }
 EXPORT_SYMBOL(module_refcount);
@@ -894,7 +895,7 @@ void module_put(struct module *module)
 {
 	if (module) {
 		unsigned int cpu = get_cpu();
-		local_dec(&module->ref[cpu].count);
+		local_dec(__module_ref_addr(module, cpu));
 		/* Maybe they're waiting for us to drop reference? */
 		if (unlikely(!module_is_live(module)))
 			wake_up_process(module->waiter);
@@ -1464,7 +1465,10 @@ static void free_module(struct module *mod)
 	kfree(mod->args);
 	if (mod->percpu)
 		percpu_modfree(mod->percpu);
-
+#if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
+	if (mod->refptr)
+		percpu_modfree(mod->refptr);
+#endif
 	/* Free lock-classes: */
 	lockdep_free_key_range(mod->module_core, mod->core_size);
 
@@ -2011,6 +2015,14 @@ static noinline struct module *load_module(void __user *umod,
 	if (err < 0)
 		goto free_mod;
 
+#if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
+	mod->refptr = percpu_modalloc(sizeof(local_t), __alignof__(local_t),
+				      mod->name);
+	if (!mod->refptr) {
+		err = -ENOMEM;
+		goto free_mod;
+	}
+#endif
 	if (pcpuindex) {
 		/* We have a special allocation for this section. */
 		percpu = percpu_modalloc(sechdrs[pcpuindex].sh_size,
@@ -2018,7 +2030,7 @@ static noinline struct module *load_module(void __user *umod,
 					 mod->name);
 		if (!percpu) {
 			err = -ENOMEM;
-			goto free_mod;
+			goto free_percpu;
 		}
 		sechdrs[pcpuindex].sh_flags &= ~(unsigned long)SHF_ALLOC;
 		mod->percpu = percpu;
@@ -2282,6 +2294,9 @@ static noinline struct module *load_module(void __user *umod,
  free_percpu:
 	if (percpu)
 		percpu_modfree(percpu);
+#if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
+	percpu_modfree(mod->refptr);
+#endif
 free_mod:
 	kfree(args);
 free_hdr: