Diffstat (limited to 'drivers/s390/block/dasd.c')
| -rw-r--r-- | drivers/s390/block/dasd.c | 1980 |
1 file changed, 1663 insertions, 317 deletions
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index 2fd64e5a9ab..1eef0f58695 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c @@ -1,12 +1,10 @@ /* - * File...........: linux/drivers/s390/block/dasd.c * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com> * Horst Hummel <Horst.Hummel@de.ibm.com> * Carsten Otte <Cotte@de.ibm.com> * Martin Schwidefsky <schwidefsky@de.ibm.com> * Bugreports.to..: <Linux390@de.ibm.com> - * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2001 - * + * Copyright IBM Corp. 1999, 2009 */ #define KMSG_COMPONENT "dasd" @@ -18,14 +16,18 @@ #include <linux/ctype.h> #include <linux/major.h> #include <linux/slab.h> -#include <linux/buffer_head.h> #include <linux/hdreg.h> +#include <linux/async.h> +#include <linux/mutex.h> +#include <linux/debugfs.h> +#include <linux/seq_file.h> +#include <linux/vmalloc.h> #include <asm/ccwdev.h> #include <asm/ebcdic.h> #include <asm/idals.h> -#include <asm/todclk.h> #include <asm/itcw.h> +#include <asm/diag.h> /* This is ugly... */ #define PRINTK_HEADER "dasd:" @@ -40,12 +42,13 @@ * SECTION: exported variables of dasd.c */ debug_info_t *dasd_debug_area; +static struct dentry *dasd_debugfs_root_entry; struct dasd_discipline *dasd_diag_discipline_pointer; void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *); MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>"); MODULE_DESCRIPTION("Linux on S/390 DASD device driver," - " Copyright 2000 IBM Corporation"); + " Copyright IBM Corp. 2000"); MODULE_SUPPORTED_DEVICE("dasd"); MODULE_LICENSE("GPL"); @@ -60,9 +63,14 @@ static int dasd_flush_block_queue(struct dasd_block *); static void dasd_device_tasklet(struct dasd_device *); static void dasd_block_tasklet(struct dasd_block *); static void do_kick_device(struct work_struct *); +static void do_restore_device(struct work_struct *); +static void do_reload_device(struct work_struct *); static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *); static void dasd_device_timeout(unsigned long); static void dasd_block_timeout(unsigned long); +static void __dasd_process_erp(struct dasd_device *, struct dasd_ccw_req *); +static void dasd_profile_init(struct dasd_profile *, struct dentry *); +static void dasd_profile_exit(struct dasd_profile *); /* * SECTION: Operations on the device structure. @@ -70,6 +78,7 @@ static void dasd_block_timeout(unsigned long); static wait_queue_head_t dasd_init_waitq; static wait_queue_head_t dasd_flush_wq; static wait_queue_head_t generic_waitq; +static wait_queue_head_t shutdown_waitq; /* * Allocate memory for a new device structure. 
@@ -108,9 +117,12 @@ struct dasd_device *dasd_alloc_device(void) device->timer.function = dasd_device_timeout; device->timer.data = (unsigned long) device; INIT_WORK(&device->kick_work, do_kick_device); + INIT_WORK(&device->restore_device, do_restore_device); + INIT_WORK(&device->reload_device, do_reload_device); device->state = DASD_STATE_NEW; device->target = DASD_STATE_NEW; - + mutex_init(&device->state_mutex); + spin_lock_init(&device->profile.lock); return device; } @@ -148,6 +160,7 @@ struct dasd_block *dasd_alloc_block(void) init_timer(&block->timer); block->timer.function = dasd_block_timeout; block->timer.data = (unsigned long) block; + spin_lock_init(&block->profile.lock); return block; } @@ -211,19 +224,44 @@ static int dasd_state_known_to_new(struct dasd_device *device) return 0; } +static struct dentry *dasd_debugfs_setup(const char *name, + struct dentry *base_dentry) +{ + struct dentry *pde; + + if (!base_dentry) + return NULL; + pde = debugfs_create_dir(name, base_dentry); + if (!pde || IS_ERR(pde)) + return NULL; + return pde; +} + /* * Request the irq line for the device. */ static int dasd_state_known_to_basic(struct dasd_device *device) { - int rc; + struct dasd_block *block = device->block; + int rc = 0; /* Allocate and register gendisk structure. */ - if (device->block) { - rc = dasd_gendisk_alloc(device->block); + if (block) { + rc = dasd_gendisk_alloc(block); if (rc) return rc; - } + block->debugfs_dentry = + dasd_debugfs_setup(block->gdp->disk_name, + dasd_debugfs_root_entry); + dasd_profile_init(&block->profile, block->debugfs_dentry); + if (dasd_global_profile_level == DASD_PROFILE_ON) + dasd_profile_on(&device->block->profile); + } + device->debugfs_dentry = + dasd_debugfs_setup(dev_name(&device->cdev->dev), + dasd_debugfs_root_entry); + dasd_profile_init(&device->profile, device->debugfs_dentry); + /* register 'device' debug area, used for all DBF_DEV_XXX calls */ device->debug_area = debug_register(dev_name(&device->cdev->dev), 4, 1, 8 * sizeof(long)); @@ -232,7 +270,8 @@ static int dasd_state_known_to_basic(struct dasd_device *device) DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created"); device->state = DASD_STATE_BASIC; - return 0; + + return rc; } /* @@ -241,7 +280,11 @@ static int dasd_state_known_to_basic(struct dasd_device *device) static int dasd_state_basic_to_known(struct dasd_device *device) { int rc; + if (device->block) { + dasd_profile_exit(&device->block->profile); + if (device->block->debugfs_dentry) + debugfs_remove(device->block->debugfs_dentry); dasd_gendisk_free(device->block); dasd_block_clear_timer(device->block); } @@ -249,6 +292,9 @@ static int dasd_state_basic_to_known(struct dasd_device *device) if (rc) return rc; dasd_device_clear_timer(device); + dasd_profile_exit(&device->profile); + if (device->debugfs_dentry) + debugfs_remove(device->debugfs_dentry); DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device); if (device->debug_area != NULL) { @@ -285,8 +331,10 @@ static int dasd_state_basic_to_ready(struct dasd_device *device) if (block->base->discipline->do_analysis != NULL) rc = block->base->discipline->do_analysis(block); if (rc) { - if (rc != -EAGAIN) + if (rc != -EAGAIN) { device->state = DASD_STATE_UNFMT; + goto out; + } return rc; } dasd_setup_queue(block); @@ -294,14 +342,29 @@ static int dasd_state_basic_to_ready(struct dasd_device *device) block->blocks << block->s2b_shift); device->state = DASD_STATE_READY; rc = dasd_scan_partitions(block); - if (rc) + if (rc) { device->state = DASD_STATE_BASIC; + return 
rc; + } } else { device->state = DASD_STATE_READY; } +out: + if (device->discipline->basic_to_ready) + rc = device->discipline->basic_to_ready(device); return rc; } +static inline +int _wait_for_empty_queues(struct dasd_device *device) +{ + if (device->block) + return list_empty(&device->ccw_queue) && + list_empty(&device->block->ccw_queue); + else + return list_empty(&device->ccw_queue); +} + /* * Remove device from block device layer. Destroy dirty buffers. * Forget format information. Check if the target level is basic @@ -311,6 +374,11 @@ static int dasd_state_ready_to_basic(struct dasd_device *device) { int rc; + if (device->discipline->ready_to_basic) { + rc = device->discipline->ready_to_basic(device); + if (rc) + return rc; + } device->state = DASD_STATE_BASIC; if (device->block) { struct dasd_block *block = device->block; @@ -319,8 +387,8 @@ static int dasd_state_ready_to_basic(struct dasd_device *device) device->state = DASD_STATE_READY; return rc; } - dasd_destroy_partitions(block); dasd_flush_request_queue(block); + dasd_destroy_partitions(block); block->blocks = 0; block->bp_block = 0; block->s2b_shift = 0; @@ -345,19 +413,18 @@ static int dasd_state_unfmt_to_basic(struct dasd_device *device) static int dasd_state_ready_to_online(struct dasd_device * device) { - int rc; struct gendisk *disk; struct disk_part_iter piter; struct hd_struct *part; - if (device->discipline->ready_to_online) { - rc = device->discipline->ready_to_online(device); - if (rc) - return rc; - } device->state = DASD_STATE_ONLINE; if (device->block) { dasd_schedule_block_bh(device->block); + if ((device->features & DASD_FEATURE_USERAW)) { + disk = device->block->gdp; + kobject_uevent(&disk_to_dev(disk)->kobj, KOBJ_CHANGE); + return 0; + } disk = device->block->bdev->bd_disk; disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0); while ((part = disk_part_iter_next(&piter))) @@ -382,8 +449,9 @@ static int dasd_state_online_to_ready(struct dasd_device *device) if (rc) return rc; } + device->state = DASD_STATE_READY; - if (device->block) { + if (device->block && !(device->features & DASD_FEATURE_USERAW)) { disk = device->block->bdev->bd_disk; disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0); while ((part = disk_part_iter_next(&piter))) @@ -468,7 +536,7 @@ static int dasd_decrease_state(struct dasd_device *device) */ static void dasd_change_state(struct dasd_device *device) { - int rc; + int rc; if (device->state == device->target) /* Already where we want to go today... 
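/*
 * Illustrative sketch, not part of the patch: the dasd_debugfs_setup()
 * helper introduced above folds both failure modes of debugfs_create_dir()
 * (a NULL return and an ERR_PTR return, depending on kernel configuration)
 * into NULL, so every later consumer needs only a single NULL check. A
 * minimal kernel-context version of the same guard, with hypothetical
 * example_* names:
 */
#include <linux/debugfs.h>
#include <linux/err.h>

static struct dentry *example_debugfs_dir(const char *name,
                                          struct dentry *parent)
{
        struct dentry *pde;

        if (!parent)            /* the driver's debugfs root is absent */
                return NULL;
        pde = debugfs_create_dir(name, parent);
        if (!pde || IS_ERR(pde))
                return NULL;    /* callers treat NULL as "no debugfs" */
        return pde;
}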
*/ @@ -477,14 +545,16 @@ static void dasd_change_state(struct dasd_device *device) rc = dasd_increase_state(device); else rc = dasd_decrease_state(device); - if (rc && rc != -EAGAIN) - device->target = device->state; - - if (device->state == device->target) - wake_up(&dasd_init_waitq); + if (rc == -EAGAIN) + return; + if (rc) + device->target = device->state; /* let user-space know that the device status changed */ kobject_uevent(&device->cdev->dev.kobj, KOBJ_CHANGE); + + if (device->state == device->target) + wake_up(&dasd_init_waitq); } /* @@ -496,7 +566,9 @@ static void dasd_change_state(struct dasd_device *device) static void do_kick_device(struct work_struct *work) { struct dasd_device *device = container_of(work, struct dasd_device, kick_work); + mutex_lock(&device->state_mutex); dasd_change_state(device); + mutex_unlock(&device->state_mutex); dasd_schedule_device_bh(device); dasd_put_device(device); } @@ -509,20 +581,63 @@ void dasd_kick_device(struct dasd_device *device) } /* + * dasd_reload_device will schedule a call do do_reload_device to the kernel + * event daemon. + */ +static void do_reload_device(struct work_struct *work) +{ + struct dasd_device *device = container_of(work, struct dasd_device, + reload_device); + device->discipline->reload(device); + dasd_put_device(device); +} + +void dasd_reload_device(struct dasd_device *device) +{ + dasd_get_device(device); + /* queue call to dasd_reload_device to the kernel event daemon. */ + schedule_work(&device->reload_device); +} +EXPORT_SYMBOL(dasd_reload_device); + +/* + * dasd_restore_device will schedule a call do do_restore_device to the kernel + * event daemon. + */ +static void do_restore_device(struct work_struct *work) +{ + struct dasd_device *device = container_of(work, struct dasd_device, + restore_device); + device->cdev->drv->restore(device->cdev); + dasd_put_device(device); +} + +void dasd_restore_device(struct dasd_device *device) +{ + dasd_get_device(device); + /* queue call to dasd_restore_device to the kernel event daemon. */ + schedule_work(&device->restore_device); +} + +/* * Set the target state for a device and starts the state change. */ void dasd_set_target_state(struct dasd_device *device, int target) { + dasd_get_device(device); + mutex_lock(&device->state_mutex); /* If we are in probeonly mode stop at DASD_STATE_READY. */ if (dasd_probeonly && target > DASD_STATE_READY) target = DASD_STATE_READY; if (device->target != target) { - if (device->state == target) + if (device->state == target) wake_up(&dasd_init_waitq); device->target = target; } if (device->state != device->target) dasd_change_state(device); + mutex_unlock(&device->state_mutex); + dasd_put_device(device); } /* @@ -541,26 +656,22 @@ void dasd_enable_device(struct dasd_device *device) dasd_set_target_state(device, DASD_STATE_NEW); /* Now wait for the devices to come up. */ wait_event(dasd_init_waitq, _wait_for_device(device)); + + dasd_reload_device(device); + if (device->discipline->kick_validate) + device->discipline->kick_validate(device); } /* * SECTION: device operation (interrupt handler, start i/o, term i/o ...) */ -#ifdef CONFIG_DASD_PROFILE -struct dasd_profile_info_t dasd_global_profile; -unsigned int dasd_profile_level = DASD_PROFILE_OFF; +unsigned int dasd_global_profile_level = DASD_PROFILE_OFF; -/* - * Increments counter in global and local profiling structures. 
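/*
 * Illustrative sketch, not part of the patch: dasd_reload_device() and
 * dasd_restore_device() above use the standard deferred-work idiom -- take
 * a reference, queue a work item to the shared kernel workqueue, and have
 * the worker recover its owning device with container_of() before dropping
 * the reference. Schematic version with hypothetical example_* names; the
 * commented get/put helpers stand in for dasd_get_device()/dasd_put_device():
 */
#include <linux/workqueue.h>

struct example_device {
        struct work_struct reload_work;
        /* ... reference count, discipline pointer, ... */
};

static void example_do_reload(struct work_struct *work)
{
        struct example_device *dev =
                container_of(work, struct example_device, reload_work);

        /* ... perform the reload ... */
        /* example_put_device(dev);  -- drop the reference taken below */
}

static void example_schedule_reload(struct example_device *dev)
{
        /* example_get_device(dev); -- keep dev alive until the worker runs */
        schedule_work(&dev->reload_work);
}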
- */ -#define dasd_profile_counter(value, counter, block) \ -{ \ - int index; \ - for (index = 0; index < 31 && value >> (2+index); index++); \ - dasd_global_profile.counter[index]++; \ - block->profile.counter[index]++; \ -} +#ifdef CONFIG_DASD_PROFILE +struct dasd_profile_info dasd_global_profile_data; +static struct dentry *dasd_global_profile_dentry; +static struct dentry *dasd_debugfs_global_entry; /* * Add profiling information for cqr before execution. @@ -571,33 +682,125 @@ static void dasd_profile_start(struct dasd_block *block, { struct list_head *l; unsigned int counter; - - if (dasd_profile_level != DASD_PROFILE_ON) - return; + struct dasd_device *device; /* count the length of the chanq for statistics */ counter = 0; - list_for_each(l, &block->ccw_queue) - if (++counter >= 31) - break; - dasd_global_profile.dasd_io_nr_req[counter]++; - block->profile.dasd_io_nr_req[counter]++; + if (dasd_global_profile_level || block->profile.data) + list_for_each(l, &block->ccw_queue) + if (++counter >= 31) + break; + + if (dasd_global_profile_level) { + dasd_global_profile_data.dasd_io_nr_req[counter]++; + if (rq_data_dir(req) == READ) + dasd_global_profile_data.dasd_read_nr_req[counter]++; + } + + spin_lock(&block->profile.lock); + if (block->profile.data) { + block->profile.data->dasd_io_nr_req[counter]++; + if (rq_data_dir(req) == READ) + block->profile.data->dasd_read_nr_req[counter]++; + } + spin_unlock(&block->profile.lock); + + /* + * We count the request for the start device, even though it may run on + * some other device due to error recovery. This way we make sure that + * we count each request only once. + */ + device = cqr->startdev; + if (device->profile.data) { + counter = 1; /* request is not yet queued on the start device */ + list_for_each(l, &device->ccw_queue) + if (++counter >= 31) + break; + } + spin_lock(&device->profile.lock); + if (device->profile.data) { + device->profile.data->dasd_io_nr_req[counter]++; + if (rq_data_dir(req) == READ) + device->profile.data->dasd_read_nr_req[counter]++; + } + spin_unlock(&device->profile.lock); } /* * Add profiling information for cqr after execution. 
*/ + +#define dasd_profile_counter(value, index) \ +{ \ + for (index = 0; index < 31 && value >> (2+index); index++) \ + ; \ +} + +static void dasd_profile_end_add_data(struct dasd_profile_info *data, + int is_alias, + int is_tpm, + int is_read, + long sectors, + int sectors_ind, + int tottime_ind, + int tottimeps_ind, + int strtime_ind, + int irqtime_ind, + int irqtimeps_ind, + int endtime_ind) +{ + /* in case of an overflow, reset the whole profile */ + if (data->dasd_io_reqs == UINT_MAX) { + memset(data, 0, sizeof(*data)); + getnstimeofday(&data->starttod); + } + data->dasd_io_reqs++; + data->dasd_io_sects += sectors; + if (is_alias) + data->dasd_io_alias++; + if (is_tpm) + data->dasd_io_tpm++; + + data->dasd_io_secs[sectors_ind]++; + data->dasd_io_times[tottime_ind]++; + data->dasd_io_timps[tottimeps_ind]++; + data->dasd_io_time1[strtime_ind]++; + data->dasd_io_time2[irqtime_ind]++; + data->dasd_io_time2ps[irqtimeps_ind]++; + data->dasd_io_time3[endtime_ind]++; + + if (is_read) { + data->dasd_read_reqs++; + data->dasd_read_sects += sectors; + if (is_alias) + data->dasd_read_alias++; + if (is_tpm) + data->dasd_read_tpm++; + data->dasd_read_secs[sectors_ind]++; + data->dasd_read_times[tottime_ind]++; + data->dasd_read_time1[strtime_ind]++; + data->dasd_read_time2[irqtime_ind]++; + data->dasd_read_time3[endtime_ind]++; + } +} + static void dasd_profile_end(struct dasd_block *block, struct dasd_ccw_req *cqr, struct request *req) { long strtime, irqtime, endtime, tottime; /* in microseconds */ long tottimeps, sectors; + struct dasd_device *device; + int sectors_ind, tottime_ind, tottimeps_ind, strtime_ind; + int irqtime_ind, irqtimeps_ind, endtime_ind; - if (dasd_profile_level != DASD_PROFILE_ON) + device = cqr->startdev; + if (!(dasd_global_profile_level || + block->profile.data || + device->profile.data)) return; - sectors = req->nr_sectors; + sectors = blk_rq_sectors(req); if (!cqr->buildclk || !cqr->startclk || !cqr->stopclk || !cqr->endclk || !sectors) @@ -609,29 +812,392 @@ static void dasd_profile_end(struct dasd_block *block, tottime = ((cqr->endclk - cqr->buildclk) >> 12); tottimeps = tottime / sectors; - if (!dasd_global_profile.dasd_io_reqs) - memset(&dasd_global_profile, 0, - sizeof(struct dasd_profile_info_t)); - dasd_global_profile.dasd_io_reqs++; - dasd_global_profile.dasd_io_sects += sectors; - - if (!block->profile.dasd_io_reqs) - memset(&block->profile, 0, - sizeof(struct dasd_profile_info_t)); - block->profile.dasd_io_reqs++; - block->profile.dasd_io_sects += sectors; - - dasd_profile_counter(sectors, dasd_io_secs, block); - dasd_profile_counter(tottime, dasd_io_times, block); - dasd_profile_counter(tottimeps, dasd_io_timps, block); - dasd_profile_counter(strtime, dasd_io_time1, block); - dasd_profile_counter(irqtime, dasd_io_time2, block); - dasd_profile_counter(irqtime / sectors, dasd_io_time2ps, block); - dasd_profile_counter(endtime, dasd_io_time3, block); + dasd_profile_counter(sectors, sectors_ind); + dasd_profile_counter(tottime, tottime_ind); + dasd_profile_counter(tottimeps, tottimeps_ind); + dasd_profile_counter(strtime, strtime_ind); + dasd_profile_counter(irqtime, irqtime_ind); + dasd_profile_counter(irqtime / sectors, irqtimeps_ind); + dasd_profile_counter(endtime, endtime_ind); + + if (dasd_global_profile_level) { + dasd_profile_end_add_data(&dasd_global_profile_data, + cqr->startdev != block->base, + cqr->cpmode == 1, + rq_data_dir(req) == READ, + sectors, sectors_ind, tottime_ind, + tottimeps_ind, strtime_ind, + irqtime_ind, irqtimeps_ind, + endtime_ind); 
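/*
 * Illustrative sketch, not part of the patch: the dasd_profile_counter()
 * macro above maps a value to one of 32 logarithmic histogram buckets --
 * bucket 0 holds values below 4, bucket i holds values in
 * [2^(i+1), 2^(i+2)), and bucket 31 absorbs everything larger. A standalone
 * C demonstration of the same computation:
 */
#include <stdio.h>

static int bucket_index(unsigned long long value)
{
        int index;

        for (index = 0; index < 31 && value >> (2 + index); index++)
                ;
        return index;
}

int main(void)
{
        /* 3 -> bucket 0; 100 -> bucket 5 (64 <= 100 < 128); 2^40 -> 31 */
        printf("%d %d %d\n", bucket_index(3), bucket_index(100),
               bucket_index(1ULL << 40));
        return 0;
}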
+ } + + spin_lock(&block->profile.lock); + if (block->profile.data) + dasd_profile_end_add_data(block->profile.data, + cqr->startdev != block->base, + cqr->cpmode == 1, + rq_data_dir(req) == READ, + sectors, sectors_ind, tottime_ind, + tottimeps_ind, strtime_ind, + irqtime_ind, irqtimeps_ind, + endtime_ind); + spin_unlock(&block->profile.lock); + + spin_lock(&device->profile.lock); + if (device->profile.data) + dasd_profile_end_add_data(device->profile.data, + cqr->startdev != block->base, + cqr->cpmode == 1, + rq_data_dir(req) == READ, + sectors, sectors_ind, tottime_ind, + tottimeps_ind, strtime_ind, + irqtime_ind, irqtimeps_ind, + endtime_ind); + spin_unlock(&device->profile.lock); +} + +void dasd_profile_reset(struct dasd_profile *profile) +{ + struct dasd_profile_info *data; + + spin_lock_bh(&profile->lock); + data = profile->data; + if (!data) { + spin_unlock_bh(&profile->lock); + return; + } + memset(data, 0, sizeof(*data)); + getnstimeofday(&data->starttod); + spin_unlock_bh(&profile->lock); +} + +void dasd_global_profile_reset(void) +{ + memset(&dasd_global_profile_data, 0, sizeof(dasd_global_profile_data)); + getnstimeofday(&dasd_global_profile_data.starttod); +} + +int dasd_profile_on(struct dasd_profile *profile) +{ + struct dasd_profile_info *data; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + spin_lock_bh(&profile->lock); + if (profile->data) { + spin_unlock_bh(&profile->lock); + kfree(data); + return 0; + } + getnstimeofday(&data->starttod); + profile->data = data; + spin_unlock_bh(&profile->lock); + return 0; +} + +void dasd_profile_off(struct dasd_profile *profile) +{ + spin_lock_bh(&profile->lock); + kfree(profile->data); + profile->data = NULL; + spin_unlock_bh(&profile->lock); +} + +char *dasd_get_user_string(const char __user *user_buf, size_t user_len) +{ + char *buffer; + + buffer = vmalloc(user_len + 1); + if (buffer == NULL) + return ERR_PTR(-ENOMEM); + if (copy_from_user(buffer, user_buf, user_len) != 0) { + vfree(buffer); + return ERR_PTR(-EFAULT); + } + /* got the string, now strip linefeed. 
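/*
 * Illustrative sketch, not part of the patch: dasd_profile_on() above
 * allocates its buffer before taking the profile lock (kzalloc may sleep,
 * a bh-disabling spinlock section may not) and simply frees the buffer
 * again if a concurrent caller already enabled profiling. Schematic
 * version with hypothetical example_* names:
 */
#include <linux/slab.h>
#include <linux/spinlock.h>

struct example_profile {
        spinlock_t lock;        /* assume spin_lock_init() was done */
        void *data;             /* NULL while profiling is off */
};

static int example_profile_on(struct example_profile *p, size_t size)
{
        void *data = kzalloc(size, GFP_KERNEL); /* may sleep: do it unlocked */

        if (!data)
                return -ENOMEM;
        spin_lock_bh(&p->lock);
        if (p->data) {          /* lost the race: already enabled */
                spin_unlock_bh(&p->lock);
                kfree(data);
                return 0;
        }
        p->data = data;
        spin_unlock_bh(&p->lock);
        return 0;
}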
*/ + if (buffer[user_len - 1] == '\n') + buffer[user_len - 1] = 0; + else + buffer[user_len] = 0; + return buffer; +} + +static ssize_t dasd_stats_write(struct file *file, + const char __user *user_buf, + size_t user_len, loff_t *pos) +{ + char *buffer, *str; + int rc; + struct seq_file *m = (struct seq_file *)file->private_data; + struct dasd_profile *prof = m->private; + + if (user_len > 65536) + user_len = 65536; + buffer = dasd_get_user_string(user_buf, user_len); + if (IS_ERR(buffer)) + return PTR_ERR(buffer); + + str = skip_spaces(buffer); + rc = user_len; + if (strncmp(str, "reset", 5) == 0) { + dasd_profile_reset(prof); + } else if (strncmp(str, "on", 2) == 0) { + rc = dasd_profile_on(prof); + if (!rc) + rc = user_len; + } else if (strncmp(str, "off", 3) == 0) { + dasd_profile_off(prof); + } else + rc = -EINVAL; + vfree(buffer); + return rc; +} + +static void dasd_stats_array(struct seq_file *m, unsigned int *array) +{ + int i; + + for (i = 0; i < 32; i++) + seq_printf(m, "%u ", array[i]); + seq_putc(m, '\n'); +} + +static void dasd_stats_seq_print(struct seq_file *m, + struct dasd_profile_info *data) +{ + seq_printf(m, "start_time %ld.%09ld\n", + data->starttod.tv_sec, data->starttod.tv_nsec); + seq_printf(m, "total_requests %u\n", data->dasd_io_reqs); + seq_printf(m, "total_sectors %u\n", data->dasd_io_sects); + seq_printf(m, "total_pav %u\n", data->dasd_io_alias); + seq_printf(m, "total_hpf %u\n", data->dasd_io_tpm); + seq_printf(m, "histogram_sectors "); + dasd_stats_array(m, data->dasd_io_secs); + seq_printf(m, "histogram_io_times "); + dasd_stats_array(m, data->dasd_io_times); + seq_printf(m, "histogram_io_times_weighted "); + dasd_stats_array(m, data->dasd_io_timps); + seq_printf(m, "histogram_time_build_to_ssch "); + dasd_stats_array(m, data->dasd_io_time1); + seq_printf(m, "histogram_time_ssch_to_irq "); + dasd_stats_array(m, data->dasd_io_time2); + seq_printf(m, "histogram_time_ssch_to_irq_weighted "); + dasd_stats_array(m, data->dasd_io_time2ps); + seq_printf(m, "histogram_time_irq_to_end "); + dasd_stats_array(m, data->dasd_io_time3); + seq_printf(m, "histogram_ccw_queue_length "); + dasd_stats_array(m, data->dasd_io_nr_req); + seq_printf(m, "total_read_requests %u\n", data->dasd_read_reqs); + seq_printf(m, "total_read_sectors %u\n", data->dasd_read_sects); + seq_printf(m, "total_read_pav %u\n", data->dasd_read_alias); + seq_printf(m, "total_read_hpf %u\n", data->dasd_read_tpm); + seq_printf(m, "histogram_read_sectors "); + dasd_stats_array(m, data->dasd_read_secs); + seq_printf(m, "histogram_read_times "); + dasd_stats_array(m, data->dasd_read_times); + seq_printf(m, "histogram_read_time_build_to_ssch "); + dasd_stats_array(m, data->dasd_read_time1); + seq_printf(m, "histogram_read_time_ssch_to_irq "); + dasd_stats_array(m, data->dasd_read_time2); + seq_printf(m, "histogram_read_time_irq_to_end "); + dasd_stats_array(m, data->dasd_read_time3); + seq_printf(m, "histogram_read_ccw_queue_length "); + dasd_stats_array(m, data->dasd_read_nr_req); +} + +static int dasd_stats_show(struct seq_file *m, void *v) +{ + struct dasd_profile *profile; + struct dasd_profile_info *data; + + profile = m->private; + spin_lock_bh(&profile->lock); + data = profile->data; + if (!data) { + spin_unlock_bh(&profile->lock); + seq_printf(m, "disabled\n"); + return 0; + } + dasd_stats_seq_print(m, data); + spin_unlock_bh(&profile->lock); + return 0; +} + +static int dasd_stats_open(struct inode *inode, struct file *file) +{ + struct dasd_profile *profile = inode->i_private; + return 
single_open(file, dasd_stats_show, profile); +} + +static const struct file_operations dasd_stats_raw_fops = { + .owner = THIS_MODULE, + .open = dasd_stats_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .write = dasd_stats_write, +}; + +static ssize_t dasd_stats_global_write(struct file *file, + const char __user *user_buf, + size_t user_len, loff_t *pos) +{ + char *buffer, *str; + ssize_t rc; + + if (user_len > 65536) + user_len = 65536; + buffer = dasd_get_user_string(user_buf, user_len); + if (IS_ERR(buffer)) + return PTR_ERR(buffer); + str = skip_spaces(buffer); + rc = user_len; + if (strncmp(str, "reset", 5) == 0) { + dasd_global_profile_reset(); + } else if (strncmp(str, "on", 2) == 0) { + dasd_global_profile_reset(); + dasd_global_profile_level = DASD_PROFILE_GLOBAL_ONLY; + } else if (strncmp(str, "off", 3) == 0) { + dasd_global_profile_level = DASD_PROFILE_OFF; + } else + rc = -EINVAL; + vfree(buffer); + return rc; +} + +static int dasd_stats_global_show(struct seq_file *m, void *v) +{ + if (!dasd_global_profile_level) { + seq_printf(m, "disabled\n"); + return 0; + } + dasd_stats_seq_print(m, &dasd_global_profile_data); + return 0; +} + +static int dasd_stats_global_open(struct inode *inode, struct file *file) +{ + return single_open(file, dasd_stats_global_show, NULL); +} + +static const struct file_operations dasd_stats_global_fops = { + .owner = THIS_MODULE, + .open = dasd_stats_global_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .write = dasd_stats_global_write, +}; + +static void dasd_profile_init(struct dasd_profile *profile, + struct dentry *base_dentry) +{ + umode_t mode; + struct dentry *pde; + + if (!base_dentry) + return; + profile->dentry = NULL; + profile->data = NULL; + mode = (S_IRUSR | S_IWUSR | S_IFREG); + pde = debugfs_create_file("statistics", mode, base_dentry, + profile, &dasd_stats_raw_fops); + if (pde && !IS_ERR(pde)) + profile->dentry = pde; + return; +} + +static void dasd_profile_exit(struct dasd_profile *profile) +{ + dasd_profile_off(profile); + if (profile->dentry) { + debugfs_remove(profile->dentry); + profile->dentry = NULL; + } } + +static void dasd_statistics_removeroot(void) +{ + dasd_global_profile_level = DASD_PROFILE_OFF; + if (dasd_global_profile_dentry) { + debugfs_remove(dasd_global_profile_dentry); + dasd_global_profile_dentry = NULL; + } + if (dasd_debugfs_global_entry) + debugfs_remove(dasd_debugfs_global_entry); + if (dasd_debugfs_root_entry) + debugfs_remove(dasd_debugfs_root_entry); +} + +static void dasd_statistics_createroot(void) +{ + umode_t mode; + struct dentry *pde; + + dasd_debugfs_root_entry = NULL; + dasd_debugfs_global_entry = NULL; + dasd_global_profile_dentry = NULL; + pde = debugfs_create_dir("dasd", NULL); + if (!pde || IS_ERR(pde)) + goto error; + dasd_debugfs_root_entry = pde; + pde = debugfs_create_dir("global", dasd_debugfs_root_entry); + if (!pde || IS_ERR(pde)) + goto error; + dasd_debugfs_global_entry = pde; + + mode = (S_IRUSR | S_IWUSR | S_IFREG); + pde = debugfs_create_file("statistics", mode, dasd_debugfs_global_entry, + NULL, &dasd_stats_global_fops); + if (!pde || IS_ERR(pde)) + goto error; + dasd_global_profile_dentry = pde; + return; + +error: + DBF_EVENT(DBF_ERR, "%s", + "Creation of the dasd debugfs interface failed"); + dasd_statistics_removeroot(); + return; +} + #else #define dasd_profile_start(block, cqr, req) do {} while (0) #define dasd_profile_end(block, cqr, req) do {} while (0) + +static void dasd_statistics_createroot(void) +{ + 
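/*
 * Illustrative sketch, not part of the patch: both write handlers above
 * (dasd_stats_write and dasd_stats_global_write) accept the commands
 * "reset", "on" and "off" after skip_spaces() and after
 * dasd_get_user_string() has stripped a trailing linefeed. A standalone
 * userspace analogue of the parsing:
 */
#include <ctype.h>
#include <stdio.h>
#include <string.h>

static const char *parse_stats_cmd(char *buf)
{
        size_t len = strlen(buf);

        if (len && buf[len - 1] == '\n')
                buf[len - 1] = '\0';            /* strip linefeed */
        while (isspace((unsigned char)*buf))
                buf++;                          /* like skip_spaces() */
        if (strncmp(buf, "reset", 5) == 0)
                return "reset";
        if (strncmp(buf, "on", 2) == 0)
                return "on";
        if (strncmp(buf, "off", 3) == 0)
                return "off";
        return NULL;                            /* handler returns -EINVAL */
}

int main(void)
{
        char cmd[] = "  on\n";

        puts(parse_stats_cmd(cmd));             /* prints "on" */
        return 0;
}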
return; +} + +static void dasd_statistics_removeroot(void) +{ + return; +} + +int dasd_stats_generic_show(struct seq_file *m, void *v) +{ + seq_printf(m, "Statistics are not activated in this kernel\n"); + return 0; +} + +static void dasd_profile_init(struct dasd_profile *profile, + struct dentry *base_dentry) +{ + return; +} + +static void dasd_profile_exit(struct dasd_profile *profile) +{ + return; +} + +int dasd_profile_on(struct dasd_profile *profile) +{ + return 0; +} + #endif /* CONFIG_DASD_PROFILE */ /* @@ -641,14 +1207,14 @@ static void dasd_profile_end(struct dasd_block *block, * memory and 2) dasd_smalloc_request uses the static ccw memory * that gets allocated for each device. */ -struct dasd_ccw_req *dasd_kmalloc_request(char *magic, int cplength, +struct dasd_ccw_req *dasd_kmalloc_request(int magic, int cplength, int datasize, struct dasd_device *device) { struct dasd_ccw_req *cqr; /* Sanity checks */ - BUG_ON( magic == NULL || datasize > PAGE_SIZE || + BUG_ON(datasize > PAGE_SIZE || (cplength*sizeof(struct ccw1)) > PAGE_SIZE); cqr = kzalloc(sizeof(struct dasd_ccw_req), GFP_ATOMIC); @@ -672,14 +1238,13 @@ struct dasd_ccw_req *dasd_kmalloc_request(char *magic, int cplength, return ERR_PTR(-ENOMEM); } } - strncpy((char *) &cqr->magic, magic, 4); - ASCEBC((char *) &cqr->magic, 4); + cqr->magic = magic; set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); dasd_get_device(device); return cqr; } -struct dasd_ccw_req *dasd_smalloc_request(char *magic, int cplength, +struct dasd_ccw_req *dasd_smalloc_request(int magic, int cplength, int datasize, struct dasd_device *device) { @@ -688,10 +1253,6 @@ struct dasd_ccw_req *dasd_smalloc_request(char *magic, int cplength, char *data; int size; - /* Sanity checks */ - BUG_ON( magic == NULL || datasize > PAGE_SIZE || - (cplength*sizeof(struct ccw1)) > PAGE_SIZE); - size = (sizeof(struct dasd_ccw_req) + 7L) & -8L; if (cplength > 0) size += cplength * sizeof(struct ccw1); @@ -716,8 +1277,7 @@ struct dasd_ccw_req *dasd_smalloc_request(char *magic, int cplength, cqr->data = data; memset(cqr->data, 0, datasize); } - strncpy((char *) &cqr->magic, magic, 4); - ASCEBC((char *) &cqr->magic, 4); + cqr->magic = magic; set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); dasd_get_device(device); return cqr; @@ -798,9 +1358,8 @@ int dasd_term_IO(struct dasd_ccw_req *cqr) rc = ccw_device_clear(device->cdev, (long) cqr); switch (rc) { case 0: /* termination successful */ - cqr->retries--; cqr->status = DASD_CQR_CLEAR_PENDING; - cqr->stopclk = get_clock(); + cqr->stopclk = get_tod_clock(); cqr->starttime = 0; DBF_DEV_EVENT(DBF_DEBUG, device, "terminate cqr %p successful", @@ -845,9 +1404,21 @@ int dasd_start_IO(struct dasd_ccw_req *cqr) /* Check the cqr */ rc = dasd_check_cqr(cqr); - if (rc) + if (rc) { + cqr->intrc = rc; return rc; + } device = (struct dasd_device *) cqr->startdev; + if (((cqr->block && + test_bit(DASD_FLAG_LOCK_STOLEN, &cqr->block->base->flags)) || + test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags)) && + !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) { + DBF_DEV_EVENT(DBF_DEBUG, device, "start_IO: return request %p " + "because of stolen lock", cqr); + cqr->status = DASD_CQR_ERROR; + cqr->intrc = -EPERM; + return -EPERM; + } if (cqr->retries < 0) { /* internal error 14 - start_IO run out of retries */ sprintf(errorstring, "14 %p", cqr); @@ -856,9 +1427,14 @@ int dasd_start_IO(struct dasd_ccw_req *cqr) cqr->status = DASD_CQR_ERROR; return -EIO; } - cqr->startclk = get_clock(); + cqr->startclk = get_tod_clock(); cqr->starttime = jiffies; cqr->retries--; + if 
(!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) { + cqr->lpm &= device->path_data.opm; + if (!cqr->lpm) + cqr->lpm = device->path_data.opm; + } if (cqr->cpmode == 1) { rc = ccw_device_tm_start(device->cdev, cqr->cpaddr, (long) cqr, cqr->lpm); @@ -869,37 +1445,58 @@ int dasd_start_IO(struct dasd_ccw_req *cqr) switch (rc) { case 0: cqr->status = DASD_CQR_IN_IO; - DBF_DEV_EVENT(DBF_DEBUG, device, - "start_IO: request %p started successful", - cqr); break; case -EBUSY: - DBF_DEV_EVENT(DBF_DEBUG, device, "%s", + DBF_DEV_EVENT(DBF_WARNING, device, "%s", "start_IO: device busy, retry later"); break; case -ETIMEDOUT: - DBF_DEV_EVENT(DBF_DEBUG, device, "%s", + DBF_DEV_EVENT(DBF_WARNING, device, "%s", "start_IO: request timeout, retry later"); break; case -EACCES: - /* -EACCES indicates that the request used only a - * subset of the available pathes and all these - * pathes are gone. - * Do a retry with all available pathes. + /* -EACCES indicates that the request used only a subset of the + * available paths and all these paths are gone. If the lpm of + * this request was only a subset of the opm (e.g. the ppm) then + * we just do a retry with all available paths. + * If we already use the full opm, something is amiss, and we + * need a full path verification. */ - cqr->lpm = LPM_ANYPATH; - DBF_DEV_EVENT(DBF_DEBUG, device, "%s", - "start_IO: selected pathes gone," - " retry on all pathes"); + if (test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) { + DBF_DEV_EVENT(DBF_WARNING, device, + "start_IO: selected paths gone (%x)", + cqr->lpm); + } else if (cqr->lpm != device->path_data.opm) { + cqr->lpm = device->path_data.opm; + DBF_DEV_EVENT(DBF_DEBUG, device, "%s", + "start_IO: selected paths gone," + " retry on all paths"); + } else { + DBF_DEV_EVENT(DBF_WARNING, device, "%s", + "start_IO: all paths in opm gone," + " do path verification"); + dasd_generic_last_path_gone(device); + device->path_data.opm = 0; + device->path_data.ppm = 0; + device->path_data.npm = 0; + device->path_data.tbvpm = + ccw_device_get_path_mask(device->cdev); + } break; case -ENODEV: - DBF_DEV_EVENT(DBF_DEBUG, device, "%s", + DBF_DEV_EVENT(DBF_WARNING, device, "%s", "start_IO: -ENODEV device gone, retry"); break; case -EIO: - DBF_DEV_EVENT(DBF_DEBUG, device, "%s", + DBF_DEV_EVENT(DBF_WARNING, device, "%s", "start_IO: -EIO device gone, retry"); break; + case -EINVAL: + /* most likely caused in power management context */ + DBF_DEV_EVENT(DBF_WARNING, device, "%s", + "start_IO: -EINVAL device currently " + "not accessible"); + break; default: /* internal error 11 - unknown rc */ snprintf(errorstring, ERRORLENGTH, "11 %d", rc); @@ -909,6 +1506,7 @@ int dasd_start_IO(struct dasd_ccw_req *cqr) BUG(); break; } + cqr->intrc = rc; return rc; } @@ -928,7 +1526,7 @@ static void dasd_device_timeout(unsigned long ptr) device = (struct dasd_device *) ptr; spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); /* re-activate request queue */ - device->stopped &= ~DASD_STOPPED_PENDING; + dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING); spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); dasd_schedule_device_bh(device); } @@ -962,19 +1560,26 @@ static void dasd_handle_killed_request(struct ccw_device *cdev, return; cqr = (struct dasd_ccw_req *) intparm; if (cqr->status != DASD_CQR_IN_IO) { - DBF_EVENT(DBF_DEBUG, - "invalid status in handle_killed_request: " - "bus_id %s, status %02x", - dev_name(&cdev->dev), cqr->status); + DBF_EVENT_DEVID(DBF_DEBUG, cdev, + "invalid status in handle_killed_request: " + "%02x", cqr->status); 
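/*
 * Illustrative sketch, not part of the patch: the -EACCES branch above
 * operates on channel-path bitmasks. If the request's logical path mask
 * (lpm) was only a subset of the operational path mask (opm), the request
 * is retried on all operational paths; if the full opm already failed,
 * path verification is triggered instead. Standalone version of that
 * decision:
 */
#include <stdio.h>

enum eacces_action { RETRY_ON_FULL_OPM, START_PATH_VERIFICATION };

static enum eacces_action on_eacces(unsigned char *lpm, unsigned char opm)
{
        if (*lpm != opm) {
                *lpm = opm;     /* widen to every operational path */
                return RETRY_ON_FULL_OPM;
        }
        return START_PATH_VERIFICATION; /* all paths in opm are gone */
}

int main(void)
{
        unsigned char lpm = 0x40, opm = 0xc0;

        printf("%d\n", on_eacces(&lpm, opm));   /* 0: retry with lpm == opm */
        printf("%d\n", on_eacces(&lpm, opm));   /* 1: needs verification */
        return 0;
}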
return; } - device = (struct dasd_device *) cqr->startdev; - if (device == NULL || - device != dasd_device_from_cdev_locked(cdev) || - strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) { - DBF_DEV_EVENT(DBF_DEBUG, device, "invalid device in request: " - "bus_id %s", dev_name(&cdev->dev)); + device = dasd_device_from_cdev_locked(cdev); + if (IS_ERR(device)) { + DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s", + "unable to get device from cdev"); + return; + } + + if (!cqr->startdev || + device != cqr->startdev || + strncmp(cqr->startdev->discipline->ebcname, + (char *) &cqr->magic, 4)) { + DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s", + "invalid device in request"); + dasd_put_device(device); return; } @@ -991,7 +1596,7 @@ void dasd_generic_handle_state_change(struct dasd_device *device) /* First of all start sense subsystem status request. */ dasd_eer_snss(device); - device->stopped &= ~DASD_STOPPED_PENDING; + dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING); dasd_schedule_device_bh(device); if (device->block) dasd_schedule_block_bh(device->block); @@ -1013,41 +1618,48 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm, case -EIO: break; case -ETIMEDOUT: - DBF_EVENT(DBF_WARNING, "%s(%s): request timed out\n", - __func__, dev_name(&cdev->dev)); + DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: " + "request timed out\n", __func__); break; default: - DBF_EVENT(DBF_WARNING, "%s(%s): unknown error %ld\n", - __func__, dev_name(&cdev->dev), PTR_ERR(irb)); + DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: " + "unknown error %ld\n", __func__, + PTR_ERR(irb)); } dasd_handle_killed_request(cdev, intparm); return; } - now = get_clock(); - - /* check for unsolicited interrupts */ + now = get_tod_clock(); cqr = (struct dasd_ccw_req *) intparm; - if (!cqr || ((scsw_cc(&irb->scsw) == 1) && - (scsw_fctl(&irb->scsw) & SCSW_FCTL_START_FUNC) && - (scsw_stctl(&irb->scsw) & SCSW_STCTL_STATUS_PEND))) { - if (cqr && cqr->status == DASD_CQR_IN_IO) - cqr->status = DASD_CQR_QUEUED; + /* check for conditions that should be handled immediately */ + if (!cqr || + !(scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) && + scsw_cstat(&irb->scsw) == 0)) { + if (cqr) + memcpy(&cqr->irb, irb, sizeof(*irb)); device = dasd_device_from_cdev_locked(cdev); - if (!IS_ERR(device)) { - dasd_device_clear_timer(device); - device->discipline->handle_unsolicited_interrupt(device, - irb); + if (IS_ERR(device)) + return; + /* ignore unsolicited interrupts for DIAG discipline */ + if (device->discipline == dasd_diag_discipline_pointer) { dasd_put_device(device); + return; } - return; + device->discipline->dump_sense_dbf(device, irb, "int"); + if (device->features & DASD_FEATURE_ERPLOG) + device->discipline->dump_sense(device, cqr, irb); + device->discipline->check_for_device_change(device, cqr, irb); + dasd_put_device(device); } + if (!cqr) + return; device = (struct dasd_device *) cqr->startdev; if (!device || strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) { - DBF_DEV_EVENT(DBF_DEBUG, device, "invalid device in request: " - "bus_id %s", dev_name(&cdev->dev)); + DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s", + "invalid device in request"); return; } @@ -1081,25 +1693,19 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm, struct dasd_ccw_req, devlist); } } else { /* error */ - memcpy(&cqr->irb, irb, sizeof(struct irb)); - /* log sense for every failed I/O to s390 debugfeature */ - dasd_log_sense_dbf(cqr, irb); - if (device->features & DASD_FEATURE_ERPLOG) { - dasd_log_sense(cqr, irb); - } 
- /* * If we don't want complex ERP for this request, then just * reset this and retry it in the fastpath */ if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags) && cqr->retries > 0) { - if (cqr->lpm == LPM_ANYPATH) + if (cqr->lpm == device->path_data.opm) DBF_DEV_EVENT(DBF_DEBUG, device, "default ERP in fastpath " "(%i retries left)", cqr->retries); - cqr->lpm = LPM_ANYPATH; + if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) + cqr->lpm = device->path_data.opm; cqr->status = DASD_CQR_QUEUED; next = cqr; } else @@ -1117,6 +1723,29 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm, dasd_schedule_device_bh(device); } +enum uc_todo dasd_generic_uc_handler(struct ccw_device *cdev, struct irb *irb) +{ + struct dasd_device *device; + + device = dasd_device_from_cdev_locked(cdev); + + if (IS_ERR(device)) + goto out; + if (test_bit(DASD_FLAG_OFFLINE, &device->flags) || + device->state != device->target || + !device->discipline->check_for_device_change){ + dasd_put_device(device); + goto out; + } + if (device->discipline->dump_sense_dbf) + device->discipline->dump_sense_dbf(device, irb, "uc"); + device->discipline->check_for_device_change(device, NULL, irb); + dasd_put_device(device); +out: + return UC_TODO_RETRY; +} +EXPORT_SYMBOL_GPL(dasd_generic_uc_handler); + /* * If we have an error on a dasd_block layer request then we cancel * and return all further requests from the same dasd_block as well. @@ -1156,11 +1785,11 @@ static void __dasd_device_process_ccw_queue(struct dasd_device *device, list_for_each_safe(l, n, &device->ccw_queue) { cqr = list_entry(l, struct dasd_ccw_req, devlist); - /* Stop list processing at the first non-final request. */ + /* Skip any non-final request. */ if (cqr->status == DASD_CQR_QUEUED || cqr->status == DASD_CQR_IN_IO || cqr->status == DASD_CQR_CLEAR_PENDING) - break; + continue; if (cqr->status == DASD_CQR_ERROR) { __dasd_device_recovery(device, cqr); } @@ -1229,17 +1858,24 @@ static void __dasd_device_check_expire(struct dasd_device *device) cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist); if ((cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) && (time_after_eq(jiffies, cqr->expires + cqr->starttime))) { + if (test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { + /* + * IO in safe offline processing should not + * run out of retries + */ + cqr->retries++; + } if (device->discipline->term_IO(cqr) != 0) { /* Hmpf, try again in 5 sec */ dev_err(&device->cdev->dev, - "cqr %p timed out (%is) but cannot be " + "cqr %p timed out (%lus) but cannot be " "ended, retrying in 5 s\n", cqr, (cqr->expires/HZ)); cqr->expires += 5*HZ; dasd_device_set_timer(device, 5*HZ); } else { dev_err(&device->cdev->dev, - "cqr %p timed out (%is), %i retries " + "cqr %p timed out (%lus), %i retries " "remaining\n", cqr, (cqr->expires/HZ), cqr->retries); } @@ -1260,8 +1896,14 @@ static void __dasd_device_start_head(struct dasd_device *device) cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist); if (cqr->status != DASD_CQR_QUEUED) return; - /* when device is stopped, return request to previous layer */ - if (device->stopped) { + /* when device is stopped, return request to previous layer + * exception: only the disconnect or unresumed bits are set and the + * cqr is a path verification request + */ + if (device->stopped && + !(!(device->stopped & ~(DASD_STOPPED_DC_WAIT | DASD_UNRESUMED_PM)) + && test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags))) { + cqr->intrc = -EAGAIN; cqr->status = DASD_CQR_CLEARED; 
dasd_schedule_device_bh(device); return; @@ -1277,6 +1919,23 @@ static void __dasd_device_start_head(struct dasd_device *device) dasd_device_set_timer(device, 50); } +static void __dasd_device_check_path_events(struct dasd_device *device) +{ + int rc; + + if (device->path_data.tbvpm) { + if (device->stopped & ~(DASD_STOPPED_DC_WAIT | + DASD_UNRESUMED_PM)) + return; + rc = device->discipline->verify_path( + device, device->path_data.tbvpm); + if (rc) + dasd_device_set_timer(device, 50); + else + device->path_data.tbvpm = 0; + } +}; + /* * Go through all request on the dasd_device request queue, * terminate them on the cdev if necessary, and return them to the @@ -1311,7 +1970,7 @@ int dasd_flush_device_queue(struct dasd_device *device) } break; case DASD_CQR_QUEUED: - cqr->stopclk = get_clock(); + cqr->stopclk = get_tod_clock(); cqr->status = DASD_CQR_CLEARED; break; default: /* no need to modify the others */ @@ -1351,6 +2010,7 @@ static void dasd_device_tasklet(struct dasd_device *device) __dasd_device_check_expire(device); /* find final requests on ccw queue */ __dasd_device_process_ccw_queue(device, &final_queue); + __dasd_device_check_path_events(device); spin_unlock_irq(get_ccwdev_lock(device->cdev)); /* Now call the callback function of requests with final status */ __dasd_device_process_final_queue(device, &final_queue); @@ -1358,6 +2018,8 @@ static void dasd_device_tasklet(struct dasd_device *device) /* Now check if the head of the ccw queue needs to be started. */ __dasd_device_start_head(device); spin_unlock_irq(get_ccwdev_lock(device->cdev)); + if (waitqueue_active(&shutdown_waitq)) + wake_up(&shutdown_waitq); dasd_put_device(device); } @@ -1373,6 +2035,20 @@ void dasd_schedule_device_bh(struct dasd_device *device) tasklet_hi_schedule(&device->tasklet); } +void dasd_device_set_stop_bits(struct dasd_device *device, int bits) +{ + device->stopped |= bits; +} +EXPORT_SYMBOL_GPL(dasd_device_set_stop_bits); + +void dasd_device_remove_stop_bits(struct dasd_device *device, int bits) +{ + device->stopped &= ~bits; + if (!device->stopped) + wake_up(&generic_waitq); +} +EXPORT_SYMBOL_GPL(dasd_device_remove_stop_bits); + /* * Queue a request to the head of the device ccw_queue. * Start the I/O if possible. @@ -1412,10 +2088,14 @@ void dasd_add_request_tail(struct dasd_ccw_req *cqr) /* * Wakeup helper for the 'sleep_on' functions. */ -static void dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data) +void dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data) { - wake_up((wait_queue_head_t *) data); + spin_lock_irq(get_ccwdev_lock(cqr->startdev->cdev)); + cqr->callback_data = DASD_SLEEPON_END_TAG; + spin_unlock_irq(get_ccwdev_lock(cqr->startdev->cdev)); + wake_up(&generic_waitq); } +EXPORT_SYMBOL_GPL(dasd_wakeup_cb); static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr) { @@ -1424,56 +2104,228 @@ static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr) device = cqr->startdev; spin_lock_irq(get_ccwdev_lock(device->cdev)); - rc = ((cqr->status == DASD_CQR_DONE || - cqr->status == DASD_CQR_NEED_ERP || - cqr->status == DASD_CQR_TERMINATED) && - list_empty(&cqr->devlist)); + rc = (cqr->callback_data == DASD_SLEEPON_END_TAG); spin_unlock_irq(get_ccwdev_lock(device->cdev)); return rc; } /* - * Queue a request to the tail of the device ccw_queue and wait for - * it's completion. + * checks if error recovery is necessary, returns 1 if yes, 0 otherwise. 
*/ -int dasd_sleep_on(struct dasd_ccw_req *cqr) +static int __dasd_sleep_on_erp(struct dasd_ccw_req *cqr) { struct dasd_device *device; - int rc; + dasd_erp_fn_t erp_fn; + if (cqr->status == DASD_CQR_FILLED) + return 0; device = cqr->startdev; + if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) { + if (cqr->status == DASD_CQR_TERMINATED) { + device->discipline->handle_terminated_request(cqr); + return 1; + } + if (cqr->status == DASD_CQR_NEED_ERP) { + erp_fn = device->discipline->erp_action(cqr); + erp_fn(cqr); + return 1; + } + if (cqr->status == DASD_CQR_FAILED) + dasd_log_sense(cqr, &cqr->irb); + if (cqr->refers) { + __dasd_process_erp(device, cqr); + return 1; + } + } + return 0; +} - cqr->callback = dasd_wakeup_cb; - cqr->callback_data = (void *) &generic_waitq; - dasd_add_request_tail(cqr); - wait_event(generic_waitq, _wait_for_wakeup(cqr)); +static int __dasd_sleep_on_loop_condition(struct dasd_ccw_req *cqr) +{ + if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) { + if (cqr->refers) /* erp is not done yet */ + return 1; + return ((cqr->status != DASD_CQR_DONE) && + (cqr->status != DASD_CQR_FAILED)); + } else + return (cqr->status == DASD_CQR_FILLED); +} - /* Request status is either done or failed. */ - rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO; +static int _dasd_sleep_on(struct dasd_ccw_req *maincqr, int interruptible) +{ + struct dasd_device *device; + int rc; + struct list_head ccw_queue; + struct dasd_ccw_req *cqr; + + INIT_LIST_HEAD(&ccw_queue); + maincqr->status = DASD_CQR_FILLED; + device = maincqr->startdev; + list_add(&maincqr->blocklist, &ccw_queue); + for (cqr = maincqr; __dasd_sleep_on_loop_condition(cqr); + cqr = list_first_entry(&ccw_queue, + struct dasd_ccw_req, blocklist)) { + + if (__dasd_sleep_on_erp(cqr)) + continue; + if (cqr->status != DASD_CQR_FILLED) /* could be failed */ + continue; + if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) && + !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) { + cqr->status = DASD_CQR_FAILED; + cqr->intrc = -EPERM; + continue; + } + /* Non-temporary stop condition will trigger fail fast */ + if (device->stopped & ~DASD_STOPPED_PENDING && + test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) && + (!dasd_eer_enabled(device))) { + cqr->status = DASD_CQR_FAILED; + cqr->intrc = -ENOLINK; + continue; + } + /* Don't try to start requests if device is stopped */ + if (interruptible) { + rc = wait_event_interruptible( + generic_waitq, !(device->stopped)); + if (rc == -ERESTARTSYS) { + cqr->status = DASD_CQR_FAILED; + maincqr->intrc = rc; + continue; + } + } else + wait_event(generic_waitq, !(device->stopped)); + + if (!cqr->callback) + cqr->callback = dasd_wakeup_cb; + + cqr->callback_data = DASD_SLEEPON_START_TAG; + dasd_add_request_tail(cqr); + if (interruptible) { + rc = wait_event_interruptible( + generic_waitq, _wait_for_wakeup(cqr)); + if (rc == -ERESTARTSYS) { + dasd_cancel_req(cqr); + /* wait (non-interruptible) for final status */ + wait_event(generic_waitq, + _wait_for_wakeup(cqr)); + cqr->status = DASD_CQR_FAILED; + maincqr->intrc = rc; + continue; + } + } else + wait_event(generic_waitq, _wait_for_wakeup(cqr)); + } + + maincqr->endclk = get_tod_clock(); + if ((maincqr->status != DASD_CQR_DONE) && + (maincqr->intrc != -ERESTARTSYS)) + dasd_log_sense(maincqr, &maincqr->irb); + if (maincqr->status == DASD_CQR_DONE) + rc = 0; + else if (maincqr->intrc) + rc = maincqr->intrc; + else + rc = -EIO; return rc; } +static inline int _wait_for_wakeup_queue(struct list_head *ccw_queue) +{ + struct dasd_ccw_req *cqr; + + 
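/*
 * Illustrative sketch, not part of the patch: with this change a request's
 * completion is signalled by flipping callback_data from
 * DASD_SLEEPON_START_TAG to DASD_SLEEPON_END_TAG under the ccw device
 * lock, so the wait condition reduces to a pointer comparison. Schematic
 * version of the handshake with hypothetical example_* names:
 */
#include <linux/spinlock.h>
#include <linux/wait.h>

#define EXAMPLE_START_TAG       ((void *) 1)
#define EXAMPLE_END_TAG         ((void *) 2)

struct example_req {
        spinlock_t lock;        /* stands in for the ccw device lock */
        void *tag;
};

static DECLARE_WAIT_QUEUE_HEAD(example_waitq);

static void example_complete(struct example_req *req)
{
        spin_lock(&req->lock);
        req->tag = EXAMPLE_END_TAG;     /* mark final under the lock */
        spin_unlock(&req->lock);
        wake_up(&example_waitq);
}

static void example_wait(struct example_req *req)
{
        /* the driver re-reads the tag under the lock; simplified here */
        wait_event(example_waitq, req->tag == EXAMPLE_END_TAG);
}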
list_for_each_entry(cqr, ccw_queue, blocklist) { + if (cqr->callback_data != DASD_SLEEPON_END_TAG) + return 0; + } + + return 1; +} + +static int _dasd_sleep_on_queue(struct list_head *ccw_queue, int interruptible) +{ + struct dasd_device *device; + int rc; + struct dasd_ccw_req *cqr, *n; + +retry: + list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) { + device = cqr->startdev; + if (cqr->status != DASD_CQR_FILLED) /*could be failed*/ + continue; + + if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) && + !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) { + cqr->status = DASD_CQR_FAILED; + cqr->intrc = -EPERM; + continue; + } + /*Non-temporary stop condition will trigger fail fast*/ + if (device->stopped & ~DASD_STOPPED_PENDING && + test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) && + !dasd_eer_enabled(device)) { + cqr->status = DASD_CQR_FAILED; + cqr->intrc = -EAGAIN; + continue; + } + + /*Don't try to start requests if device is stopped*/ + if (interruptible) { + rc = wait_event_interruptible( + generic_waitq, !device->stopped); + if (rc == -ERESTARTSYS) { + cqr->status = DASD_CQR_FAILED; + cqr->intrc = rc; + continue; + } + } else + wait_event(generic_waitq, !(device->stopped)); + + if (!cqr->callback) + cqr->callback = dasd_wakeup_cb; + cqr->callback_data = DASD_SLEEPON_START_TAG; + dasd_add_request_tail(cqr); + } + + wait_event(generic_waitq, _wait_for_wakeup_queue(ccw_queue)); + + rc = 0; + list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) { + if (__dasd_sleep_on_erp(cqr)) + rc = 1; + } + if (rc) + goto retry; + + + return 0; +} + +/* + * Queue a request to the tail of the device ccw_queue and wait for + * it's completion. + */ +int dasd_sleep_on(struct dasd_ccw_req *cqr) +{ + return _dasd_sleep_on(cqr, 0); +} + +/* + * Start requests from a ccw_queue and wait for their completion. + */ +int dasd_sleep_on_queue(struct list_head *ccw_queue) +{ + return _dasd_sleep_on_queue(ccw_queue, 0); +} +EXPORT_SYMBOL(dasd_sleep_on_queue); + /* * Queue a request to the tail of the device ccw_queue and wait * interruptible for it's completion. */ int dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr) { - struct dasd_device *device; - int rc; - - device = cqr->startdev; - cqr->callback = dasd_wakeup_cb; - cqr->callback_data = (void *) &generic_waitq; - dasd_add_request_tail(cqr); - rc = wait_event_interruptible(generic_waitq, _wait_for_wakeup(cqr)); - if (rc == -ERESTARTSYS) { - dasd_cancel_req(cqr); - /* wait (non-interruptible) for final status */ - wait_event(generic_waitq, _wait_for_wakeup(cqr)); - } - rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO; - return rc; + return _dasd_sleep_on(cqr, 1); } /* @@ -1485,11 +2337,20 @@ int dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr) static inline int _dasd_term_running_cqr(struct dasd_device *device) { struct dasd_ccw_req *cqr; + int rc; if (list_empty(&device->ccw_queue)) return 0; cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist); - return device->discipline->term_IO(cqr); + rc = device->discipline->term_IO(cqr); + if (!rc) + /* + * CQR terminated because a more important request is pending. + * Undo decreasing of retry counter because this is + * not an error case. 
+ */ + cqr->retries++; + return rc; } int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr) @@ -1498,17 +2359,26 @@ int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr) int rc; device = cqr->startdev; + if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) && + !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) { + cqr->status = DASD_CQR_FAILED; + cqr->intrc = -EPERM; + return -EIO; + } spin_lock_irq(get_ccwdev_lock(device->cdev)); rc = _dasd_term_running_cqr(device); if (rc) { spin_unlock_irq(get_ccwdev_lock(device->cdev)); return rc; } - cqr->callback = dasd_wakeup_cb; - cqr->callback_data = (void *) &generic_waitq; + cqr->callback_data = DASD_SLEEPON_START_TAG; cqr->status = DASD_CQR_QUEUED; - list_add(&cqr->devlist, &device->ccw_queue); + /* + * add new request as second + * first the terminated cqr needs to be finished + */ + list_add(&cqr->devlist, device->ccw_queue.next); /* let the bh start the request to keep them in order */ dasd_schedule_device_bh(device); @@ -1517,8 +2387,18 @@ int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr) wait_event(generic_waitq, _wait_for_wakeup(cqr)); - /* Request status is either done or failed. */ - rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO; + if (cqr->status == DASD_CQR_DONE) + rc = 0; + else if (cqr->intrc) + rc = cqr->intrc; + else + rc = -EIO; + + /* kick tasklets */ + dasd_schedule_device_bh(device); + if (device->block) + dasd_schedule_block_bh(device->block); + return rc; } @@ -1526,8 +2406,7 @@ int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr) * Cancels a request that was started with dasd_sleep_on_req. * This is useful to timeout requests. The request will be * terminated if it is currently in i/o. - * Returns 1 if the request has been terminated. - * 0 if there was no need to terminate the request (not started yet) + * Returns 0 if request termination was successful * negative error code if termination failed * Cancellation of a request is an asynchronous operation! The calling * function has to wait until the request is properly returned via callback. @@ -1553,8 +2432,7 @@ int dasd_cancel_req(struct dasd_ccw_req *cqr) "Cancelling request %p failed with rc=%d\n", cqr, rc); } else { - cqr->stopclk = get_clock(); - rc = 1; + cqr->stopclk = get_tod_clock(); } break; default: /* already finished or clear pending - do nothing */ @@ -1565,7 +2443,6 @@ int dasd_cancel_req(struct dasd_ccw_req *cqr) return rc; } - /* * SECTION: Operations of the dasd_block layer. */ @@ -1583,7 +2460,7 @@ static void dasd_block_timeout(unsigned long ptr) block = (struct dasd_block *) ptr; spin_lock_irqsave(get_ccwdev_lock(block->base->cdev), flags); /* re-activate request queue */ - block->base->stopped &= ~DASD_STOPPED_PENDING; + dasd_device_remove_stop_bits(block->base, DASD_STOPPED_PENDING); spin_unlock_irqrestore(get_ccwdev_lock(block->base->cdev), flags); dasd_schedule_block_bh(block); } @@ -1608,22 +2485,12 @@ void dasd_block_clear_timer(struct dasd_block *block) } /* - * posts the buffer_cache about a finalized request - */ -static inline void dasd_end_request(struct request *req, int error) -{ - if (__blk_end_request(req, error, blk_rq_bytes(req))) - BUG(); -} - -/* * Process finished error recovery ccw. 
*/ -static inline void __dasd_block_process_erp(struct dasd_block *block, - struct dasd_ccw_req *cqr) +static void __dasd_process_erp(struct dasd_device *device, + struct dasd_ccw_req *cqr) { dasd_erp_fn_t erp_fn; - struct dasd_device *device = block->base; if (cqr->status == DASD_CQR_DONE) DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful"); @@ -1656,21 +2523,30 @@ static void __dasd_process_request_queue(struct dasd_block *block) * for that. State DASD_STATE_ONLINE is normal block device * operation. */ - if (basedev->state < DASD_STATE_READY) + if (basedev->state < DASD_STATE_READY) { + while ((req = blk_fetch_request(block->request_queue))) + __blk_end_request_all(req, -EIO); return; + } /* Now we try to fetch requests from the request queue */ - while (!blk_queue_plugged(queue) && - elv_next_request(queue)) { - - req = elv_next_request(queue); - + while ((req = blk_peek_request(queue))) { if (basedev->features & DASD_FEATURE_READONLY && rq_data_dir(req) == WRITE) { DBF_DEV_EVENT(DBF_ERR, basedev, "Rejecting write request %p", req); - blkdev_dequeue_request(req); - dasd_end_request(req, -EIO); + blk_start_request(req); + __blk_end_request_all(req, -EIO); + continue; + } + if (test_bit(DASD_FLAG_ABORTALL, &basedev->flags) && + (basedev->features & DASD_FEATURE_FAILFAST || + blk_noretry_request(req))) { + DBF_DEV_EVENT(DBF_ERR, basedev, + "Rejecting failfast request %p", + req); + blk_start_request(req); + __blk_end_request_all(req, -ETIMEDOUT); continue; } cqr = basedev->discipline->build_cp(basedev, block, req); @@ -1688,9 +2564,12 @@ static void __dasd_process_request_queue(struct dasd_block *block) */ if (!list_empty(&block->ccw_queue)) break; - spin_lock_irqsave(get_ccwdev_lock(basedev->cdev), flags); - basedev->stopped |= DASD_STOPPED_PENDING; - spin_unlock_irqrestore(get_ccwdev_lock(basedev->cdev), flags); + spin_lock_irqsave( + get_ccwdev_lock(basedev->cdev), flags); + dasd_device_set_stop_bits(basedev, + DASD_STOPPED_PENDING); + spin_unlock_irqrestore( + get_ccwdev_lock(basedev->cdev), flags); dasd_block_set_timer(block, HZ/2); break; } @@ -1698,8 +2577,8 @@ static void __dasd_process_request_queue(struct dasd_block *block) "CCW creation failed (rc=%ld) " "on request %p", PTR_ERR(cqr), req); - blkdev_dequeue_request(req); - dasd_end_request(req, -EIO); + blk_start_request(req); + __blk_end_request_all(req, -EIO); continue; } /* @@ -1708,8 +2587,10 @@ static void __dasd_process_request_queue(struct dasd_block *block) */ cqr->callback_data = (void *) req; cqr->status = DASD_CQR_FILLED; - blkdev_dequeue_request(req); + req->completion_data = cqr; + blk_start_request(req); list_add_tail(&cqr->blocklist, &block->ccw_queue); + INIT_LIST_HEAD(&cqr->devlist); dasd_profile_start(block, cqr, req); } } @@ -1723,9 +2604,18 @@ static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr) req = (struct request *) cqr->callback_data; dasd_profile_end(cqr->block, cqr, req); status = cqr->block->base->discipline->free_cp(cqr, req); - if (status <= 0) - error = status ? 
status : -EIO;
-		dasd_end_request(req, error);
+	if (status < 0)
+		error = status;
+	else if (status == 0) {
+		if (cqr->intrc == -EPERM)
+			error = -EBADE;
+		else if (cqr->intrc == -ENOLINK ||
+			 cqr->intrc == -ETIMEDOUT)
+			error = cqr->intrc;
+		else
+			error = -EIO;
+	}
+	__blk_end_request_all(req, error);
}

/*
@@ -1758,7 +2648,8 @@ restart:
		/* Process requests that may be recovered */
		if (cqr->status == DASD_CQR_NEED_ERP) {
			erp_fn = base->discipline->erp_action(cqr);
-			erp_fn(cqr);
+			if (IS_ERR(erp_fn(cqr)))
+				continue;
			goto restart;
		}

@@ -1776,7 +2667,7 @@ restart:
			cqr->status = DASD_CQR_FILLED;
			cqr->retries = 255;
			spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
-			base->stopped |= DASD_STOPPED_QUIESCE;
+			dasd_device_set_stop_bits(base, DASD_STOPPED_QUIESCE);
			spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags);
			goto restart;

@@ -1784,12 +2675,12 @@
		/* Process finished ERP request. */
		if (cqr->refers) {
-			__dasd_block_process_erp(block, cqr);
+			__dasd_process_erp(base, cqr);
			goto restart;
		}

		/* Rechain finished requests to final queue */
-		cqr->endclk = get_clock();
+		cqr->endclk = get_tod_clock();
		list_move_tail(&cqr->blocklist, final_queue);
	}
}
@@ -1812,11 +2703,19 @@ static void __dasd_block_start_head(struct dasd_block *block)
	list_for_each_entry(cqr, &block->ccw_queue, blocklist) {
		if (cqr->status != DASD_CQR_FILLED)
			continue;
+		if (test_bit(DASD_FLAG_LOCK_STOLEN, &block->base->flags) &&
+		    !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
+			cqr->status = DASD_CQR_FAILED;
+			cqr->intrc = -EPERM;
+			dasd_schedule_block_bh(block);
+			continue;
+		}
		/* Non-temporary stop condition will trigger fail fast */
		if (block->base->stopped & ~DASD_STOPPED_PENDING &&
		    test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
		    (!dasd_eer_enabled(block->base))) {
			cqr->status = DASD_CQR_FAILED;
+			cqr->intrc = -ENOLINK;
			dasd_schedule_block_bh(block);
			continue;
		}
@@ -1866,6 +2765,8 @@ static void dasd_block_tasklet(struct dasd_block *block)
	__dasd_block_start_head(block);
	spin_unlock(&block->queue_lock);
	spin_unlock_irq(&block->request_queue_lock);
+	if (waitqueue_active(&shutdown_waitq))
+		wake_up(&shutdown_waitq);
	dasd_put_device(block->base);
}

@@ -1875,6 +2776,26 @@ static void _dasd_wake_block_flush_cb(struct dasd_ccw_req *cqr, void *data)
}

/*
+ * Requeue a request back to the block request queue;
+ * this only works for block requests.
+ */
+static int _dasd_requeue_request(struct dasd_ccw_req *cqr)
+{
+	struct dasd_block *block = cqr->block;
+	struct request *req;
+	unsigned long flags;
+
+	if (!block)
+		return -EINVAL;
+	spin_lock_irqsave(&block->queue_lock, flags);
+	req = (struct request *) cqr->callback_data;
+	blk_requeue_request(block->request_queue, req);
+	spin_unlock_irqrestore(&block->queue_lock, flags);
+
+	return 0;
+}
+
+/*
 * Go through all requests on the dasd_block request queue, cancel them
 * on the respective dasd_device, and return them to the generic
 * block layer.
@@ -1915,7 +2836,7 @@ restart_cb:
		/* Process finished ERP request.
*/ if (cqr->refers) { spin_lock_bh(&block->queue_lock); - __dasd_block_process_erp(block, cqr); + __dasd_process_erp(block->base, cqr); spin_unlock_bh(&block->queue_lock); /* restart list_for_xx loop since dasd_process_erp * might remove multiple elements */ @@ -1923,7 +2844,7 @@ restart_cb: } /* call the callback function */ spin_lock_irq(&block->request_queue_lock); - cqr->endclk = get_clock(); + cqr->endclk = get_tod_clock(); list_del_init(&cqr->blocklist); __dasd_cleanup_cqr(cqr); spin_unlock_irq(&block->request_queue_lock); @@ -1967,6 +2888,82 @@ static void do_dasd_request(struct request_queue *queue) } /* + * Block timeout callback, called from the block layer + * + * request_queue lock is held on entry. + * + * Return values: + * BLK_EH_RESET_TIMER if the request should be left running + * BLK_EH_NOT_HANDLED if the request is handled or terminated + * by the driver. + */ +enum blk_eh_timer_return dasd_times_out(struct request *req) +{ + struct dasd_ccw_req *cqr = req->completion_data; + struct dasd_block *block = req->q->queuedata; + struct dasd_device *device; + int rc = 0; + + if (!cqr) + return BLK_EH_NOT_HANDLED; + + device = cqr->startdev ? cqr->startdev : block->base; + if (!device->blk_timeout) + return BLK_EH_RESET_TIMER; + DBF_DEV_EVENT(DBF_WARNING, device, + " dasd_times_out cqr %p status %x", + cqr, cqr->status); + + spin_lock(&block->queue_lock); + spin_lock(get_ccwdev_lock(device->cdev)); + cqr->retries = -1; + cqr->intrc = -ETIMEDOUT; + if (cqr->status >= DASD_CQR_QUEUED) { + spin_unlock(get_ccwdev_lock(device->cdev)); + rc = dasd_cancel_req(cqr); + } else if (cqr->status == DASD_CQR_FILLED || + cqr->status == DASD_CQR_NEED_ERP) { + cqr->status = DASD_CQR_TERMINATED; + spin_unlock(get_ccwdev_lock(device->cdev)); + } else if (cqr->status == DASD_CQR_IN_ERP) { + struct dasd_ccw_req *searchcqr, *nextcqr, *tmpcqr; + + list_for_each_entry_safe(searchcqr, nextcqr, + &block->ccw_queue, blocklist) { + tmpcqr = searchcqr; + while (tmpcqr->refers) + tmpcqr = tmpcqr->refers; + if (tmpcqr != cqr) + continue; + /* searchcqr is an ERP request for cqr */ + searchcqr->retries = -1; + searchcqr->intrc = -ETIMEDOUT; + if (searchcqr->status >= DASD_CQR_QUEUED) { + spin_unlock(get_ccwdev_lock(device->cdev)); + rc = dasd_cancel_req(searchcqr); + spin_lock(get_ccwdev_lock(device->cdev)); + } else if ((searchcqr->status == DASD_CQR_FILLED) || + (searchcqr->status == DASD_CQR_NEED_ERP)) { + searchcqr->status = DASD_CQR_TERMINATED; + rc = 0; + } else if (searchcqr->status == DASD_CQR_IN_ERP) { + /* + * Shouldn't happen; most recent ERP + * request is at the front of queue + */ + continue; + } + break; + } + spin_unlock(get_ccwdev_lock(device->cdev)); + } + dasd_schedule_block_bh(block); + spin_unlock(&block->queue_lock); + + return rc ? BLK_EH_RESET_TIMER : BLK_EH_NOT_HANDLED; +} + +/* * Allocate and initialize request queue and default I/O scheduler. 
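 *
 * A note on dasd_times_out() above: every ERP request points at the
 * request it recovers through ->refers, so the loop resolves which
 * queued request belongs to the timed-out original by walking that
 * chain to its root. A standalone sketch of the walk (the struct is
 * reduced to the one field that matters here; names are illustrative):
 */

#include <stdio.h>

struct cqr {
	const char *name;
	struct cqr *refers;	/* request this ERP request recovers; NULL at root */
};

static struct cqr *erp_root(struct cqr *cqr)
{
	while (cqr->refers)	/* cf. "while (tmpcqr->refers)" above */
		cqr = cqr->refers;
	return cqr;
}

int main(void)
{
	struct cqr orig = { "orig", NULL };
	struct cqr erp1 = { "erp1", &orig };
	struct cqr erp2 = { "erp2", &erp1 };

	printf("%s\n", erp_root(&erp2)->name);	/* prints: orig */
	return 0;
}

/*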
 */
static int dasd_alloc_queue(struct dasd_block *block)
@@ -1982,12 +2979,12 @@
	elevator_exit(block->request_queue->elevator);
	block->request_queue->elevator = NULL;

+	mutex_lock(&block->request_queue->sysfs_lock);
	rc = elevator_init(block->request_queue, "deadline");
-	if (rc) {
+	if (rc)
		blk_cleanup_queue(block->request_queue);
-		return rc;
-	}
-	return 0;
+	mutex_unlock(&block->request_queue->sysfs_lock);
+	return rc;
}

/*
@@ -1997,17 +2994,27 @@ static void dasd_setup_queue(struct dasd_block *block)
{
	int max;

-	blk_queue_hardsect_size(block->request_queue, block->bp_block);
-	max = block->base->discipline->max_blocks << block->s2b_shift;
-	blk_queue_max_sectors(block->request_queue, max);
-	blk_queue_max_phys_segments(block->request_queue, -1L);
-	blk_queue_max_hw_segments(block->request_queue, -1L);
+	if (block->base->features & DASD_FEATURE_USERAW) {
+		/*
+		 * the max_blocks value for raw_track access is 256
+		 * it is higher than the native ECKD value because we
+		 * only need one ccw per track
+		 * so the max_hw_sectors are
+		 * 2048 x 512B = 1024kB = 16 tracks
+		 */
+		max = 2048;
+	} else {
+		max = block->base->discipline->max_blocks << block->s2b_shift;
+	}
+	blk_queue_logical_block_size(block->request_queue,
+				     block->bp_block);
+	blk_queue_max_hw_sectors(block->request_queue, max);
+	blk_queue_max_segments(block->request_queue, -1L);
	/* with page sized segments we can translate each segment into
	 * one idaw/tidaw */
	blk_queue_max_segment_size(block->request_queue, PAGE_SIZE);
	blk_queue_segment_boundary(block->request_queue, PAGE_SIZE - 1);
-	blk_queue_ordered(block->request_queue, QUEUE_ORDERED_DRAIN, NULL);
}

/*
@@ -2032,20 +3039,21 @@ static void dasd_flush_request_queue(struct dasd_block *block)
		return;

	spin_lock_irq(&block->request_queue_lock);
-	while ((req = elv_next_request(block->request_queue))) {
-		blkdev_dequeue_request(req);
-		dasd_end_request(req, -EIO);
-	}
+	while ((req = blk_fetch_request(block->request_queue)))
+		__blk_end_request_all(req, -EIO);
	spin_unlock_irq(&block->request_queue_lock);
}

static int dasd_open(struct block_device *bdev, fmode_t mode)
{
-	struct dasd_block *block = bdev->bd_disk->private_data;
-	struct dasd_device *base = block->base;
+	struct dasd_device *base;
	int rc;

-	atomic_inc(&block->open_count);
+	base = dasd_device_from_gendisk(bdev->bd_disk);
+	if (!base)
+		return -ENODEV;
+
+	atomic_inc(&base->block->open_count);
	if (test_bit(DASD_FLAG_OFFLINE, &base->flags)) {
		rc = -ENODEV;
		goto unlock;
@@ -2071,22 +3079,32 @@ static int dasd_open(struct block_device *bdev, fmode_t mode)
		goto out;
	}

+	if ((mode & FMODE_WRITE) &&
+	    (test_bit(DASD_FLAG_DEVICE_RO, &base->flags) ||
+	     (base->features & DASD_FEATURE_READONLY))) {
+		rc = -EROFS;
+		goto out;
+	}
+
+	dasd_put_device(base);
	return 0;

out:
	module_put(base->discipline->owner);
unlock:
-	atomic_dec(&block->open_count);
+	atomic_dec(&base->block->open_count);
+	dasd_put_device(base);
	return rc;
}

-static int dasd_release(struct gendisk *disk, fmode_t mode)
+static void dasd_release(struct gendisk *disk, fmode_t mode)
{
-	struct dasd_block *block = disk->private_data;
-
-	atomic_dec(&block->open_count);
-	module_put(block->base->discipline->owner);
-	return 0;
+	struct dasd_device *base = dasd_device_from_gendisk(disk);
+	if (base) {
+		atomic_dec(&base->block->open_count);
+		module_put(base->discipline->owner);
+		dasd_put_device(base);
+	}
}

/*
@@ -2094,24 +3112,24 @@
 */
static int
dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
-	struct dasd_block *block;
	struct dasd_device *base;

-	block = bdev->bd_disk->private_data;
-	base = block->base;
-	if (!block)
+	base = dasd_device_from_gendisk(bdev->bd_disk);
+	if (!base)
		return -ENODEV;

	if (!base->discipline ||
-	    !base->discipline->fill_geometry)
+	    !base->discipline->fill_geometry) {
+		dasd_put_device(base);
		return -EINVAL;
-
-	base->discipline->fill_geometry(block, geo);
-	geo->start = get_start_sect(bdev) >> block->s2b_shift;
+	}
+	base->discipline->fill_geometry(base->block, geo);
+	geo->start = get_start_sect(bdev) >> base->block->s2b_shift;
+	dasd_put_device(base);
	return 0;
}

-struct block_device_operations
+const struct block_device_operations
dasd_device_operations = {
	.owner		= THIS_MODULE,
	.open		= dasd_open,
@@ -2142,6 +3160,7 @@ dasd_exit(void)
		debug_unregister(dasd_debug_area);
		dasd_debug_area = NULL;
	}
+	dasd_statistics_removeroot();
}

/*
@@ -2149,6 +3168,45 @@
 */

/*
+ * Is the device read-only?
+ * Note that this function does not report the setting of the
+ * readonly device attribute, but how it is configured in z/VM.
+ */
+int dasd_device_is_ro(struct dasd_device *device)
+{
+	struct ccw_dev_id dev_id;
+	struct diag210 diag_data;
+	int rc;
+
+	if (!MACHINE_IS_VM)
+		return 0;
+	ccw_device_get_id(device->cdev, &dev_id);
+	memset(&diag_data, 0, sizeof(diag_data));
+	diag_data.vrdcdvno = dev_id.devno;
+	diag_data.vrdclen = sizeof(diag_data);
+	rc = diag210(&diag_data);
+	if (rc == 0 || rc == 2) {
+		return diag_data.vrdcvfla & 0x80;
+	} else {
+		DBF_EVENT(DBF_WARNING, "diag210 failed for dev=%04x with rc=%d",
+			  dev_id.devno, rc);
+		return 0;
+	}
+}
+EXPORT_SYMBOL_GPL(dasd_device_is_ro);
+
+static void dasd_generic_auto_online(void *data, async_cookie_t cookie)
+{
+	struct ccw_device *cdev = data;
+	int ret;
+
+	ret = ccw_device_set_online(cdev);
+	if (ret)
+		pr_warning("%s: Setting the DASD online failed with rc=%d\n",
+			   dev_name(&cdev->dev), ret);
+}
+
+/*
 * Initial attempt at a probe function. This can be simplified once
 * the other detection code is gone.
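 *
 * A side note on dasd_device_is_ro() above: under z/VM, DIAG 210 fills
 * a device characteristics block, and bit 0x80 of the virtual device
 * flags marks the virtual device read-only. A userspace sketch of just
 * that test (the structure is abbreviated to the three fields used
 * above; the real layout lives in <asm/diag.h>):
 */

#include <stdio.h>

struct diag210_sketch {
	unsigned short vrdcdvno;	/* device number */
	unsigned short vrdclen;		/* length of this block */
	unsigned char  vrdcvfla;	/* virtual device flags */
};

static int device_is_ro(const struct diag210_sketch *d)
{
	return (d->vrdcvfla & 0x80) != 0;	/* the RO test used above */
}

int main(void)
{
	/* 0x0201 is a made-up device number for the example */
	struct diag210_sketch d = { 0x0201, sizeof(d), 0x80 };

	printf("read-only: %d\n", device_is_ro(&d));	/* prints: 1 */
	return 0;
}

/*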
*/ @@ -2157,18 +3215,11 @@ int dasd_generic_probe(struct ccw_device *cdev, { int ret; - ret = ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP); - if (ret) { - DBF_EVENT(DBF_WARNING, - "dasd_generic_probe: could not set ccw-device options " - "for %s\n", dev_name(&cdev->dev)); - return ret; - } ret = dasd_add_sysfs_files(cdev); if (ret) { - DBF_EVENT(DBF_WARNING, - "dasd_generic_probe: could not add sysfs entries " - "for %s\n", dev_name(&cdev->dev)); + DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s", + "dasd_generic_probe: could not add " + "sysfs entries"); return ret; } cdev->handler = &dasd_int_handler; @@ -2180,10 +3231,7 @@ int dasd_generic_probe(struct ccw_device *cdev, */ if ((dasd_get_feature(cdev, DASD_FEATURE_INITIAL_ONLINE) > 0 ) || (dasd_autodetect && dasd_busid_known(dev_name(&cdev->dev)) != 0)) - ret = ccw_device_set_online(cdev); - if (ret) - pr_warning("%s: Setting the DASD online failed with rc=%d\n", - dev_name(&cdev->dev), ret); + async_schedule(dasd_generic_auto_online, cdev); return 0; } @@ -2198,13 +3246,16 @@ void dasd_generic_remove(struct ccw_device *cdev) cdev->handler = NULL; - dasd_remove_sysfs_files(cdev); device = dasd_device_from_cdev(cdev); - if (IS_ERR(device)) + if (IS_ERR(device)) { + dasd_remove_sysfs_files(cdev); return; - if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) { + } + if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags) && + !test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { /* Already doing offline processing */ dasd_put_device(device); + dasd_remove_sysfs_files(cdev); return; } /* @@ -2215,7 +3266,6 @@ void dasd_generic_remove(struct ccw_device *cdev) dasd_set_target_state(device, DASD_STATE_NEW); /* dasd_delete_device destroys the device reference. */ block = device->block; - device->block = NULL; dasd_delete_device(device); /* * life cycle of block is bound to device, so delete it after @@ -2223,6 +3273,8 @@ void dasd_generic_remove(struct ccw_device *cdev) */ if (block) dasd_free_block(block); + + dasd_remove_sysfs_files(cdev); } /* @@ -2291,12 +3343,9 @@ int dasd_generic_set_online(struct ccw_device *cdev, pr_debug("dasd_generic device %s found\n", dev_name(&cdev->dev)); - /* FIXME: we have to wait for the root device but we don't want - * to wait for each single device but for all at once. */ wait_event(dasd_init_waitq, _wait_for_device(device)); dasd_put_device(device); - return rc; } @@ -2304,16 +3353,13 @@ int dasd_generic_set_offline(struct ccw_device *cdev) { struct dasd_device *device; struct dasd_block *block; - int max_count, open_count; + int max_count, open_count, rc; + rc = 0; device = dasd_device_from_cdev(cdev); if (IS_ERR(device)) return PTR_ERR(device); - if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) { - /* Already doing offline processing */ - dasd_put_device(device); - return 0; - } + /* * We must make sure that this device is currently not in use. 
 * The open_count is increased for every opener, that includes
@@ -2337,10 +3383,57 @@ int dasd_generic_set_offline(struct ccw_device *cdev)
			return -EBUSY;
		}
	}
+
+	if (test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
+		/*
+		 * safe offline already running
+		 * could only be called by normal offline so safe_offline flag
+		 * needs to be removed to run normal offline and kill all I/O
+		 */
+		if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) {
+			/* Already doing normal offline processing */
+			dasd_put_device(device);
+			return -EBUSY;
+		} else
+			clear_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags);
+
+	} else
+		if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
+			/* Already doing offline processing */
+			dasd_put_device(device);
+			return -EBUSY;
+		}
+
+	/*
+	 * if safe_offline called set safe_offline_running flag and
+	 * clear safe_offline so that a call to normal offline
+	 * can overrun safe_offline processing
+	 */
+	if (test_and_clear_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags) &&
+	    !test_and_set_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
+		/*
+		 * If we want to set the device safe offline all IO operations
+		 * should be finished before continuing the offline process
+		 * so sync bdev first and then wait for our queues to become
+		 * empty
+		 */
+		/* sync blockdev and partitions */
+		rc = fsync_bdev(device->block->bdev);
+		if (rc != 0)
+			goto interrupted;
+
+		/* schedule device tasklet and wait for completion */
+		dasd_schedule_device_bh(device);
+		rc = wait_event_interruptible(shutdown_waitq,
+					      _wait_for_empty_queues(device));
+		if (rc != 0)
+			goto interrupted;
+	}
+
+	set_bit(DASD_FLAG_OFFLINE, &device->flags);
	dasd_set_target_state(device, DASD_STATE_NEW);
	/* dasd_delete_device destroys the device reference. */
	block = device->block;
-	device->block = NULL;
	dasd_delete_device(device);
	/*
	 * life cycle of block is bound to device, so delete it after
@@ -2349,12 +3442,63 @@
	if (block)
		dasd_free_block(block);
	return 0;
+
+interrupted:
+	/* interrupted by signal */
+	clear_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags);
+	clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags);
+	clear_bit(DASD_FLAG_OFFLINE, &device->flags);
+	dasd_put_device(device);
+	return rc;
}

+int dasd_generic_last_path_gone(struct dasd_device *device)
+{
+	struct dasd_ccw_req *cqr;
+
+	dev_warn(&device->cdev->dev, "No operational channel path is left "
+		 "for the device\n");
+	DBF_DEV_EVENT(DBF_WARNING, device, "%s", "last path gone");
+	/* First of all call extended error reporting. */
+	dasd_eer_write(device, NULL, DASD_EER_NOPATH);
+
+	if (device->state < DASD_STATE_BASIC)
+		return 0;
+	/* Device is active. We want to keep it. */
+	list_for_each_entry(cqr, &device->ccw_queue, devlist)
+		if ((cqr->status == DASD_CQR_IN_IO) ||
+		    (cqr->status == DASD_CQR_CLEAR_PENDING)) {
+			cqr->status = DASD_CQR_QUEUED;
+			cqr->retries++;
+		}
+	dasd_device_set_stop_bits(device, DASD_STOPPED_DC_WAIT);
+	dasd_device_clear_timer(device);
+	dasd_schedule_device_bh(device);
+	return 1;
+}
+EXPORT_SYMBOL_GPL(dasd_generic_last_path_gone);
+
+int dasd_generic_path_operational(struct dasd_device *device)
+{
+	dev_info(&device->cdev->dev, "A channel path to the device has become "
+		 "operational\n");
+	DBF_DEV_EVENT(DBF_WARNING, device, "%s", "path operational");
+	dasd_device_remove_stop_bits(device, DASD_STOPPED_DC_WAIT);
+	if (device->stopped & DASD_UNRESUMED_PM) {
+		dasd_device_remove_stop_bits(device, DASD_UNRESUMED_PM);
+		dasd_restore_device(device);
+		return 1;
+	}
+	dasd_schedule_device_bh(device);
+	if (device->block)
+		dasd_schedule_block_bh(device->block);
+	return 1;
+}
+EXPORT_SYMBOL_GPL(dasd_generic_path_operational);
+
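A side note on the path handling in dasd_generic_last_path_gone() and
dasd_generic_path_operational() above, and dasd_generic_path_event()
below: each channel path owns one bit in an 8-bit logical path mask,
with path 0 in the most significant bit, which is where the
0x80 >> chp expression comes from. A standalone sketch of the mask
arithmetic (values illustrative, not from the driver):

#include <stdio.h>

int main(void)
{
	unsigned char opm = 0xc0;		/* paths 0 and 1 operational */
	int chp = 0;				/* CHPID slot 0 reported gone */
	unsigned char eventlpm = 0x80 >> chp;	/* 0x80: mask bit of path 0 */

	opm &= ~eventlpm;			/* clear the lost path */
	printf("opm=%#x, last path gone=%d\n", opm, opm == 0);
	/* prints: opm=0x40, last path gone=0 (one verified path remains) */
	return 0;
}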
int dasd_generic_notify(struct ccw_device *cdev, int event)
{
	struct dasd_device *device;
-	struct dasd_ccw_req *cqr;
	int ret;

	device = dasd_device_from_cdev_locked(cdev);
@@ -2363,43 +3507,218 @@
	ret = 0;
	switch (event) {
	case CIO_GONE:
+	case CIO_BOXED:
	case CIO_NO_PATH:
-		/* First of all call extended error reporting. */
-		dasd_eer_write(device, NULL, DASD_EER_NOPATH);
-
-		if (device->state < DASD_STATE_BASIC)
-			break;
-		/* Device is active. We want to keep it. */
-		list_for_each_entry(cqr, &device->ccw_queue, devlist)
-			if (cqr->status == DASD_CQR_IN_IO) {
-				cqr->status = DASD_CQR_QUEUED;
-				cqr->retries++;
-			}
-		device->stopped |= DASD_STOPPED_DC_WAIT;
-		dasd_device_clear_timer(device);
-		dasd_schedule_device_bh(device);
-		ret = 1;
+		device->path_data.opm = 0;
+		device->path_data.ppm = 0;
+		device->path_data.npm = 0;
+		ret = dasd_generic_last_path_gone(device);
		break;
	case CIO_OPER:
-		/* FIXME: add a sanity check. */
-		device->stopped &= ~DASD_STOPPED_DC_WAIT;
-		dasd_schedule_device_bh(device);
-		if (device->block)
-			dasd_schedule_block_bh(device->block);
		ret = 1;
+		if (device->path_data.opm)
+			ret = dasd_generic_path_operational(device);
		break;
	}
	dasd_put_device(device);
	return ret;
}

+void dasd_generic_path_event(struct ccw_device *cdev, int *path_event)
+{
+	int chp;
+	__u8 oldopm, eventlpm;
+	struct dasd_device *device;
+
+	device = dasd_device_from_cdev_locked(cdev);
+	if (IS_ERR(device))
+		return;
+	for (chp = 0; chp < 8; chp++) {
+		eventlpm = 0x80 >> chp;
+		if (path_event[chp] & PE_PATH_GONE) {
+			oldopm = device->path_data.opm;
+			device->path_data.opm &= ~eventlpm;
+			device->path_data.ppm &= ~eventlpm;
+			device->path_data.npm &= ~eventlpm;
+			if (oldopm && !device->path_data.opm) {
+				dev_warn(&device->cdev->dev,
+					 "No verified channel paths remain "
+					 "for the device\n");
+				DBF_DEV_EVENT(DBF_WARNING, device,
					      "%s", "last verified path gone");
+				dasd_eer_write(device, NULL, DASD_EER_NOPATH);
+				dasd_device_set_stop_bits(device,
+							  DASD_STOPPED_DC_WAIT);
+			}
+		}
+		if (path_event[chp] & PE_PATH_AVAILABLE) {
+			device->path_data.opm &= ~eventlpm;
+			device->path_data.ppm &= ~eventlpm;
+			device->path_data.npm &= ~eventlpm;
+			device->path_data.tbvpm |= eventlpm;
+			dasd_schedule_device_bh(device);
+		}
+		if (path_event[chp] & PE_PATHGROUP_ESTABLISHED) {
+			if (!(device->path_data.opm & eventlpm) &&
+			    !(device->path_data.tbvpm & eventlpm)) {
+				/*
+				 * we can not establish a pathgroup on an
+				 * unavailable path, so trigger a path
+				 * verification first
+				 */
+				device->path_data.tbvpm |= eventlpm;
+				dasd_schedule_device_bh(device);
+			}
+			DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+				      "Pathgroup re-established\n");
+			if (device->discipline->kick_validate)
+				device->discipline->kick_validate(device);
+		}
+	}
+	dasd_put_device(device);
+}
+EXPORT_SYMBOL_GPL(dasd_generic_path_event);
+
+int dasd_generic_verify_path(struct dasd_device *device, __u8 lpm)
+{
+	if (!device->path_data.opm && lpm) {
+		device->path_data.opm = lpm;
+		dasd_generic_path_operational(device);
+	} else
+		device->path_data.opm |= lpm;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(dasd_generic_verify_path);
+
+
+int dasd_generic_pm_freeze(struct ccw_device *cdev)
+{
+	struct dasd_device *device = dasd_device_from_cdev(cdev);
+	struct list_head freeze_queue;
+	struct dasd_ccw_req *cqr, *n;
+	struct dasd_ccw_req *refers;
+	int rc;
+
+	if (IS_ERR(device))
+		return PTR_ERR(device);
+
+	/* mark device as suspended */
+	set_bit(DASD_FLAG_SUSPENDED, &device->flags);
+
+	if (device->discipline->freeze)
+		rc = device->discipline->freeze(device);
+
+	/* disallow new I/O */
+	dasd_device_set_stop_bits(device, DASD_STOPPED_PM);
+
+	/* clear active requests and requeue them to block layer if possible */
+	INIT_LIST_HEAD(&freeze_queue);
+	spin_lock_irq(get_ccwdev_lock(cdev));
+	rc = 0;
+	list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) {
+		/* Check status and move request to flush_queue */
+		if (cqr->status == DASD_CQR_IN_IO) {
+			rc = device->discipline->term_IO(cqr);
+			if (rc) {
+				/* unable to terminate request */
+				dev_err(&device->cdev->dev,
+					"Unable to terminate request %p "
+					"on suspend\n", cqr);
+				spin_unlock_irq(get_ccwdev_lock(cdev));
+				dasd_put_device(device);
+				return rc;
+			}
+		}
+		list_move_tail(&cqr->devlist, &freeze_queue);
+	}
+	spin_unlock_irq(get_ccwdev_lock(cdev));
+
+	list_for_each_entry_safe(cqr, n, &freeze_queue, devlist) {
+		wait_event(dasd_flush_wq,
+			   (cqr->status != DASD_CQR_CLEAR_PENDING));
+		if (cqr->status == DASD_CQR_CLEARED)
+			cqr->status = DASD_CQR_QUEUED;
+
+		/* requeueing requests to the block layer will only work
+		   for block device requests */
+		if (_dasd_requeue_request(cqr))
+			continue;
+
+		/* remove requests from device and block queue */
+		list_del_init(&cqr->devlist);
+		while (cqr->refers != NULL) {
+			refers = cqr->refers;
+			/* remove the request from the block queue */
+			list_del(&cqr->blocklist);
+			/* free the finished erp request */
+			dasd_free_erp_request(cqr, cqr->memdev);
+			cqr = refers;
+		}
+		if (cqr->block)
+			list_del_init(&cqr->blocklist);
+		cqr->block->base->discipline->free_cp(
+			cqr, (struct request *) cqr->callback_data);
+	}
+
+	/*
+	 * if requests remain then they are internal requests
+	 * and go back to the device queue
+	 */
+	if (!list_empty(&freeze_queue)) {
+		/* move freeze_queue to start of the ccw_queue */
+		spin_lock_irq(get_ccwdev_lock(cdev));
+		list_splice_tail(&freeze_queue, &device->ccw_queue);
+		spin_unlock_irq(get_ccwdev_lock(cdev));
+	}
+	dasd_put_device(device);
+	return rc;
+}
+EXPORT_SYMBOL_GPL(dasd_generic_pm_freeze);
+
+int dasd_generic_restore_device(struct ccw_device *cdev)
+{
+	struct dasd_device *device = dasd_device_from_cdev(cdev);
+	int rc = 0;
+
+	if (IS_ERR(device))
+		return PTR_ERR(device);
+
+	/* allow new IO again */
+	dasd_device_remove_stop_bits(device,
+				     (DASD_STOPPED_PM | DASD_UNRESUMED_PM));
+
+	dasd_schedule_device_bh(device);
+
+	/*
+	 * call discipline restore function
+	 * if device is stopped do nothing e.g. for disconnected devices
+	 */
+	if (device->discipline->restore && !(device->stopped))
+		rc = device->discipline->restore(device);
+	if (rc || device->stopped)
+		/*
+		 * if the resume failed for the DASD we put it in
+		 * an UNRESUMED stop state
+		 */
+		device->stopped |= DASD_UNRESUMED_PM;
+
+	if (device->block)
+		dasd_schedule_block_bh(device->block);
+
+	clear_bit(DASD_FLAG_SUSPENDED, &device->flags);
+	dasd_put_device(device);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(dasd_generic_restore_device);
+
static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
						   void *rdc_buffer,
						   int rdc_buffer_size,
-						   char *magic)
+						   int magic)
{
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
+	unsigned long *idaw;

	cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size,
				   device);
@@ -2413,27 +3732,34 @@ static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
	ccw = cqr->cpaddr;
	ccw->cmd_code = CCW_CMD_RDC;
-	ccw->cda = (__u32)(addr_t)rdc_buffer;
-	ccw->count = rdc_buffer_size;
+	if (idal_is_needed(rdc_buffer, rdc_buffer_size)) {
+		idaw = (unsigned long *) (cqr->data);
+		ccw->cda = (__u32)(addr_t) idaw;
+		ccw->flags = CCW_FLAG_IDA;
+		idaw = idal_create_words(idaw, rdc_buffer, rdc_buffer_size);
+	} else {
+		ccw->cda = (__u32)(addr_t) rdc_buffer;
+		ccw->flags = 0;
+	}
+	ccw->count = rdc_buffer_size;
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->expires = 10*HZ;
-	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
-	cqr->retries = 2;
-	cqr->buildclk = get_clock();
+	cqr->retries = 256;
+	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	return cqr;
}

-int dasd_generic_read_dev_chars(struct dasd_device *device, char *magic,
-				void **rdc_buffer, int rdc_buffer_size)
+int dasd_generic_read_dev_chars(struct dasd_device *device, int magic,
+				void *rdc_buffer, int rdc_buffer_size)
{
	int ret;
	struct dasd_ccw_req *cqr;

-	cqr = dasd_generic_build_rdc(device, *rdc_buffer, rdc_buffer_size,
+	cqr = dasd_generic_build_rdc(device, rdc_buffer, rdc_buffer_size,
				     magic);
	if (IS_ERR(cqr))
		return PTR_ERR(cqr);
@@ -2478,6 +3804,23 @@ char *dasd_get_sense(struct irb *irb)
}
EXPORT_SYMBOL_GPL(dasd_get_sense);

+void dasd_generic_shutdown(struct ccw_device *cdev)
+{
+	struct dasd_device *device;
+
+	device = dasd_device_from_cdev(cdev);
+	if (IS_ERR(device))
+		return;
+
+	if (device->block)
+		dasd_schedule_block_bh(device->block);
+
+	dasd_schedule_device_bh(device);
+
+	wait_event(shutdown_waitq, _wait_for_empty_queues(device));
+}
+EXPORT_SYMBOL_GPL(dasd_generic_shutdown);
+
static int __init dasd_init(void)
{
	int rc;
@@ -2485,6 +3828,7 @@ static int __init dasd_init(void)
	init_waitqueue_head(&dasd_init_waitq);
	init_waitqueue_head(&dasd_flush_wq);
	init_waitqueue_head(&generic_waitq);
+	init_waitqueue_head(&shutdown_waitq);

	/* register 'common' DASD debug area, used for all DBF_XXX calls */
	dasd_debug_area = debug_register("dasd", 1, 1, 8 * sizeof(long));
@@ -2499,6 +3843,8 @@ static int __init dasd_init(void)

	dasd_diag_discipline_pointer = NULL;

+	dasd_statistics_createroot();
+
	rc = dasd_devmap_init();
	if (rc)
		goto failed;

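A closing note on dasd_generic_build_rdc() above: a CCW carries a 31-bit
data address, so a buffer that ends above the 2 GB line cannot be
addressed directly and an indirect data address list (IDAL) is built in
cqr->data instead. The sketch below mirrors what the s390
idal_is_needed() helper is understood to test; treat the exact condition
as an assumption rather than driver-verified fact:

#include <stdio.h>

static int idal_needed(unsigned long phys_addr, unsigned int length)
{
	/* true when the buffer extends past the 31-bit boundary */
	return ((phys_addr + length - 1) >> 31) != 0;
}

int main(void)
{
	printf("%d %d\n",
	       idal_needed(0x7ffff000UL, 8192),	/* 1: crosses 2 GB */
	       idal_needed(0x00100000UL, 64));	/* 0: fits below */
	return 0;
}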