Diffstat (limited to 'drivers/s390')
191 files changed, 25075 insertions, 13949 deletions
diff --git a/drivers/s390/block/Kconfig b/drivers/s390/block/Kconfig
index 07883197f47..4a3b6232618 100644
--- a/drivers/s390/block/Kconfig
+++ b/drivers/s390/block/Kconfig
@@ -2,7 +2,8 @@ comment "S/390 block device drivers"
 	depends on S390 && BLOCK
 
 config BLK_DEV_XPRAM
-	tristate "XPRAM disk support"
+	def_tristate m
+	prompt "XPRAM disk support"
 	depends on S390 && BLOCK
 	help
 	  Select this option if you want to use your expanded storage on S/390
@@ -12,13 +13,15 @@ config BLK_DEV_XPRAM
 	  xpram. If unsure, say "N".
 
 config DCSSBLK
-	tristate "DCSSBLK support"
+	def_tristate m
+	prompt "DCSSBLK support"
 	depends on S390 && BLOCK
 	help
 	  Support for dcss block device
 
 config DASD
-	tristate "Support for DASD devices"
+	def_tristate y
+	prompt "Support for DASD devices"
 	depends on CCW && BLOCK
 	select IOSCHED_DEADLINE
 	help
@@ -27,28 +30,32 @@ config DASD
 	  natively on a single image or an LPAR.
 
 config DASD_PROFILE
-	bool "Profiling support for dasd devices"
+	def_bool y
+	prompt "Profiling support for dasd devices"
 	depends on DASD
 	help
 	  Enable this option if you want to see profiling information
 	  in /proc/dasd/statistics.
 
 config DASD_ECKD
-	tristate "Support for ECKD Disks"
+	def_tristate y
+	prompt "Support for ECKD Disks"
 	depends on DASD
 	help
 	  ECKD devices are the most commonly used devices. You should enable
 	  this option unless you are very sure to have no ECKD device.
 
 config DASD_FBA
-	tristate "Support for FBA Disks"
+	def_tristate y
+	prompt "Support for FBA Disks"
 	depends on DASD
 	help
 	  Select this option to be able to access FBA devices. It is safe to
 	  say "Y".
 
 config DASD_DIAG
-	tristate "Support for DIAG access to Disks"
+	def_tristate y
+	prompt "Support for DIAG access to Disks"
 	depends on DASD
 	help
 	  Select this option if you want to use Diagnose250 command to access
@@ -56,9 +63,28 @@ config DASD_DIAG
 	  say "N".
 
 config DASD_EER
-	bool "Extended error reporting (EER)"
+	def_bool y
+	prompt "Extended error reporting (EER)"
 	depends on DASD
 	help
 	  This driver provides a character device interface to the
 	  DASD extended error reporting. This is only needed if you want to
 	  use applications written for the EER facility.
+
+config SCM_BLOCK
+	def_tristate m
+	prompt "Support for Storage Class Memory"
+	depends on S390 && BLOCK && EADM_SCH && SCM_BUS
+	help
+	  Block device driver for Storage Class Memory (SCM). This driver
+	  provides a block device interface for each available SCM increment.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called scm_block.
+
+config SCM_BLOCK_CLUSTER_WRITE
+	def_bool y
+	prompt "SCM force cluster writes"
+	depends on SCM_BLOCK
+	help
+	  Force writes to Storage Class Memory (SCM) to be done in clusters.
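The two new SCM options wire into the build through the Makefile hunk that follows: scm_blk_cluster.o is only compiled when CONFIG_SCM_BLOCK_CLUSTER_WRITE=y. A common companion idiom, sketched below with hypothetical names (the driver's actual header is not part of this diff), is to give the cluster-write entry points empty inline stubs when the option is off, so the core scm_blk code can call them unconditionally:

	/* sketch, assuming hypothetical entry points for the cluster-write code */
	#ifdef CONFIG_SCM_BLOCK_CLUSTER_WRITE
	int scm_cluster_init(void);		/* implemented in scm_blk_cluster.c */
	void scm_cluster_exit(void);
	#else
	static inline int scm_cluster_init(void)
	{
		return 0;			/* nothing to do when disabled */
	}
	static inline void scm_cluster_exit(void) {}
	#endif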
diff --git a/drivers/s390/block/Makefile b/drivers/s390/block/Makefile
index 0a89e080b38..c2f4e673e03 100644
--- a/drivers/s390/block/Makefile
+++ b/drivers/s390/block/Makefile
@@ -17,3 +17,9 @@ obj-$(CONFIG_DASD_ECKD) += dasd_eckd_mod.o
 obj-$(CONFIG_DASD_FBA) += dasd_fba_mod.o
 obj-$(CONFIG_BLK_DEV_XPRAM) += xpram.o
 obj-$(CONFIG_DCSSBLK) += dcssblk.o
+
+scm_block-objs := scm_drv.o scm_blk.o
+ifdef CONFIG_SCM_BLOCK_CLUSTER_WRITE
+scm_block-objs += scm_blk_cluster.o
+endif
+obj-$(CONFIG_SCM_BLOCK) += scm_block.o
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index bbea90baf98..1eef0f58695 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -1,5 +1,4 @@
 /*
- * File...........: linux/drivers/s390/block/dasd.c
  * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
  *		    Horst Hummel <Horst.Hummel@de.ibm.com>
  *		    Carsten Otte <Cotte@de.ibm.com>
@@ -17,10 +16,12 @@
 #include <linux/ctype.h>
 #include <linux/major.h>
 #include <linux/slab.h>
-#include <linux/buffer_head.h>
 #include <linux/hdreg.h>
 #include <linux/async.h>
 #include <linux/mutex.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/vmalloc.h>
 
 #include <asm/ccwdev.h>
 #include <asm/ebcdic.h>
@@ -41,12 +42,13 @@
  * SECTION: exported variables of dasd.c
  */
 debug_info_t *dasd_debug_area;
+static struct dentry *dasd_debugfs_root_entry;
 struct dasd_discipline *dasd_diag_discipline_pointer;
 void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *);
 
 MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>");
 MODULE_DESCRIPTION("Linux on S/390 DASD device driver,"
-		   " Copyright 2000 IBM Corporation");
+		   " Copyright IBM Corp. 2000");
 MODULE_SUPPORTED_DEVICE("dasd");
 MODULE_LICENSE("GPL");
 
@@ -62,10 +64,13 @@ static void dasd_device_tasklet(struct dasd_device *);
 static void dasd_block_tasklet(struct dasd_block *);
 static void do_kick_device(struct work_struct *);
 static void do_restore_device(struct work_struct *);
+static void do_reload_device(struct work_struct *);
 static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *);
 static void dasd_device_timeout(unsigned long);
 static void dasd_block_timeout(unsigned long);
 static void __dasd_process_erp(struct dasd_device *, struct dasd_ccw_req *);
+static void dasd_profile_init(struct dasd_profile *, struct dentry *);
+static void dasd_profile_exit(struct dasd_profile *);
 
 /*
  * SECTION: Operations on the device structure.
@@ -73,6 +78,7 @@ static void __dasd_process_erp(struct dasd_device *, struct dasd_ccw_req *);
 static wait_queue_head_t dasd_init_waitq;
 static wait_queue_head_t dasd_flush_wq;
 static wait_queue_head_t generic_waitq;
+static wait_queue_head_t shutdown_waitq;
 
 /*
  * Allocate memory for a new device structure.
@@ -112,10 +118,11 @@ struct dasd_device *dasd_alloc_device(void)
 	device->timer.data = (unsigned long) device;
 	INIT_WORK(&device->kick_work, do_kick_device);
 	INIT_WORK(&device->restore_device, do_restore_device);
+	INIT_WORK(&device->reload_device, do_reload_device);
 	device->state = DASD_STATE_NEW;
 	device->target = DASD_STATE_NEW;
 	mutex_init(&device->state_mutex);
-
+	spin_lock_init(&device->profile.lock);
 	return device;
 }
 
@@ -153,6 +160,7 @@ struct dasd_block *dasd_alloc_block(void)
 	init_timer(&block->timer);
 	block->timer.function = dasd_block_timeout;
 	block->timer.data = (unsigned long) block;
+	spin_lock_init(&block->profile.lock);
 
 	return block;
 }
@@ -216,19 +224,44 @@ static int dasd_state_known_to_new(struct dasd_device *device)
 	return 0;
 }
 
+static struct dentry *dasd_debugfs_setup(const char *name,
+					 struct dentry *base_dentry)
+{
+	struct dentry *pde;
+
+	if (!base_dentry)
+		return NULL;
+	pde = debugfs_create_dir(name, base_dentry);
+	if (!pde || IS_ERR(pde))
+		return NULL;
+	return pde;
+}
+
 /*
  * Request the irq line for the device.
  */
 static int dasd_state_known_to_basic(struct dasd_device *device)
 {
-	int rc;
+	struct dasd_block *block = device->block;
+	int rc = 0;
 
 	/* Allocate and register gendisk structure. */
-	if (device->block) {
-		rc = dasd_gendisk_alloc(device->block);
+	if (block) {
+		rc = dasd_gendisk_alloc(block);
 		if (rc)
 			return rc;
-	}
+		block->debugfs_dentry =
+			dasd_debugfs_setup(block->gdp->disk_name,
+					   dasd_debugfs_root_entry);
+		dasd_profile_init(&block->profile, block->debugfs_dentry);
+		if (dasd_global_profile_level == DASD_PROFILE_ON)
+			dasd_profile_on(&device->block->profile);
+	}
+	device->debugfs_dentry =
+		dasd_debugfs_setup(dev_name(&device->cdev->dev),
+				   dasd_debugfs_root_entry);
+	dasd_profile_init(&device->profile, device->debugfs_dentry);
+
 	/* register 'device' debug area, used for all DBF_DEV_XXX calls */
 	device->debug_area = debug_register(dev_name(&device->cdev->dev), 4, 1,
 					    8 * sizeof(long));
@@ -237,7 +270,8 @@ static int dasd_state_known_to_basic(struct dasd_device *device)
 	DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created");
 
 	device->state = DASD_STATE_BASIC;
-	return 0;
+
+	return rc;
 }
 
 /*
@@ -246,7 +280,11 @@ static int dasd_state_known_to_basic(struct dasd_device *device)
 static int dasd_state_basic_to_known(struct dasd_device *device)
 {
 	int rc;
+
 	if (device->block) {
+		dasd_profile_exit(&device->block->profile);
+		if (device->block->debugfs_dentry)
+			debugfs_remove(device->block->debugfs_dentry);
 		dasd_gendisk_free(device->block);
 		dasd_block_clear_timer(device->block);
 	}
@@ -254,6 +292,9 @@ static int dasd_state_basic_to_known(struct dasd_device *device)
 	if (rc)
 		return rc;
 	dasd_device_clear_timer(device);
+	dasd_profile_exit(&device->profile);
+	if (device->debugfs_dentry)
+		debugfs_remove(device->debugfs_dentry);
 
 	DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device);
 	if (device->debug_area != NULL) {
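dasd_debugfs_setup() above is only a guarded wrapper around debugfs_create_dir(); together with dasd_profile_init() it builds one directory per block device and per CCW device under the driver's debugfs root, each holding a "statistics" file. A minimal self-contained sketch of the same create/remove pairing (the directory name is illustrative):

	#include <linux/module.h>
	#include <linux/err.h>
	#include <linux/debugfs.h>

	static struct dentry *root;

	static int __init demo_init(void)
	{
		/* debugfs is best effort: a NULL/ERR result just disables the files */
		root = debugfs_create_dir("demo", NULL);
		if (!root || IS_ERR(root))
			root = NULL;
		return 0;
	}

	static void __exit demo_exit(void)
	{
		debugfs_remove(root);	/* a NULL dentry is tolerated */
	}

	module_init(demo_init);
	module_exit(demo_exit);
	MODULE_LICENSE("GPL");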
@@ -290,8 +331,10 @@ static int dasd_state_basic_to_ready(struct dasd_device *device)
 	if (block->base->discipline->do_analysis != NULL)
 		rc = block->base->discipline->do_analysis(block);
 	if (rc) {
-		if (rc != -EAGAIN)
+		if (rc != -EAGAIN) {
 			device->state = DASD_STATE_UNFMT;
+			goto out;
+		}
 		return rc;
 	}
 	dasd_setup_queue(block);
@@ -299,14 +342,29 @@ static int dasd_state_basic_to_ready(struct dasd_device *device)
 		      block->blocks << block->s2b_shift);
 	device->state = DASD_STATE_READY;
 	rc = dasd_scan_partitions(block);
-	if (rc)
+	if (rc) {
 		device->state = DASD_STATE_BASIC;
+		return rc;
+	}
 	} else {
 		device->state = DASD_STATE_READY;
 	}
+out:
+	if (device->discipline->basic_to_ready)
+		rc = device->discipline->basic_to_ready(device);
 	return rc;
 }
 
+static inline
+int _wait_for_empty_queues(struct dasd_device *device)
+{
+	if (device->block)
+		return list_empty(&device->ccw_queue) &&
+			list_empty(&device->block->ccw_queue);
+	else
+		return list_empty(&device->ccw_queue);
+}
+
 /*
  * Remove device from block device layer. Destroy dirty buffers.
  * Forget format information. Check if the target level is basic
@@ -316,6 +374,11 @@ static int dasd_state_ready_to_basic(struct dasd_device *device)
 {
 	int rc;
 
+	if (device->discipline->ready_to_basic) {
+		rc = device->discipline->ready_to_basic(device);
+		if (rc)
+			return rc;
+	}
 	device->state = DASD_STATE_BASIC;
 	if (device->block) {
 		struct dasd_block *block = device->block;
@@ -350,19 +413,18 @@ static int dasd_state_unfmt_to_basic(struct dasd_device *device)
 static int
 dasd_state_ready_to_online(struct dasd_device * device)
 {
-	int rc;
 	struct gendisk *disk;
 	struct disk_part_iter piter;
 	struct hd_struct *part;
 
-	if (device->discipline->ready_to_online) {
-		rc = device->discipline->ready_to_online(device);
-		if (rc)
-			return rc;
-	}
 	device->state = DASD_STATE_ONLINE;
 	if (device->block) {
 		dasd_schedule_block_bh(device->block);
+		if ((device->features & DASD_FEATURE_USERAW)) {
+			disk = device->block->gdp;
+			kobject_uevent(&disk_to_dev(disk)->kobj, KOBJ_CHANGE);
+			return 0;
+		}
 		disk = device->block->bdev->bd_disk;
 		disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
 		while ((part = disk_part_iter_next(&piter)))
@@ -387,8 +449,9 @@ static int dasd_state_online_to_ready(struct dasd_device *device)
 		if (rc)
 			return rc;
 	}
+
 	device->state = DASD_STATE_READY;
-	if (device->block) {
+	if (device->block && !(device->features & DASD_FEATURE_USERAW)) {
 		disk = device->block->bdev->bd_disk;
 		disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
 		while ((part = disk_part_iter_next(&piter)))
@@ -487,11 +550,11 @@ static void dasd_change_state(struct dasd_device *device)
 	if (rc)
 		device->target = device->state;
 
-	if (device->state == device->target)
-		wake_up(&dasd_init_waitq);
-
 	/* let user-space know that the device status changed */
 	kobject_uevent(&device->cdev->dev.kobj, KOBJ_CHANGE);
+
+	if (device->state == device->target)
+		wake_up(&dasd_init_waitq);
 }
 
 /*
@@ -518,6 +581,26 @@ void dasd_kick_device(struct dasd_device *device)
 }
 
 /*
+ * dasd_reload_device will schedule a call to do_reload_device to the kernel
+ * event daemon.
+ */
+static void do_reload_device(struct work_struct *work)
+{
+	struct dasd_device *device = container_of(work, struct dasd_device,
+						  reload_device);
+	device->discipline->reload(device);
+	dasd_put_device(device);
+}
+
+void dasd_reload_device(struct dasd_device *device)
+{
+	dasd_get_device(device);
+	/* queue call to do_reload_device to the kernel event daemon. */
+	schedule_work(&device->reload_device);
+}
+EXPORT_SYMBOL(dasd_reload_device);
+
+/*
  * dasd_restore_device will schedule a call to do_restore_device to the kernel
  * event daemon.
  */
@@ -573,26 +656,22 @@ void dasd_enable_device(struct dasd_device *device)
 		dasd_set_target_state(device, DASD_STATE_NEW);
 	/* Now wait for the devices to come up. */
 	wait_event(dasd_init_waitq, _wait_for_device(device));
+
+	dasd_reload_device(device);
+	if (device->discipline->kick_validate)
+		device->discipline->kick_validate(device);
 }
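do_reload_device()/dasd_reload_device() follow the standard deferred-work pattern: take a device reference, schedule a work item on the shared kernel workqueue, and recover the enclosing structure in the worker via container_of() before dropping the reference. A generic sketch of that pattern (struct and function names are illustrative; INIT_WORK() must run once at allocation time, as dasd_alloc_device() does above):

	#include <linux/workqueue.h>

	struct demo_device {
		struct work_struct reload_work;
		/* ... reference count, device state ... */
	};

	static void demo_do_reload(struct work_struct *work)
	{
		struct demo_device *dev =
			container_of(work, struct demo_device, reload_work);

		/* runs later in process context (kernel event daemon) */
		/* ... perform the reload, then drop the reference taken below ... */
	}

	static void demo_trigger_reload(struct demo_device *dev)
	{
		/* take a reference first so dev outlives the queued work */
		schedule_work(&dev->reload_work);
	}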
 
 /*
  * SECTION: device operation (interrupt handler, start i/o, term i/o ...)
  */
-#ifdef CONFIG_DASD_PROFILE
-struct dasd_profile_info_t dasd_global_profile;
-unsigned int dasd_profile_level = DASD_PROFILE_OFF;
+unsigned int dasd_global_profile_level = DASD_PROFILE_OFF;
 
-/*
- * Increments counter in global and local profiling structures.
- */
-#define dasd_profile_counter(value, counter, block) \
-{ \
-	int index; \
-	for (index = 0; index < 31 && value >> (2+index); index++); \
-	dasd_global_profile.counter[index]++; \
-	block->profile.counter[index]++; \
-}
+#ifdef CONFIG_DASD_PROFILE
+struct dasd_profile_info dasd_global_profile_data;
+static struct dentry *dasd_global_profile_dentry;
+static struct dentry *dasd_debugfs_global_entry;
 
 /*
  * Add profiling information for cqr before execution.
@@ -603,30 +682,122 @@ static void dasd_profile_start(struct dasd_block *block,
 {
 	struct list_head *l;
 	unsigned int counter;
-
-	if (dasd_profile_level != DASD_PROFILE_ON)
-		return;
+	struct dasd_device *device;
 
 	/* count the length of the chanq for statistics */
 	counter = 0;
-	list_for_each(l, &block->ccw_queue)
-		if (++counter >= 31)
-			break;
-	dasd_global_profile.dasd_io_nr_req[counter]++;
-	block->profile.dasd_io_nr_req[counter]++;
+	if (dasd_global_profile_level || block->profile.data)
+		list_for_each(l, &block->ccw_queue)
+			if (++counter >= 31)
+				break;
+
+	if (dasd_global_profile_level) {
+		dasd_global_profile_data.dasd_io_nr_req[counter]++;
+		if (rq_data_dir(req) == READ)
+			dasd_global_profile_data.dasd_read_nr_req[counter]++;
+	}
+
+	spin_lock(&block->profile.lock);
+	if (block->profile.data) {
+		block->profile.data->dasd_io_nr_req[counter]++;
+		if (rq_data_dir(req) == READ)
+			block->profile.data->dasd_read_nr_req[counter]++;
+	}
+	spin_unlock(&block->profile.lock);
+
+	/*
+	 * We count the request for the start device, even though it may run on
+	 * some other device due to error recovery. This way we make sure that
+	 * we count each request only once.
+	 */
+	device = cqr->startdev;
+	if (device->profile.data) {
+		counter = 1; /* request is not yet queued on the start device */
+		list_for_each(l, &device->ccw_queue)
+			if (++counter >= 31)
+				break;
+	}
+	spin_lock(&device->profile.lock);
+	if (device->profile.data) {
+		device->profile.data->dasd_io_nr_req[counter]++;
+		if (rq_data_dir(req) == READ)
+			device->profile.data->dasd_read_nr_req[counter]++;
+	}
+	spin_unlock(&device->profile.lock);
 }
 
 /*
  * Add profiling information for cqr after execution.
  */
+
+#define dasd_profile_counter(value, index)			    \
+{								    \
+	for (index = 0; index < 31 && value >> (2+index); index++)  \
+		;						    \
+}
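The reworked dasd_profile_counter() no longer updates the arrays itself; it only computes a histogram slot, roughly floor(log2(value)) - 1 clamped to 0..31, which the caller then applies to the global, per-block, and per-device arrays. A plain C illustration of the slot calculation:

	/* sketch: what dasd_profile_counter(value, index) leaves in index */
	static int histogram_index(unsigned long value)
	{
		int index;

		for (index = 0; index < 31 && value >> (2 + index); index++)
			;
		return index;	/* slot into the 32-entry histogram arrays */
	}

	/*
	 * histogram_index(1)         == 0   (values below 4 share slot 0)
	 * histogram_index(4)         == 1
	 * histogram_index(10)        == 2
	 * histogram_index(1UL << 40) == 31  (large values saturate)
	 */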
+
+static void dasd_profile_end_add_data(struct dasd_profile_info *data,
+				      int is_alias,
+				      int is_tpm,
+				      int is_read,
+				      long sectors,
+				      int sectors_ind,
+				      int tottime_ind,
+				      int tottimeps_ind,
+				      int strtime_ind,
+				      int irqtime_ind,
+				      int irqtimeps_ind,
+				      int endtime_ind)
+{
+	/* in case of an overflow, reset the whole profile */
+	if (data->dasd_io_reqs == UINT_MAX) {
+		memset(data, 0, sizeof(*data));
+		getnstimeofday(&data->starttod);
+	}
+	data->dasd_io_reqs++;
+	data->dasd_io_sects += sectors;
+	if (is_alias)
+		data->dasd_io_alias++;
+	if (is_tpm)
+		data->dasd_io_tpm++;
+
+	data->dasd_io_secs[sectors_ind]++;
+	data->dasd_io_times[tottime_ind]++;
+	data->dasd_io_timps[tottimeps_ind]++;
+	data->dasd_io_time1[strtime_ind]++;
+	data->dasd_io_time2[irqtime_ind]++;
+	data->dasd_io_time2ps[irqtimeps_ind]++;
+	data->dasd_io_time3[endtime_ind]++;
+
+	if (is_read) {
+		data->dasd_read_reqs++;
+		data->dasd_read_sects += sectors;
+		if (is_alias)
+			data->dasd_read_alias++;
+		if (is_tpm)
+			data->dasd_read_tpm++;
+		data->dasd_read_secs[sectors_ind]++;
+		data->dasd_read_times[tottime_ind]++;
+		data->dasd_read_time1[strtime_ind]++;
+		data->dasd_read_time2[irqtime_ind]++;
+		data->dasd_read_time3[endtime_ind]++;
+	}
+}
+
 static void dasd_profile_end(struct dasd_block *block,
 			     struct dasd_ccw_req *cqr,
 			     struct request *req)
 {
 	long strtime, irqtime, endtime, tottime;	/* in microseconds */
 	long tottimeps, sectors;
+	struct dasd_device *device;
+	int sectors_ind, tottime_ind, tottimeps_ind, strtime_ind;
+	int irqtime_ind, irqtimeps_ind, endtime_ind;
 
-	if (dasd_profile_level != DASD_PROFILE_ON)
+	device = cqr->startdev;
+	if (!(dasd_global_profile_level ||
+	      block->profile.data ||
+	      device->profile.data))
 		return;
 
 	sectors = blk_rq_sectors(req);
@@ -641,29 +812,392 @@ static void dasd_profile_end(struct dasd_block *block,
 	tottime = ((cqr->endclk - cqr->buildclk) >> 12);
 	tottimeps = tottime / sectors;
 
-	if (!dasd_global_profile.dasd_io_reqs)
-		memset(&dasd_global_profile, 0,
-		       sizeof(struct dasd_profile_info_t));
-	dasd_global_profile.dasd_io_reqs++;
-	dasd_global_profile.dasd_io_sects += sectors;
-
-	if (!block->profile.dasd_io_reqs)
-		memset(&block->profile, 0,
-		       sizeof(struct dasd_profile_info_t));
-	block->profile.dasd_io_reqs++;
-	block->profile.dasd_io_sects += sectors;
-
-	dasd_profile_counter(sectors, dasd_io_secs, block);
-	dasd_profile_counter(tottime, dasd_io_times, block);
-	dasd_profile_counter(tottimeps, dasd_io_timps, block);
-	dasd_profile_counter(strtime, dasd_io_time1, block);
-	dasd_profile_counter(irqtime, dasd_io_time2, block);
-	dasd_profile_counter(irqtime / sectors, dasd_io_time2ps, block);
-	dasd_profile_counter(endtime, dasd_io_time3, block);
+	dasd_profile_counter(sectors, sectors_ind);
+	dasd_profile_counter(tottime, tottime_ind);
+	dasd_profile_counter(tottimeps, tottimeps_ind);
+	dasd_profile_counter(strtime, strtime_ind);
+	dasd_profile_counter(irqtime, irqtime_ind);
+	dasd_profile_counter(irqtime / sectors, irqtimeps_ind);
+	dasd_profile_counter(endtime, endtime_ind);
+
+	if (dasd_global_profile_level) {
+		dasd_profile_end_add_data(&dasd_global_profile_data,
+					  cqr->startdev != block->base,
+					  cqr->cpmode == 1,
+					  rq_data_dir(req) == READ,
+					  sectors, sectors_ind, tottime_ind,
+					  tottimeps_ind, strtime_ind,
+					  irqtime_ind, irqtimeps_ind,
+					  endtime_ind);
+	}
+
+	spin_lock(&block->profile.lock);
+	if (block->profile.data)
+		dasd_profile_end_add_data(block->profile.data,
+					  cqr->startdev != block->base,
+					  cqr->cpmode == 1,
+					  rq_data_dir(req) == READ,
+					  sectors, sectors_ind, tottime_ind,
+					  tottimeps_ind, strtime_ind,
+					  irqtime_ind, irqtimeps_ind,
+					  endtime_ind);
+	spin_unlock(&block->profile.lock);
+
+	spin_lock(&device->profile.lock);
+	if (device->profile.data)
+		dasd_profile_end_add_data(device->profile.data,
+					  cqr->startdev != block->base,
+					  cqr->cpmode == 1,
+					  rq_data_dir(req) == READ,
+					  sectors, sectors_ind, tottime_ind,
+					  tottimeps_ind, strtime_ind,
+					  irqtime_ind, irqtimeps_ind,
+					  endtime_ind);
+	spin_unlock(&device->profile.lock);
+}
+
+void dasd_profile_reset(struct dasd_profile *profile)
+{
+	struct dasd_profile_info *data;
+
+	spin_lock_bh(&profile->lock);
+	data = profile->data;
+	if (!data) {
+		spin_unlock_bh(&profile->lock);
+		return;
+	}
+	memset(data, 0, sizeof(*data));
+	getnstimeofday(&data->starttod);
+	spin_unlock_bh(&profile->lock);
+}
+
+void dasd_global_profile_reset(void)
+{
+	memset(&dasd_global_profile_data, 0, sizeof(dasd_global_profile_data));
+	getnstimeofday(&dasd_global_profile_data.starttod);
+}
+
+int dasd_profile_on(struct dasd_profile *profile)
+{
+	struct dasd_profile_info *data;
+
+	data = kzalloc(sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+	spin_lock_bh(&profile->lock);
+	if (profile->data) {
+		spin_unlock_bh(&profile->lock);
+		kfree(data);
+		return 0;
+	}
+	getnstimeofday(&data->starttod);
+	profile->data = data;
+	spin_unlock_bh(&profile->lock);
+	return 0;
+}
+
+void dasd_profile_off(struct dasd_profile *profile)
+{
+	spin_lock_bh(&profile->lock);
+	kfree(profile->data);
+	profile->data = NULL;
+	spin_unlock_bh(&profile->lock);
+}
+
+char *dasd_get_user_string(const char __user *user_buf, size_t user_len)
+{
+	char *buffer;
+
+	buffer = vmalloc(user_len + 1);
+	if (buffer == NULL)
+		return ERR_PTR(-ENOMEM);
+	if (copy_from_user(buffer, user_buf, user_len) != 0) {
+		vfree(buffer);
+		return ERR_PTR(-EFAULT);
+	}
+	/* got the string, now strip linefeed. */
+	if (buffer[user_len - 1] == '\n')
+		buffer[user_len - 1] = 0;
+	else
+		buffer[user_len] = 0;
+	return buffer;
+}
+
+static ssize_t dasd_stats_write(struct file *file,
+				const char __user *user_buf,
+				size_t user_len, loff_t *pos)
+{
+	char *buffer, *str;
+	int rc;
+	struct seq_file *m = (struct seq_file *)file->private_data;
+	struct dasd_profile *prof = m->private;
+
+	if (user_len > 65536)
+		user_len = 65536;
+	buffer = dasd_get_user_string(user_buf, user_len);
+	if (IS_ERR(buffer))
+		return PTR_ERR(buffer);
+
+	str = skip_spaces(buffer);
+	rc = user_len;
+	if (strncmp(str, "reset", 5) == 0) {
+		dasd_profile_reset(prof);
+	} else if (strncmp(str, "on", 2) == 0) {
+		rc = dasd_profile_on(prof);
+		if (!rc)
+			rc = user_len;
+	} else if (strncmp(str, "off", 3) == 0) {
+		dasd_profile_off(prof);
+	} else
+		rc = -EINVAL;
+	vfree(buffer);
+	return rc;
+}
+
+static void dasd_stats_array(struct seq_file *m, unsigned int *array)
+{
+	int i;
+
+	for (i = 0; i < 32; i++)
+		seq_printf(m, "%u ", array[i]);
+	seq_putc(m, '\n');
+}
+
+static void dasd_stats_seq_print(struct seq_file *m,
+				 struct dasd_profile_info *data)
+{
+	seq_printf(m, "start_time %ld.%09ld\n",
+		   data->starttod.tv_sec, data->starttod.tv_nsec);
+	seq_printf(m, "total_requests %u\n", data->dasd_io_reqs);
+	seq_printf(m, "total_sectors %u\n", data->dasd_io_sects);
+	seq_printf(m, "total_pav %u\n", data->dasd_io_alias);
+	seq_printf(m, "total_hpf %u\n", data->dasd_io_tpm);
+	seq_printf(m, "histogram_sectors ");
+	dasd_stats_array(m, data->dasd_io_secs);
+	seq_printf(m, "histogram_io_times ");
+	dasd_stats_array(m, data->dasd_io_times);
+	seq_printf(m, "histogram_io_times_weighted ");
+	dasd_stats_array(m, data->dasd_io_timps);
+	seq_printf(m, "histogram_time_build_to_ssch ");
+	dasd_stats_array(m, data->dasd_io_time1);
+	seq_printf(m, "histogram_time_ssch_to_irq ");
+	dasd_stats_array(m, data->dasd_io_time2);
+	seq_printf(m, "histogram_time_ssch_to_irq_weighted ");
+	dasd_stats_array(m, data->dasd_io_time2ps);
+	seq_printf(m, "histogram_time_irq_to_end ");
+	dasd_stats_array(m, data->dasd_io_time3);
+	seq_printf(m, "histogram_ccw_queue_length ");
+	dasd_stats_array(m, data->dasd_io_nr_req);
+	seq_printf(m, "total_read_requests %u\n", data->dasd_read_reqs);
+	seq_printf(m, "total_read_sectors %u\n", data->dasd_read_sects);
+	seq_printf(m, "total_read_pav %u\n", data->dasd_read_alias);
+	seq_printf(m, "total_read_hpf %u\n", data->dasd_read_tpm);
+	seq_printf(m, "histogram_read_sectors ");
+	dasd_stats_array(m, data->dasd_read_secs);
+	seq_printf(m, "histogram_read_times ");
+	dasd_stats_array(m, data->dasd_read_times);
+	seq_printf(m, "histogram_read_time_build_to_ssch ");
+	dasd_stats_array(m, data->dasd_read_time1);
+	seq_printf(m, "histogram_read_time_ssch_to_irq ");
+	dasd_stats_array(m, data->dasd_read_time2);
+	seq_printf(m, "histogram_read_time_irq_to_end ");
+	dasd_stats_array(m, data->dasd_read_time3);
+	seq_printf(m, "histogram_read_ccw_queue_length ");
+	dasd_stats_array(m, data->dasd_read_nr_req);
+}
+
+static int dasd_stats_show(struct seq_file *m, void *v)
+{
+	struct dasd_profile *profile;
+	struct dasd_profile_info *data;
+
+	profile = m->private;
+	spin_lock_bh(&profile->lock);
+	data = profile->data;
+	if (!data) {
+		spin_unlock_bh(&profile->lock);
+		seq_printf(m, "disabled\n");
+		return 0;
+	}
+	dasd_stats_seq_print(m, data);
+	spin_unlock_bh(&profile->lock);
+	return 0;
+}
+
+static int dasd_stats_open(struct inode *inode, struct file *file)
+{
+	struct dasd_profile *profile = inode->i_private;
+	return single_open(file, dasd_stats_show, profile);
+}
+
+static const struct file_operations dasd_stats_raw_fops = {
+	.owner		= THIS_MODULE,
+	.open		= dasd_stats_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+	.write		= dasd_stats_write,
+};
+
+static ssize_t dasd_stats_global_write(struct file *file,
+				       const char __user *user_buf,
+				       size_t user_len, loff_t *pos)
+{
+	char *buffer, *str;
+	ssize_t rc;
+
+	if (user_len > 65536)
+		user_len = 65536;
+	buffer = dasd_get_user_string(user_buf, user_len);
+	if (IS_ERR(buffer))
+		return PTR_ERR(buffer);
+	str = skip_spaces(buffer);
+	rc = user_len;
+	if (strncmp(str, "reset", 5) == 0) {
+		dasd_global_profile_reset();
+	} else if (strncmp(str, "on", 2) == 0) {
+		dasd_global_profile_reset();
+		dasd_global_profile_level = DASD_PROFILE_GLOBAL_ONLY;
+	} else if (strncmp(str, "off", 3) == 0) {
+		dasd_global_profile_level = DASD_PROFILE_OFF;
+	} else
+		rc = -EINVAL;
+	vfree(buffer);
+	return rc;
+}
+
+static int dasd_stats_global_show(struct seq_file *m, void *v)
+{
+	if (!dasd_global_profile_level) {
+		seq_printf(m, "disabled\n");
+		return 0;
+	}
+	dasd_stats_seq_print(m, &dasd_global_profile_data);
+	return 0;
+}
+
+static int dasd_stats_global_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, dasd_stats_global_show, NULL);
+}
+
+static const struct file_operations dasd_stats_global_fops = {
+	.owner		= THIS_MODULE,
+	.open		= dasd_stats_global_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+	.write		= dasd_stats_global_write,
+};
+
+static void dasd_profile_init(struct dasd_profile *profile,
+			      struct dentry *base_dentry)
+{
+	umode_t mode;
+	struct dentry *pde;
+
+	if (!base_dentry)
+		return;
+	profile->dentry = NULL;
+	profile->data = NULL;
+	mode = (S_IRUSR | S_IWUSR | S_IFREG);
+	pde = debugfs_create_file("statistics", mode, base_dentry,
+				  profile, &dasd_stats_raw_fops);
+	if (pde && !IS_ERR(pde))
+		profile->dentry = pde;
+	return;
+}
+
+static void dasd_profile_exit(struct dasd_profile *profile)
+{
+	dasd_profile_off(profile);
+	if (profile->dentry) {
+		debugfs_remove(profile->dentry);
+		profile->dentry = NULL;
+	}
+}
+
+static void dasd_statistics_removeroot(void)
+{
+	dasd_global_profile_level = DASD_PROFILE_OFF;
+	if (dasd_global_profile_dentry) {
+		debugfs_remove(dasd_global_profile_dentry);
+		dasd_global_profile_dentry = NULL;
+	}
+	if (dasd_debugfs_global_entry)
+		debugfs_remove(dasd_debugfs_global_entry);
+	if (dasd_debugfs_root_entry)
+		debugfs_remove(dasd_debugfs_root_entry);
+}
+
+static void dasd_statistics_createroot(void)
+{
+	umode_t mode;
+	struct dentry *pde;
+
+	dasd_debugfs_root_entry = NULL;
+	dasd_debugfs_global_entry = NULL;
+	dasd_global_profile_dentry = NULL;
+	pde = debugfs_create_dir("dasd", NULL);
+	if (!pde || IS_ERR(pde))
+		goto error;
+	dasd_debugfs_root_entry = pde;
+	pde = debugfs_create_dir("global", dasd_debugfs_root_entry);
+	if (!pde || IS_ERR(pde))
+		goto error;
+	dasd_debugfs_global_entry = pde;
+
+	mode = (S_IRUSR | S_IWUSR | S_IFREG);
+	pde = debugfs_create_file("statistics", mode, dasd_debugfs_global_entry,
+				  NULL, &dasd_stats_global_fops);
+	if (!pde || IS_ERR(pde))
+		goto error;
+	dasd_global_profile_dentry = pde;
+	return;
+
+error:
+	DBF_EVENT(DBF_ERR, "%s",
+		  "Creation of the dasd debugfs interface failed");
+	dasd_statistics_removeroot();
+	return;
+}
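The statistics files are driven entirely through the seq_file single_open() pattern above; the write handler accepts "reset", "on", and "off", so the global file at /sys/kernel/debug/dasd/global/statistics is controlled with a plain write of one of those words. A stripped-down sketch of the read-side wiring (names are illustrative, error handling trimmed):

	#include <linux/fs.h>
	#include <linux/seq_file.h>

	static int demo_show(struct seq_file *m, void *v)
	{
		/* m->private is the last argument passed to single_open() */
		seq_printf(m, "disabled\n");
		return 0;
	}

	static int demo_open(struct inode *inode, struct file *file)
	{
		return single_open(file, demo_show, inode->i_private);
	}

	static const struct file_operations demo_fops = {
		.owner		= THIS_MODULE,
		.open		= demo_open,
		.read		= seq_read,
		.llseek		= seq_lseek,
		.release	= single_release,
	};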
 
 #else
 #define dasd_profile_start(block, cqr, req) do {} while (0)
 #define dasd_profile_end(block, cqr, req) do {} while (0)
+
+static void dasd_statistics_createroot(void)
+{
+	return;
+}
+
+static void dasd_statistics_removeroot(void)
+{
+	return;
+}
+
+int dasd_stats_generic_show(struct seq_file *m, void *v)
+{
+	seq_printf(m, "Statistics are not activated in this kernel\n");
+	return 0;
+}
+
+static void dasd_profile_init(struct dasd_profile *profile,
+			      struct dentry *base_dentry)
+{
+	return;
+}
+
+static void dasd_profile_exit(struct dasd_profile *profile)
+{
+	return;
+}
+
+int dasd_profile_on(struct dasd_profile *profile)
+{
+	return 0;
+}
+
 #endif				/* CONFIG_DASD_PROFILE */
 
 /*
@@ -719,10 +1253,6 @@ struct dasd_ccw_req *dasd_smalloc_request(int magic, int cplength,
 	char *data;
 	int size;
 
-	/* Sanity checks */
-	BUG_ON(datasize > PAGE_SIZE ||
-	       (cplength*sizeof(struct ccw1)) > PAGE_SIZE);
-
 	size = (sizeof(struct dasd_ccw_req) + 7L) & -8L;
 	if (cplength > 0)
 		size += cplength * sizeof(struct ccw1);
@@ -828,9 +1358,8 @@ int dasd_term_IO(struct dasd_ccw_req *cqr)
 		rc = ccw_device_clear(device->cdev, (long) cqr);
 		switch (rc) {
 		case 0:	/* termination successful */
-			cqr->retries--;
 			cqr->status = DASD_CQR_CLEAR_PENDING;
-			cqr->stopclk = get_clock();
+			cqr->stopclk = get_tod_clock();
 			cqr->starttime = 0;
 			DBF_DEV_EVENT(DBF_DEBUG, device,
 				      "terminate cqr %p successful",
@@ -880,6 +1409,16 @@ int dasd_start_IO(struct dasd_ccw_req *cqr)
 		return rc;
 	}
 	device = (struct dasd_device *) cqr->startdev;
+	if (((cqr->block &&
+	      test_bit(DASD_FLAG_LOCK_STOLEN, &cqr->block->base->flags)) ||
+	     test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags)) &&
+	    !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
+		DBF_DEV_EVENT(DBF_DEBUG, device, "start_IO: return request %p "
+			      "because of stolen lock", cqr);
+		cqr->status = DASD_CQR_ERROR;
+		cqr->intrc = -EPERM;
+		return -EPERM;
+	}
 	if (cqr->retries < 0) {
 		/* internal error 14 - start_IO run out of retries */
 		sprintf(errorstring, "14 %p", cqr);
@@ -888,9 +1427,14 @@ int dasd_start_IO(struct dasd_ccw_req *cqr)
 		cqr->status = DASD_CQR_ERROR;
 		return -EIO;
 	}
-	cqr->startclk = get_clock();
+	cqr->startclk = get_tod_clock();
 	cqr->starttime = jiffies;
 	cqr->retries--;
+	if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
+		cqr->lpm &= device->path_data.opm;
+		if (!cqr->lpm)
+			cqr->lpm = device->path_data.opm;
+	}
 	if (cqr->cpmode == 1) {
 		rc = ccw_device_tm_start(device->cdev, cqr->cpaddr,
 					 (long) cqr, cqr->lpm);
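The new lpm handling in dasd_start_IO() treats channel paths as an 8-bit mask, one bit per CHPID (0x80 >> chp, as dasd_generic_path_event() below iterates): the request's path mask is first restricted to the currently operational paths, and if nothing remains the full operational mask is used instead. The same logic in isolation:

	/* sketch of the path-mask fallback added above */
	static unsigned char effective_lpm(unsigned char lpm, unsigned char opm)
	{
		lpm &= opm;		/* keep only operational paths */
		return lpm ? lpm : opm;	/* else retry on all operational paths */
	}

	/* e.g. effective_lpm(0xc0, 0x30) == 0x30: both requested paths are
	 * gone, so the request is retried on the remaining operational ones. */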
@@ -903,35 +1447,53 @@ int dasd_start_IO(struct dasd_ccw_req *cqr)
 		cqr->status = DASD_CQR_IN_IO;
 		break;
 	case -EBUSY:
-		DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
+		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
 			      "start_IO: device busy, retry later");
 		break;
 	case -ETIMEDOUT:
-		DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
+		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
 			      "start_IO: request timeout, retry later");
 		break;
 	case -EACCES:
-		/* -EACCES indicates that the request used only a
-		 * subset of the available pathes and all these
-		 * pathes are gone.
-		 * Do a retry with all available pathes.
+		/* -EACCES indicates that the request used only a subset of the
+		 * available paths and all these paths are gone. If the lpm of
+		 * this request was only a subset of the opm (e.g. the ppm) then
+		 * we just do a retry with all available paths.
+		 * If we already use the full opm, something is amiss, and we
+		 * need a full path verification.
 		 */
-		cqr->lpm = LPM_ANYPATH;
-		DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
-			      "start_IO: selected pathes gone,"
-			      " retry on all pathes");
+		if (test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
+			DBF_DEV_EVENT(DBF_WARNING, device,
+				      "start_IO: selected paths gone (%x)",
+				      cqr->lpm);
+		} else if (cqr->lpm != device->path_data.opm) {
+			cqr->lpm = device->path_data.opm;
+			DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
+				      "start_IO: selected paths gone,"
+				      " retry on all paths");
+		} else {
+			DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+				      "start_IO: all paths in opm gone,"
+				      " do path verification");
+			dasd_generic_last_path_gone(device);
+			device->path_data.opm = 0;
+			device->path_data.ppm = 0;
+			device->path_data.npm = 0;
+			device->path_data.tbvpm =
+				ccw_device_get_path_mask(device->cdev);
+		}
 		break;
 	case -ENODEV:
-		DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
+		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
 			      "start_IO: -ENODEV device gone, retry");
 		break;
 	case -EIO:
-		DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
+		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
 			      "start_IO: -EIO device gone, retry");
 		break;
 	case -EINVAL:
 		/* most likely caused in power management context */
-		DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
+		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
 			      "start_IO: -EINVAL device currently "
 			      "not accessible");
 		break;
@@ -1068,24 +1630,30 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
 		return;
 	}
 
-	now = get_clock();
-
-	/* check for unsolicited interrupts */
+	now = get_tod_clock();
 	cqr = (struct dasd_ccw_req *) intparm;
-	if (!cqr || ((scsw_cc(&irb->scsw) == 1) &&
-		     (scsw_fctl(&irb->scsw) & SCSW_FCTL_START_FUNC) &&
-		     (scsw_stctl(&irb->scsw) & SCSW_STCTL_STATUS_PEND))) {
-		if (cqr && cqr->status == DASD_CQR_IN_IO)
-			cqr->status = DASD_CQR_QUEUED;
+	/* check for conditions that should be handled immediately */
+	if (!cqr ||
+	    !(scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
+	      scsw_cstat(&irb->scsw) == 0)) {
+		if (cqr)
+			memcpy(&cqr->irb, irb, sizeof(*irb));
 		device = dasd_device_from_cdev_locked(cdev);
-		if (!IS_ERR(device)) {
-			dasd_device_clear_timer(device);
-			device->discipline->handle_unsolicited_interrupt(device,
-									 irb);
+		if (IS_ERR(device))
+			return;
+		/* ignore unsolicited interrupts for DIAG discipline */
+		if (device->discipline == dasd_diag_discipline_pointer) {
 			dasd_put_device(device);
+			return;
 		}
-		return;
+		device->discipline->dump_sense_dbf(device, irb, "int");
+		if (device->features & DASD_FEATURE_ERPLOG)
+			device->discipline->dump_sense(device, cqr, irb);
+		device->discipline->check_for_device_change(device, cqr, irb);
+		dasd_put_device(device);
 	}
+	if (!cqr)
+		return;
 
 	device = (struct dasd_device *) cqr->startdev;
 	if (!device ||
@@ -1125,25 +1693,19 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
 					 struct dasd_ccw_req, devlist);
 		}
 	} else {  /* error */
-		memcpy(&cqr->irb, irb, sizeof(struct irb));
-		/* log sense for every failed I/O to s390 debugfeature */
-		dasd_log_sense_dbf(cqr, irb);
-		if (device->features & DASD_FEATURE_ERPLOG) {
-			dasd_log_sense(cqr, irb);
-		}
-
 		/*
 		 * If we don't want complex ERP for this request, then just
 		 * reset this and retry it in the fastpath
 		 */
 		if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags) &&
 		    cqr->retries > 0) {
-			if (cqr->lpm == LPM_ANYPATH)
+			if (cqr->lpm == device->path_data.opm)
 				DBF_DEV_EVENT(DBF_DEBUG, device,
 					      "default ERP in fastpath "
 					      "(%i retries left)",
 					      cqr->retries);
-			cqr->lpm = LPM_ANYPATH;
+			if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags))
+				cqr->lpm = device->path_data.opm;
 			cqr->status = DASD_CQR_QUEUED;
 			next = cqr;
 		} else
@@ -1161,6 +1723,29 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
 	dasd_schedule_device_bh(device);
 }
 
+enum uc_todo dasd_generic_uc_handler(struct ccw_device *cdev, struct irb *irb)
+{
+	struct dasd_device *device;
+
+	device = dasd_device_from_cdev_locked(cdev);
+
+	if (IS_ERR(device))
+		goto out;
+	if (test_bit(DASD_FLAG_OFFLINE, &device->flags) ||
+	   device->state != device->target ||
+	   !device->discipline->check_for_device_change){
+		dasd_put_device(device);
+		goto out;
+	}
+	if (device->discipline->dump_sense_dbf)
+		device->discipline->dump_sense_dbf(device, irb, "uc");
+	device->discipline->check_for_device_change(device, NULL, irb);
+	dasd_put_device(device);
+out:
+	return UC_TODO_RETRY;
+}
+EXPORT_SYMBOL_GPL(dasd_generic_uc_handler);
+
 /*
  * If we have an error on a dasd_block layer request then we cancel
  * and return all further requests from the same dasd_block as well.
@@ -1200,11 +1785,11 @@ static void __dasd_device_process_ccw_queue(struct dasd_device *device,
 	list_for_each_safe(l, n, &device->ccw_queue) {
 		cqr = list_entry(l, struct dasd_ccw_req, devlist);
 
-		/* Stop list processing at the first non-final request. */
+		/* Skip any non-final request. */
 		if (cqr->status == DASD_CQR_QUEUED ||
 		    cqr->status == DASD_CQR_IN_IO ||
 		    cqr->status == DASD_CQR_CLEAR_PENDING)
-			break;
+			continue;
 		if (cqr->status == DASD_CQR_ERROR) {
 			__dasd_device_recovery(device, cqr);
 		}
@@ -1273,17 +1858,24 @@ static void __dasd_device_check_expire(struct dasd_device *device)
 	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
 	if ((cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) &&
 	    (time_after_eq(jiffies, cqr->expires + cqr->starttime))) {
+		if (test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
+			/*
+			 * IO in safe offline processing should not
+			 * run out of retries
+			 */
+			cqr->retries++;
+		}
 		if (device->discipline->term_IO(cqr) != 0) {
 			/* Hmpf, try again in 5 sec */
 			dev_err(&device->cdev->dev,
-				"cqr %p timed out (%is) but cannot be "
+				"cqr %p timed out (%lus) but cannot be "
 				"ended, retrying in 5 s\n",
 				cqr, (cqr->expires/HZ));
 			cqr->expires += 5*HZ;
 			dasd_device_set_timer(device, 5*HZ);
 		} else {
 			dev_err(&device->cdev->dev,
-				"cqr %p timed out (%is), %i retries "
+				"cqr %p timed out (%lus), %i retries "
 				"remaining\n", cqr, (cqr->expires/HZ),
 				cqr->retries);
 		}
@@ -1304,8 +1896,14 @@ static void __dasd_device_start_head(struct dasd_device *device)
 	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
 	if (cqr->status != DASD_CQR_QUEUED)
 		return;
-	/* when device is stopped, return request to previous layer */
-	if (device->stopped) {
+	/* when device is stopped, return request to previous layer
+	 * exception: only the disconnect or unresumed bits are set and the
+	 * cqr is a path verification request
+	 */
+	if (device->stopped &&
+	    !(!(device->stopped & ~(DASD_STOPPED_DC_WAIT | DASD_UNRESUMED_PM))
+	      && test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags))) {
+		cqr->intrc = -EAGAIN;
 		cqr->status = DASD_CQR_CLEARED;
 		dasd_schedule_device_bh(device);
 		return;
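The condition added to __dasd_device_start_head() uses a small bitmask idiom: a path-verification request may start even on a stopped device, but only when nothing except the disconnect and unresumed-after-suspend bits is set. The shape of that test, with illustrative flag values (the real DASD_STOPPED_* constants are defined outside this diff):

	#define DEMO_STOPPED_DC_WAIT	0x04	/* illustrative values */
	#define DEMO_UNRESUMED_PM	0x10

	/* true iff the device is stopped only for the tolerated reasons */
	static int only_tolerated_stops(int stopped)
	{
		return stopped &&
		       !(stopped & ~(DEMO_STOPPED_DC_WAIT | DEMO_UNRESUMED_PM));
	}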
@@ -1321,6 +1919,23 @@ static void __dasd_device_start_head(struct dasd_device *device)
 		dasd_device_set_timer(device, 50);
 }
 
+static void __dasd_device_check_path_events(struct dasd_device *device)
+{
+	int rc;
+
+	if (device->path_data.tbvpm) {
+		if (device->stopped & ~(DASD_STOPPED_DC_WAIT |
+					DASD_UNRESUMED_PM))
+			return;
+		rc = device->discipline->verify_path(
+				device, device->path_data.tbvpm);
+		if (rc)
+			dasd_device_set_timer(device, 50);
+		else
+			device->path_data.tbvpm = 0;
+	}
+};
+
 /*
  * Go through all request on the dasd_device request queue,
  * terminate them on the cdev if necessary, and return them to the
@@ -1355,7 +1970,7 @@ int dasd_flush_device_queue(struct dasd_device *device)
 			}
 			break;
 		case DASD_CQR_QUEUED:
-			cqr->stopclk = get_clock();
+			cqr->stopclk = get_tod_clock();
 			cqr->status = DASD_CQR_CLEARED;
 			break;
 		default: /* no need to modify the others */
@@ -1395,6 +2010,7 @@ static void dasd_device_tasklet(struct dasd_device *device)
 	__dasd_device_check_expire(device);
 	/* find final requests on ccw queue */
 	__dasd_device_process_ccw_queue(device, &final_queue);
+	__dasd_device_check_path_events(device);
 	spin_unlock_irq(get_ccwdev_lock(device->cdev));
 	/* Now call the callback function of requests with final status */
 	__dasd_device_process_final_queue(device, &final_queue);
@@ -1402,6 +2018,8 @@ static void dasd_device_tasklet(struct dasd_device *device)
 	/* Now check if the head of the ccw queue needs to be started. */
 	__dasd_device_start_head(device);
 	spin_unlock_irq(get_ccwdev_lock(device->cdev));
+	if (waitqueue_active(&shutdown_waitq))
+		wake_up(&shutdown_waitq);
 	dasd_put_device(device);
 }
 
@@ -1470,10 +2088,14 @@ void dasd_add_request_tail(struct dasd_ccw_req *cqr)
 /*
  * Wakeup helper for the 'sleep_on' functions.
  */
-static void dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data)
+void dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data)
 {
-	wake_up((wait_queue_head_t *) data);
+	spin_lock_irq(get_ccwdev_lock(cqr->startdev->cdev));
+	cqr->callback_data = DASD_SLEEPON_END_TAG;
+	spin_unlock_irq(get_ccwdev_lock(cqr->startdev->cdev));
+	wake_up(&generic_waitq);
 }
+EXPORT_SYMBOL_GPL(dasd_wakeup_cb);
 
 static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr)
 {
@@ -1482,10 +2104,7 @@ static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr)
 
 	device = cqr->startdev;
 	spin_lock_irq(get_ccwdev_lock(device->cdev));
-	rc = ((cqr->status == DASD_CQR_DONE ||
-	       cqr->status == DASD_CQR_NEED_ERP ||
-	       cqr->status == DASD_CQR_TERMINATED) &&
-	      list_empty(&cqr->devlist));
+	rc = (cqr->callback_data == DASD_SLEEPON_END_TAG);
 	spin_unlock_irq(get_ccwdev_lock(device->cdev));
 	return rc;
 }
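dasd_wakeup_cb() and _wait_for_wakeup() above replace the old status-based test with a tag stored in cqr->callback_data: the submitter sets a start tag, and the completion callback swaps in an end tag under the ccw device lock before waking the shared waitqueue. The pattern in miniature (tag values and all names are illustrative; the real DASD_SLEEPON_*_TAG definitions live outside this diff):

	#define DEMO_START_TAG	((void *) 1)
	#define DEMO_END_TAG	((void *) 2)

	struct demo_req {
		spinlock_t lock;
		void *callback_data;
	};

	static wait_queue_head_t demo_waitq;

	static void demo_complete(struct demo_req *req)
	{
		spin_lock_irq(&req->lock);
		req->callback_data = DEMO_END_TAG;	/* publish completion */
		spin_unlock_irq(&req->lock);
		wake_up(&demo_waitq);
	}

	static void demo_wait(struct demo_req *req)
	{
		wait_event(demo_waitq, req->callback_data == DEMO_END_TAG);
	}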
@@ -1551,15 +2170,20 @@ static int _dasd_sleep_on(struct dasd_ccw_req *maincqr, int interruptible)
 			continue;
 		if (cqr->status != DASD_CQR_FILLED) /* could be failed */
 			continue;
-
+		if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) &&
+		    !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
+			cqr->status = DASD_CQR_FAILED;
+			cqr->intrc = -EPERM;
+			continue;
+		}
 		/* Non-temporary stop condition will trigger fail fast */
 		if (device->stopped & ~DASD_STOPPED_PENDING &&
 		    test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
 		    (!dasd_eer_enabled(device))) {
 			cqr->status = DASD_CQR_FAILED;
+			cqr->intrc = -ENOLINK;
 			continue;
 		}
-
 		/* Don't try to start requests if device is stopped */
 		if (interruptible) {
 			rc = wait_event_interruptible(
@@ -1572,8 +2196,10 @@ static int _dasd_sleep_on(struct dasd_ccw_req *maincqr, int interruptible)
 		} else
 			wait_event(generic_waitq, !(device->stopped));
 
-		cqr->callback = dasd_wakeup_cb;
-		cqr->callback_data = (void *) &generic_waitq;
+		if (!cqr->callback)
+			cqr->callback = dasd_wakeup_cb;
+
+		cqr->callback_data = DASD_SLEEPON_START_TAG;
 		dasd_add_request_tail(cqr);
 		if (interruptible) {
 			rc = wait_event_interruptible(
@@ -1591,7 +2217,7 @@ static int _dasd_sleep_on(struct dasd_ccw_req *maincqr, int interruptible)
 		wait_event(generic_waitq, _wait_for_wakeup(cqr));
 	}
 
-	maincqr->endclk = get_clock();
+	maincqr->endclk = get_tod_clock();
 	if ((maincqr->status != DASD_CQR_DONE) &&
 	    (maincqr->intrc != -ERESTARTSYS))
 		dasd_log_sense(maincqr, &maincqr->irb);
@@ -1604,6 +2230,77 @@ static int _dasd_sleep_on(struct dasd_ccw_req *maincqr, int interruptible)
 	return rc;
 }
 
+static inline int _wait_for_wakeup_queue(struct list_head *ccw_queue)
+{
+	struct dasd_ccw_req *cqr;
+
+	list_for_each_entry(cqr, ccw_queue, blocklist) {
+		if (cqr->callback_data != DASD_SLEEPON_END_TAG)
+			return 0;
+	}
+
+	return 1;
+}
+
+static int _dasd_sleep_on_queue(struct list_head *ccw_queue, int interruptible)
+{
+	struct dasd_device *device;
+	int rc;
+	struct dasd_ccw_req *cqr, *n;
+
+retry:
+	list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) {
+		device = cqr->startdev;
+		if (cqr->status != DASD_CQR_FILLED) /*could be failed*/
+			continue;
+
+		if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) &&
+		    !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
+			cqr->status = DASD_CQR_FAILED;
+			cqr->intrc = -EPERM;
+			continue;
+		}
+		/*Non-temporary stop condition will trigger fail fast*/
+		if (device->stopped & ~DASD_STOPPED_PENDING &&
+		    test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
+		    !dasd_eer_enabled(device)) {
+			cqr->status = DASD_CQR_FAILED;
+			cqr->intrc = -EAGAIN;
+			continue;
+		}
+
+		/*Don't try to start requests if device is stopped*/
+		if (interruptible) {
+			rc = wait_event_interruptible(
+				generic_waitq, !device->stopped);
+			if (rc == -ERESTARTSYS) {
+				cqr->status = DASD_CQR_FAILED;
+				cqr->intrc = rc;
+				continue;
+			}
+		} else
+			wait_event(generic_waitq, !(device->stopped));
+
+		if (!cqr->callback)
+			cqr->callback = dasd_wakeup_cb;
+		cqr->callback_data = DASD_SLEEPON_START_TAG;
+		dasd_add_request_tail(cqr);
+	}
+
+	wait_event(generic_waitq, _wait_for_wakeup_queue(ccw_queue));
+
+	rc = 0;
+	list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) {
+		if (__dasd_sleep_on_erp(cqr))
+			rc = 1;
+	}
+	if (rc)
+		goto retry;
+
+	return 0;
+}
+
 /*
  * Queue a request to the tail of the device ccw_queue and wait for
  * its completion.
@@ -1614,6 +2311,15 @@ int dasd_sleep_on(struct dasd_ccw_req *cqr)
 }
 
 /*
+ * Start requests from a ccw_queue and wait for their completion.
+ */
+int dasd_sleep_on_queue(struct list_head *ccw_queue)
+{
+	return _dasd_sleep_on_queue(ccw_queue, 0);
+}
+EXPORT_SYMBOL(dasd_sleep_on_queue);
+
+/*
  * Queue a request to the tail of the device ccw_queue and wait
  * interruptible for its completion.
 */
@@ -1631,11 +2337,20 @@ int dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr)
 static inline int _dasd_term_running_cqr(struct dasd_device *device)
 {
 	struct dasd_ccw_req *cqr;
+	int rc;
 
 	if (list_empty(&device->ccw_queue))
 		return 0;
 	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
-	return device->discipline->term_IO(cqr);
+	rc = device->discipline->term_IO(cqr);
+	if (!rc)
+		/*
+		 * CQR terminated because a more important request is pending.
+		 * Undo decreasing of retry counter because this is
+		 * not an error case.
+		 */
+		cqr->retries++;
+	return rc;
 }
 
 int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr)
@@ -1644,17 +2359,26 @@ int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr)
 	int rc;
 
 	device = cqr->startdev;
+	if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) &&
+	    !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
+		cqr->status = DASD_CQR_FAILED;
+		cqr->intrc = -EPERM;
+		return -EIO;
+	}
 	spin_lock_irq(get_ccwdev_lock(device->cdev));
 	rc = _dasd_term_running_cqr(device);
 	if (rc) {
 		spin_unlock_irq(get_ccwdev_lock(device->cdev));
 		return rc;
 	}
-
 	cqr->callback = dasd_wakeup_cb;
-	cqr->callback_data = (void *) &generic_waitq;
+	cqr->callback_data = DASD_SLEEPON_START_TAG;
 	cqr->status = DASD_CQR_QUEUED;
-	list_add(&cqr->devlist, &device->ccw_queue);
+	/*
+	 * add new request as second
+	 * first the terminated cqr needs to be finished
+	 */
+	list_add(&cqr->devlist, device->ccw_queue.next);
 
 	/* let the bh start the request to keep them in order */
 	dasd_schedule_device_bh(device);
@@ -1669,6 +2393,12 @@ int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr)
 		rc = cqr->intrc;
 	else
 		rc = -EIO;
+
+	/* kick tasklets */
+	dasd_schedule_device_bh(device);
+	if (device->block)
+		dasd_schedule_block_bh(device->block);
+
 	return rc;
 }
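The comment in dasd_sleep_on_immediatly() points at a subtle list_add() detail: the terminated request must stay at the head of the queue until it is cleaned up, so the urgent request is inserted as the second element by anchoring at head->next. In list.h terms:

	#include <linux/list.h>

	struct item { struct list_head node; };
	static LIST_HEAD(queue);

	/* list_add() inserts immediately after the given node, so: */
	static void add_first(struct item *it)
	{
		list_add(&it->node, &queue);		/* becomes first entry */
	}

	static void add_second(struct item *it)
	{
		list_add(&it->node, queue.next);	/* becomes second entry */
	}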
 
 /*
  * Cancels a request that was started with dasd_sleep_on_req.
  * This is useful to timeout requests. The request will be
  * terminated if it is currently in i/o.
- * Returns 1 if the request has been terminated.
- *	   0 if there was no need to terminate the request (not started yet)
+ * Returns 0 if request termination was successful
  *	   negative error code if termination failed
  * Cancellation of a request is an asynchronous operation! The calling
  * function has to wait until the request is properly returned via callback.
@@ -1703,7 +2432,7 @@ int dasd_cancel_req(struct dasd_ccw_req *cqr)
 				      "Cancelling request %p failed with rc=%d\n",
 				      cqr, rc);
 		} else {
-			cqr->stopclk = get_clock();
+			cqr->stopclk = get_tod_clock();
 		}
 		break;
 	default: /* already finished or clear pending - do nothing */
@@ -1714,7 +2443,6 @@ int dasd_cancel_req(struct dasd_ccw_req *cqr)
 	return rc;
 }
 
-
 /*
  * SECTION: Operations of the dasd_block layer.
  */
@@ -1801,7 +2529,7 @@ static void __dasd_process_request_queue(struct dasd_block *block)
 		return;
 	}
 
 	/* Now we try to fetch requests from the request queue */
-	while (!blk_queue_plugged(queue) && (req = blk_peek_request(queue))) {
+	while ((req = blk_peek_request(queue))) {
 		if (basedev->features & DASD_FEATURE_READONLY &&
 		    rq_data_dir(req) == WRITE) {
 			DBF_DEV_EVENT(DBF_ERR, basedev,
@@ -1811,6 +2539,16 @@ static void __dasd_process_request_queue(struct dasd_block *block)
 			__blk_end_request_all(req, -EIO);
 			continue;
 		}
+		if (test_bit(DASD_FLAG_ABORTALL, &basedev->flags) &&
+		    (basedev->features & DASD_FEATURE_FAILFAST ||
+		     blk_noretry_request(req))) {
+			DBF_DEV_EVENT(DBF_ERR, basedev,
+				      "Rejecting failfast request %p",
+				      req);
+			blk_start_request(req);
+			__blk_end_request_all(req, -ETIMEDOUT);
+			continue;
+		}
 		cqr = basedev->discipline->build_cp(basedev, block, req);
 		if (IS_ERR(cqr)) {
 			if (PTR_ERR(cqr) == -EBUSY)
@@ -1849,8 +2587,10 @@ static void __dasd_process_request_queue(struct dasd_block *block)
 		 */
 		cqr->callback_data = (void *) req;
 		cqr->status = DASD_CQR_FILLED;
+		req->completion_data = cqr;
 		blk_start_request(req);
 		list_add_tail(&cqr->blocklist, &block->ccw_queue);
+		INIT_LIST_HEAD(&cqr->devlist);
 		dasd_profile_start(block, cqr, req);
 	}
 }
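__dasd_process_request_queue() is built on the single-queue block API of this kernel generation: blk_peek_request() looks at the head of the queue without dequeueing, blk_start_request() dequeues the request and starts the block layer's timeout accounting, and __blk_end_request_all() completes it with an error code. The skeleton of such a fetch loop, with a hypothetical acceptance check:

	static void demo_request_fn(struct request_queue *q)
	{
		struct request *req;

		while ((req = blk_peek_request(q))) {
			if (!demo_can_handle(req)) {	/* hypothetical check */
				blk_start_request(req);
				__blk_end_request_all(req, -EIO);
				continue;
			}
			blk_start_request(req);
			/* ... build and queue the driver's own request ... */
		}
	}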
@@ -1864,8 +2604,17 @@ static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr)
 	req = (struct request *) cqr->callback_data;
 	dasd_profile_end(cqr->block, cqr, req);
 	status = cqr->block->base->discipline->free_cp(cqr, req);
-	if (status <= 0)
-		error = status ? status : -EIO;
+	if (status < 0)
+		error = status;
+	else if (status == 0) {
+		if (cqr->intrc == -EPERM)
+			error = -EBADE;
+		else if (cqr->intrc == -ENOLINK ||
+			 cqr->intrc == -ETIMEDOUT)
+			error = cqr->intrc;
+		else
+			error = -EIO;
+	}
 	__blk_end_request_all(req, error);
 }
 
@@ -1899,7 +2648,8 @@ restart:
 		/* Process requests that may be recovered */
 		if (cqr->status == DASD_CQR_NEED_ERP) {
 			erp_fn = base->discipline->erp_action(cqr);
-			erp_fn(cqr);
+			if (IS_ERR(erp_fn(cqr)))
+				continue;
 			goto restart;
 		}
 
@@ -1930,7 +2680,7 @@ restart:
 		}
 
 		/* Rechain finished requests to final queue */
-		cqr->endclk = get_clock();
+		cqr->endclk = get_tod_clock();
 		list_move_tail(&cqr->blocklist, final_queue);
 	}
 }
@@ -1953,11 +2703,19 @@ static void __dasd_block_start_head(struct dasd_block *block)
 	list_for_each_entry(cqr, &block->ccw_queue, blocklist) {
 		if (cqr->status != DASD_CQR_FILLED)
 			continue;
+		if (test_bit(DASD_FLAG_LOCK_STOLEN, &block->base->flags) &&
+		    !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
+			cqr->status = DASD_CQR_FAILED;
+			cqr->intrc = -EPERM;
+			dasd_schedule_block_bh(block);
+			continue;
+		}
 		/* Non-temporary stop condition will trigger fail fast */
 		if (block->base->stopped & ~DASD_STOPPED_PENDING &&
 		    test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
 		    (!dasd_eer_enabled(block->base))) {
 			cqr->status = DASD_CQR_FAILED;
+			cqr->intrc = -ENOLINK;
 			dasd_schedule_block_bh(block);
 			continue;
 		}
@@ -2007,6 +2765,8 @@ static void dasd_block_tasklet(struct dasd_block *block)
 	__dasd_block_start_head(block);
 	spin_unlock(&block->queue_lock);
 	spin_unlock_irq(&block->request_queue_lock);
+	if (waitqueue_active(&shutdown_waitq))
+		wake_up(&shutdown_waitq);
 	dasd_put_device(block->base);
 }
 
@@ -2016,6 +2776,26 @@ static void _dasd_wake_block_flush_cb(struct dasd_ccw_req *cqr, void *data)
 }
 
 /*
+ * Requeue a request back to the block request queue
+ * only works for block requests
+ */
+static int _dasd_requeue_request(struct dasd_ccw_req *cqr)
+{
+	struct dasd_block *block = cqr->block;
+	struct request *req;
+	unsigned long flags;
+
+	if (!block)
+		return -EINVAL;
+	spin_lock_irqsave(&block->queue_lock, flags);
+	req = (struct request *) cqr->callback_data;
+	blk_requeue_request(block->request_queue, req);
+	spin_unlock_irqrestore(&block->queue_lock, flags);
+
+	return 0;
+}
+
+/*
  * Go through all request on the dasd_block request queue, cancel them
  * on the respective dasd_device, and return them to the generic
  * block layer.
@@ -2064,7 +2844,7 @@ restart_cb:
 		}
 		/* call the callback function */
 		spin_lock_irq(&block->request_queue_lock);
-		cqr->endclk = get_clock();
+		cqr->endclk = get_tod_clock();
 		list_del_init(&cqr->blocklist);
 		__dasd_cleanup_cqr(cqr);
 		spin_unlock_irq(&block->request_queue_lock);
@@ -2108,6 +2888,82 @@ static void do_dasd_request(struct request_queue *queue)
 }
 
 /*
+ * Block timeout callback, called from the block layer
+ *
+ * request_queue lock is held on entry.
+ *
+ * Return values:
+ * BLK_EH_RESET_TIMER if the request should be left running
+ * BLK_EH_NOT_HANDLED if the request is handled or terminated
+ *		      by the driver.
+ */
+enum blk_eh_timer_return dasd_times_out(struct request *req)
+{
+	struct dasd_ccw_req *cqr = req->completion_data;
+	struct dasd_block *block = req->q->queuedata;
+	struct dasd_device *device;
+	int rc = 0;
+
+	if (!cqr)
+		return BLK_EH_NOT_HANDLED;
+
+	device = cqr->startdev ? cqr->startdev : block->base;
+	if (!device->blk_timeout)
+		return BLK_EH_RESET_TIMER;
+	DBF_DEV_EVENT(DBF_WARNING, device,
+		      " dasd_times_out cqr %p status %x",
+		      cqr, cqr->status);
+
+	spin_lock(&block->queue_lock);
+	spin_lock(get_ccwdev_lock(device->cdev));
+	cqr->retries = -1;
+	cqr->intrc = -ETIMEDOUT;
+	if (cqr->status >= DASD_CQR_QUEUED) {
+		spin_unlock(get_ccwdev_lock(device->cdev));
+		rc = dasd_cancel_req(cqr);
+	} else if (cqr->status == DASD_CQR_FILLED ||
+		   cqr->status == DASD_CQR_NEED_ERP) {
+		cqr->status = DASD_CQR_TERMINATED;
+		spin_unlock(get_ccwdev_lock(device->cdev));
+	} else if (cqr->status == DASD_CQR_IN_ERP) {
+		struct dasd_ccw_req *searchcqr, *nextcqr, *tmpcqr;
+
+		list_for_each_entry_safe(searchcqr, nextcqr,
+					 &block->ccw_queue, blocklist) {
+			tmpcqr = searchcqr;
+			while (tmpcqr->refers)
+				tmpcqr = tmpcqr->refers;
+			if (tmpcqr != cqr)
+				continue;
+			/* searchcqr is an ERP request for cqr */
+			searchcqr->retries = -1;
+			searchcqr->intrc = -ETIMEDOUT;
+			if (searchcqr->status >= DASD_CQR_QUEUED) {
+				spin_unlock(get_ccwdev_lock(device->cdev));
+				rc = dasd_cancel_req(searchcqr);
+				spin_lock(get_ccwdev_lock(device->cdev));
+			} else if ((searchcqr->status == DASD_CQR_FILLED) ||
+				   (searchcqr->status == DASD_CQR_NEED_ERP)) {
+				searchcqr->status = DASD_CQR_TERMINATED;
+				rc = 0;
+			} else if (searchcqr->status == DASD_CQR_IN_ERP) {
+				/*
+				 * Shouldn't happen; most recent ERP
+				 * request is at the front of queue
+				 */
+				continue;
+			}
+			break;
+		}
+		spin_unlock(get_ccwdev_lock(device->cdev));
+	}
+	dasd_schedule_block_bh(block);
+	spin_unlock(&block->queue_lock);
+
+	return rc ? BLK_EH_RESET_TIMER : BLK_EH_NOT_HANDLED;
+}
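dasd_times_out() implements the block layer's request-timeout contract: return BLK_EH_RESET_TIMER to let the request keep running, BLK_EH_NOT_HANDLED once the driver has terminated or completed it. Hooking it up is a one-time queue setup; this sketch assumes the era's blk_queue_rq_timed_out()/blk_queue_rq_timeout() helpers, which are not shown in this hunk:

	/* sketch: registration during queue setup (helper APIs assumed) */
	static void demo_setup_timeout(struct dasd_block *block)
	{
		blk_queue_rq_timed_out(block->request_queue, dasd_times_out);
		blk_queue_rq_timeout(block->request_queue, 30 * HZ); /* example */
	}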
+
+/*
  * Allocate and initialize request queue and default I/O scheduler.
  */
static int dasd_alloc_queue(struct dasd_block *block)
@@ -2123,12 +2979,12 @@ static int dasd_alloc_queue(struct dasd_block *block)
 	elevator_exit(block->request_queue->elevator);
 	block->request_queue->elevator = NULL;
 
+	mutex_lock(&block->request_queue->sysfs_lock);
 	rc = elevator_init(block->request_queue, "deadline");
-	if (rc) {
+	if (rc)
 		blk_cleanup_queue(block->request_queue);
-		return rc;
-	}
-	return 0;
+	mutex_unlock(&block->request_queue->sysfs_lock);
+	return rc;
 }
 
 /*
@@ -2138,8 +2994,20 @@ static void dasd_setup_queue(struct dasd_block *block)
 {
 	int max;
 
-	blk_queue_logical_block_size(block->request_queue, block->bp_block);
-	max = block->base->discipline->max_blocks << block->s2b_shift;
+	if (block->base->features & DASD_FEATURE_USERAW) {
+		/*
+		 * the max_blocks value for raw_track access is 256
+		 * it is higher than the native ECKD value because we
+		 * only need one ccw per track
+		 * so the max_hw_sectors are
+		 * 2048 x 512B = 1024kB = 16 tracks
+		 */
+		max = 2048;
+	} else {
+		max = block->base->discipline->max_blocks << block->s2b_shift;
+	}
+	blk_queue_logical_block_size(block->request_queue,
+				     block->bp_block);
 	blk_queue_max_hw_sectors(block->request_queue, max);
 	blk_queue_max_segments(block->request_queue, -1L);
 	/* with page sized segments we can translate each segement into
@@ -2147,7 +3015,6 @@ static void dasd_setup_queue(struct dasd_block *block)
 	 */
 	blk_queue_max_segment_size(block->request_queue, PAGE_SIZE);
 	blk_queue_segment_boundary(block->request_queue, PAGE_SIZE - 1);
-	blk_queue_ordered(block->request_queue, QUEUE_ORDERED_DRAIN, NULL);
 }
 
 /*
@@ -2179,15 +3046,14 @@ static void dasd_flush_request_queue(struct dasd_block *block)
 
 static int dasd_open(struct block_device *bdev, fmode_t mode)
 {
-	struct dasd_block *block = bdev->bd_disk->private_data;
 	struct dasd_device *base;
 	int rc;
 
-	if (!block)
+	base = dasd_device_from_gendisk(bdev->bd_disk);
+	if (!base)
 		return -ENODEV;
 
-	base = block->base;
-	atomic_inc(&block->open_count);
+	atomic_inc(&base->block->open_count);
 	if (test_bit(DASD_FLAG_OFFLINE, &base->flags)) {
 		rc = -ENODEV;
 		goto unlock;
@@ -2220,22 +3086,25 @@ static int dasd_open(struct block_device *bdev, fmode_t mode)
 		goto out;
 	}
 
+	dasd_put_device(base);
 	return 0;
 
 out:
 	module_put(base->discipline->owner);
 unlock:
-	atomic_dec(&block->open_count);
+	atomic_dec(&base->block->open_count);
+	dasd_put_device(base);
 	return rc;
 }
 
-static int dasd_release(struct gendisk *disk, fmode_t mode)
+static void dasd_release(struct gendisk *disk, fmode_t mode)
 {
-	struct dasd_block *block = disk->private_data;
-
-	atomic_dec(&block->open_count);
-	module_put(block->base->discipline->owner);
-	return 0;
+	struct dasd_device *base = dasd_device_from_gendisk(disk);
+	if (base) {
+		atomic_dec(&base->block->open_count);
+		module_put(base->discipline->owner);
+		dasd_put_device(base);
+	}
 }
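dasd_open(), dasd_release(), and dasd_getgeo() now all follow the same discipline: dasd_device_from_gendisk() returns the device with a reference held, and every exit path must drop that reference with dasd_put_device(). The shape of a correct caller:

	static int demo_use_device(struct gendisk *disk)
	{
		struct dasd_device *base;

		base = dasd_device_from_gendisk(disk);	/* takes a reference */
		if (!base)
			return -ENODEV;
		/* ... use base and base->block ... */
		dasd_put_device(base);			/* drop on every path */
		return 0;
	}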
 
 /*
@@ -2243,20 +3112,20 @@ static int dasd_release(struct gendisk *disk, fmode_t mode)
  */
static int dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
-	struct dasd_block *block;
 	struct dasd_device *base;
 
-	block = bdev->bd_disk->private_data;
-	if (!block)
+	base = dasd_device_from_gendisk(bdev->bd_disk);
+	if (!base)
 		return -ENODEV;
 
-	base = block->base;
 	if (!base->discipline ||
-	    !base->discipline->fill_geometry)
+	    !base->discipline->fill_geometry) {
+		dasd_put_device(base);
 		return -EINVAL;
-
-	base->discipline->fill_geometry(block, geo);
-	geo->start = get_start_sect(bdev) >> block->s2b_shift;
+	}
+	base->discipline->fill_geometry(base->block, geo);
+	geo->start = get_start_sect(bdev) >> base->block->s2b_shift;
+	dasd_put_device(base);
 	return 0;
}
 
@@ -2291,6 +3160,7 @@ dasd_exit(void)
 		debug_unregister(dasd_debug_area);
 		dasd_debug_area = NULL;
 	}
+	dasd_statistics_removeroot();
 }
 
 /*
@@ -2376,13 +3246,16 @@ void dasd_generic_remove(struct ccw_device *cdev)
 
 	cdev->handler = NULL;
 
-	dasd_remove_sysfs_files(cdev);
 	device = dasd_device_from_cdev(cdev);
-	if (IS_ERR(device))
+	if (IS_ERR(device)) {
+		dasd_remove_sysfs_files(cdev);
 		return;
-	if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) {
+	}
+	if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags) &&
+	    !test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
 		/* Already doing offline processing */
 		dasd_put_device(device);
+		dasd_remove_sysfs_files(cdev);
 		return;
 	}
 	/*
@@ -2393,7 +3266,6 @@ void dasd_generic_remove(struct ccw_device *cdev)
 	dasd_set_target_state(device, DASD_STATE_NEW);
 	/* dasd_delete_device destroys the device reference. */
 	block = device->block;
-	device->block = NULL;
 	dasd_delete_device(device);
 	/*
	 * life cycle of block is bound to device, so delete it after
@@ -2401,6 +3273,8 @@ void dasd_generic_remove(struct ccw_device *cdev)
 	 */
 	if (block)
 		dasd_free_block(block);
+
+	dasd_remove_sysfs_files(cdev);
 }
 
 /*
@@ -2479,16 +3353,13 @@ int dasd_generic_set_offline(struct ccw_device *cdev)
 {
 	struct dasd_device *device;
 	struct dasd_block *block;
-	int max_count, open_count;
+	int max_count, open_count, rc;
 
+	rc = 0;
 	device = dasd_device_from_cdev(cdev);
 	if (IS_ERR(device))
 		return PTR_ERR(device);
-	if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) {
-		/* Already doing offline processing */
-		dasd_put_device(device);
-		return 0;
-	}
+
 	/*
 	 * We must make sure that this device is currently not in use.
 	 * The open_count is increased for every opener, that includes
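The hunk that follows implements the new "safe offline": instead of killing outstanding I/O it syncs the block device and then sleeps on shutdown_waitq until _wait_for_empty_queues() reports both ccw queues empty; the device and block tasklets in the earlier hunks wake that queue whenever they finish work. The waiter/waker pair in isolation (names are illustrative):

	static wait_queue_head_t drain_waitq;	/* mirrors shutdown_waitq */

	/* waker: called by the tasklets after processing their queues */
	static void demo_queues_changed(void)
	{
		if (waitqueue_active(&drain_waitq))	/* cheap unlocked check */
			wake_up(&drain_waitq);
	}

	/* waiter: interruptible, so a signal can abort the safe offline */
	static int demo_wait_for_drain(struct dasd_device *device)
	{
		return wait_event_interruptible(drain_waitq,
						_wait_for_empty_queues(device));
	}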
*/ block = device->block; - device->block = NULL; dasd_delete_device(device); /* * life cycle of block is bound to device, so delete it after @@ -2524,12 +3442,63 @@ int dasd_generic_set_offline(struct ccw_device *cdev) if (block) dasd_free_block(block); return 0; + +interrupted: + /* interrupted by signal */ + clear_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags); + clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags); + clear_bit(DASD_FLAG_OFFLINE, &device->flags); + dasd_put_device(device); + return rc; } +int dasd_generic_last_path_gone(struct dasd_device *device) +{ + struct dasd_ccw_req *cqr; + + dev_warn(&device->cdev->dev, "No operational channel path is left " + "for the device\n"); + DBF_DEV_EVENT(DBF_WARNING, device, "%s", "last path gone"); + /* First of all call extended error reporting. */ + dasd_eer_write(device, NULL, DASD_EER_NOPATH); + + if (device->state < DASD_STATE_BASIC) + return 0; + /* Device is active. We want to keep it. */ + list_for_each_entry(cqr, &device->ccw_queue, devlist) + if ((cqr->status == DASD_CQR_IN_IO) || + (cqr->status == DASD_CQR_CLEAR_PENDING)) { + cqr->status = DASD_CQR_QUEUED; + cqr->retries++; + } + dasd_device_set_stop_bits(device, DASD_STOPPED_DC_WAIT); + dasd_device_clear_timer(device); + dasd_schedule_device_bh(device); + return 1; +} +EXPORT_SYMBOL_GPL(dasd_generic_last_path_gone); + +int dasd_generic_path_operational(struct dasd_device *device) +{ + dev_info(&device->cdev->dev, "A channel path to the device has become " + "operational\n"); + DBF_DEV_EVENT(DBF_WARNING, device, "%s", "path operational"); + dasd_device_remove_stop_bits(device, DASD_STOPPED_DC_WAIT); + if (device->stopped & DASD_UNRESUMED_PM) { + dasd_device_remove_stop_bits(device, DASD_UNRESUMED_PM); + dasd_restore_device(device); + return 1; + } + dasd_schedule_device_bh(device); + if (device->block) + dasd_schedule_block_bh(device->block); + return 1; +} +EXPORT_SYMBOL_GPL(dasd_generic_path_operational); + int dasd_generic_notify(struct ccw_device *cdev, int event) { struct dasd_device *device; - struct dasd_ccw_req *cqr; int ret; device = dasd_device_from_cdev_locked(cdev); @@ -2540,53 +3509,109 @@ int dasd_generic_notify(struct ccw_device *cdev, int event) case CIO_GONE: case CIO_BOXED: case CIO_NO_PATH: - /* First of all call extended error reporting. */ - dasd_eer_write(device, NULL, DASD_EER_NOPATH); - - if (device->state < DASD_STATE_BASIC) - break; - /* Device is active. We want to keep it. */ - list_for_each_entry(cqr, &device->ccw_queue, devlist) - if (cqr->status == DASD_CQR_IN_IO) { - cqr->status = DASD_CQR_QUEUED; - cqr->retries++; - } - dasd_device_set_stop_bits(device, DASD_STOPPED_DC_WAIT); - dasd_device_clear_timer(device); - dasd_schedule_device_bh(device); - ret = 1; + device->path_data.opm = 0; + device->path_data.ppm = 0; + device->path_data.npm = 0; + ret = dasd_generic_last_path_gone(device); break; case CIO_OPER: - /* FIXME: add a sanity check. 
*/ - dasd_device_remove_stop_bits(device, DASD_STOPPED_DC_WAIT); - if (device->stopped & DASD_UNRESUMED_PM) { - dasd_device_remove_stop_bits(device, DASD_UNRESUMED_PM); - dasd_restore_device(device); - ret = 1; - break; - } - dasd_schedule_device_bh(device); - if (device->block) - dasd_schedule_block_bh(device->block); ret = 1; + if (device->path_data.opm) + ret = dasd_generic_path_operational(device); break; } dasd_put_device(device); return ret; } +void dasd_generic_path_event(struct ccw_device *cdev, int *path_event) +{ + int chp; + __u8 oldopm, eventlpm; + struct dasd_device *device; + + device = dasd_device_from_cdev_locked(cdev); + if (IS_ERR(device)) + return; + for (chp = 0; chp < 8; chp++) { + eventlpm = 0x80 >> chp; + if (path_event[chp] & PE_PATH_GONE) { + oldopm = device->path_data.opm; + device->path_data.opm &= ~eventlpm; + device->path_data.ppm &= ~eventlpm; + device->path_data.npm &= ~eventlpm; + if (oldopm && !device->path_data.opm) { + dev_warn(&device->cdev->dev, + "No verified channel paths remain " + "for the device\n"); + DBF_DEV_EVENT(DBF_WARNING, device, + "%s", "last verified path gone"); + dasd_eer_write(device, NULL, DASD_EER_NOPATH); + dasd_device_set_stop_bits(device, + DASD_STOPPED_DC_WAIT); + } + } + if (path_event[chp] & PE_PATH_AVAILABLE) { + device->path_data.opm &= ~eventlpm; + device->path_data.ppm &= ~eventlpm; + device->path_data.npm &= ~eventlpm; + device->path_data.tbvpm |= eventlpm; + dasd_schedule_device_bh(device); + } + if (path_event[chp] & PE_PATHGROUP_ESTABLISHED) { + if (!(device->path_data.opm & eventlpm) && + !(device->path_data.tbvpm & eventlpm)) { + /* + * we can not establish a pathgroup on an + * unavailable path, so trigger a path + * verification first + */ + device->path_data.tbvpm |= eventlpm; + dasd_schedule_device_bh(device); + } + DBF_DEV_EVENT(DBF_WARNING, device, "%s", + "Pathgroup re-established\n"); + if (device->discipline->kick_validate) + device->discipline->kick_validate(device); + } + } + dasd_put_device(device); +} +EXPORT_SYMBOL_GPL(dasd_generic_path_event); + +int dasd_generic_verify_path(struct dasd_device *device, __u8 lpm) +{ + if (!device->path_data.opm && lpm) { + device->path_data.opm = lpm; + dasd_generic_path_operational(device); + } else + device->path_data.opm |= lpm; + return 0; +} +EXPORT_SYMBOL_GPL(dasd_generic_verify_path); + + int dasd_generic_pm_freeze(struct ccw_device *cdev) { + struct dasd_device *device = dasd_device_from_cdev(cdev); + struct list_head freeze_queue; struct dasd_ccw_req *cqr, *n; + struct dasd_ccw_req *refers; int rc; - struct list_head freeze_queue; - struct dasd_device *device = dasd_device_from_cdev(cdev); if (IS_ERR(device)) return PTR_ERR(device); + + /* mark device as suspended */ + set_bit(DASD_FLAG_SUSPENDED, &device->flags); + + if (device->discipline->freeze) + rc = device->discipline->freeze(device); + /* disallow new I/O */ dasd_device_set_stop_bits(device, DASD_STOPPED_PM); - /* clear active requests */ + + /* clear active requests and requeue them to block layer if possible */ INIT_LIST_HEAD(&freeze_queue); spin_lock_irq(get_ccwdev_lock(cdev)); rc = 0; @@ -2606,7 +3631,6 @@ int dasd_generic_pm_freeze(struct ccw_device *cdev) } list_move_tail(&cqr->devlist, &freeze_queue); } - spin_unlock_irq(get_ccwdev_lock(cdev)); list_for_each_entry_safe(cqr, n, &freeze_queue, devlist) { @@ -2614,15 +3638,38 @@ int dasd_generic_pm_freeze(struct ccw_device *cdev) (cqr->status != DASD_CQR_CLEAR_PENDING)); if (cqr->status == DASD_CQR_CLEARED) cqr->status = DASD_CQR_QUEUED; - } - /* move 
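dasd_generic_path_event() above treats the eight channel paths as bits of one mask, MSB first, so CHPID slot chp maps to 0x80 >> chp. A small standalone illustration of the PE_PATH_GONE bookkeeping; the sample masks are assumed:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint8_t opm = 0xc1;		/* paths 0, 1 and 7 operational */
        int chp = 1;			/* event reported for CHPID slot 1 */
        uint8_t eventlpm = 0x80 >> chp;	/* its mask bit: 0x40 */

        opm &= ~eventlpm;		/* PE_PATH_GONE: drop the path */
        printf("opm now 0x%02x, last path gone: %s\n",
               opm, opm ? "no" : "yes");
        return 0;
}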
freeze_queue to start of the ccw_queue */ - spin_lock_irq(get_ccwdev_lock(cdev)); - list_splice_tail(&freeze_queue, &device->ccw_queue); - spin_unlock_irq(get_ccwdev_lock(cdev)); - if (device->discipline->freeze) - rc = device->discipline->freeze(device); + /* requeue requests to blocklayer will only work for + block device requests */ + if (_dasd_requeue_request(cqr)) + continue; + /* remove requests from device and block queue */ + list_del_init(&cqr->devlist); + while (cqr->refers != NULL) { + refers = cqr->refers; + /* remove the request from the block queue */ + list_del(&cqr->blocklist); + /* free the finished erp request */ + dasd_free_erp_request(cqr, cqr->memdev); + cqr = refers; + } + if (cqr->block) + list_del_init(&cqr->blocklist); + cqr->block->base->discipline->free_cp( + cqr, (struct request *) cqr->callback_data); + } + + /* + * if requests remain then they are internal request + * and go back to the device queue + */ + if (!list_empty(&freeze_queue)) { + /* move freeze_queue to start of the ccw_queue */ + spin_lock_irq(get_ccwdev_lock(cdev)); + list_splice_tail(&freeze_queue, &device->ccw_queue); + spin_unlock_irq(get_ccwdev_lock(cdev)); + } dasd_put_device(device); return rc; } @@ -2658,6 +3705,7 @@ int dasd_generic_restore_device(struct ccw_device *cdev) if (device->block) dasd_schedule_block_bh(device->block); + clear_bit(DASD_FLAG_SUSPENDED, &device->flags); dasd_put_device(device); return 0; } @@ -2699,7 +3747,7 @@ static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device, cqr->memdev = device; cqr->expires = 10*HZ; cqr->retries = 256; - cqr->buildclk = get_clock(); + cqr->buildclk = get_tod_clock(); cqr->status = DASD_CQR_FILLED; return cqr; } @@ -2756,6 +3804,23 @@ char *dasd_get_sense(struct irb *irb) } EXPORT_SYMBOL_GPL(dasd_get_sense); +void dasd_generic_shutdown(struct ccw_device *cdev) +{ + struct dasd_device *device; + + device = dasd_device_from_cdev(cdev); + if (IS_ERR(device)) + return; + + if (device->block) + dasd_schedule_block_bh(device->block); + + dasd_schedule_device_bh(device); + + wait_event(shutdown_waitq, _wait_for_empty_queues(device)); +} +EXPORT_SYMBOL_GPL(dasd_generic_shutdown); + static int __init dasd_init(void) { int rc; @@ -2763,6 +3828,7 @@ static int __init dasd_init(void) init_waitqueue_head(&dasd_init_waitq); init_waitqueue_head(&dasd_flush_wq); init_waitqueue_head(&generic_waitq); + init_waitqueue_head(&shutdown_waitq); /* register 'common' DASD debug area, used for all DBF_XXX calls */ dasd_debug_area = debug_register("dasd", 1, 1, 8 * sizeof(long)); @@ -2777,6 +3843,8 @@ static int __init dasd_init(void) dasd_diag_discipline_pointer = NULL; + dasd_statistics_createroot(); + rc = dasd_devmap_init(); if (rc) goto failed; diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c index 51224f76b98..d2613471368 100644 --- a/drivers/s390/block/dasd_3990_erp.c +++ b/drivers/s390/block/dasd_3990_erp.c @@ -1,16 +1,14 @@ /* - * File...........: linux/drivers/s390/block/dasd_3990_erp.c * Author(s)......: Horst Hummel <Horst.Hummel@de.ibm.com> * Holger Smolinski <Holger.Smolinski@de.ibm.com> * Bugreports.to..: <Linux390@de.ibm.com> - * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 2000, 2001 + * Copyright IBM Corp. 
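The freeze hunk above walks each request's ERP chain back to the original block-layer request via cqr->refers, releasing every intermediate ERP before the request is requeued. A reduced sketch of that unwinding; struct cqr and free_erp() are stand-ins for the driver's types:

#include <stddef.h>
#include <stdio.h>

struct cqr {
        struct cqr *refers;	/* older request this ERP retries, or NULL */
};

/* Walk to the original request, dropping every finished ERP on the way. */
static struct cqr *unwind_erp(struct cqr *cqr, void (*free_erp)(struct cqr *))
{
        while (cqr->refers != NULL) {
                struct cqr *orig = cqr->refers;
                free_erp(cqr);
                cqr = orig;
        }
        return cqr;		/* the original block-layer request */
}

static void fake_free(struct cqr *c) { (void)c; }

int main(void)
{
        struct cqr orig = { NULL }, erp = { &orig };

        printf("unwound to original: %d\n",
               unwind_erp(&erp, fake_free) == &orig);
        return 0;
}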
2000, 2001 * */ #define KMSG_COMPONENT "dasd-eckd" #include <linux/timer.h> -#include <linux/slab.h> #include <asm/idals.h> #define PRINTK_HEADER "dasd_erp(3990): " @@ -153,9 +151,9 @@ dasd_3990_erp_alternate_path(struct dasd_ccw_req * erp) spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); opm = ccw_device_get_path_mask(device->cdev); spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); - //FIXME: start with get_opm ? if (erp->lpm == 0) - erp->lpm = LPM_ANYPATH & ~(erp->irb.esw.esw0.sublog.lpum); + erp->lpm = device->path_data.opm & + ~(erp->irb.esw.esw0.sublog.lpum); else erp->lpm &= ~(erp->irb.esw.esw0.sublog.lpum); @@ -222,6 +220,7 @@ dasd_3990_erp_DCTL(struct dasd_ccw_req * erp, char modifier) ccw->cmd_code = CCW_CMD_DCTL; ccw->count = 4; ccw->cda = (__u32)(addr_t) DCTL_data; + dctl_cqr->flags = erp->flags; dctl_cqr->function = dasd_3990_erp_DCTL; dctl_cqr->refers = erp; dctl_cqr->startdev = device; @@ -230,7 +229,7 @@ dasd_3990_erp_DCTL(struct dasd_ccw_req * erp, char modifier) dctl_cqr->expires = 5 * 60 * HZ; dctl_cqr->retries = 2; - dctl_cqr->buildclk = get_clock(); + dctl_cqr->buildclk = get_tod_clock(); dctl_cqr->status = DASD_CQR_FILLED; @@ -270,10 +269,11 @@ static struct dasd_ccw_req *dasd_3990_erp_action_1(struct dasd_ccw_req *erp) { erp->function = dasd_3990_erp_action_1; dasd_3990_erp_alternate_path(erp); - if (erp->status == DASD_CQR_FAILED) { + if (erp->status == DASD_CQR_FAILED && + !test_bit(DASD_CQR_VERIFY_PATH, &erp->flags)) { erp->status = DASD_CQR_FILLED; erp->retries = 10; - erp->lpm = LPM_ANYPATH; + erp->lpm = erp->startdev->path_data.opm; erp->function = dasd_3990_erp_action_1_sec; } return erp; @@ -1419,9 +1419,29 @@ static struct dasd_ccw_req *dasd_3990_erp_inspect_alias( struct dasd_ccw_req *erp) { struct dasd_ccw_req *cqr = erp->refers; + char *sense; if (cqr->block && (cqr->block->base != cqr->startdev)) { + + sense = dasd_get_sense(&erp->refers->irb); + /* + * dynamic pav may have changed base alias mapping + */ + if (!test_bit(DASD_FLAG_OFFLINE, &cqr->startdev->flags) && sense + && (sense[0] == 0x10) && (sense[7] == 0x0F) + && (sense[8] == 0x67)) { + /* + * remove device from alias handling to prevent new + * requests from being scheduled on the + * wrong alias device + */ + dasd_alias_remove_device(cqr->startdev); + + /* schedule worker to reload device */ + dasd_reload_device(cqr->startdev); + } + if (cqr->startdev->features & DASD_FEATURE_ERPLOG) { DBF_DEV_EVENT(DBF_ERR, cqr->startdev, "ERP on alias device for request %p," @@ -1691,14 +1711,15 @@ dasd_3990_erp_action_1B_32(struct dasd_ccw_req * default_erp, char *sense) ccw->cda = cpa; /* fill erp related fields */ + erp->flags = default_erp->flags; erp->function = dasd_3990_erp_action_1B_32; erp->refers = default_erp->refers; erp->startdev = device; erp->memdev = device; erp->magic = default_erp->magic; - erp->expires = 0; + erp->expires = default_erp->expires; erp->retries = 256; - erp->buildclk = get_clock(); + erp->buildclk = get_tod_clock(); erp->status = DASD_CQR_FILLED; /* remove the default erp */ @@ -1886,15 +1907,14 @@ dasd_3990_erp_compound_retry(struct dasd_ccw_req * erp, char *sense) static void dasd_3990_erp_compound_path(struct dasd_ccw_req * erp, char *sense) { - if (sense[25] & DASD_SENSE_BIT_3) { dasd_3990_erp_alternate_path(erp); - if (erp->status == DASD_CQR_FAILED) { + if (erp->status == DASD_CQR_FAILED && + !test_bit(DASD_CQR_VERIFY_PATH, &erp->flags)) { /* reset the lpm and the status to be able to * try further actions. 
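dasd_3990_erp_alternate_path() above now seeds the retry mask from the verified operational paths (path_data.opm) instead of LPM_ANYPATH, then strips each path that fails. A sketch of that mask arithmetic with assumed sample masks:

#include <stdio.h>

/* lpm == 0 means first failure: start from the operational paths minus
 * the failing one (lpum); otherwise keep knocking out failing paths. */
static unsigned char next_lpm(unsigned char lpm, unsigned char opm,
                              unsigned char lpum)
{
        if (lpm == 0)
                return opm & ~lpum;
        return lpm & ~lpum;
}

int main(void)
{
        unsigned char lpm = 0, opm = 0xe0;	/* paths 0-2 operational */

        lpm = next_lpm(lpm, opm, 0x80);		/* path 0 failed -> 0x60 */
        lpm = next_lpm(lpm, opm, 0x40);		/* path 1 failed -> 0x20 */
        printf("paths left to try: 0x%02x\n", lpm);
        return 0;
}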
*/ - - erp->lpm = 0; + erp->lpm = erp->startdev->path_data.opm; erp->status = DASD_CQR_NEED_ERP; } } @@ -2178,7 +2198,7 @@ dasd_3990_erp_inspect_32(struct dasd_ccw_req * erp, char *sense) /* ***************************************************************************** - * main ERP control fuctions (24 and 32 byte sense) + * main ERP control functions (24 and 32 byte sense) ***************************************************************************** */ @@ -2186,7 +2206,7 @@ dasd_3990_erp_inspect_32(struct dasd_ccw_req * erp, char *sense) * DASD_3990_ERP_CONTROL_CHECK * * DESCRIPTION - * Does a generic inspection if a control check occured and sets up + * Does a generic inspection if a control check occurred and sets up * the related error recovery procedure * * PARAMETER @@ -2229,7 +2249,7 @@ dasd_3990_erp_inspect(struct dasd_ccw_req *erp) struct dasd_ccw_req *erp_new = NULL; char *sense; - /* if this problem occured on an alias retry on base */ + /* if this problem occurred on an alias retry on base */ erp_new = dasd_3990_erp_inspect_alias(erp); if (erp_new) return erp_new; @@ -2261,7 +2281,7 @@ dasd_3990_erp_inspect(struct dasd_ccw_req *erp) * DASD_3990_ERP_ADD_ERP * * DESCRIPTION - * This funtion adds an additional request block (ERP) to the head of + * This function adds an additional request block (ERP) to the head of * the given cqr (or erp). * For a command mode cqr the erp is initialized as an default erp * (retry TIC). @@ -2287,7 +2307,8 @@ static struct dasd_ccw_req *dasd_3990_erp_add_erp(struct dasd_ccw_req *cqr) if (cqr->cpmode == 1) { cplength = 0; - datasize = sizeof(struct tcw) + sizeof(struct tsb); + /* TCW needs to be 64 byte aligned, so leave enough room */ + datasize = 64 + sizeof(struct tcw) + sizeof(struct tsb); } else { cplength = 2; datasize = 0; @@ -2301,7 +2322,7 @@ static struct dasd_ccw_req *dasd_3990_erp_add_erp(struct dasd_ccw_req *cqr) DBF_DEV_EVENT(DBF_ERR, device, "%s", "Unable to allocate ERP request"); cqr->status = DASD_CQR_FAILED; - cqr->stopclk = get_clock (); + cqr->stopclk = get_tod_clock(); } else { DBF_DEV_EVENT(DBF_ERR, device, "Unable to allocate ERP request " @@ -2309,15 +2330,15 @@ static struct dasd_ccw_req *dasd_3990_erp_add_erp(struct dasd_ccw_req *cqr) cqr->retries); dasd_block_set_timer(device->block, (HZ << 3)); } - return cqr; + return erp; } ccw = cqr->cpaddr; if (cqr->cpmode == 1) { /* make a shallow copy of the original tcw but set new tsb */ erp->cpmode = 1; - erp->cpaddr = erp->data; - tcw = erp->data; + erp->cpaddr = PTR_ALIGN(erp->data, 64); + tcw = erp->cpaddr; tsb = (struct tsb *) &tcw[1]; *tcw = *((struct tcw *)cqr->cpaddr); tcw->tsb = (long)tsb; @@ -2334,15 +2355,16 @@ static struct dasd_ccw_req *dasd_3990_erp_add_erp(struct dasd_ccw_req *cqr) ccw->cda = (long)(cqr->cpaddr); } + erp->flags = cqr->flags; erp->function = dasd_3990_erp_add_erp; erp->refers = cqr; erp->startdev = device; erp->memdev = device; erp->block = cqr->block; erp->magic = cqr->magic; - erp->expires = 0; + erp->expires = cqr->expires; erp->retries = 256; - erp->buildclk = get_clock(); + erp->buildclk = get_tod_clock(); erp->status = DASD_CQR_FILLED; return erp; @@ -2372,6 +2394,9 @@ dasd_3990_erp_additional_erp(struct dasd_ccw_req * cqr) /* add erp and initialize with default TIC */ erp = dasd_3990_erp_add_erp(cqr); + if (IS_ERR(erp)) + return erp; + /* inspect sense, determine specific ERP if possible */ if (erp != cqr) { @@ -2711,6 +2736,8 @@ dasd_3990_erp_action(struct dasd_ccw_req * cqr) if (erp == NULL) { /* no matching erp found - set up erp */ erp = 
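The add_erp hunk above over-allocates by 64 bytes and rounds the TCW pointer up with PTR_ALIGN, because a TCW must sit on a 64-byte boundary. A userspace re-creation of that rounding; the buffer size here is an assumed placeholder for sizeof(tcw) + sizeof(tsb):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Round a pointer up to the next 'a'-byte boundary (a power of two). */
#define PTR_ALIGN(p, a) \
        ((void *)(((uintptr_t)(p) + ((a) - 1)) & ~(uintptr_t)((a) - 1)))

int main(void)
{
        char data[64 + 200];	/* 64 spare bytes leave room to align */
        void *tcw = PTR_ALIGN(data, 64);

        printf("data=%p tcw=%p (offset %zu)\n", (void *)data, tcw,
               (size_t)((char *)tcw - data));
        return 0;
}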
dasd_3990_erp_additional_erp(cqr); + if (IS_ERR(erp)) + return erp; } else { /* matching erp found - set all leading erp's to DONE */ erp = dasd_3990_erp_handle_match_erp(cqr, erp); diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c index 148b1dd2407..a2597e683e7 100644 --- a/drivers/s390/block/dasd_alias.c +++ b/drivers/s390/block/dasd_alias.c @@ -1,13 +1,14 @@ /* * PAV alias management for the DASD ECKD discipline * - * Copyright IBM Corporation, 2007 + * Copyright IBM Corp. 2007 * Author(s): Stefan Weinhuber <wein@de.ibm.com> */ #define KMSG_COMPONENT "dasd-eckd" #include <linux/list.h> +#include <linux/slab.h> #include <asm/ebcdic.h> #include "dasd_int.h" #include "dasd_eckd.h" @@ -188,48 +189,44 @@ int dasd_alias_make_device_known_to_lcu(struct dasd_device *device) unsigned long flags; struct alias_server *server, *newserver; struct alias_lcu *lcu, *newlcu; - int is_lcu_known; - struct dasd_uid *uid; + struct dasd_uid uid; private = (struct dasd_eckd_private *) device->private; - uid = &private->uid; + + device->discipline->get_uid(device, &uid); spin_lock_irqsave(&aliastree.lock, flags); - is_lcu_known = 1; - server = _find_server(uid); + server = _find_server(&uid); if (!server) { spin_unlock_irqrestore(&aliastree.lock, flags); - newserver = _allocate_server(uid); + newserver = _allocate_server(&uid); if (IS_ERR(newserver)) return PTR_ERR(newserver); spin_lock_irqsave(&aliastree.lock, flags); - server = _find_server(uid); + server = _find_server(&uid); if (!server) { list_add(&newserver->server, &aliastree.serverlist); server = newserver; - is_lcu_known = 0; } else { /* someone was faster */ _free_server(newserver); } } - lcu = _find_lcu(server, uid); + lcu = _find_lcu(server, &uid); if (!lcu) { spin_unlock_irqrestore(&aliastree.lock, flags); - newlcu = _allocate_lcu(uid); + newlcu = _allocate_lcu(&uid); if (IS_ERR(newlcu)) return PTR_ERR(newlcu); spin_lock_irqsave(&aliastree.lock, flags); - lcu = _find_lcu(server, uid); + lcu = _find_lcu(server, &uid); if (!lcu) { list_add(&newlcu->lcu, &server->lculist); lcu = newlcu; - is_lcu_known = 0; } else { /* someone was faster */ _free_lcu(newlcu); } - is_lcu_known = 0; } spin_lock(&lcu->lock); list_add(&device->alias_list, &lcu->inactive_devices); @@ -237,68 +234,7 @@ int dasd_alias_make_device_known_to_lcu(struct dasd_device *device) spin_unlock(&lcu->lock); spin_unlock_irqrestore(&aliastree.lock, flags); - return is_lcu_known; -} - -/* - * The first device to be registered on an LCU will have to do - * some additional setup steps to configure that LCU on the - * storage server. All further devices should wait with their - * initialization until the first device is done. - * To synchronize this work, the first device will call - * dasd_alias_lcu_setup_complete when it is done, and all - * other devices will wait for it with dasd_alias_wait_for_lcu_setup. 
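dasd_alias_make_device_known_to_lcu() above allocates outside the tree lock, re-checks under the lock, and frees its allocation when "someone was faster". A sketch of the same optimistic-allocation pattern, with a pthread mutex standing in for the aliastree spinlock and the data reduced to a bare list:

#include <pthread.h>
#include <stdlib.h>

struct server {
        struct server *next;
        int id;
};

static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER;
static struct server *servers;

static struct server *find_server(int id)
{
        for (struct server *s = servers; s; s = s->next)
                if (s->id == id)
                        return s;
        return NULL;
}

static struct server *get_server(int id)
{
        pthread_mutex_lock(&tree_lock);
        struct server *s = find_server(id);
        if (!s) {
                /* drop the lock to allocate (the kernel path may sleep) */
                pthread_mutex_unlock(&tree_lock);
                struct server *new = calloc(1, sizeof(*new));
                pthread_mutex_lock(&tree_lock);
                s = find_server(id);
                if (!s && new) {
                        new->id = id;
                        new->next = servers;
                        servers = s = new;
                } else {
                        free(new);	/* someone was faster (or OOM) */
                }
        }
        pthread_mutex_unlock(&tree_lock);
        return s;
}

int main(void)
{
        return get_server(5) ? 0 : 1;
}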
- */ -void dasd_alias_lcu_setup_complete(struct dasd_device *device) -{ - struct dasd_eckd_private *private; - unsigned long flags; - struct alias_server *server; - struct alias_lcu *lcu; - struct dasd_uid *uid; - - private = (struct dasd_eckd_private *) device->private; - uid = &private->uid; - lcu = NULL; - spin_lock_irqsave(&aliastree.lock, flags); - server = _find_server(uid); - if (server) - lcu = _find_lcu(server, uid); - spin_unlock_irqrestore(&aliastree.lock, flags); - if (!lcu) { - DBF_EVENT_DEVID(DBF_ERR, device->cdev, - "could not find lcu for %04x %02x", - uid->ssid, uid->real_unit_addr); - WARN_ON(1); - return; - } - complete_all(&lcu->lcu_setup); -} - -void dasd_alias_wait_for_lcu_setup(struct dasd_device *device) -{ - struct dasd_eckd_private *private; - unsigned long flags; - struct alias_server *server; - struct alias_lcu *lcu; - struct dasd_uid *uid; - - private = (struct dasd_eckd_private *) device->private; - uid = &private->uid; - lcu = NULL; - spin_lock_irqsave(&aliastree.lock, flags); - server = _find_server(uid); - if (server) - lcu = _find_lcu(server, uid); - spin_unlock_irqrestore(&aliastree.lock, flags); - if (!lcu) { - DBF_EVENT_DEVID(DBF_ERR, device->cdev, - "could not find lcu for %04x %02x", - uid->ssid, uid->real_unit_addr); - WARN_ON(1); - return; - } - wait_for_completion(&lcu->lcu_setup); + return 0; } /* @@ -313,9 +249,14 @@ void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device) struct alias_lcu *lcu; struct alias_server *server; int was_pending; + struct dasd_uid uid; private = (struct dasd_eckd_private *) device->private; lcu = private->lcu; + /* nothing to do if already disconnected */ + if (!lcu) + return; + device->discipline->get_uid(device, &uid); spin_lock_irqsave(&lcu->lock, flags); list_del_init(&device->alias_list); /* make sure that the workers don't use this device */ @@ -352,7 +293,7 @@ void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device) _schedule_lcu_update(lcu, NULL); spin_unlock(&lcu->lock); } - server = _find_server(&private->uid); + server = _find_server(&uid); if (server && list_empty(&server->lculist)) { list_del(&server->server); _free_server(server); @@ -365,19 +306,30 @@ void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device) * in the lcu is up to date and will update the device uid before * adding it to a pav group. 
*/ + static int _add_device_to_lcu(struct alias_lcu *lcu, - struct dasd_device *device) + struct dasd_device *device, + struct dasd_device *pos) { struct dasd_eckd_private *private; struct alias_pav_group *group; - struct dasd_uid *uid; + struct dasd_uid uid; + unsigned long flags; private = (struct dasd_eckd_private *) device->private; - uid = &private->uid; - uid->type = lcu->uac->unit[uid->real_unit_addr].ua_type; - uid->base_unit_addr = lcu->uac->unit[uid->real_unit_addr].base_ua; - dasd_set_uid(device->cdev, &private->uid); + + /* only lock if not already locked */ + if (device != pos) + spin_lock_irqsave_nested(get_ccwdev_lock(device->cdev), flags, + CDEV_NESTED_SECOND); + private->uid.type = lcu->uac->unit[private->uid.real_unit_addr].ua_type; + private->uid.base_unit_addr = + lcu->uac->unit[private->uid.real_unit_addr].base_ua; + uid = private->uid; + + if (device != pos) + spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); /* if we have no PAV anyway, we don't need to bother with PAV groups */ if (lcu->pav == NO_PAV) { @@ -385,25 +337,25 @@ static int _add_device_to_lcu(struct alias_lcu *lcu, return 0; } - group = _find_group(lcu, uid); + group = _find_group(lcu, &uid); if (!group) { group = kzalloc(sizeof(*group), GFP_ATOMIC); if (!group) return -ENOMEM; - memcpy(group->uid.vendor, uid->vendor, sizeof(uid->vendor)); - memcpy(group->uid.serial, uid->serial, sizeof(uid->serial)); - group->uid.ssid = uid->ssid; - if (uid->type == UA_BASE_DEVICE) - group->uid.base_unit_addr = uid->real_unit_addr; + memcpy(group->uid.vendor, uid.vendor, sizeof(uid.vendor)); + memcpy(group->uid.serial, uid.serial, sizeof(uid.serial)); + group->uid.ssid = uid.ssid; + if (uid.type == UA_BASE_DEVICE) + group->uid.base_unit_addr = uid.real_unit_addr; else - group->uid.base_unit_addr = uid->base_unit_addr; - memcpy(group->uid.vduit, uid->vduit, sizeof(uid->vduit)); + group->uid.base_unit_addr = uid.base_unit_addr; + memcpy(group->uid.vduit, uid.vduit, sizeof(uid.vduit)); INIT_LIST_HEAD(&group->group); INIT_LIST_HEAD(&group->baselist); INIT_LIST_HEAD(&group->aliaslist); list_add(&group->group, &lcu->grouplist); } - if (uid->type == UA_BASE_DEVICE) + if (uid.type == UA_BASE_DEVICE) list_move(&device->alias_list, &group->baselist); else list_move(&device->alias_list, &group->aliaslist); @@ -432,6 +384,29 @@ static void _remove_device_from_lcu(struct alias_lcu *lcu, group->next = NULL; }; +static int +suborder_not_supported(struct dasd_ccw_req *cqr) +{ + char *sense; + char reason; + char msg_format; + char msg_no; + + sense = dasd_get_sense(&cqr->irb); + if (!sense) + return 0; + + reason = sense[0]; + msg_format = (sense[7] & 0xF0); + msg_no = (sense[7] & 0x0F); + + /* command reject, Format 0 MSG 4 - invalid parameter */ + if ((reason == 0x80) && (msg_format == 0x00) && (msg_no == 0x04)) + return 1; + + return 0; +} + static int read_unit_address_configuration(struct dasd_device *device, struct alias_lcu *lcu) { @@ -473,7 +448,7 @@ static int read_unit_address_configuration(struct dasd_device *device, ccw->count = sizeof(*(lcu->uac)); ccw->cda = (__u32)(addr_t) lcu->uac; - cqr->buildclk = get_clock(); + cqr->buildclk = get_tod_clock(); cqr->status = DASD_CQR_FILLED; /* need to unset flag here to detect race with summary unit check */ @@ -483,6 +458,8 @@ static int read_unit_address_configuration(struct dasd_device *device, do { rc = dasd_sleep_on(cqr); + if (rc && suborder_not_supported(cqr)) + return -EOPNOTSUPP; } while (rc && (cqr->retries > 0)); if (rc) { spin_lock_irqsave(&lcu->lock, flags); 
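suborder_not_supported() above classifies a failed request purely from sense data: byte 0 carries the reason code, and byte 7 packs message format and message number into its two nibbles. The same decoding as a standalone check; the 32-byte buffer mirrors the driver's sense area:

#include <stdio.h>

static int suborder_not_supported(const unsigned char *sense)
{
        unsigned char reason = sense[0];
        unsigned char msg_format = sense[7] & 0xf0;	/* high nibble */
        unsigned char msg_no = sense[7] & 0x0f;		/* low nibble */

        /* command reject, format 0, message 4: invalid parameter */
        return reason == 0x80 && msg_format == 0x00 && msg_no == 0x04;
}

int main(void)
{
        unsigned char sense[32] = { [0] = 0x80, [7] = 0x04 };

        printf("unsupported suborder: %d\n", suborder_not_supported(sense));
        return 0;
}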
@@ -524,7 +501,10 @@ static int _lcu_update(struct dasd_device *refdev, struct alias_lcu *lcu) if (rc) return rc; - spin_lock_irqsave(&lcu->lock, flags); + /* need to take cdev lock before lcu lock */ + spin_lock_irqsave_nested(get_ccwdev_lock(refdev->cdev), flags, + CDEV_NESTED_FIRST); + spin_lock(&lcu->lock); lcu->pav = NO_PAV; for (i = 0; i < MAX_DEVICES_PER_LCU; ++i) { switch (lcu->uac->unit[i].ua_type) { @@ -541,9 +521,10 @@ static int _lcu_update(struct dasd_device *refdev, struct alias_lcu *lcu) list_for_each_entry_safe(device, tempdev, &lcu->active_devices, alias_list) { - _add_device_to_lcu(lcu, device); + _add_device_to_lcu(lcu, device, refdev); } - spin_unlock_irqrestore(&lcu->lock, flags); + spin_unlock(&lcu->lock); + spin_unlock_irqrestore(get_ccwdev_lock(refdev->cdev), flags); return 0; } @@ -565,7 +546,7 @@ static void lcu_update_work(struct work_struct *work) * processing the data */ spin_lock_irqsave(&lcu->lock, flags); - if (rc || (lcu->flags & NEED_UAC_UPDATE)) { + if ((rc && (rc != -EOPNOTSUPP)) || (lcu->flags & NEED_UAC_UPDATE)) { DBF_DEV_EVENT(DBF_WARNING, device, "could not update" " alias data in lcu (rc = %d), retry later", rc); schedule_delayed_work(&lcu->ruac_data.dwork, 30*HZ); @@ -627,9 +608,12 @@ int dasd_alias_add_device(struct dasd_device *device) private = (struct dasd_eckd_private *) device->private; lcu = private->lcu; rc = 0; - spin_lock_irqsave(&lcu->lock, flags); + + /* need to take cdev lock before lcu lock */ + spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); + spin_lock(&lcu->lock); if (!(lcu->flags & UPDATE_PENDING)) { - rc = _add_device_to_lcu(lcu, device); + rc = _add_device_to_lcu(lcu, device, device); if (rc) lcu->flags |= UPDATE_PENDING; } @@ -637,10 +621,19 @@ int dasd_alias_add_device(struct dasd_device *device) list_move(&device->alias_list, &lcu->active_devices); _schedule_lcu_update(lcu, device); } - spin_unlock_irqrestore(&lcu->lock, flags); + spin_unlock(&lcu->lock); + spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); return rc; } +int dasd_alias_update_add_device(struct dasd_device *device) +{ + struct dasd_eckd_private *private; + private = (struct dasd_eckd_private *) device->private; + private->lcu->flags |= UPDATE_PENDING; + return dasd_alias_add_device(device); +} + int dasd_alias_remove_device(struct dasd_device *device) { struct dasd_eckd_private *private; @@ -649,6 +642,9 @@ int dasd_alias_remove_device(struct dasd_device *device) private = (struct dasd_eckd_private *) device->private; lcu = private->lcu; + /* nothing to do if already removed */ + if (!lcu) + return 0; spin_lock_irqsave(&lcu->lock, flags); _remove_device_from_lcu(lcu, device); spin_unlock_irqrestore(&lcu->lock, flags); @@ -672,6 +668,16 @@ struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *base_device) if (lcu->pav == NO_PAV || lcu->flags & (NEED_UAC_UPDATE | UPDATE_PENDING)) return NULL; + if (unlikely(!(private->features.feature[8] & 0x01))) { + /* + * PAV enabled but prefix not, very unlikely + * seems to be a lost pathgroup + * use base device to do IO + */ + DBF_DEV_EVENT(DBF_ERR, base_device, "%s", + "Prefix not enabled with PAV enabled\n"); + return NULL; + } spin_lock_irqsave(&lcu->lock, flags); alias_device = group->next; @@ -727,7 +733,7 @@ static int reset_summary_unit_check(struct alias_lcu *lcu, cqr->memdev = device; cqr->block = NULL; cqr->expires = 5 * HZ; - cqr->buildclk = get_clock(); + cqr->buildclk = get_tod_clock(); cqr->status = DASD_CQR_FILLED; rc = dasd_sleep_on_immediatly(cqr); @@ -739,19 +745,30 @@ 
static void _restart_all_base_devices_on_lcu(struct alias_lcu *lcu) struct alias_pav_group *pavgroup; struct dasd_device *device; struct dasd_eckd_private *private; + unsigned long flags; /* active and inactive list can contain alias as well as base devices */ list_for_each_entry(device, &lcu->active_devices, alias_list) { private = (struct dasd_eckd_private *) device->private; - if (private->uid.type != UA_BASE_DEVICE) + spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); + if (private->uid.type != UA_BASE_DEVICE) { + spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), + flags); continue; + } + spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); dasd_schedule_block_bh(device->block); dasd_schedule_device_bh(device); } list_for_each_entry(device, &lcu->inactive_devices, alias_list) { private = (struct dasd_eckd_private *) device->private; - if (private->uid.type != UA_BASE_DEVICE) + spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); + if (private->uid.type != UA_BASE_DEVICE) { + spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), + flags); continue; + } + spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); dasd_schedule_block_bh(device->block); dasd_schedule_device_bh(device); } diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c index 8e23919c870..2ead7e78c45 100644 --- a/drivers/s390/block/dasd_devmap.c +++ b/drivers/s390/block/dasd_devmap.c @@ -1,11 +1,10 @@ /* - * File...........: linux/drivers/s390/block/dasd_devmap.c * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com> * Horst Hummel <Horst.Hummel@de.ibm.com> * Carsten Otte <Cotte@de.ibm.com> * Martin Schwidefsky <schwidefsky@de.ibm.com> * Bugreports.to..: <Linux390@de.ibm.com> - * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2001 + * Copyright IBM Corp. 1999,2001 * * Device mapping and dasd= parameter parsing functions. All devmap * functions may not be called from interrupt context. In particular @@ -18,6 +17,7 @@ #include <linux/ctype.h> #include <linux/init.h> #include <linux/module.h> +#include <linux/slab.h> #include <asm/debug.h> #include <asm/uaccess.h> @@ -48,7 +48,6 @@ struct dasd_devmap { unsigned int devindex; unsigned short features; struct dasd_device *device; - struct dasd_uid uid; }; /* @@ -208,6 +207,8 @@ dasd_feature_list(char *str, char **endp) features |= DASD_FEATURE_READONLY; else if (len == 4 && !strncmp(str, "diag", 4)) features |= DASD_FEATURE_USEDIAG; + else if (len == 3 && !strncmp(str, "raw", 3)) + features |= DASD_FEATURE_USERAW; else if (len == 6 && !strncmp(str, "erplog", 6)) features |= DASD_FEATURE_ERPLOG; else if (len == 8 && !strncmp(str, "failfast", 8)) @@ -300,7 +301,7 @@ dasd_parse_keyword( char *parsestring ) { /* * Try to interprete the first element on the comma separated parse string * as a device number or a range of devices. If the interpretation is - * successfull, create the matching dasd_devmap entries and return a pointer + * successful, create the matching dasd_devmap entries and return a pointer * to the residual string. * If interpretation fails or in case of an error, return an error code. 
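dasd_feature_list() above matches dasd= keywords by explicit length plus strncmp, and this patch adds "raw" for DASD_FEATURE_USERAW. A sketch of that matching style; the flag values here are illustrative, not the driver's:

#include <stdio.h>
#include <string.h>

enum {
        FEATURE_READONLY = 1 << 0,
        FEATURE_USEDIAG  = 1 << 1,
        FEATURE_USERAW   = 1 << 2,
};

/* len is the token length within the comma-separated parameter string,
 * so "raw" only matches an exactly three-character keyword. */
static int feature_from_keyword(const char *str, size_t len)
{
        if (len == 2 && !strncmp(str, "ro", 2))
                return FEATURE_READONLY;
        if (len == 4 && !strncmp(str, "diag", 4))
                return FEATURE_USEDIAG;
        if (len == 3 && !strncmp(str, "raw", 3))
                return FEATURE_USERAW;
        return -1;	/* unknown keyword */
}

int main(void)
{
        printf("raw -> %d\n", feature_from_keyword("raw", 3));
        return 0;
}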
*/ @@ -409,8 +410,7 @@ dasd_add_busid(const char *bus_id, int features) struct dasd_devmap *devmap, *new, *tmp; int hash; - new = (struct dasd_devmap *) - kzalloc(sizeof(struct dasd_devmap), GFP_KERNEL); + new = kzalloc(sizeof(struct dasd_devmap), GFP_KERNEL); if (!new) return ERR_PTR(-ENOMEM); spin_lock(&dasd_devmap_lock); @@ -639,6 +639,7 @@ dasd_put_device_wake(struct dasd_device *device) { wake_up(&dasd_delete_wq); } +EXPORT_SYMBOL_GPL(dasd_put_device_wake); /* * Return dasd_device structure associated with cdev. @@ -671,6 +672,36 @@ dasd_device_from_cdev(struct ccw_device *cdev) return device; } +void dasd_add_link_to_gendisk(struct gendisk *gdp, struct dasd_device *device) +{ + struct dasd_devmap *devmap; + + devmap = dasd_find_busid(dev_name(&device->cdev->dev)); + if (IS_ERR(devmap)) + return; + spin_lock(&dasd_devmap_lock); + gdp->private_data = devmap; + spin_unlock(&dasd_devmap_lock); +} + +struct dasd_device *dasd_device_from_gendisk(struct gendisk *gdp) +{ + struct dasd_device *device; + struct dasd_devmap *devmap; + + if (!gdp->private_data) + return NULL; + device = NULL; + spin_lock(&dasd_devmap_lock); + devmap = gdp->private_data; + if (devmap && devmap->device) { + device = devmap->device; + dasd_get_device(device); + } + spin_unlock(&dasd_devmap_lock); + return device; +} + /* * SECTION: files in sysfs */ @@ -856,7 +887,7 @@ dasd_use_diag_store(struct device *dev, struct device_attribute *attr, spin_lock(&dasd_devmap_lock); /* Changing diag discipline flag is only allowed in offline state. */ rc = count; - if (!devmap->device) { + if (!devmap->device && !(devmap->features & DASD_FEATURE_USERAW)) { if (val) devmap->features |= DASD_FEATURE_USEDIAG; else @@ -869,6 +900,89 @@ dasd_use_diag_store(struct device *dev, struct device_attribute *attr, static DEVICE_ATTR(use_diag, 0644, dasd_use_diag_show, dasd_use_diag_store); +/* + * use_raw controls whether the driver should give access to raw eckd data or + * operate in standard mode + */ +static ssize_t +dasd_use_raw_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct dasd_devmap *devmap; + int use_raw; + + devmap = dasd_find_busid(dev_name(dev)); + if (!IS_ERR(devmap)) + use_raw = (devmap->features & DASD_FEATURE_USERAW) != 0; + else + use_raw = (DASD_FEATURE_DEFAULT & DASD_FEATURE_USERAW) != 0; + return sprintf(buf, use_raw ? "1\n" : "0\n"); +} + +static ssize_t +dasd_use_raw_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct dasd_devmap *devmap; + ssize_t rc; + unsigned long val; + + devmap = dasd_devmap_from_cdev(to_ccwdev(dev)); + if (IS_ERR(devmap)) + return PTR_ERR(devmap); + + if ((kstrtoul(buf, 10, &val) != 0) || val > 1) + return -EINVAL; + + spin_lock(&dasd_devmap_lock); + /* Changing diag discipline flag is only allowed in offline state. 
*/ + rc = count; + if (!devmap->device && !(devmap->features & DASD_FEATURE_USEDIAG)) { + if (val) + devmap->features |= DASD_FEATURE_USERAW; + else + devmap->features &= ~DASD_FEATURE_USERAW; + } else + rc = -EPERM; + spin_unlock(&dasd_devmap_lock); + return rc; +} + +static DEVICE_ATTR(raw_track_access, 0644, dasd_use_raw_show, + dasd_use_raw_store); + +static ssize_t +dasd_safe_offline_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct ccw_device *cdev = to_ccwdev(dev); + struct dasd_device *device; + int rc; + + device = dasd_device_from_cdev(cdev); + if (IS_ERR(device)) { + rc = PTR_ERR(device); + goto out; + } + + if (test_bit(DASD_FLAG_OFFLINE, &device->flags) || + test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { + /* Already doing offline processing */ + dasd_put_device(device); + rc = -EBUSY; + goto out; + } + + set_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags); + dasd_put_device(device); + + rc = ccw_device_set_offline(cdev); + +out: + return rc ? rc : count; +} + +static DEVICE_ATTR(safe_offline, 0200, NULL, dasd_safe_offline_store); + static ssize_t dasd_discipline_show(struct device *dev, struct device_attribute *attr, char *buf) @@ -935,42 +1049,48 @@ dasd_device_status_show(struct device *dev, struct device_attribute *attr, static DEVICE_ATTR(status, 0444, dasd_device_status_show, NULL); -static ssize_t -dasd_alias_show(struct device *dev, struct device_attribute *attr, char *buf) +static ssize_t dasd_alias_show(struct device *dev, + struct device_attribute *attr, char *buf) { - struct dasd_devmap *devmap; - int alias; + struct dasd_device *device; + struct dasd_uid uid; - devmap = dasd_find_busid(dev_name(dev)); - spin_lock(&dasd_devmap_lock); - if (IS_ERR(devmap) || strlen(devmap->uid.vendor) == 0) { - spin_unlock(&dasd_devmap_lock); + device = dasd_device_from_cdev(to_ccwdev(dev)); + if (IS_ERR(device)) return sprintf(buf, "0\n"); + + if (device->discipline && device->discipline->get_uid && + !device->discipline->get_uid(device, &uid)) { + if (uid.type == UA_BASE_PAV_ALIAS || + uid.type == UA_HYPER_PAV_ALIAS) { + dasd_put_device(device); + return sprintf(buf, "1\n"); + } } - if (devmap->uid.type == UA_BASE_PAV_ALIAS || - devmap->uid.type == UA_HYPER_PAV_ALIAS) - alias = 1; - else - alias = 0; - spin_unlock(&dasd_devmap_lock); - return sprintf(buf, alias ? 
"1\n" : "0\n"); + dasd_put_device(device); + + return sprintf(buf, "0\n"); } static DEVICE_ATTR(alias, 0444, dasd_alias_show, NULL); -static ssize_t -dasd_vendor_show(struct device *dev, struct device_attribute *attr, char *buf) +static ssize_t dasd_vendor_show(struct device *dev, + struct device_attribute *attr, char *buf) { - struct dasd_devmap *devmap; + struct dasd_device *device; + struct dasd_uid uid; char *vendor; - devmap = dasd_find_busid(dev_name(dev)); - spin_lock(&dasd_devmap_lock); - if (!IS_ERR(devmap) && strlen(devmap->uid.vendor) > 0) - vendor = devmap->uid.vendor; - else - vendor = ""; - spin_unlock(&dasd_devmap_lock); + device = dasd_device_from_cdev(to_ccwdev(dev)); + vendor = ""; + if (IS_ERR(device)) + return snprintf(buf, PAGE_SIZE, "%s\n", vendor); + + if (device->discipline && device->discipline->get_uid && + !device->discipline->get_uid(device, &uid)) + vendor = uid.vendor; + + dasd_put_device(device); return snprintf(buf, PAGE_SIZE, "%s\n", vendor); } @@ -984,48 +1104,51 @@ static DEVICE_ATTR(vendor, 0444, dasd_vendor_show, NULL); static ssize_t dasd_uid_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct dasd_devmap *devmap; + struct dasd_device *device; + struct dasd_uid uid; char uid_string[UID_STRLEN]; char ua_string[3]; - struct dasd_uid *uid; - devmap = dasd_find_busid(dev_name(dev)); - spin_lock(&dasd_devmap_lock); - if (IS_ERR(devmap) || strlen(devmap->uid.vendor) == 0) { - spin_unlock(&dasd_devmap_lock); - return sprintf(buf, "\n"); - } - uid = &devmap->uid; - switch (uid->type) { - case UA_BASE_DEVICE: - sprintf(ua_string, "%02x", uid->real_unit_addr); - break; - case UA_BASE_PAV_ALIAS: - sprintf(ua_string, "%02x", uid->base_unit_addr); - break; - case UA_HYPER_PAV_ALIAS: - sprintf(ua_string, "xx"); - break; - default: - /* should not happen, treat like base device */ - sprintf(ua_string, "%02x", uid->real_unit_addr); - break; + device = dasd_device_from_cdev(to_ccwdev(dev)); + uid_string[0] = 0; + if (IS_ERR(device)) + return snprintf(buf, PAGE_SIZE, "%s\n", uid_string); + + if (device->discipline && device->discipline->get_uid && + !device->discipline->get_uid(device, &uid)) { + switch (uid.type) { + case UA_BASE_DEVICE: + snprintf(ua_string, sizeof(ua_string), "%02x", + uid.real_unit_addr); + break; + case UA_BASE_PAV_ALIAS: + snprintf(ua_string, sizeof(ua_string), "%02x", + uid.base_unit_addr); + break; + case UA_HYPER_PAV_ALIAS: + snprintf(ua_string, sizeof(ua_string), "xx"); + break; + default: + /* should not happen, treat like base device */ + snprintf(ua_string, sizeof(ua_string), "%02x", + uid.real_unit_addr); + break; + } + + if (strlen(uid.vduit) > 0) + snprintf(uid_string, sizeof(uid_string), + "%s.%s.%04x.%s.%s", + uid.vendor, uid.serial, uid.ssid, ua_string, + uid.vduit); + else + snprintf(uid_string, sizeof(uid_string), + "%s.%s.%04x.%s", + uid.vendor, uid.serial, uid.ssid, ua_string); } - if (strlen(uid->vduit) > 0) - snprintf(uid_string, sizeof(uid_string), - "%s.%s.%04x.%s.%s", - uid->vendor, uid->serial, - uid->ssid, ua_string, - uid->vduit); - else - snprintf(uid_string, sizeof(uid_string), - "%s.%s.%04x.%s", - uid->vendor, uid->serial, - uid->ssid, ua_string); - spin_unlock(&dasd_devmap_lock); + dasd_put_device(device); + return snprintf(buf, PAGE_SIZE, "%s\n", uid_string); } - static DEVICE_ATTR(uid, 0444, dasd_uid_show, NULL); /* @@ -1074,6 +1197,241 @@ dasd_eer_store(struct device *dev, struct device_attribute *attr, static DEVICE_ATTR(eer_enabled, 0644, dasd_eer_show, dasd_eer_store); +/* + * expiration 
time for default requests + */ +static ssize_t +dasd_expires_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct dasd_device *device; + int len; + + device = dasd_device_from_cdev(to_ccwdev(dev)); + if (IS_ERR(device)) + return -ENODEV; + len = snprintf(buf, PAGE_SIZE, "%lu\n", device->default_expires); + dasd_put_device(device); + return len; +} + +static ssize_t +dasd_expires_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct dasd_device *device; + unsigned long val; + + device = dasd_device_from_cdev(to_ccwdev(dev)); + if (IS_ERR(device)) + return -ENODEV; + + if ((kstrtoul(buf, 10, &val) != 0) || + (val > DASD_EXPIRES_MAX) || val == 0) { + dasd_put_device(device); + return -EINVAL; + } + + if (val) + device->default_expires = val; + + dasd_put_device(device); + return count; +} + +static DEVICE_ATTR(expires, 0644, dasd_expires_show, dasd_expires_store); + +static ssize_t +dasd_retries_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct dasd_device *device; + int len; + + device = dasd_device_from_cdev(to_ccwdev(dev)); + if (IS_ERR(device)) + return -ENODEV; + len = snprintf(buf, PAGE_SIZE, "%lu\n", device->default_retries); + dasd_put_device(device); + return len; +} + +static ssize_t +dasd_retries_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct dasd_device *device; + unsigned long val; + + device = dasd_device_from_cdev(to_ccwdev(dev)); + if (IS_ERR(device)) + return -ENODEV; + + if ((kstrtoul(buf, 10, &val) != 0) || + (val > DASD_RETRIES_MAX)) { + dasd_put_device(device); + return -EINVAL; + } + + if (val) + device->default_retries = val; + + dasd_put_device(device); + return count; +} + +static DEVICE_ATTR(retries, 0644, dasd_retries_show, dasd_retries_store); + +static ssize_t +dasd_timeout_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct dasd_device *device; + int len; + + device = dasd_device_from_cdev(to_ccwdev(dev)); + if (IS_ERR(device)) + return -ENODEV; + len = snprintf(buf, PAGE_SIZE, "%lu\n", device->blk_timeout); + dasd_put_device(device); + return len; +} + +static ssize_t +dasd_timeout_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct dasd_device *device; + struct request_queue *q; + unsigned long val, flags; + + device = dasd_device_from_cdev(to_ccwdev(dev)); + if (IS_ERR(device) || !device->block) + return -ENODEV; + + if ((kstrtoul(buf, 10, &val) != 0) || + val > UINT_MAX / HZ) { + dasd_put_device(device); + return -EINVAL; + } + q = device->block->request_queue; + if (!q) { + dasd_put_device(device); + return -ENODEV; + } + spin_lock_irqsave(&device->block->request_queue_lock, flags); + if (!val) + blk_queue_rq_timed_out(q, NULL); + else + blk_queue_rq_timed_out(q, dasd_times_out); + + device->blk_timeout = val; + + blk_queue_rq_timeout(q, device->blk_timeout * HZ); + spin_unlock_irqrestore(&device->block->request_queue_lock, flags); + + dasd_put_device(device); + return count; +} + +static DEVICE_ATTR(timeout, 0644, + dasd_timeout_show, dasd_timeout_store); + +static ssize_t dasd_reservation_policy_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct dasd_devmap *devmap; + int rc = 0; + + devmap = dasd_find_busid(dev_name(dev)); + if (IS_ERR(devmap)) { + rc = snprintf(buf, PAGE_SIZE, "ignore\n"); + } else { + spin_lock(&dasd_devmap_lock); + if (devmap->features & DASD_FEATURE_FAILONSLCK) + rc = 
snprintf(buf, PAGE_SIZE, "fail\n"); + else + rc = snprintf(buf, PAGE_SIZE, "ignore\n"); + spin_unlock(&dasd_devmap_lock); + } + return rc; +} + +static ssize_t dasd_reservation_policy_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct dasd_devmap *devmap; + int rc; + + devmap = dasd_devmap_from_cdev(to_ccwdev(dev)); + if (IS_ERR(devmap)) + return PTR_ERR(devmap); + rc = 0; + spin_lock(&dasd_devmap_lock); + if (sysfs_streq("ignore", buf)) + devmap->features &= ~DASD_FEATURE_FAILONSLCK; + else if (sysfs_streq("fail", buf)) + devmap->features |= DASD_FEATURE_FAILONSLCK; + else + rc = -EINVAL; + if (devmap->device) + devmap->device->features = devmap->features; + spin_unlock(&dasd_devmap_lock); + if (rc) + return rc; + else + return count; +} + +static DEVICE_ATTR(reservation_policy, 0644, + dasd_reservation_policy_show, dasd_reservation_policy_store); + +static ssize_t dasd_reservation_state_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct dasd_device *device; + int rc = 0; + + device = dasd_device_from_cdev(to_ccwdev(dev)); + if (IS_ERR(device)) + return snprintf(buf, PAGE_SIZE, "none\n"); + + if (test_bit(DASD_FLAG_IS_RESERVED, &device->flags)) + rc = snprintf(buf, PAGE_SIZE, "reserved\n"); + else if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags)) + rc = snprintf(buf, PAGE_SIZE, "lost\n"); + else + rc = snprintf(buf, PAGE_SIZE, "none\n"); + dasd_put_device(device); + return rc; +} + +static ssize_t dasd_reservation_state_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct dasd_device *device; + int rc = 0; + + device = dasd_device_from_cdev(to_ccwdev(dev)); + if (IS_ERR(device)) + return -ENODEV; + if (sysfs_streq("reset", buf)) + clear_bit(DASD_FLAG_LOCK_STOLEN, &device->flags); + else + rc = -EINVAL; + dasd_put_device(device); + + if (rc) + return rc; + else + return count; +} + +static DEVICE_ATTR(last_known_reservation_state, 0644, + dasd_reservation_state_show, dasd_reservation_state_store); + static struct attribute * dasd_attrs[] = { &dev_attr_readonly.attr, &dev_attr_discipline.attr, @@ -1082,9 +1440,16 @@ static struct attribute * dasd_attrs[] = { &dev_attr_vendor.attr, &dev_attr_uid.attr, &dev_attr_use_diag.attr, + &dev_attr_raw_track_access.attr, &dev_attr_eer_enabled.attr, &dev_attr_erplog.attr, &dev_attr_failfast.attr, + &dev_attr_expires.attr, + &dev_attr_retries.attr, + &dev_attr_timeout.attr, + &dev_attr_reservation_policy.attr, + &dev_attr_last_known_reservation_state.attr, + &dev_attr_safe_offline.attr, NULL, }; @@ -1093,50 +1458,6 @@ static struct attribute_group dasd_attr_group = { }; /* - * Return copy of the device unique identifier. - */ -int -dasd_get_uid(struct ccw_device *cdev, struct dasd_uid *uid) -{ - struct dasd_devmap *devmap; - - devmap = dasd_find_busid(dev_name(&cdev->dev)); - if (IS_ERR(devmap)) - return PTR_ERR(devmap); - spin_lock(&dasd_devmap_lock); - *uid = devmap->uid; - spin_unlock(&dasd_devmap_lock); - return 0; -} -EXPORT_SYMBOL_GPL(dasd_get_uid); - -/* - * Register the given device unique identifier into devmap struct. - * In addition check if the related storage server subsystem ID is already - * contained in the dasd_server_ssid_list. If subsystem ID is not contained, - * create new entry. - * Return 0 if server was already in serverlist, - * 1 if the server was added successful - * <0 in case of error. 
- */ -int -dasd_set_uid(struct ccw_device *cdev, struct dasd_uid *uid) -{ - struct dasd_devmap *devmap; - - devmap = dasd_find_busid(dev_name(&cdev->dev)); - if (IS_ERR(devmap)) - return PTR_ERR(devmap); - - spin_lock(&dasd_devmap_lock); - devmap->uid = *uid; - spin_unlock(&dasd_devmap_lock); - - return 0; -} -EXPORT_SYMBOL_GPL(dasd_set_uid); - -/* * Return value of the specified feature. */ int @@ -1153,7 +1474,7 @@ dasd_get_feature(struct ccw_device *cdev, int feature) /* * Set / reset given feature. - * Flag indicates wether to set (!=0) or the reset (=0) the feature. + * Flag indicates whether to set (!=0) or the reset (=0) the feature. */ int dasd_set_feature(struct ccw_device *cdev, int feature, int flag) diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c index 687f323cdc3..c062f1620c5 100644 --- a/drivers/s390/block/dasd_diag.c +++ b/drivers/s390/block/dasd_diag.c @@ -1,15 +1,15 @@ /* - * File...........: linux/drivers/s390/block/dasd_diag.c * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com> * Based on.......: linux/drivers/s390/block/mdisk.c * ...............: by Hartmunt Penner <hpenner@de.ibm.com> * Bugreports.to..: <Linux390@de.ibm.com> - * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000 + * Copyright IBM Corp. 1999, 2000 * */ #define KMSG_COMPONENT "dasd" +#include <linux/kernel_stat.h> #include <linux/stddef.h> #include <linux/kernel.h> #include <linux/slab.h> @@ -23,7 +23,7 @@ #include <asm/debug.h> #include <asm/ebcdic.h> #include <asm/io.h> -#include <asm/s390_ext.h> +#include <asm/irq.h> #include <asm/vtoc.h> #include <asm/diag.h> @@ -43,7 +43,7 @@ MODULE_LICENSE("GPL"); sizeof(struct dasd_diag_req)) / \ sizeof(struct dasd_diag_bio)) / 2) #define DIAG_MAX_RETRIES 32 -#define DIAG_TIMEOUT 50 * HZ +#define DIAG_TIMEOUT 50 static struct dasd_discipline dasd_diag_discipline; @@ -184,14 +184,14 @@ dasd_start_diag(struct dasd_ccw_req * cqr) private->iob.bio_list = dreq->bio; private->iob.flaga = DASD_DIAG_FLAGA_DEFAULT; - cqr->startclk = get_clock(); + cqr->startclk = get_tod_clock(); cqr->starttime = jiffies; cqr->retries--; rc = dia250(&private->iob, RW_BIO); switch (rc) { case 0: /* Synchronous I/O finished successfully */ - cqr->stopclk = get_clock(); + cqr->stopclk = get_tod_clock(); cqr->status = DASD_CQR_SUCCESS; /* Indicate to calling function that only a dasd_schedule_bh() and no timer is needed */ @@ -222,35 +222,33 @@ dasd_diag_term_IO(struct dasd_ccw_req * cqr) mdsk_term_io(device); mdsk_init_io(device, device->block->bp_block, 0, NULL); cqr->status = DASD_CQR_CLEAR_PENDING; - cqr->stopclk = get_clock(); + cqr->stopclk = get_tod_clock(); dasd_schedule_device_bh(device); return 0; } /* Handle external interruption. 
*/ -static void -dasd_ext_handler(__u16 code) +static void dasd_ext_handler(struct ext_code ext_code, + unsigned int param32, unsigned long param64) { struct dasd_ccw_req *cqr, *next; struct dasd_device *device; unsigned long long expires; unsigned long flags; - u8 int_code, status; addr_t ip; int rc; - int_code = *((u8 *) DASD_DIAG_LC_INT_CODE); - status = *((u8 *) DASD_DIAG_LC_INT_STATUS); - switch (int_code) { + switch (ext_code.subcode >> 8) { case DASD_DIAG_CODE_31BIT: - ip = (addr_t) *((u32 *) DASD_DIAG_LC_INT_PARM_31BIT); + ip = (addr_t) param32; break; case DASD_DIAG_CODE_64BIT: - ip = (addr_t) *((u64 *) DASD_DIAG_LC_INT_PARM_64BIT); + ip = (addr_t) param64; break; default: return; } + inc_irq_stat(IRQEXT_DSD); if (!ip) { /* no intparm: unsolicited interrupt */ DBF_EVENT(DBF_NOTICE, "%s", "caught unsolicited " "interrupt"); @@ -278,10 +276,10 @@ dasd_ext_handler(__u16 code) return; } - cqr->stopclk = get_clock(); + cqr->stopclk = get_tod_clock(); expires = 0; - if (status == 0) { + if ((ext_code.subcode & 0xff) == 0) { cqr->status = DASD_CQR_SUCCESS; /* Start first request on queue if possible -> fast_io. */ if (!list_empty(&device->ccw_queue)) { @@ -296,8 +294,8 @@ dasd_ext_handler(__u16 code) } else { cqr->status = DASD_CQR_QUEUED; DBF_DEV_EVENT(DBF_DEBUG, device, "interrupt status for " - "request %p was %d (%d retries left)", cqr, status, - cqr->retries); + "request %p was %d (%d retries left)", cqr, + ext_code.subcode & 0xff, cqr->retries); dasd_diag_erp(device); } @@ -360,6 +358,9 @@ dasd_diag_check_device(struct dasd_device *device) goto out; } + device->default_expires = DIAG_TIMEOUT; + device->default_retries = DIAG_MAX_RETRIES; + /* Figure out position of label block */ switch (private->rdc_data.vdev_class) { case DEV_CLASS_FBA: @@ -503,7 +504,7 @@ static struct dasd_ccw_req *dasd_diag_build_cp(struct dasd_device *memdev, struct dasd_diag_req *dreq; struct dasd_diag_bio *dbio; struct req_iterator iter; - struct bio_vec *bv; + struct bio_vec bv; char *dst; unsigned int count, datasize; sector_t recid, first_rec, last_rec; @@ -524,10 +525,10 @@ static struct dasd_ccw_req *dasd_diag_build_cp(struct dasd_device *memdev, /* Check struct bio and count the number of blocks for the request. */ count = 0; rq_for_each_segment(bv, req, iter) { - if (bv->bv_len & (blksize - 1)) + if (bv.bv_len & (blksize - 1)) /* Fba can only do full blocks. */ return ERR_PTR(-EINVAL); - count += bv->bv_len >> (block->s2b_shift + 9); + count += bv.bv_len >> (block->s2b_shift + 9); } /* Paranoia. 
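The reworked dasd_ext_handler() above no longer peeks at fixed lowcore addresses: the high byte of ext_code.subcode selects 31- or 64-bit interruption parameters, and the low byte is the completion status (0 means success). A standalone decode of an assumed sample subcode:

#include <stdint.h>
#include <stdio.h>

#define DASD_DIAG_CODE_31BIT 0x03
#define DASD_DIAG_CODE_64BIT 0x07

int main(void)
{
        uint16_t subcode = 0x0700;	/* assumed: 64-bit parm, status 0 */

        switch (subcode >> 8) {
        case DASD_DIAG_CODE_31BIT:
        case DASD_DIAG_CODE_64BIT:
                printf("status %d -> %s\n", subcode & 0xff,
                       (subcode & 0xff) == 0 ? "success" : "requeue and retry");
                break;
        default:
                puts("not a DIAG I/O interrupt");
        }
        return 0;
}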
*/ if (count != last_rec - first_rec + 1) @@ -544,8 +545,8 @@ static struct dasd_ccw_req *dasd_diag_build_cp(struct dasd_device *memdev, dbio = dreq->bio; recid = first_rec; rq_for_each_segment(bv, req, iter) { - dst = page_address(bv->bv_page) + bv->bv_offset; - for (off = 0; off < bv->bv_len; off += blksize) { + dst = page_address(bv.bv_page) + bv.bv_offset; + for (off = 0; off < bv.bv_len; off += blksize) { memset(dbio, 0, sizeof (struct dasd_diag_bio)); dbio->type = rw_cmd; dbio->block_number = recid + 1; @@ -555,15 +556,15 @@ static struct dasd_ccw_req *dasd_diag_build_cp(struct dasd_device *memdev, recid++; } } - cqr->retries = DIAG_MAX_RETRIES; - cqr->buildclk = get_clock(); + cqr->retries = memdev->default_retries; + cqr->buildclk = get_tod_clock(); if (blk_noretry_request(req) || block->base->features & DASD_FEATURE_FAILFAST) set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); cqr->startdev = memdev; cqr->memdev = memdev; cqr->block = block; - cqr->expires = DIAG_TIMEOUT; + cqr->expires = memdev->default_expires * HZ; cqr->status = DASD_CQR_FILLED; return cqr; } @@ -582,7 +583,10 @@ dasd_diag_free_cp(struct dasd_ccw_req *cqr, struct request *req) static void dasd_diag_handle_terminated_request(struct dasd_ccw_req *cqr) { - cqr->status = DASD_CQR_FILLED; + if (cqr->retries < 0) + cqr->status = DASD_CQR_FAILED; + else + cqr->status = DASD_CQR_FILLED; }; /* Fill in IOCTL data for device. */ @@ -618,6 +622,7 @@ static struct dasd_discipline dasd_diag_discipline = { .ebcname = "DIAG", .max_blocks = DIAG_MAX_BLOCKS, .check_device = dasd_diag_check_device, + .verify_path = dasd_generic_verify_path, .fill_geometry = dasd_diag_fill_geometry, .start_IO = dasd_start_diag, .term_IO = dasd_diag_term_IO, @@ -640,8 +645,8 @@ dasd_diag_init(void) } ASCEBC(dasd_diag_discipline.ebcname, 4); - ctl_set_bit(0, 9); - register_external_interrupt(0x2603, dasd_ext_handler); + irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL); + register_external_irq(EXT_IRQ_CP_SERVICE, dasd_ext_handler); dasd_diag_discipline_pointer = &dasd_diag_discipline; return 0; } @@ -649,8 +654,8 @@ dasd_diag_init(void) static void __exit dasd_diag_cleanup(void) { - unregister_external_interrupt(0x2603, dasd_ext_handler); - ctl_clear_bit(0, 9); + unregister_external_irq(EXT_IRQ_CP_SERVICE, dasd_ext_handler); + irq_subclass_unregister(IRQ_SUBCLASS_SERVICE_SIGNAL); dasd_diag_discipline_pointer = NULL; } diff --git a/drivers/s390/block/dasd_diag.h b/drivers/s390/block/dasd_diag.h index b8c78267ff3..a803cc73158 100644 --- a/drivers/s390/block/dasd_diag.h +++ b/drivers/s390/block/dasd_diag.h @@ -1,10 +1,9 @@ /* - * File...........: linux/drivers/s390/block/dasd_diag.h * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com> * Based on.......: linux/drivers/s390/block/mdisk.h * ...............: by Hartmunt Penner <hpenner@de.ibm.com> * Bugreports.to..: <Linux390@de.ibm.com> - * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000 + * Copyright IBM Corp. 
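dasd_diag_build_cp() above counts device blocks per bio segment with bv.bv_len >> (s2b_shift + 9), after rejecting segments that are not whole blocks. A worked example of that conversion; the segment sizes are assumed samples:

#include <stdio.h>

int main(void)
{
        /* 4KiB blocks: s2b_shift = 3, and +9 converts bytes to 512B
         * sectors first, so >> 12 turns bytes into 4KiB blocks. */
        unsigned int blksize = 4096, s2b_shift = 3;
        unsigned int seg_len[] = { 8192, 4096 };
        unsigned int count = 0;

        for (unsigned int i = 0; i < 2; i++) {
                if (seg_len[i] & (blksize - 1))
                        return 1;	/* FBA can only do full blocks */
                count += seg_len[i] >> (s2b_shift + 9);
        }
        printf("blocks in request: %u\n", count);	/* prints 3 */
        return 0;
}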
1999, 2000 * */ @@ -18,10 +17,6 @@ #define DEV_CLASS_FBA 0x01 #define DEV_CLASS_ECKD 0x04 -#define DASD_DIAG_LC_INT_CODE 132 -#define DASD_DIAG_LC_INT_STATUS 133 -#define DASD_DIAG_LC_INT_PARM_31BIT 128 -#define DASD_DIAG_LC_INT_PARM_64BIT 4536 #define DASD_DIAG_CODE_31BIT 0x03 #define DASD_DIAG_CODE_64BIT 0x07 diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index 01f4e7a34aa..2e8e0755070 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c @@ -1,5 +1,4 @@ /* - * File...........: linux/drivers/s390/block/dasd_eckd.c * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com> * Horst Hummel <Horst.Hummel@de.ibm.com> * Carsten Otte <Cotte@de.ibm.com> @@ -18,12 +17,13 @@ #include <linux/hdreg.h> /* HDIO_GETGEO */ #include <linux/bio.h> #include <linux/module.h> +#include <linux/compat.h> #include <linux/init.h> +#include <asm/css_chars.h> #include <asm/debug.h> #include <asm/idals.h> #include <asm/ebcdic.h> -#include <asm/compat.h> #include <asm/io.h> #include <asm/uaccess.h> #include <asm/cio.h> @@ -32,8 +32,6 @@ #include "dasd_int.h" #include "dasd_eckd.h" -#include "../cio/chsc.h" - #ifdef PRINTK_HEADER #undef PRINTK_HEADER @@ -54,6 +52,15 @@ #define ECKD_F7(i) (i->factor7) #define ECKD_F8(i) (i->factor8) +/* + * raw track access always map to 64k in memory + * so it maps to 16 blocks of 4k per track + */ +#define DASD_RAW_BLOCK_PER_TRACK 16 +#define DASD_RAW_BLOCKSIZE 4096 +/* 64k are 128 x 512 byte sectors */ +#define DASD_RAW_SECTORS_PER_TRACK 128 + MODULE_LICENSE("GPL"); static struct dasd_discipline dasd_eckd_discipline; @@ -63,7 +70,7 @@ static struct dasd_discipline dasd_eckd_discipline; static struct ccw_device_id dasd_eckd_ids[] = { { CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3390, 0), .driver_info = 0x1}, { CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3390, 0), .driver_info = 0x2}, - { CCW_DEVICE_DEVTYPE (0x3880, 0, 0x3390, 0), .driver_info = 0x3}, + { CCW_DEVICE_DEVTYPE (0x3880, 0, 0x3380, 0), .driver_info = 0x3}, { CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3380, 0), .driver_info = 0x4}, { CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3380, 0), .driver_info = 0x5}, { CCW_DEVICE_DEVTYPE (0x9343, 0, 0x9345, 0), .driver_info = 0x6}, @@ -78,10 +85,32 @@ MODULE_DEVICE_TABLE(ccw, dasd_eckd_ids); static struct ccw_driver dasd_eckd_driver; /* see below */ +static void *rawpadpage; + #define INIT_CQR_OK 0 #define INIT_CQR_UNFORMATTED 1 #define INIT_CQR_ERROR 2 +/* emergency request for reserve/release */ +static struct { + struct dasd_ccw_req cqr; + struct ccw1 ccw; + char data[32]; +} *dasd_reserve_req; +static DEFINE_MUTEX(dasd_reserve_mutex); + +/* definitions for the path verification worker */ +struct path_verification_work_data { + struct work_struct worker; + struct dasd_device *device; + struct dasd_ccw_req cqr; + struct ccw1 ccw; + __u8 rcd_buffer[DASD_ECKD_RCD_DATA_SIZE]; + int isglobal; + __u8 tbvpm; +}; +static struct path_verification_work_data *path_verification_worker; +static DEFINE_MUTEX(dasd_path_verification_mutex); /* initial attempt at a probe function. 
this can be simplified once * the other detection code is gone */ @@ -112,6 +141,10 @@ dasd_eckd_set_online(struct ccw_device *cdev) static const int sizes_trk0[] = { 28, 148, 84 }; #define LABEL_SIZE 140 +/* head and record addresses of count_area read in analysis ccw */ +static const int count_area_head[] = { 0, 0, 0, 0, 2 }; +static const int count_area_rec[] = { 1, 2, 3, 4, 1 }; + static inline unsigned int round_up_multiple(unsigned int no, unsigned int mult) { @@ -184,7 +217,7 @@ check_XRC (struct ccw1 *de_ccw, rc = get_sync_clock(&data->ep_sys_time); /* Ignore return code if sync clock is switched off. */ - if (rc == -ENOSYS || rc == -EACCES) + if (rc == -EOPNOTSUPP || rc == -EACCES) rc = 0; de_ccw->count = sizeof(struct DE_eckd_data); @@ -295,7 +328,7 @@ static int check_XRC_on_prefix(struct PFX_eckd_data *pfxdata, rc = get_sync_clock(&pfxdata->define_extent.ep_sys_time); /* Ignore return code if sync clock is switched off. */ - if (rc == -ENOSYS || rc == -EACCES) + if (rc == -EOPNOTSUPP || rc == -EACCES) rc = 0; return rc; } @@ -365,6 +398,23 @@ static void fill_LRE_data(struct LRE_eckd_data *data, unsigned int trk, data->length = reclen; data->operation.operation = 0x03; break; + case DASD_ECKD_CCW_WRITE_FULL_TRACK: + data->operation.orientation = 0x0; + data->operation.operation = 0x3F; + data->extended_operation = 0x11; + data->length = 0; + data->extended_parameter_length = 0x02; + if (data->count > 8) { + data->extended_parameter[0] = 0xFF; + data->extended_parameter[1] = 0xFF; + data->extended_parameter[1] <<= (16 - count); + } else { + data->extended_parameter[0] = 0xFF; + data->extended_parameter[0] <<= (8 - count); + data->extended_parameter[1] = 0x00; + } + data->sector = 0xFF; + break; case DASD_ECKD_CCW_WRITE_TRACK_DATA: data->auxiliary.length_valid = 0x1; data->length = reclen; /* not tlf, as one might think */ @@ -388,6 +438,12 @@ static void fill_LRE_data(struct LRE_eckd_data *data, unsigned int trk, case DASD_ECKD_CCW_READ_COUNT: data->operation.operation = 0x06; break; + case DASD_ECKD_CCW_READ_TRACK: + data->operation.orientation = 0x1; + data->operation.operation = 0x0C; + data->extended_parameter_length = 0; + data->sector = 0xFF; + break; case DASD_ECKD_CCW_READ_TRACK_DATA: data->auxiliary.length_valid = 0x1; data->length = tlf; @@ -431,10 +487,16 @@ static int prefix_LRE(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata, ccw->cmd_code = DASD_ECKD_CCW_PFX; ccw->flags = 0; - ccw->count = sizeof(*pfxdata); - ccw->cda = (__u32) __pa(pfxdata); + if (cmd == DASD_ECKD_CCW_WRITE_FULL_TRACK) { + ccw->count = sizeof(*pfxdata) + 2; + ccw->cda = (__u32) __pa(pfxdata); + memset(pfxdata, 0, sizeof(*pfxdata) + 2); + } else { + ccw->count = sizeof(*pfxdata); + ccw->cda = (__u32) __pa(pfxdata); + memset(pfxdata, 0, sizeof(*pfxdata)); + } - memset(pfxdata, 0, sizeof(*pfxdata)); /* prefix data */ if (format > 1) { DBF_DEV_EVENT(DBF_ERR, basedev, @@ -468,6 +530,7 @@ static int prefix_LRE(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata, dedata->mask.perm = 0x1; dedata->attributes.operation = basepriv->attrib.operation; break; + case DASD_ECKD_CCW_READ_TRACK: case DASD_ECKD_CCW_READ_TRACK_DATA: dedata->mask.perm = 0x1; dedata->attributes.operation = basepriv->attrib.operation; @@ -494,6 +557,11 @@ static int prefix_LRE(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata, dedata->attributes.operation = DASD_BYPASS_CACHE; rc = check_XRC_on_prefix(pfxdata, basedev); break; + case DASD_ECKD_CCW_WRITE_FULL_TRACK: + dedata->mask.perm = 0x03; + dedata->attributes.operation = 
basepriv->attrib.operation; + dedata->blk_size = 0; + break; case DASD_ECKD_CCW_WRITE_TRACK_DATA: dedata->mask.perm = 0x02; dedata->attributes.operation = basepriv->attrib.operation; @@ -688,22 +756,13 @@ dasd_eckd_cdl_reclen(int recid) return sizes_trk0[recid]; return LABEL_SIZE; } - -/* - * Generate device unique id that specifies the physical device. - */ -static int dasd_eckd_generate_uid(struct dasd_device *device, - struct dasd_uid *uid) +/* create unique id from private structure. */ +static void create_uid(struct dasd_eckd_private *private) { - struct dasd_eckd_private *private; int count; + struct dasd_uid *uid; - private = (struct dasd_eckd_private *) device->private; - if (!private) - return -ENODEV; - if (!private->ned || !private->gneq) - return -ENODEV; - + uid = &private->uid; memset(uid, 0, sizeof(struct dasd_uid)); memcpy(uid->vendor, private->ned->HDA_manufacturer, sizeof(uid->vendor) - 1); @@ -726,29 +785,78 @@ static int dasd_eckd_generate_uid(struct dasd_device *device, private->vdsneq->uit[count]); } } - return 0; } -static struct dasd_ccw_req *dasd_eckd_build_rcd_lpm(struct dasd_device *device, - void *rcd_buffer, - struct ciw *ciw, __u8 lpm) +/* + * Generate device unique id that specifies the physical device. + */ +static int dasd_eckd_generate_uid(struct dasd_device *device) { - struct dasd_ccw_req *cqr; - struct ccw1 *ccw; + struct dasd_eckd_private *private; + unsigned long flags; - cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* RCD */, ciw->count, - device); + private = (struct dasd_eckd_private *) device->private; + if (!private) + return -ENODEV; + if (!private->ned || !private->gneq) + return -ENODEV; + spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); + create_uid(private); + spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); + return 0; +} - if (IS_ERR(cqr)) { - DBF_DEV_EVENT(DBF_WARNING, device, "%s", - "Could not allocate RCD request"); - return cqr; +static int dasd_eckd_get_uid(struct dasd_device *device, struct dasd_uid *uid) +{ + struct dasd_eckd_private *private; + unsigned long flags; + + if (device->private) { + private = (struct dasd_eckd_private *)device->private; + spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); + *uid = private->uid; + spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); + return 0; } + return -EINVAL; +} + +/* + * compare device UID with data of a given dasd_eckd_private structure + * return 0 for match + */ +static int dasd_eckd_compare_path_uid(struct dasd_device *device, + struct dasd_eckd_private *private) +{ + struct dasd_uid device_uid; + + create_uid(private); + dasd_eckd_get_uid(device, &device_uid); + + return memcmp(&device_uid, &private->uid, sizeof(struct dasd_uid)); +} + +static void dasd_eckd_fill_rcd_cqr(struct dasd_device *device, + struct dasd_ccw_req *cqr, + __u8 *rcd_buffer, + __u8 lpm) +{ + struct ccw1 *ccw; + /* + * buffer has to start with EBCDIC "V1.0" to show + * support for virtual device SNEQ + */ + rcd_buffer[0] = 0xE5; + rcd_buffer[1] = 0xF1; + rcd_buffer[2] = 0x4B; + rcd_buffer[3] = 0xF0; ccw = cqr->cpaddr; - ccw->cmd_code = ciw->cmd; + ccw->cmd_code = DASD_ECKD_CCW_RCD; + ccw->flags = 0; ccw->cda = (__u32)(addr_t)rcd_buffer; - ccw->count = ciw->count; + ccw->count = DASD_ECKD_RCD_DATA_SIZE; + cqr->magic = DASD_ECKD_MAGIC; cqr->startdev = device; cqr->memdev = device; @@ -756,9 +864,57 @@ static struct dasd_ccw_req *dasd_eckd_build_rcd_lpm(struct dasd_device *device, cqr->expires = 10*HZ; cqr->lpm = lpm; cqr->retries = 256; - cqr->buildclk = get_clock(); + 
cqr->buildclk = get_tod_clock(); cqr->status = DASD_CQR_FILLED; - return cqr; + set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags); +} + +/* + * Wakeup helper for read_conf + * if the cqr is not done and needs some error recovery + * the buffer has to be re-initialized with the EBCDIC "V1.0" + * to show support for virtual device SNEQ + */ +static void read_conf_cb(struct dasd_ccw_req *cqr, void *data) +{ + struct ccw1 *ccw; + __u8 *rcd_buffer; + + if (cqr->status != DASD_CQR_DONE) { + ccw = cqr->cpaddr; + rcd_buffer = (__u8 *)((addr_t) ccw->cda); + memset(rcd_buffer, 0, sizeof(*rcd_buffer)); + + rcd_buffer[0] = 0xE5; + rcd_buffer[1] = 0xF1; + rcd_buffer[2] = 0x4B; + rcd_buffer[3] = 0xF0; + } + dasd_wakeup_cb(cqr, data); +} + +static int dasd_eckd_read_conf_immediately(struct dasd_device *device, + struct dasd_ccw_req *cqr, + __u8 *rcd_buffer, + __u8 lpm) +{ + struct ciw *ciw; + int rc; + /* + * sanity check: scan for RCD command in extended SenseID data + * some devices do not support RCD + */ + ciw = ccw_device_get_ciw(device->cdev, CIW_TYPE_RCD); + if (!ciw || ciw->cmd != DASD_ECKD_CCW_RCD) + return -EOPNOTSUPP; + + dasd_eckd_fill_rcd_cqr(device, cqr, rcd_buffer, lpm); + clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); + set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags); + cqr->retries = 5; + cqr->callback = read_conf_cb; + rc = dasd_sleep_on_immediatly(cqr); + return rc; } static int dasd_eckd_read_conf_lpm(struct dasd_device *device, @@ -771,32 +927,30 @@ static int dasd_eckd_read_conf_lpm(struct dasd_device *device, struct dasd_ccw_req *cqr; /* - * scan for RCD command in extended SenseID data + * sanity check: scan for RCD command in extended SenseID data + * some devices do not support RCD */ ciw = ccw_device_get_ciw(device->cdev, CIW_TYPE_RCD); - if (!ciw || ciw->cmd == 0) { + if (!ciw || ciw->cmd != DASD_ECKD_CCW_RCD) { ret = -EOPNOTSUPP; goto out_error; } - rcd_buf = kzalloc(ciw->count, GFP_KERNEL | GFP_DMA); + rcd_buf = kzalloc(DASD_ECKD_RCD_DATA_SIZE, GFP_KERNEL | GFP_DMA); if (!rcd_buf) { ret = -ENOMEM; goto out_error; } - - /* - * buffer has to start with EBCDIC "V1.0" to show - * support for virtual device SNEQ - */ - rcd_buf[0] = 0xE5; - rcd_buf[1] = 0xF1; - rcd_buf[2] = 0x4B; - rcd_buf[3] = 0xF0; - cqr = dasd_eckd_build_rcd_lpm(device, rcd_buf, ciw, lpm); + cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* RCD */, + 0, /* use rcd_buf as data ara */ + device); if (IS_ERR(cqr)) { - ret = PTR_ERR(cqr); + DBF_DEV_EVENT(DBF_WARNING, device, "%s", + "Could not allocate RCD request"); + ret = -ENOMEM; goto out_error; } + dasd_eckd_fill_rcd_cqr(device, cqr, rcd_buf, lpm); + cqr->callback = read_conf_cb; ret = dasd_sleep_on(cqr); /* * on success we update the user input parms @@ -805,7 +959,7 @@ static int dasd_eckd_read_conf_lpm(struct dasd_device *device, if (ret) goto out_error; - *rcd_buffer_size = ciw->count; + *rcd_buffer_size = DASD_ECKD_RCD_DATA_SIZE; *rcd_buffer = rcd_buf; return 0; out_error: @@ -874,58 +1028,378 @@ static int dasd_eckd_read_conf(struct dasd_device *device) { void *conf_data; int conf_len, conf_data_saved; - int rc; - __u8 lpm; - struct dasd_eckd_private *private; - struct dasd_eckd_path *path_data; + int rc, path_err; + __u8 lpm, opm; + struct dasd_eckd_private *private, path_private; + struct dasd_path *path_data; + struct dasd_uid *uid; + char print_path_uid[60], print_device_uid[60]; private = (struct dasd_eckd_private *) device->private; - path_data = (struct dasd_eckd_path *) &private->path_data; - path_data->opm = ccw_device_get_path_mask(device->cdev); - lpm = 
0x80; + path_data = &device->path_data; + opm = ccw_device_get_path_mask(device->cdev); conf_data_saved = 0; + path_err = 0; /* get configuration data per operational path */ for (lpm = 0x80; lpm; lpm>>= 1) { - if (lpm & path_data->opm){ - rc = dasd_eckd_read_conf_lpm(device, &conf_data, - &conf_len, lpm); - if (rc && rc != -EOPNOTSUPP) { /* -EOPNOTSUPP is ok */ - DBF_EVENT_DEVID(DBF_WARNING, device->cdev, - "Read configuration data returned " - "error %d", rc); - return rc; + if (!(lpm & opm)) + continue; + rc = dasd_eckd_read_conf_lpm(device, &conf_data, + &conf_len, lpm); + if (rc && rc != -EOPNOTSUPP) { /* -EOPNOTSUPP is ok */ + DBF_EVENT_DEVID(DBF_WARNING, device->cdev, + "Read configuration data returned " + "error %d", rc); + return rc; + } + if (conf_data == NULL) { + DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", + "No configuration data " + "retrieved"); + /* no further analysis possible */ + path_data->opm |= lpm; + continue; /* no error */ + } + /* save first valid configuration data */ + if (!conf_data_saved) { + kfree(private->conf_data); + private->conf_data = conf_data; + private->conf_len = conf_len; + if (dasd_eckd_identify_conf_parts(private)) { + private->conf_data = NULL; + private->conf_len = 0; + kfree(conf_data); + continue; } - if (conf_data == NULL) { - DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", - "No configuration data " - "retrieved"); - continue; /* no error */ + /* + * build device UID that other path data + * can be compared to it + */ + dasd_eckd_generate_uid(device); + conf_data_saved++; + } else { + path_private.conf_data = conf_data; + path_private.conf_len = DASD_ECKD_RCD_DATA_SIZE; + if (dasd_eckd_identify_conf_parts( + &path_private)) { + path_private.conf_data = NULL; + path_private.conf_len = 0; + kfree(conf_data); + continue; } - /* save first valid configuration data */ - if (!conf_data_saved) { - kfree(private->conf_data); - private->conf_data = conf_data; - private->conf_len = conf_len; - if (dasd_eckd_identify_conf_parts(private)) { - private->conf_data = NULL; - private->conf_len = 0; - kfree(conf_data); - continue; - } - conf_data_saved++; + + if (dasd_eckd_compare_path_uid( + device, &path_private)) { + uid = &path_private.uid; + if (strlen(uid->vduit) > 0) + snprintf(print_path_uid, + sizeof(print_path_uid), + "%s.%s.%04x.%02x.%s", + uid->vendor, uid->serial, + uid->ssid, uid->real_unit_addr, + uid->vduit); + else + snprintf(print_path_uid, + sizeof(print_path_uid), + "%s.%s.%04x.%02x", + uid->vendor, uid->serial, + uid->ssid, + uid->real_unit_addr); + uid = &private->uid; + if (strlen(uid->vduit) > 0) + snprintf(print_device_uid, + sizeof(print_device_uid), + "%s.%s.%04x.%02x.%s", + uid->vendor, uid->serial, + uid->ssid, uid->real_unit_addr, + uid->vduit); + else + snprintf(print_device_uid, + sizeof(print_device_uid), + "%s.%s.%04x.%02x", + uid->vendor, uid->serial, + uid->ssid, + uid->real_unit_addr); + dev_err(&device->cdev->dev, + "Not all channel paths lead to " + "the same device, path %02X leads to " + "device %s instead of %s\n", lpm, + print_path_uid, print_device_uid); + path_err = -EINVAL; + continue; } - switch (dasd_eckd_path_access(conf_data, conf_len)) { + + path_private.conf_data = NULL; + path_private.conf_len = 0; + } + switch (dasd_eckd_path_access(conf_data, conf_len)) { + case 0x02: + path_data->npm |= lpm; + break; + case 0x03: + path_data->ppm |= lpm; + break; + } + path_data->opm |= lpm; + + if (conf_data != private->conf_data) + kfree(conf_data); + } + + return path_err; +} + +static int 
verify_fcx_max_data(struct dasd_device *device, __u8 lpm) +{ + struct dasd_eckd_private *private; + int mdc; + u32 fcx_max_data; + + private = (struct dasd_eckd_private *) device->private; + if (private->fcx_max_data) { + mdc = ccw_device_get_mdc(device->cdev, lpm); + if ((mdc < 0)) { + dev_warn(&device->cdev->dev, + "Detecting the maximum data size for zHPF " + "requests failed (rc=%d) for a new path %x\n", + mdc, lpm); + return mdc; + } + fcx_max_data = mdc * FCX_MAX_DATA_FACTOR; + if (fcx_max_data < private->fcx_max_data) { + dev_warn(&device->cdev->dev, + "The maximum data size for zHPF requests %u " + "on a new path %x is below the active maximum " + "%u\n", fcx_max_data, lpm, + private->fcx_max_data); + return -EACCES; + } + } + return 0; +} + +static int rebuild_device_uid(struct dasd_device *device, + struct path_verification_work_data *data) +{ + struct dasd_eckd_private *private; + struct dasd_path *path_data; + __u8 lpm, opm; + int rc; + + rc = -ENODEV; + private = (struct dasd_eckd_private *) device->private; + path_data = &device->path_data; + opm = device->path_data.opm; + + for (lpm = 0x80; lpm; lpm >>= 1) { + if (!(lpm & opm)) + continue; + memset(&data->rcd_buffer, 0, sizeof(data->rcd_buffer)); + memset(&data->cqr, 0, sizeof(data->cqr)); + data->cqr.cpaddr = &data->ccw; + rc = dasd_eckd_read_conf_immediately(device, &data->cqr, + data->rcd_buffer, + lpm); + + if (rc) { + if (rc == -EOPNOTSUPP) /* -EOPNOTSUPP is ok */ + continue; + DBF_EVENT_DEVID(DBF_WARNING, device->cdev, + "Read configuration data " + "returned error %d", rc); + break; + } + memcpy(private->conf_data, data->rcd_buffer, + DASD_ECKD_RCD_DATA_SIZE); + if (dasd_eckd_identify_conf_parts(private)) { + rc = -ENODEV; + } else /* first valid path is enough */ + break; + } + + if (!rc) + rc = dasd_eckd_generate_uid(device); + + return rc; +} + +static void do_path_verification_work(struct work_struct *work) +{ + struct path_verification_work_data *data; + struct dasd_device *device; + struct dasd_eckd_private path_private; + struct dasd_uid *uid; + __u8 path_rcd_buf[DASD_ECKD_RCD_DATA_SIZE]; + __u8 lpm, opm, npm, ppm, epm; + unsigned long flags; + char print_uid[60]; + int rc; + + data = container_of(work, struct path_verification_work_data, worker); + device = data->device; + + /* delay path verification until device was resumed */ + if (test_bit(DASD_FLAG_SUSPENDED, &device->flags)) { + schedule_work(work); + return; + } + + opm = 0; + npm = 0; + ppm = 0; + epm = 0; + for (lpm = 0x80; lpm; lpm >>= 1) { + if (!(lpm & data->tbvpm)) + continue; + memset(&data->rcd_buffer, 0, sizeof(data->rcd_buffer)); + memset(&data->cqr, 0, sizeof(data->cqr)); + data->cqr.cpaddr = &data->ccw; + rc = dasd_eckd_read_conf_immediately(device, &data->cqr, + data->rcd_buffer, + lpm); + if (!rc) { + switch (dasd_eckd_path_access(data->rcd_buffer, + DASD_ECKD_RCD_DATA_SIZE) + ) { case 0x02: - path_data->npm |= lpm; + npm |= lpm; break; case 0x03: - path_data->ppm |= lpm; + ppm |= lpm; break; } - if (conf_data != private->conf_data) - kfree(conf_data); + opm |= lpm; + } else if (rc == -EOPNOTSUPP) { + DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", + "path verification: No configuration " + "data retrieved"); + opm |= lpm; + } else if (rc == -EAGAIN) { + DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", + "path verification: device is stopped," + " try again later"); + epm |= lpm; + } else { + dev_warn(&device->cdev->dev, + "Reading device feature codes failed " + "(rc=%d) for new path %x\n", rc, lpm); + continue; } + if 
(verify_fcx_max_data(device, lpm)) { + opm &= ~lpm; + npm &= ~lpm; + ppm &= ~lpm; + continue; + } + + /* + * save conf_data for comparison after + * rebuild_device_uid may have changed + * the original data + */ + memcpy(&path_rcd_buf, data->rcd_buffer, + DASD_ECKD_RCD_DATA_SIZE); + path_private.conf_data = (void *) &path_rcd_buf; + path_private.conf_len = DASD_ECKD_RCD_DATA_SIZE; + if (dasd_eckd_identify_conf_parts(&path_private)) { + path_private.conf_data = NULL; + path_private.conf_len = 0; + continue; + } + + /* + * compare path UID with device UID only if at least + * one valid path is left + * in other case the device UID may have changed and + * the first working path UID will be used as device UID + */ + if (device->path_data.opm && + dasd_eckd_compare_path_uid(device, &path_private)) { + /* + * the comparison was not successful + * rebuild the device UID with at least one + * known path in case a z/VM hyperswap command + * has changed the device + * + * after this compare again + * + * if either the rebuild or the recompare fails + * the path can not be used + */ + if (rebuild_device_uid(device, data) || + dasd_eckd_compare_path_uid( + device, &path_private)) { + uid = &path_private.uid; + if (strlen(uid->vduit) > 0) + snprintf(print_uid, sizeof(print_uid), + "%s.%s.%04x.%02x.%s", + uid->vendor, uid->serial, + uid->ssid, uid->real_unit_addr, + uid->vduit); + else + snprintf(print_uid, sizeof(print_uid), + "%s.%s.%04x.%02x", + uid->vendor, uid->serial, + uid->ssid, + uid->real_unit_addr); + dev_err(&device->cdev->dev, + "The newly added channel path %02X " + "will not be used because it leads " + "to a different device %s\n", + lpm, print_uid); + opm &= ~lpm; + npm &= ~lpm; + ppm &= ~lpm; + continue; + } + } + + /* + * There is a small chance that a path is lost again between + * above path verification and the following modification of + * the device opm mask. We could avoid that race here by using + * yet another path mask, but we rather deal with this unlikely + * situation in dasd_start_IO. 
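/*
 * Illustrative sketch, not part of the patch: every per-path loop this
 * patch adds (dasd_eckd_read_conf, rebuild_device_uid,
 * do_path_verification_work) walks an 8-bit channel-path mask the same
 * way. Bit 7 (0x80) stands for the first channel path, bit 0 for the
 * eighth; opm/npm/ppm/tbvpm are all masks of this form. Standalone:
 */
#include <stdio.h>

static void walk_path_mask(unsigned char opm)
{
	unsigned char lpm;
	int chp;

	for (lpm = 0x80, chp = 0; lpm; lpm >>= 1, chp++) {
		if (!(lpm & opm))
			continue;	/* path not in the mask, skip it */
		printf("channel path %d (lpm 0x%02x) selected\n", chp, lpm);
	}
}

int main(void)
{
	walk_path_mask(0xc1);	/* reports paths 0, 1 and 7 */
	return 0;
}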
+ */ + spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); + if (!device->path_data.opm && opm) { + device->path_data.opm = opm; + dasd_generic_path_operational(device); + } else + device->path_data.opm |= opm; + device->path_data.npm |= npm; + device->path_data.ppm |= ppm; + device->path_data.tbvpm |= epm; + spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); } + + dasd_put_device(device); + if (data->isglobal) + mutex_unlock(&dasd_path_verification_mutex); + else + kfree(data); +} + +static int dasd_eckd_verify_path(struct dasd_device *device, __u8 lpm) +{ + struct path_verification_work_data *data; + + data = kmalloc(sizeof(*data), GFP_ATOMIC | GFP_DMA); + if (!data) { + if (mutex_trylock(&dasd_path_verification_mutex)) { + data = path_verification_worker; + data->isglobal = 1; + } else + return -ENOMEM; + } else { + memset(data, 0, sizeof(*data)); + data->isglobal = 0; + } + INIT_WORK(&data->worker, do_path_verification_work); + dasd_get_device(device); + data->device = device; + data->tbvpm = lpm; + schedule_work(&data->worker); return 0; } @@ -977,7 +1451,7 @@ static int dasd_eckd_read_features(struct dasd_device *device) ccw->count = sizeof(struct dasd_rssd_features); ccw->cda = (__u32)(addr_t) features; - cqr->buildclk = get_clock(); + cqr->buildclk = get_tod_clock(); cqr->status = DASD_CQR_FILLED; rc = dasd_sleep_on(cqr); if (rc == 0) { @@ -1029,7 +1503,7 @@ static struct dasd_ccw_req *dasd_eckd_build_psf_ssc(struct dasd_device *device, cqr->block = NULL; cqr->retries = 256; cqr->expires = 10*HZ; - cqr->buildclk = get_clock(); + cqr->buildclk = get_tod_clock(); cqr->status = DASD_CQR_FILLED; return cqr; } @@ -1040,7 +1514,8 @@ static struct dasd_ccw_req *dasd_eckd_build_psf_ssc(struct dasd_device *device, * call might change behaviour of DASD devices. */ static int -dasd_eckd_psf_ssc(struct dasd_device *device, int enable_pav) +dasd_eckd_psf_ssc(struct dasd_device *device, int enable_pav, + unsigned long flags) { struct dasd_ccw_req *cqr; int rc; @@ -1049,10 +1524,19 @@ dasd_eckd_psf_ssc(struct dasd_device *device, int enable_pav) if (IS_ERR(cqr)) return PTR_ERR(cqr); + /* + * set flags e.g. turn on failfast, to prevent blocking + * the calling function should handle failed requests + */ + cqr->flags |= flags; + rc = dasd_sleep_on(cqr); if (!rc) /* trigger CIO to reprobe devices */ css_schedule_reprobe(); + else if (cqr->intrc == -EAGAIN) + rc = -EAGAIN; + dasd_sfree_request(cqr, cqr->memdev); return rc; } @@ -1060,23 +1544,92 @@ dasd_eckd_psf_ssc(struct dasd_device *device, int enable_pav) /* * Valide storage server of current device. 
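/*
 * Illustrative sketch, not part of the patch: dasd_eckd_verify_path()
 * above uses a classic pattern - try an atomic allocation first, and fall
 * back to one statically allocated emergency object guarded by a mutex
 * trylock, so path verification still makes progress under memory
 * pressure. The same shape in portable C; pthreads and malloc are
 * stand-ins for the kernel mutex and kmalloc(GFP_ATOMIC), and note that
 * pthread_mutex_trylock() returns 0 on success, the inverse of the
 * kernel's mutex_trylock().
 */
#include <pthread.h>
#include <stdlib.h>
#include <string.h>

struct work_data { int isglobal; /* ... request payload ... */ };

static struct work_data global_work;	/* single emergency object */
static pthread_mutex_t global_work_lock = PTHREAD_MUTEX_INITIALIZER;

static struct work_data *get_work_data(void)
{
	struct work_data *data = malloc(sizeof(*data));

	if (!data) {
		/* fall back to the preallocated object, if it is free */
		if (pthread_mutex_trylock(&global_work_lock))
			return NULL;	/* the kernel code returns -ENOMEM */
		data = &global_work;
		data->isglobal = 1;
		return data;
	}
	memset(data, 0, sizeof(*data));	/* isglobal stays 0 */
	return data;
}

static void put_work_data(struct work_data *data)
{
	if (data->isglobal)
		pthread_mutex_unlock(&global_work_lock);
	else
		free(data);
}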
*/ -static void dasd_eckd_validate_server(struct dasd_device *device) +static int dasd_eckd_validate_server(struct dasd_device *device, + unsigned long flags) { int rc; struct dasd_eckd_private *private; int enable_pav; + private = (struct dasd_eckd_private *) device->private; + if (private->uid.type == UA_BASE_PAV_ALIAS || + private->uid.type == UA_HYPER_PAV_ALIAS) + return 0; if (dasd_nopav || MACHINE_IS_VM) enable_pav = 0; else enable_pav = 1; - rc = dasd_eckd_psf_ssc(device, enable_pav); + rc = dasd_eckd_psf_ssc(device, enable_pav, flags); /* may be requested feature is not available on server, * therefore just report error and go ahead */ - private = (struct dasd_eckd_private *) device->private; DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "PSF-SSC for SSID %04x " "returned rc=%d", private->uid.ssid, rc); + return rc; +} + +/* + * worker to do a validate server in case of a lost pathgroup + */ +static void dasd_eckd_do_validate_server(struct work_struct *work) +{ + struct dasd_device *device = container_of(work, struct dasd_device, + kick_validate); + unsigned long flags = 0; + + set_bit(DASD_CQR_FLAGS_FAILFAST, &flags); + if (dasd_eckd_validate_server(device, flags) + == -EAGAIN) { + /* schedule worker again if failed */ + schedule_work(&device->kick_validate); + return; + } + + dasd_put_device(device); +} + +static void dasd_eckd_kick_validate_server(struct dasd_device *device) +{ + dasd_get_device(device); + /* exit if device not online or in offline processing */ + if (test_bit(DASD_FLAG_OFFLINE, &device->flags) || + device->state < DASD_STATE_ONLINE) { + dasd_put_device(device); + return; + } + /* queue call to do_validate_server to the kernel event daemon. */ + schedule_work(&device->kick_validate); +} + +static u32 get_fcx_max_data(struct dasd_device *device) +{ +#if defined(CONFIG_64BIT) + int tpm, mdc; + int fcx_in_css, fcx_in_gneq, fcx_in_features; + struct dasd_eckd_private *private; + + if (dasd_nofcx) + return 0; + /* is transport mode supported? 
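/*
 * Illustrative sketch, not part of the patch: dasd_eckd_kick_validate_server()
 * above takes a device reference before queueing work, and the worker
 * either requeues itself on -EAGAIN (keeping the reference) or drops the
 * reference once the PSF-SSC finally went through. get_ref/put_ref below
 * are hypothetical stand-ins for dasd_get_device/dasd_put_device, and the
 * while loop stands in for the worker rescheduling itself; only the
 * reference discipline is the point.
 */
#include <errno.h>
#include <stdio.h>

struct dev_ctx { int refs; int transient_failures; };

static void get_ref(struct dev_ctx *d) { d->refs++; }
static void put_ref(struct dev_ctx *d) { d->refs--; }

/* pretend server: answers -EAGAIN a few times, then succeeds */
static int validate_server(struct dev_ctx *d)
{
	return d->transient_failures-- > 0 ? -EAGAIN : 0;
}

static void validate_worker(struct dev_ctx *d)
{
	while (validate_server(d) == -EAGAIN)
		;	/* the kernel requeues the work item instead */
	put_ref(d);	/* dropped only once validation is done */
}

int main(void)
{
	struct dev_ctx d = { .refs = 1, .transient_failures = 3 };

	get_ref(&d);		/* kick: hold the device... */
	validate_worker(&d);	/* ...until the deferred work finishes */
	printf("refs = %d\n", d.refs);	/* back to 1 */
	return 0;
}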
*/ + private = (struct dasd_eckd_private *) device->private; + fcx_in_css = css_general_characteristics.fcx; + fcx_in_gneq = private->gneq->reserved2[7] & 0x04; + fcx_in_features = private->features.feature[40] & 0x80; + tpm = fcx_in_css && fcx_in_gneq && fcx_in_features; + + if (!tpm) + return 0; + + mdc = ccw_device_get_mdc(device->cdev, 0); + if (mdc < 0) { + dev_warn(&device->cdev->dev, "Detecting the maximum supported" + " data size for zHPF requests failed\n"); + return 0; + } else + return mdc * FCX_MAX_DATA_FACTOR; +#else + return 0; +#endif } /* @@ -1088,8 +1641,13 @@ dasd_eckd_check_characteristics(struct dasd_device *device) { struct dasd_eckd_private *private; struct dasd_block *block; - int is_known, rc; + struct dasd_uid temp_uid; + int rc, i; int readonly; + unsigned long value; + + /* setup work queue for validate server*/ + INIT_WORK(&device->kick_validate, dasd_eckd_do_validate_server); if (!ccw_device_is_pathgroup(device->cdev)) { dev_warn(&device->cdev->dev, @@ -1124,13 +1682,23 @@ dasd_eckd_check_characteristics(struct dasd_device *device) if (rc) goto out_err1; - /* Generate device unique id and register in devmap */ - rc = dasd_eckd_generate_uid(device, &private->uid); - if (rc) - goto out_err1; - dasd_set_uid(device->cdev, &private->uid); + /* set default timeout */ + device->default_expires = DASD_EXPIRES; + /* set default retry count */ + device->default_retries = DASD_RETRIES; + + if (private->gneq) { + value = 1; + for (i = 0; i < private->gneq->timeout.value; i++) + value = 10 * value; + value = value * private->gneq->timeout.number; + /* do not accept useless values */ + if (value != 0 && value <= DASD_EXPIRES_MAX) + device->default_expires = value; + } - if (private->uid.type == UA_BASE_DEVICE) { + dasd_eckd_get_uid(device, &temp_uid); + if (temp_uid.type == UA_BASE_DEVICE) { block = dasd_alloc_block(); if (IS_ERR(block)) { DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", @@ -1143,22 +1711,12 @@ dasd_eckd_check_characteristics(struct dasd_device *device) block->base = device; } - /* register lcu with alias handling, enable PAV if this is a new lcu */ - is_known = dasd_alias_make_device_known_to_lcu(device); - if (is_known < 0) { - rc = is_known; + /* register lcu with alias handling, enable PAV */ + rc = dasd_alias_make_device_known_to_lcu(device); + if (rc) goto out_err2; - } - /* - * dasd_eckd_vaildate_server is done on the first device that - * is found for an LCU. All later other devices have to wait - * for it, so they will read the correct feature codes. 
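/*
 * Illustrative sketch, not part of the patch: the default_expires hunk
 * above decodes the device-suggested timeout from the GNEQ as
 * number * 10^value, a small floating-point-like two-field encoding.
 * Standalone version of the arithmetic; the upper bound is quoted here as
 * an assumed value of DASD_EXPIRES_MAX from dasd_int.h:
 */
#include <stdio.h>

#define EXPIRES_MAX 40000000UL	/* assumed DASD_EXPIRES_MAX */

static unsigned long decode_gneq_timeout(unsigned char number,
					 unsigned char exponent)
{
	unsigned long value = 1;
	int i;

	for (i = 0; i < exponent; i++)	/* value = 10^exponent */
		value *= 10;
	value *= number;		/* seconds = number * 10^exponent */
	/* do not accept useless values; 0 means keep the built-in default */
	if (value == 0 || value > EXPIRES_MAX)
		return 0;
	return value;
}

int main(void)
{
	printf("%lu\n", decode_gneq_timeout(3, 2));	/* 3 * 10^2 = 300 s */
	return 0;
}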
- */ - if (!is_known) { - dasd_eckd_validate_server(device); - dasd_alias_lcu_setup_complete(device); - } else - dasd_alias_wait_for_lcu_setup(device); + + dasd_eckd_validate_server(device, 0); /* device may report different configuration data after LCU setup */ rc = dasd_eckd_read_conf(device); @@ -1176,13 +1734,24 @@ dasd_eckd_check_characteristics(struct dasd_device *device) "Read device characteristic failed, rc=%d", rc); goto out_err3; } - /* find the vaild cylinder size */ + + if ((device->features & DASD_FEATURE_USERAW) && + !(private->rdc_data.facilities.RT_in_LR)) { + dev_err(&device->cdev->dev, "The storage server does not " + "support raw-track access\n"); + rc = -EINVAL; + goto out_err3; + } + + /* find the valid cylinder size */ if (private->rdc_data.no_cyl == LV_COMPAT_CYL && private->rdc_data.long_no_cyl) private->real_cyl = private->rdc_data.long_no_cyl; else private->real_cyl = private->rdc_data.no_cyl; + private->fcx_max_data = get_fcx_max_data(device); + readonly = dasd_device_is_ro(device); if (readonly) set_bit(DASD_FLAG_DEVICE_RO, &device->flags); @@ -1280,7 +1849,7 @@ dasd_eckd_analysis_ccw(struct dasd_device *device) cqr->startdev = device; cqr->memdev = device; cqr->retries = 255; - cqr->buildclk = get_clock(); + cqr->buildclk = get_tod_clock(); cqr->status = DASD_CQR_FILLED; return cqr; } @@ -1324,10 +1893,8 @@ static void dasd_eckd_analysis_callback(struct dasd_ccw_req *init_cqr, static int dasd_eckd_start_analysis(struct dasd_block *block) { - struct dasd_eckd_private *private; struct dasd_ccw_req *init_cqr; - private = (struct dasd_eckd_private *) block->base->private; init_cqr = dasd_eckd_analysis_ccw(block->base); if (IS_ERR(init_cqr)) return PTR_ERR(init_cqr); @@ -1364,6 +1931,13 @@ static int dasd_eckd_end_analysis(struct dasd_block *block) dasd_sfree_request(init_cqr, device); } + if (device->features & DASD_FEATURE_USERAW) { + block->bp_block = DASD_RAW_BLOCKSIZE; + blk_per_trk = DASD_RAW_BLOCK_PER_TRACK; + block->s2b_shift = 3; + goto raw; + } + if (status == INIT_CQR_UNFORMATTED) { dev_warn(&device->cdev->dev, "The DASD is not formatted\n"); return -EMEDIUMTYPE; @@ -1379,7 +1953,10 @@ static int dasd_eckd_end_analysis(struct dasd_block *block) count_area = NULL; for (i = 0; i < 3; i++) { if (private->count_area[i].kl != 4 || - private->count_area[i].dl != dasd_eckd_cdl_reclen(i) - 4) { + private->count_area[i].dl != dasd_eckd_cdl_reclen(i) - 4 || + private->count_area[i].cyl != 0 || + private->count_area[i].head != count_area_head[i] || + private->count_area[i].record != count_area_rec[i]) { private->uses_cdl = 0; break; } @@ -1391,7 +1968,10 @@ static int dasd_eckd_end_analysis(struct dasd_block *block) for (i = 0; i < 5; i++) { if ((private->count_area[i].kl != 0) || (private->count_area[i].dl != - private->count_area[0].dl)) + private->count_area[0].dl) || + private->count_area[i].cyl != 0 || + private->count_area[i].head != count_area_head[i] || + private->count_area[i].record != count_area_rec[i]) break; } if (i == 5) @@ -1401,6 +1981,7 @@ static int dasd_eckd_end_analysis(struct dasd_block *block) dev_warn(&device->cdev->dev, "Track 0 has no records following the VTOC\n"); } + if (count_area != NULL && count_area->kl == 0) { /* we found notthing violating our disk layout */ if (dasd_check_blocksize(count_area->dl) == 0) @@ -1416,6 +1997,8 @@ static int dasd_eckd_end_analysis(struct dasd_block *block) block->s2b_shift++; blk_per_trk = recs_per_track(&private->rdc_data, 0, block->bp_block); + +raw: block->blocks = (private->real_cyl * 
private->rdc_data.trk_per_cyl * blk_per_trk); @@ -1444,13 +2027,20 @@ static int dasd_eckd_do_analysis(struct dasd_block *block) return dasd_eckd_end_analysis(block); } -static int dasd_eckd_ready_to_online(struct dasd_device *device) +static int dasd_eckd_basic_to_ready(struct dasd_device *device) { return dasd_alias_add_device(device); }; static int dasd_eckd_online_to_ready(struct dasd_device *device) { + cancel_work_sync(&device->reload_device); + cancel_work_sync(&device->kick_validate); + return 0; +}; + +static int dasd_eckd_ready_to_basic(struct dasd_device *device) +{ return dasd_alias_remove_device(device); }; @@ -1470,45 +2060,35 @@ dasd_eckd_fill_geometry(struct dasd_block *block, struct hd_geometry *geo) } static struct dasd_ccw_req * -dasd_eckd_format_device(struct dasd_device * device, - struct format_data_t * fdata) +dasd_eckd_build_format(struct dasd_device *base, + struct format_data_t *fdata) { - struct dasd_eckd_private *private; + struct dasd_eckd_private *base_priv; + struct dasd_eckd_private *start_priv; + struct dasd_device *startdev; struct dasd_ccw_req *fcp; struct eckd_count *ect; + struct ch_t address; struct ccw1 *ccw; void *data; int rpt; - struct ch_t address; int cplength, datasize; - int i; + int i, j; int intensity = 0; int r0_perm; + int nr_tracks; + int use_prefix; - private = (struct dasd_eckd_private *) device->private; - rpt = recs_per_track(&private->rdc_data, 0, fdata->blksize); - set_ch_t(&address, - fdata->start_unit / private->rdc_data.trk_per_cyl, - fdata->start_unit % private->rdc_data.trk_per_cyl); + startdev = dasd_alias_get_start_dev(base); + if (!startdev) + startdev = base; - /* Sanity checks. */ - if (fdata->start_unit >= - (private->real_cyl * private->rdc_data.trk_per_cyl)) { - dev_warn(&device->cdev->dev, "Start track number %d used in " - "formatting is too big\n", fdata->start_unit); - return ERR_PTR(-EINVAL); - } - if (fdata->start_unit > fdata->stop_unit) { - dev_warn(&device->cdev->dev, "Start track %d used in " - "formatting exceeds end track\n", fdata->start_unit); - return ERR_PTR(-EINVAL); - } - if (dasd_check_blocksize(fdata->blksize) != 0) { - dev_warn(&device->cdev->dev, - "The DASD cannot be formatted with block size %d\n", - fdata->blksize); - return ERR_PTR(-EINVAL); - } + start_priv = (struct dasd_eckd_private *) startdev->private; + base_priv = (struct dasd_eckd_private *) base->private; + + rpt = recs_per_track(&base_priv->rdc_data, 0, fdata->blksize); + + nr_tracks = fdata->stop_unit - fdata->start_unit + 1; /* * fdata->intensity is a bit string that tells us what to do: @@ -1526,155 +2106,336 @@ dasd_eckd_format_device(struct dasd_device * device, r0_perm = 1; intensity = fdata->intensity; } + + use_prefix = base_priv->features.feature[8] & 0x01; + switch (intensity) { case 0x00: /* Normal format */ case 0x08: /* Normal format, use cdl. */ - cplength = 2 + rpt; - datasize = sizeof(struct DE_eckd_data) + - sizeof(struct LO_eckd_data) + - rpt * sizeof(struct eckd_count); + cplength = 2 + (rpt*nr_tracks); + if (use_prefix) + datasize = sizeof(struct PFX_eckd_data) + + sizeof(struct LO_eckd_data) + + rpt * nr_tracks * sizeof(struct eckd_count); + else + datasize = sizeof(struct DE_eckd_data) + + sizeof(struct LO_eckd_data) + + rpt * nr_tracks * sizeof(struct eckd_count); break; case 0x01: /* Write record zero and format track. */ case 0x09: /* Write record zero and format track, use cdl. 
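/*
 * Illustrative sketch, not part of the patch: the sizing above says a
 * normal multi-track format program is one PFX (or DE) CCW, one LO CCW
 * and one write CCW per record, plus one eckd_count descriptor per record
 * in the data area. Plain-C rendering of that arithmetic; the PFX/DE/LO
 * sizes are illustrative placeholders (the real ones come from
 * dasd_eckd.h), while an eckd_count really is 8 bytes: cyl, head, record,
 * kl, dl.
 */
#include <stdio.h>

#define SZ_PFX 224	/* placeholder for sizeof(struct PFX_eckd_data) */
#define SZ_DE   32	/* placeholder for sizeof(struct DE_eckd_data)  */
#define SZ_LO   16	/* placeholder for sizeof(struct LO_eckd_data)  */
#define SZ_CNT   8	/* sizeof(struct eckd_count)                    */

static void format_cp_size(int rpt, int nr_tracks, int use_prefix)
{
	int cplength = 2 + rpt * nr_tracks;	/* PFX/DE + LO + writes */
	size_t datasize = (use_prefix ? SZ_PFX : SZ_DE) + SZ_LO +
			  (size_t)rpt * nr_tracks * SZ_CNT;

	printf("%d CCWs, %zu data bytes\n", cplength, datasize);
}

int main(void)
{
	format_cp_size(12, 15, 1);	/* e.g. 12 records/track, 15 tracks */
	return 0;
}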
*/ - cplength = 3 + rpt; - datasize = sizeof(struct DE_eckd_data) + - sizeof(struct LO_eckd_data) + - sizeof(struct eckd_count) + - rpt * sizeof(struct eckd_count); + cplength = 2 + rpt * nr_tracks; + if (use_prefix) + datasize = sizeof(struct PFX_eckd_data) + + sizeof(struct LO_eckd_data) + + sizeof(struct eckd_count) + + rpt * nr_tracks * sizeof(struct eckd_count); + else + datasize = sizeof(struct DE_eckd_data) + + sizeof(struct LO_eckd_data) + + sizeof(struct eckd_count) + + rpt * nr_tracks * sizeof(struct eckd_count); break; case 0x04: /* Invalidate track. */ case 0x0c: /* Invalidate track, use cdl. */ cplength = 3; - datasize = sizeof(struct DE_eckd_data) + - sizeof(struct LO_eckd_data) + - sizeof(struct eckd_count); + if (use_prefix) + datasize = sizeof(struct PFX_eckd_data) + + sizeof(struct LO_eckd_data) + + sizeof(struct eckd_count); + else + datasize = sizeof(struct DE_eckd_data) + + sizeof(struct LO_eckd_data) + + sizeof(struct eckd_count); break; default: - dev_warn(&device->cdev->dev, "An I/O control call used " - "incorrect flags 0x%x\n", fdata->intensity); + dev_warn(&startdev->cdev->dev, + "An I/O control call used incorrect flags 0x%x\n", + fdata->intensity); return ERR_PTR(-EINVAL); } /* Allocate the format ccw request. */ - fcp = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, device); + fcp = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, + datasize, startdev); if (IS_ERR(fcp)) return fcp; + start_priv->count++; data = fcp->data; ccw = fcp->cpaddr; switch (intensity & ~0x08) { case 0x00: /* Normal format. */ - define_extent(ccw++, (struct DE_eckd_data *) data, - fdata->start_unit, fdata->start_unit, - DASD_ECKD_CCW_WRITE_CKD, device); - /* grant subsystem permission to format R0 */ - if (r0_perm) - ((struct DE_eckd_data *)data)->ga_extended |= 0x04; - data += sizeof(struct DE_eckd_data); + if (use_prefix) { + prefix(ccw++, (struct PFX_eckd_data *) data, + fdata->start_unit, fdata->stop_unit, + DASD_ECKD_CCW_WRITE_CKD, base, startdev); + /* grant subsystem permission to format R0 */ + if (r0_perm) + ((struct PFX_eckd_data *)data) + ->define_extent.ga_extended |= 0x04; + data += sizeof(struct PFX_eckd_data); + } else { + define_extent(ccw++, (struct DE_eckd_data *) data, + fdata->start_unit, fdata->stop_unit, + DASD_ECKD_CCW_WRITE_CKD, startdev); + /* grant subsystem permission to format R0 */ + if (r0_perm) + ((struct DE_eckd_data *) data) + ->ga_extended |= 0x04; + data += sizeof(struct DE_eckd_data); + } ccw[-1].flags |= CCW_FLAG_CC; locate_record(ccw++, (struct LO_eckd_data *) data, - fdata->start_unit, 0, rpt, - DASD_ECKD_CCW_WRITE_CKD, device, + fdata->start_unit, 0, rpt*nr_tracks, + DASD_ECKD_CCW_WRITE_CKD, base, fdata->blksize); data += sizeof(struct LO_eckd_data); break; case 0x01: /* Write record zero + format track. 
*/ - define_extent(ccw++, (struct DE_eckd_data *) data, - fdata->start_unit, fdata->start_unit, - DASD_ECKD_CCW_WRITE_RECORD_ZERO, - device); - data += sizeof(struct DE_eckd_data); + if (use_prefix) { + prefix(ccw++, (struct PFX_eckd_data *) data, + fdata->start_unit, fdata->stop_unit, + DASD_ECKD_CCW_WRITE_RECORD_ZERO, + base, startdev); + data += sizeof(struct PFX_eckd_data); + } else { + define_extent(ccw++, (struct DE_eckd_data *) data, + fdata->start_unit, fdata->stop_unit, + DASD_ECKD_CCW_WRITE_RECORD_ZERO, startdev); + data += sizeof(struct DE_eckd_data); + } ccw[-1].flags |= CCW_FLAG_CC; locate_record(ccw++, (struct LO_eckd_data *) data, - fdata->start_unit, 0, rpt + 1, - DASD_ECKD_CCW_WRITE_RECORD_ZERO, device, - device->block->bp_block); + fdata->start_unit, 0, rpt * nr_tracks + 1, + DASD_ECKD_CCW_WRITE_RECORD_ZERO, base, + base->block->bp_block); data += sizeof(struct LO_eckd_data); break; case 0x04: /* Invalidate track. */ - define_extent(ccw++, (struct DE_eckd_data *) data, - fdata->start_unit, fdata->start_unit, - DASD_ECKD_CCW_WRITE_CKD, device); - data += sizeof(struct DE_eckd_data); + if (use_prefix) { + prefix(ccw++, (struct PFX_eckd_data *) data, + fdata->start_unit, fdata->stop_unit, + DASD_ECKD_CCW_WRITE_CKD, base, startdev); + data += sizeof(struct PFX_eckd_data); + } else { + define_extent(ccw++, (struct DE_eckd_data *) data, + fdata->start_unit, fdata->stop_unit, + DASD_ECKD_CCW_WRITE_CKD, startdev); + data += sizeof(struct DE_eckd_data); + } ccw[-1].flags |= CCW_FLAG_CC; locate_record(ccw++, (struct LO_eckd_data *) data, fdata->start_unit, 0, 1, - DASD_ECKD_CCW_WRITE_CKD, device, 8); + DASD_ECKD_CCW_WRITE_CKD, base, 8); data += sizeof(struct LO_eckd_data); break; } - if (intensity & 0x01) { /* write record zero */ - ect = (struct eckd_count *) data; - data += sizeof(struct eckd_count); - ect->cyl = address.cyl; - ect->head = address.head; - ect->record = 0; - ect->kl = 0; - ect->dl = 8; - ccw[-1].flags |= CCW_FLAG_CC; - ccw->cmd_code = DASD_ECKD_CCW_WRITE_RECORD_ZERO; - ccw->flags = CCW_FLAG_SLI; - ccw->count = 8; - ccw->cda = (__u32)(addr_t) ect; - ccw++; - } - if ((intensity & ~0x08) & 0x04) { /* erase track */ - ect = (struct eckd_count *) data; - data += sizeof(struct eckd_count); - ect->cyl = address.cyl; - ect->head = address.head; - ect->record = 1; - ect->kl = 0; - ect->dl = 0; - ccw[-1].flags |= CCW_FLAG_CC; - ccw->cmd_code = DASD_ECKD_CCW_WRITE_CKD; - ccw->flags = CCW_FLAG_SLI; - ccw->count = 8; - ccw->cda = (__u32)(addr_t) ect; - } else { /* write remaining records */ - for (i = 0; i < rpt; i++) { + + for (j = 0; j < nr_tracks; j++) { + /* calculate cylinder and head for the current track */ + set_ch_t(&address, + (fdata->start_unit + j) / + base_priv->rdc_data.trk_per_cyl, + (fdata->start_unit + j) % + base_priv->rdc_data.trk_per_cyl); + if (intensity & 0x01) { /* write record zero */ ect = (struct eckd_count *) data; data += sizeof(struct eckd_count); ect->cyl = address.cyl; ect->head = address.head; - ect->record = i + 1; + ect->record = 0; ect->kl = 0; - ect->dl = fdata->blksize; - /* Check for special tracks 0-1 when formatting CDL */ - if ((intensity & 0x08) && - fdata->start_unit == 0) { - if (i < 3) { - ect->kl = 4; - ect->dl = sizes_trk0[i] - 4; - } - } - if ((intensity & 0x08) && - fdata->start_unit == 1) { - ect->kl = 44; - ect->dl = LABEL_SIZE - 44; - } + ect->dl = 8; ccw[-1].flags |= CCW_FLAG_CC; - ccw->cmd_code = DASD_ECKD_CCW_WRITE_CKD; + ccw->cmd_code = DASD_ECKD_CCW_WRITE_RECORD_ZERO; ccw->flags = CCW_FLAG_SLI; ccw->count = 8; ccw->cda = 
(__u32)(addr_t) ect; ccw++; } + if ((intensity & ~0x08) & 0x04) { /* erase track */ + ect = (struct eckd_count *) data; + data += sizeof(struct eckd_count); + ect->cyl = address.cyl; + ect->head = address.head; + ect->record = 1; + ect->kl = 0; + ect->dl = 0; + ccw[-1].flags |= CCW_FLAG_CC; + ccw->cmd_code = DASD_ECKD_CCW_WRITE_CKD; + ccw->flags = CCW_FLAG_SLI; + ccw->count = 8; + ccw->cda = (__u32)(addr_t) ect; + } else { /* write remaining records */ + for (i = 0; i < rpt; i++) { + ect = (struct eckd_count *) data; + data += sizeof(struct eckd_count); + ect->cyl = address.cyl; + ect->head = address.head; + ect->record = i + 1; + ect->kl = 0; + ect->dl = fdata->blksize; + /* + * Check for special tracks 0-1 + * when formatting CDL + */ + if ((intensity & 0x08) && + fdata->start_unit == 0) { + if (i < 3) { + ect->kl = 4; + ect->dl = sizes_trk0[i] - 4; + } + } + if ((intensity & 0x08) && + fdata->start_unit == 1) { + ect->kl = 44; + ect->dl = LABEL_SIZE - 44; + } + ccw[-1].flags |= CCW_FLAG_CC; + if (i != 0 || j == 0) + ccw->cmd_code = + DASD_ECKD_CCW_WRITE_CKD; + else + ccw->cmd_code = + DASD_ECKD_CCW_WRITE_CKD_MT; + ccw->flags = CCW_FLAG_SLI; + ccw->count = 8; + ccw->cda = (__u32)(addr_t) ect; + ccw++; + } + } } - fcp->startdev = device; - fcp->memdev = device; + + fcp->startdev = startdev; + fcp->memdev = startdev; fcp->retries = 256; - fcp->buildclk = get_clock(); + fcp->expires = startdev->default_expires * HZ; + fcp->buildclk = get_tod_clock(); fcp->status = DASD_CQR_FILLED; + return fcp; } +static int +dasd_eckd_format_device(struct dasd_device *base, + struct format_data_t *fdata) +{ + struct dasd_ccw_req *cqr, *n; + struct dasd_block *block; + struct dasd_eckd_private *private; + struct list_head format_queue; + struct dasd_device *device; + int old_stop, format_step; + int step, rc = 0; + + block = base->block; + private = (struct dasd_eckd_private *) base->private; + + /* Sanity checks. 
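/*
 * Illustrative sketch, not part of the patch: the per-track loop above
 * maps a linear track number to cylinder/head exactly as set_ch_t() does,
 * and when formatting with the compatible disk layout (intensity bit
 * 0x08) it gives records on tracks 0 and 1 special key/data lengths;
 * ordinary records get kl=0 and dl=blksize. Standalone, with sizes_trk0
 * and LABEL_SIZE as in the driver:
 */
#include <stdio.h>

static const int sizes_trk0[] = { 28, 148, 84 };
#define LABEL_SIZE 140

static void cdl_record_geometry(unsigned int trk, unsigned int trk_per_cyl,
				int rec, int blksize)
{
	unsigned int cyl = trk / trk_per_cyl;	/* set_ch_t() split */
	unsigned int head = trk % trk_per_cyl;
	int kl = 0, dl = blksize;

	if (trk == 0 && rec < 3) {	/* first three records of track 0 */
		kl = 4;
		dl = sizes_trk0[rec] - 4;
	} else if (trk == 1) {		/* the VOL1 label track */
		kl = 44;
		dl = LABEL_SIZE - 44;
	}
	printf("cyl %u head %u rec %d: kl=%d dl=%d\n",
	       cyl, head, rec + 1, kl, dl);
}

int main(void)
{
	cdl_record_geometry(0, 15, 0, 4096);	/* -> kl=4  dl=24 */
	cdl_record_geometry(1, 15, 0, 4096);	/* -> kl=44 dl=96 */
	return 0;
}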
*/ + if (fdata->start_unit >= + (private->real_cyl * private->rdc_data.trk_per_cyl)) { + dev_warn(&base->cdev->dev, + "Start track number %u used in formatting is too big\n", + fdata->start_unit); + return -EINVAL; + } + if (fdata->stop_unit >= + (private->real_cyl * private->rdc_data.trk_per_cyl)) { + dev_warn(&base->cdev->dev, + "Stop track number %u used in formatting is too big\n", + fdata->stop_unit); + return -EINVAL; + } + if (fdata->start_unit > fdata->stop_unit) { + dev_warn(&base->cdev->dev, + "Start track %u used in formatting exceeds end track\n", + fdata->start_unit); + return -EINVAL; + } + if (dasd_check_blocksize(fdata->blksize) != 0) { + dev_warn(&base->cdev->dev, + "The DASD cannot be formatted with block size %u\n", + fdata->blksize); + return -EINVAL; + } + + INIT_LIST_HEAD(&format_queue); + old_stop = fdata->stop_unit; + + while (fdata->start_unit <= 1) { + fdata->stop_unit = fdata->start_unit; + cqr = dasd_eckd_build_format(base, fdata); + list_add(&cqr->blocklist, &format_queue); + + fdata->stop_unit = old_stop; + fdata->start_unit++; + + if (fdata->start_unit > fdata->stop_unit) + goto sleep; + } + +retry: + format_step = 255 / recs_per_track(&private->rdc_data, 0, + fdata->blksize); + while (fdata->start_unit <= old_stop) { + step = fdata->stop_unit - fdata->start_unit + 1; + if (step > format_step) + fdata->stop_unit = fdata->start_unit + format_step - 1; + + cqr = dasd_eckd_build_format(base, fdata); + if (IS_ERR(cqr)) { + if (PTR_ERR(cqr) == -ENOMEM) { + /* + * not enough memory available + * go to out and start requests + * retry after first requests were finished + */ + fdata->stop_unit = old_stop; + goto sleep; + } else + return PTR_ERR(cqr); + } + list_add(&cqr->blocklist, &format_queue); + + fdata->start_unit = fdata->stop_unit + 1; + fdata->stop_unit = old_stop; + } + +sleep: + dasd_sleep_on_queue(&format_queue); + + list_for_each_entry_safe(cqr, n, &format_queue, blocklist) { + device = cqr->startdev; + private = (struct dasd_eckd_private *) device->private; + if (cqr->status == DASD_CQR_FAILED) + rc = -EIO; + list_del_init(&cqr->blocklist); + dasd_sfree_request(cqr, device); + private->count--; + } + + /* + * in case of ENOMEM we need to retry after + * first requests are finished + */ + if (fdata->start_unit <= fdata->stop_unit) + goto retry; + + return rc; +} + static void dasd_eckd_handle_terminated_request(struct dasd_ccw_req *cqr) { + if (cqr->retries < 0) { + cqr->status = DASD_CQR_FAILED; + return; + } cqr->status = DASD_CQR_FILLED; if (cqr->block && (cqr->startdev != cqr->block->base)) { dasd_eckd_reset_ccw_to_base_io(cqr); cqr->startdev = cqr->block->base; + cqr->lpm = cqr->block->base->path_data.opm; } }; @@ -1703,60 +2464,73 @@ dasd_eckd_erp_postaction(struct dasd_ccw_req * cqr) return dasd_default_erp_postaction; } - -static void dasd_eckd_handle_unsolicited_interrupt(struct dasd_device *device, - struct irb *irb) +static void dasd_eckd_check_for_device_change(struct dasd_device *device, + struct dasd_ccw_req *cqr, + struct irb *irb) { char mask; char *sense = NULL; + struct dasd_eckd_private *private; + private = (struct dasd_eckd_private *) device->private; /* first of all check for state change pending interrupt */ mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP; if ((scsw_dstat(&irb->scsw) & mask) == mask) { + /* + * for alias only, not in offline processing + * and only if not suspended + */ + if (!device->block && private->lcu && + device->state == DASD_STATE_ONLINE && + !test_bit(DASD_FLAG_OFFLINE, &device->flags) && 
+ !test_bit(DASD_FLAG_SUSPENDED, &device->flags)) { + /* + * the state change could be caused by an alias + * reassignment remove device from alias handling + * to prevent new requests from being scheduled on + * the wrong alias device + */ + dasd_alias_remove_device(device); + + /* schedule worker to reload device */ + dasd_reload_device(device); + } dasd_generic_handle_state_change(device); return; } + sense = dasd_get_sense(irb); + if (!sense) + return; + /* summary unit check */ - if ((scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK) && - (irb->ecw[7] == 0x0D)) { + if ((sense[27] & DASD_SENSE_BIT_0) && (sense[7] == 0x0D) && + (scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK)) { dasd_alias_handle_summary_unit_check(device, irb); return; } - sense = dasd_get_sense(irb); /* service information message SIM */ - if (sense && !(sense[27] & DASD_SENSE_BIT_0) && + if (!cqr && !(sense[27] & DASD_SENSE_BIT_0) && ((sense[6] & DASD_SIM_SENSE) == DASD_SIM_SENSE)) { dasd_3990_erp_handle_sim(device, sense); - dasd_schedule_device_bh(device); - return; - } - - if ((scsw_cc(&irb->scsw) == 1) && - (scsw_fctl(&irb->scsw) & SCSW_FCTL_START_FUNC) && - (scsw_actl(&irb->scsw) & SCSW_ACTL_START_PEND) && - (scsw_stctl(&irb->scsw) & SCSW_STCTL_STATUS_PEND)) { - /* fake irb do nothing, they are handled elsewhere */ - dasd_schedule_device_bh(device); return; } - if (!sense) { - /* just report other unsolicited interrupts */ - DBF_DEV_EVENT(DBF_ERR, device, "%s", - "unsolicited interrupt received"); - } else { - DBF_DEV_EVENT(DBF_ERR, device, "%s", - "unsolicited interrupt received " - "(sense available)"); - device->discipline->dump_sense_dbf(device, irb, "unsolicited"); + /* loss of device reservation is handled via base devices only + * as alias devices may be used with several bases + */ + if (device->block && (sense[27] & DASD_SENSE_BIT_0) && + (sense[7] == 0x3F) && + (scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK) && + test_bit(DASD_FLAG_IS_RESERVED, &device->flags)) { + if (device->features & DASD_FEATURE_FAILONSLCK) + set_bit(DASD_FLAG_LOCK_STOLEN, &device->flags); + clear_bit(DASD_FLAG_IS_RESERVED, &device->flags); + dev_err(&device->cdev->dev, + "The device reservation was lost\n"); } - - dasd_schedule_device_bh(device); - return; -}; - +} static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single( struct dasd_device *startdev, @@ -1777,7 +2551,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single( struct dasd_ccw_req *cqr; struct ccw1 *ccw; struct req_iterator iter; - struct bio_vec *bv; + struct bio_vec bv; char *dst; unsigned int off; int count, cidaw, cplength, datasize; @@ -1799,13 +2573,13 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single( count = 0; cidaw = 0; rq_for_each_segment(bv, req, iter) { - if (bv->bv_len & (blksize - 1)) + if (bv.bv_len & (blksize - 1)) /* Eckd can only do full blocks. */ return ERR_PTR(-EINVAL); - count += bv->bv_len >> (block->s2b_shift + 9); + count += bv.bv_len >> (block->s2b_shift + 9); #if defined(CONFIG_64BIT) - if (idal_is_needed (page_address(bv->bv_page), bv->bv_len)) - cidaw += bv->bv_len >> (block->s2b_shift + 9); + if (idal_is_needed (page_address(bv.bv_page), bv.bv_len)) + cidaw += bv.bv_len >> (block->s2b_shift + 9); #endif } /* Paranoia. */ @@ -1856,7 +2630,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single( sizeof(struct PFX_eckd_data)); } else { if (define_extent(ccw++, cqr->data, first_trk, - last_trk, cmd, startdev) == -EAGAIN) { + last_trk, cmd, basedev) == -EAGAIN) { /* Clock not in sync and XRC is enabled. * Try again later. 
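/*
 * Editorial aside, not part of the patch: the bv-> to bv. changes in this
 * and the following hunks track the block layer's move to handing out
 * struct bio_vec by value from the request iterators instead of a
 * pointer. The per-segment body keeps its shape; only the accessor syntax
 * changes. A standalone model (bio_vec and the iterator are simplified
 * stand-ins, not the kernel definitions):
 */
#include <stdio.h>

struct bio_vec { void *bv_page; unsigned int bv_len, bv_offset; };

/* stand-in for rq_for_each_segment(): hands out copies, not pointers */
static void for_each_segment(struct bio_vec *vecs, int n,
			     void (*fn)(struct bio_vec))
{
	for (int i = 0; i < n; i++)
		fn(vecs[i]);	/* by value, as the new iterator does */
}

static void handle(struct bio_vec bv)
{
	/* old code: dst = page_address(bv->bv_page) + bv->bv_offset; */
	char *dst = (char *)bv.bv_page + bv.bv_offset;

	printf("segment at %p, %u bytes\n", (void *)dst, bv.bv_len);
}

int main(void)
{
	char buf[8192];
	struct bio_vec vecs[] = {
		{ buf, 4096, 0 }, { buf + 4096, 4096, 0 },
	};

	for_each_segment(vecs, 2, handle);
	return 0;
}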
*/ @@ -1876,16 +2650,16 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single( last_rec - recid + 1, cmd, basedev, blksize); } rq_for_each_segment(bv, req, iter) { - dst = page_address(bv->bv_page) + bv->bv_offset; + dst = page_address(bv.bv_page) + bv.bv_offset; if (dasd_page_cache) { char *copy = kmem_cache_alloc(dasd_page_cache, GFP_DMA | __GFP_NOWARN); if (copy && rq_data_dir(req) == WRITE) - memcpy(copy + bv->bv_offset, dst, bv->bv_len); + memcpy(copy + bv.bv_offset, dst, bv.bv_len); if (copy) - dst = copy + bv->bv_offset; + dst = copy + bv.bv_offset; } - for (off = 0; off < bv->bv_len; off += blksize) { + for (off = 0; off < bv.bv_len; off += blksize) { sector_t trkid = recid; unsigned int recoffs = sector_div(trkid, blk_per_trk); rcmd = cmd; @@ -1936,10 +2710,10 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single( cqr->startdev = startdev; cqr->memdev = startdev; cqr->block = block; - cqr->expires = 5 * 60 * HZ; /* 5 minutes */ - cqr->lpm = private->path_data.ppm; - cqr->retries = 256; - cqr->buildclk = get_clock(); + cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */ + cqr->lpm = startdev->path_data.ppm; + cqr->retries = startdev->default_retries; + cqr->buildclk = get_tod_clock(); cqr->status = DASD_CQR_FILLED; return cqr; } @@ -1957,12 +2731,11 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track( unsigned int blk_per_trk, unsigned int blksize) { - struct dasd_eckd_private *private; unsigned long *idaws; struct dasd_ccw_req *cqr; struct ccw1 *ccw; struct req_iterator iter; - struct bio_vec *bv; + struct bio_vec bv; char *dst, *idaw_dst; unsigned int cidaw, cplength, datasize; unsigned int tlf; @@ -1976,7 +2749,6 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track( unsigned int recoffs; basedev = block->base; - private = (struct dasd_eckd_private *) basedev->private; if (rq_data_dir(req) == READ) cmd = DASD_ECKD_CCW_READ_TRACK_DATA; else if (rq_data_dir(req) == WRITE) @@ -2038,11 +2810,11 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track( new_track = 1; end_idaw = 0; len_to_track_end = 0; - idaw_dst = 0; + idaw_dst = NULL; idaw_len = 0; rq_for_each_segment(bv, req, iter) { - dst = page_address(bv->bv_page) + bv->bv_offset; - seg_len = bv->bv_len; + dst = page_address(bv.bv_page) + bv.bv_offset; + seg_len = bv.bv_len; while (seg_len) { if (new_track) { trkid = recid; @@ -2100,7 +2872,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track( if (end_idaw) { idaws = idal_create_words(idaws, idaw_dst, idaw_len); - idaw_dst = 0; + idaw_dst = NULL; idaw_len = 0; end_idaw = 0; } @@ -2113,10 +2885,10 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track( cqr->startdev = startdev; cqr->memdev = startdev; cqr->block = block; - cqr->expires = 5 * 60 * HZ; /* 5 minutes */ - cqr->lpm = private->path_data.ppm; - cqr->retries = 256; - cqr->buildclk = get_clock(); + cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */ + cqr->lpm = startdev->path_data.ppm; + cqr->retries = startdev->default_retries; + cqr->buildclk = get_tod_clock(); cqr->status = DASD_CQR_FILLED; return cqr; } @@ -2249,8 +3021,7 @@ static int prepare_itcw(struct itcw *itcw, dcw = itcw_add_dcw(itcw, pfx_cmd, 0, &pfxdata, sizeof(pfxdata), total_data_size); - - return rc; + return PTR_RET(dcw); } static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track( @@ -2266,10 +3037,9 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track( unsigned int blk_per_trk, unsigned int blksize) { - struct dasd_eckd_private *private; struct dasd_ccw_req *cqr; 
struct req_iterator iter; - struct bio_vec *bv; + struct bio_vec bv; char *dst; unsigned int trkcount, ctidaw; unsigned char cmd; @@ -2279,9 +3049,15 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track( struct tidaw *last_tidaw = NULL; int itcw_op; size_t itcw_size; + u8 tidaw_flags; + unsigned int seg_len, part_len, len_to_track_end; + unsigned char new_track; + sector_t recid, trkid; + unsigned int offs; + unsigned int count, count_to_trk_end; + int ret; basedev = block->base; - private = (struct dasd_eckd_private *) basedev->private; if (rq_data_dir(req) == READ) { cmd = DASD_ECKD_CCW_READ_TRACK_DATA; itcw_op = ITCW_OP_READ; @@ -2294,12 +3070,16 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track( /* trackbased I/O needs address all memory via TIDAWs, * not just for 64 bit addresses. This allows us to map * each segment directly to one tidaw. + * In the case of write requests, additional tidaws may + * be needed when a segment crosses a track boundary. */ trkcount = last_trk - first_trk + 1; ctidaw = 0; rq_for_each_segment(bv, req, iter) { ++ctidaw; } + if (rq_data_dir(req) == WRITE) + ctidaw += (last_trk - first_trk); /* Allocate the ccw request. */ itcw_size = itcw_calc_size(0, ctidaw, 0); @@ -2307,15 +3087,6 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track( if (IS_ERR(cqr)) return cqr; - cqr->cpmode = 1; - cqr->startdev = startdev; - cqr->memdev = startdev; - cqr->block = block; - cqr->expires = 100*HZ; - cqr->buildclk = get_clock(); - cqr->status = DASD_CQR_FILLED; - cqr->retries = 10; - /* transfer length factor: how many bytes to read from the last track */ if (first_trk == last_trk) tlf = last_offs - first_offs + 1; @@ -2324,8 +3095,11 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track( tlf *= blksize; itcw = itcw_init(cqr->data, itcw_size, itcw_op, 0, ctidaw, 0); + if (IS_ERR(itcw)) { + ret = -EINVAL; + goto out_error; + } cqr->cpaddr = itcw_get_tcw(itcw); - if (prepare_itcw(itcw, first_trk, last_trk, cmd, basedev, startdev, first_offs + 1, @@ -2335,49 +3109,93 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track( /* Clock not in sync and XRC is enabled. * Try again later. */ - dasd_sfree_request(cqr, startdev); - return ERR_PTR(-EAGAIN); + ret = -EAGAIN; + goto out_error; } - + len_to_track_end = 0; /* * A tidaw can address 4k of memory, but must not cross page boundaries * We can let the block layer handle this by setting * blk_queue_segment_boundary to page boundaries and * blk_max_segment_size to page size when setting up the request queue. + * For write requests, a TIDAW must not cross track boundaries, because + * we have to set the CBC flag on the last tidaw for each track. 
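/*
 * Illustrative sketch, not part of the patch: for transport-mode writes a
 * memory segment has to be split wherever it crosses a track boundary,
 * because the last TIDAW of every track carries the insert-CBC flag. The
 * loop that follows implements this; its split arithmetic, reduced to
 * plain C:
 */
#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

static void split_segment(unsigned long seg_len, unsigned long track_off,
			  unsigned long bytes_per_track)
{
	unsigned long len_to_track_end = bytes_per_track - track_off;

	while (seg_len) {
		unsigned long part_len = MIN(seg_len, len_to_track_end);

		seg_len -= part_len;
		len_to_track_end -= part_len;
		/* a part ending exactly on a track boundary gets the flag */
		printf("tidaw: %lu bytes%s\n", part_len,
		       len_to_track_end ? "" : " [insert CBC]");
		if (!len_to_track_end)
			len_to_track_end = bytes_per_track;
	}
}

int main(void)
{
	/* 100000 bytes from a track start: 65536 [insert CBC], then 34464 */
	split_segment(100000, 0, 65536);
	return 0;
}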
*/ - rq_for_each_segment(bv, req, iter) { - dst = page_address(bv->bv_page) + bv->bv_offset; - last_tidaw = itcw_add_tidaw(itcw, 0x00, dst, bv->bv_len); - if (IS_ERR(last_tidaw)) - return (struct dasd_ccw_req *)last_tidaw; + if (rq_data_dir(req) == WRITE) { + new_track = 1; + recid = first_rec; + rq_for_each_segment(bv, req, iter) { + dst = page_address(bv.bv_page) + bv.bv_offset; + seg_len = bv.bv_len; + while (seg_len) { + if (new_track) { + trkid = recid; + offs = sector_div(trkid, blk_per_trk); + count_to_trk_end = blk_per_trk - offs; + count = min((last_rec - recid + 1), + (sector_t)count_to_trk_end); + len_to_track_end = count * blksize; + recid += count; + new_track = 0; + } + part_len = min(seg_len, len_to_track_end); + seg_len -= part_len; + len_to_track_end -= part_len; + /* We need to end the tidaw at track end */ + if (!len_to_track_end) { + new_track = 1; + tidaw_flags = TIDAW_FLAGS_INSERT_CBC; + } else + tidaw_flags = 0; + last_tidaw = itcw_add_tidaw(itcw, tidaw_flags, + dst, part_len); + if (IS_ERR(last_tidaw)) { + ret = -EINVAL; + goto out_error; + } + dst += part_len; + } + } + } else { + rq_for_each_segment(bv, req, iter) { + dst = page_address(bv.bv_page) + bv.bv_offset; + last_tidaw = itcw_add_tidaw(itcw, 0x00, + dst, bv.bv_len); + if (IS_ERR(last_tidaw)) { + ret = -EINVAL; + goto out_error; + } + } } - - last_tidaw->flags |= 0x80; + last_tidaw->flags |= TIDAW_FLAGS_LAST; + last_tidaw->flags &= ~TIDAW_FLAGS_INSERT_CBC; itcw_finalize(itcw); if (blk_noretry_request(req) || block->base->features & DASD_FEATURE_FAILFAST) set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); + cqr->cpmode = 1; cqr->startdev = startdev; cqr->memdev = startdev; cqr->block = block; - cqr->expires = 5 * 60 * HZ; /* 5 minutes */ - cqr->lpm = private->path_data.ppm; - cqr->retries = 256; - cqr->buildclk = get_clock(); + cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */ + cqr->lpm = startdev->path_data.ppm; + cqr->retries = startdev->default_retries; + cqr->buildclk = get_tod_clock(); cqr->status = DASD_CQR_FILLED; return cqr; +out_error: + dasd_sfree_request(cqr, startdev); + return ERR_PTR(ret); } static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev, struct dasd_block *block, struct request *req) { - int tpm, cmdrtd, cmdwtd; + int cmdrtd, cmdwtd; int use_prefix; -#if defined(CONFIG_64BIT) - int fcx_in_css, fcx_in_gneq, fcx_in_features; -#endif + int fcx_multitrack; struct dasd_eckd_private *private; struct dasd_device *basedev; sector_t first_rec, last_rec; @@ -2385,6 +3203,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev, unsigned int first_offs, last_offs; unsigned int blk_per_trk, blksize; int cdlspecial; + unsigned int data_size; struct dasd_ccw_req *cqr; basedev = block->base; @@ -2403,15 +3222,13 @@ static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev, last_offs = sector_div(last_trk, blk_per_trk); cdlspecial = (private->uses_cdl && first_rec < 2*blk_per_trk); - /* is transport mode supported? 
*/ -#if defined(CONFIG_64BIT) - fcx_in_css = css_general_characteristics.fcx; - fcx_in_gneq = private->gneq->reserved2[7] & 0x04; - fcx_in_features = private->features.feature[40] & 0x80; - tpm = fcx_in_css && fcx_in_gneq && fcx_in_features; -#else - tpm = 0; -#endif + fcx_multitrack = private->features.feature[40] & 0x20; + data_size = blk_rq_bytes(req); + if (data_size % blksize) + return ERR_PTR(-EINVAL); + /* tpm write request add CBC data on each track boundary */ + if (rq_data_dir(req) == WRITE) + data_size += (last_trk - first_trk) * 4; /* is read track data and write track data in command mode supported? */ cmdrtd = private->features.feature[9] & 0x20; @@ -2421,13 +3238,15 @@ cqr = NULL; if (cdlspecial || dasd_page_cache) { /* do nothing, just fall through to the cmd mode single case */ - } else if (!dasd_nofcx && tpm && (first_trk == last_trk)) { + } else if ((data_size <= private->fcx_max_data) + && (fcx_multitrack || (first_trk == last_trk))) { cqr = dasd_eckd_build_cp_tpm_track(startdev, block, req, first_rec, last_rec, first_trk, last_trk, first_offs, last_offs, blk_per_trk, blksize); - if (IS_ERR(cqr) && PTR_ERR(cqr) != -EAGAIN) + if (IS_ERR(cqr) && (PTR_ERR(cqr) != -EAGAIN) && + (PTR_ERR(cqr) != -ENOMEM)) cqr = NULL; } else if (use_prefix && (((rq_data_dir(req) == READ) && cmdrtd) || @@ -2437,7 +3256,8 @@ first_trk, last_trk, first_offs, last_offs, blk_per_trk, blksize); - if (IS_ERR(cqr) && PTR_ERR(cqr) != -EAGAIN) + if (IS_ERR(cqr) && (PTR_ERR(cqr) != -EAGAIN) && + (PTR_ERR(cqr) != -ENOMEM)) cqr = NULL; } if (!cqr) @@ -2449,13 +3269,162 @@ return cqr; } +static struct dasd_ccw_req *dasd_raw_build_cp(struct dasd_device *startdev, + struct dasd_block *block, + struct request *req) +{ + unsigned long *idaws; + struct dasd_device *basedev; + struct dasd_ccw_req *cqr; + struct ccw1 *ccw; + struct req_iterator iter; + struct bio_vec bv; + char *dst; + unsigned char cmd; + unsigned int trkcount; + unsigned int seg_len, len_to_track_end; + unsigned int first_offs; + unsigned int cidaw, cplength, datasize; + sector_t first_trk, last_trk, sectors; + sector_t start_padding_sectors, end_sector_offset, end_padding_sectors; + unsigned int pfx_datasize; + + /* + * raw track access needs to be a multiple of 64k and on a 64k boundary + For read requests we can fix an incorrect alignment by padding + the request with dummy pages.
+ */ + start_padding_sectors = blk_rq_pos(req) % DASD_RAW_SECTORS_PER_TRACK; + end_sector_offset = (blk_rq_pos(req) + blk_rq_sectors(req)) % + DASD_RAW_SECTORS_PER_TRACK; + end_padding_sectors = (DASD_RAW_SECTORS_PER_TRACK - end_sector_offset) % + DASD_RAW_SECTORS_PER_TRACK; + basedev = block->base; + if ((start_padding_sectors || end_padding_sectors) && + (rq_data_dir(req) == WRITE)) { + DBF_DEV_EVENT(DBF_ERR, basedev, + "raw write not track aligned (%lu,%lu) req %p", + start_padding_sectors, end_padding_sectors, req); + cqr = ERR_PTR(-EINVAL); + goto out; + } + + first_trk = blk_rq_pos(req) / DASD_RAW_SECTORS_PER_TRACK; + last_trk = (blk_rq_pos(req) + blk_rq_sectors(req) - 1) / + DASD_RAW_SECTORS_PER_TRACK; + trkcount = last_trk - first_trk + 1; + first_offs = 0; + + if (rq_data_dir(req) == READ) + cmd = DASD_ECKD_CCW_READ_TRACK; + else if (rq_data_dir(req) == WRITE) + cmd = DASD_ECKD_CCW_WRITE_FULL_TRACK; + else { + cqr = ERR_PTR(-EINVAL); + goto out; + } + + /* + * Raw track based I/O needs IDAWs for each page, + * and not just for 64 bit addresses. + */ + cidaw = trkcount * DASD_RAW_BLOCK_PER_TRACK; + + /* 1x prefix + one read/write ccw per track */ + cplength = 1 + trkcount; + + /* + * struct PFX_eckd_data has up to 2 bytes as extended parameter + * this is needed for write full track and has to be mentioned + * separately + * add 8 instead of 2 to keep 8 byte boundary + */ + pfx_datasize = sizeof(struct PFX_eckd_data) + 8; + + datasize = pfx_datasize + cidaw * sizeof(unsigned long long); + + /* Allocate the ccw request. */ + cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, + datasize, startdev); + if (IS_ERR(cqr)) + goto out; + ccw = cqr->cpaddr; + + if (prefix_LRE(ccw++, cqr->data, first_trk, last_trk, cmd, + basedev, startdev, 1 /* format */, first_offs + 1, + trkcount, 0, 0) == -EAGAIN) { + /* Clock not in sync and XRC is enabled. * Try again later.
+ */ + dasd_sfree_request(cqr, startdev); + cqr = ERR_PTR(-EAGAIN); + goto out; + } + + idaws = (unsigned long *)(cqr->data + pfx_datasize); + len_to_track_end = 0; + if (start_padding_sectors) { + ccw[-1].flags |= CCW_FLAG_CC; + ccw->cmd_code = cmd; + /* maximum 3390 track size */ + ccw->count = 57326; + /* 64k map to one track */ + len_to_track_end = 65536 - start_padding_sectors * 512; + ccw->cda = (__u32)(addr_t)idaws; + ccw->flags |= CCW_FLAG_IDA; + ccw->flags |= CCW_FLAG_SLI; + ccw++; + for (sectors = 0; sectors < start_padding_sectors; sectors += 8) + idaws = idal_create_words(idaws, rawpadpage, PAGE_SIZE); + } + rq_for_each_segment(bv, req, iter) { + dst = page_address(bv.bv_page) + bv.bv_offset; + seg_len = bv.bv_len; + if (cmd == DASD_ECKD_CCW_READ_TRACK) + memset(dst, 0, seg_len); + if (!len_to_track_end) { + ccw[-1].flags |= CCW_FLAG_CC; + ccw->cmd_code = cmd; + /* maximum 3390 track size */ + ccw->count = 57326; + /* 64k map to one track */ + len_to_track_end = 65536; + ccw->cda = (__u32)(addr_t)idaws; + ccw->flags |= CCW_FLAG_IDA; + ccw->flags |= CCW_FLAG_SLI; + ccw++; + } + len_to_track_end -= seg_len; + idaws = idal_create_words(idaws, dst, seg_len); + } + for (sectors = 0; sectors < end_padding_sectors; sectors += 8) + idaws = idal_create_words(idaws, rawpadpage, PAGE_SIZE); + if (blk_noretry_request(req) || + block->base->features & DASD_FEATURE_FAILFAST) + set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); + cqr->startdev = startdev; + cqr->memdev = startdev; + cqr->block = block; + cqr->expires = startdev->default_expires * HZ; + cqr->lpm = startdev->path_data.ppm; + cqr->retries = startdev->default_retries; + cqr->buildclk = get_tod_clock(); + cqr->status = DASD_CQR_FILLED; + + if (IS_ERR(cqr) && PTR_ERR(cqr) != -EAGAIN) + cqr = NULL; +out: + return cqr; +} + + static int dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req) { struct dasd_eckd_private *private; struct ccw1 *ccw; struct req_iterator iter; - struct bio_vec *bv; + struct bio_vec bv; char *dst, *cda; unsigned int blksize, blk_per_trk, off; sector_t recid; @@ -2473,8 +3442,8 @@ dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req) if (private->uses_cdl == 0 || recid > 2*blk_per_trk) ccw++; rq_for_each_segment(bv, req, iter) { - dst = page_address(bv->bv_page) + bv->bv_offset; - for (off = 0; off < bv->bv_len; off += blksize) { + dst = page_address(bv.bv_page) + bv.bv_offset; + for (off = 0; off < bv.bv_len; off += blksize) { /* Skip locate record. 
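The alignment rule enforced at the top of dasd_raw_build_cp is plain modular arithmetic on 512-byte sector positions. A stand-alone illustration, assuming the 64 KiB track image implies 128 sectors per track (65536 / 512); the request values are invented for the example:

    #include <stdio.h>

    #define RAW_SECTORS_PER_TRACK 128UL   /* assumed: 65536 / 512 */

    int main(void)
    {
            unsigned long pos = 200, nr_sectors = 300;  /* example request */
            unsigned long start_pad = pos % RAW_SECTORS_PER_TRACK;
            unsigned long end_off = (pos + nr_sectors) % RAW_SECTORS_PER_TRACK;
            unsigned long end_pad =
                    (RAW_SECTORS_PER_TRACK - end_off) % RAW_SECTORS_PER_TRACK;

            /* Reads can be padded with dummy pages; unaligned writes fail. */
            printf("start_pad=%lu end_pad=%lu => %s as a write\n",
                   start_pad, end_pad,
                   (start_pad || end_pad) ? "rejected" : "accepted");
            return 0;
    }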
*/ if (private->uses_cdl && recid <= 2*blk_per_trk) ccw++; @@ -2485,7 +3454,7 @@ dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req) cda = (char *)((addr_t) ccw->cda); if (dst != cda) { if (rq_data_dir(req) == READ) - memcpy(dst, cda, bv->bv_len); + memcpy(dst, cda, bv.bv_len); kmem_cache_free(dasd_page_cache, (void *)((addr_t)cda & PAGE_MASK)); } @@ -2553,7 +3522,10 @@ static struct dasd_ccw_req *dasd_eckd_build_alias_cp(struct dasd_device *base, spin_lock_irqsave(get_ccwdev_lock(startdev->cdev), flags); private->count++; - cqr = dasd_eckd_build_cp(startdev, block, req); + if ((base->features & DASD_FEATURE_USERAW)) + cqr = dasd_raw_build_cp(startdev, block, req); + else + cqr = dasd_eckd_build_cp(startdev, block, req); if (IS_ERR(cqr)) private->count--; spin_unlock_irqrestore(get_ccwdev_lock(startdev->cdev), flags); @@ -2608,15 +3580,23 @@ dasd_eckd_release(struct dasd_device *device) struct dasd_ccw_req *cqr; int rc; struct ccw1 *ccw; + int useglobal; if (!capable(CAP_SYS_ADMIN)) return -EACCES; + useglobal = 0; cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device); if (IS_ERR(cqr)) { - DBF_DEV_EVENT(DBF_WARNING, device, "%s", - "Could not allocate initialization request"); - return PTR_ERR(cqr); + mutex_lock(&dasd_reserve_mutex); + useglobal = 1; + cqr = &dasd_reserve_req->cqr; + memset(cqr, 0, sizeof(*cqr)); + memset(&dasd_reserve_req->ccw, 0, + sizeof(dasd_reserve_req->ccw)); + cqr->cpaddr = &dasd_reserve_req->ccw; + cqr->data = &dasd_reserve_req->data; + cqr->magic = DASD_ECKD_MAGIC; } ccw = cqr->cpaddr; ccw->cmd_code = DASD_ECKD_CCW_RELEASE; @@ -2629,12 +3609,17 @@ dasd_eckd_release(struct dasd_device *device) set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); cqr->retries = 2; /* set retry counter to enable basic ERP */ cqr->expires = 2 * HZ; - cqr->buildclk = get_clock(); + cqr->buildclk = get_tod_clock(); cqr->status = DASD_CQR_FILLED; rc = dasd_sleep_on_immediatly(cqr); + if (!rc) + clear_bit(DASD_FLAG_IS_RESERVED, &device->flags); - dasd_sfree_request(cqr, cqr->memdev); + if (useglobal) + mutex_unlock(&dasd_reserve_mutex); + else + dasd_sfree_request(cqr, cqr->memdev); return rc; } @@ -2650,15 +3635,23 @@ dasd_eckd_reserve(struct dasd_device *device) struct dasd_ccw_req *cqr; int rc; struct ccw1 *ccw; + int useglobal; if (!capable(CAP_SYS_ADMIN)) return -EACCES; + useglobal = 0; cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device); if (IS_ERR(cqr)) { - DBF_DEV_EVENT(DBF_WARNING, device, "%s", - "Could not allocate initialization request"); - return PTR_ERR(cqr); + mutex_lock(&dasd_reserve_mutex); + useglobal = 1; + cqr = &dasd_reserve_req->cqr; + memset(cqr, 0, sizeof(*cqr)); + memset(&dasd_reserve_req->ccw, 0, + sizeof(dasd_reserve_req->ccw)); + cqr->cpaddr = &dasd_reserve_req->ccw; + cqr->data = &dasd_reserve_req->data; + cqr->magic = DASD_ECKD_MAGIC; } ccw = cqr->cpaddr; ccw->cmd_code = DASD_ECKD_CCW_RESERVE; @@ -2671,12 +3664,17 @@ dasd_eckd_reserve(struct dasd_device *device) set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); cqr->retries = 2; /* set retry counter to enable basic ERP */ cqr->expires = 2 * HZ; - cqr->buildclk = get_clock(); + cqr->buildclk = get_tod_clock(); cqr->status = DASD_CQR_FILLED; rc = dasd_sleep_on_immediatly(cqr); + if (!rc) + set_bit(DASD_FLAG_IS_RESERVED, &device->flags); - dasd_sfree_request(cqr, cqr->memdev); + if (useglobal) + mutex_unlock(&dasd_reserve_mutex); + else + dasd_sfree_request(cqr, cqr->memdev); return rc; } @@ -2691,15 +3689,23 @@ dasd_eckd_steal_lock(struct dasd_device *device) struct dasd_ccw_req *cqr; int rc; 
struct ccw1 *ccw; + int useglobal; if (!capable(CAP_SYS_ADMIN)) return -EACCES; + useglobal = 0; cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device); if (IS_ERR(cqr)) { - DBF_DEV_EVENT(DBF_WARNING, device, "%s", - "Could not allocate initialization request"); - return PTR_ERR(cqr); + mutex_lock(&dasd_reserve_mutex); + useglobal = 1; + cqr = &dasd_reserve_req->cqr; + memset(cqr, 0, sizeof(*cqr)); + memset(&dasd_reserve_req->ccw, 0, + sizeof(dasd_reserve_req->ccw)); + cqr->cpaddr = &dasd_reserve_req->ccw; + cqr->data = &dasd_reserve_req->data; + cqr->magic = DASD_ECKD_MAGIC; } ccw = cqr->cpaddr; ccw->cmd_code = DASD_ECKD_CCW_SLCK; @@ -2712,12 +3718,85 @@ dasd_eckd_steal_lock(struct dasd_device *device) set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); cqr->retries = 2; /* set retry counter to enable basic ERP */ cqr->expires = 2 * HZ; - cqr->buildclk = get_clock(); + cqr->buildclk = get_tod_clock(); cqr->status = DASD_CQR_FILLED; rc = dasd_sleep_on_immediatly(cqr); + if (!rc) + set_bit(DASD_FLAG_IS_RESERVED, &device->flags); - dasd_sfree_request(cqr, cqr->memdev); + if (useglobal) + mutex_unlock(&dasd_reserve_mutex); + else + dasd_sfree_request(cqr, cqr->memdev); + return rc; +} + +/* + * SNID - Sense Path Group ID + * This ioctl may be used in situations where I/O is stalled due to + * a reserve, so if the normal dasd_smalloc_request fails, we use the + * preallocated dasd_reserve_req. + */ +static int dasd_eckd_snid(struct dasd_device *device, + void __user *argp) +{ + struct dasd_ccw_req *cqr; + int rc; + struct ccw1 *ccw; + int useglobal; + struct dasd_snid_ioctl_data usrparm; + + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + + if (copy_from_user(&usrparm, argp, sizeof(usrparm))) + return -EFAULT; + + useglobal = 0; + cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, + sizeof(struct dasd_snid_data), device); + if (IS_ERR(cqr)) { + mutex_lock(&dasd_reserve_mutex); + useglobal = 1; + cqr = &dasd_reserve_req->cqr; + memset(cqr, 0, sizeof(*cqr)); + memset(&dasd_reserve_req->ccw, 0, + sizeof(dasd_reserve_req->ccw)); + cqr->cpaddr = &dasd_reserve_req->ccw; + cqr->data = &dasd_reserve_req->data; + cqr->magic = DASD_ECKD_MAGIC; + } + ccw = cqr->cpaddr; + ccw->cmd_code = DASD_ECKD_CCW_SNID; + ccw->flags |= CCW_FLAG_SLI; + ccw->count = 12; + ccw->cda = (__u32)(addr_t) cqr->data; + cqr->startdev = device; + cqr->memdev = device; + clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); + set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); + set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags); + cqr->retries = 5; + cqr->expires = 10 * HZ; + cqr->buildclk = get_tod_clock(); + cqr->status = DASD_CQR_FILLED; + cqr->lpm = usrparm.path_mask; + + rc = dasd_sleep_on_immediatly(cqr); + /* verify that I/O processing didn't modify the path mask */ + if (!rc && usrparm.path_mask && (cqr->lpm != usrparm.path_mask)) + rc = -EIO; + if (!rc) { + usrparm.data = *((struct dasd_snid_data *)cqr->data); + if (copy_to_user(argp, &usrparm, sizeof(usrparm))) + rc = -EFAULT; + } + + if (useglobal) + mutex_unlock(&dasd_reserve_mutex); + else + dasd_sfree_request(cqr, cqr->memdev); return rc; } @@ -2770,7 +3849,7 @@ dasd_eckd_performance(struct dasd_device *device, void __user *argp) ccw->count = sizeof(struct dasd_rssd_perf_stats_t); ccw->cda = (__u32)(addr_t) stats; - cqr->buildclk = get_clock(); + cqr->buildclk = get_tod_clock(); cqr->status = DASD_CQR_FILLED; rc = dasd_sleep_on(cqr); if (rc == 0) { @@ -2896,7 +3975,7 @@ static int dasd_symm_io(struct dasd_device *device, void __user *argp) cqr->memdev = device; cqr->retries = 3; 
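The release, reserve, steal-lock and SNID paths above all share one allocation fallback: these requests must still go out when the device is reserved elsewhere and I/O (and thus memory reclaim) may be stalled, so a failed dasd_smalloc_request falls back to a single statically preallocated request serialized by dasd_reserve_mutex. A condensed user-space sketch of the pattern; the types and helper names are illustrative, not the driver's:

    #include <pthread.h>
    #include <stdlib.h>
    #include <string.h>

    struct reserve_req { char ccw[8]; char data[32]; };

    static struct reserve_req static_req;                 /* preallocated */
    static pthread_mutex_t reserve_mutex = PTHREAD_MUTEX_INITIALIZER;

    /* Try the normal allocation first; under memory pressure fall back
     * to the shared preallocated request, serialized by reserve_mutex.
     * The caller passes *is_global back to put_request(). */
    static struct reserve_req *get_request(int *is_global)
    {
            struct reserve_req *r = malloc(sizeof(*r));

            if (r) {
                    *is_global = 0;
                    return r;
            }
            pthread_mutex_lock(&reserve_mutex);
            memset(&static_req, 0, sizeof(static_req));
            *is_global = 1;
            return &static_req;
    }

    static void put_request(struct reserve_req *r, int is_global)
    {
            if (is_global)
                    pthread_mutex_unlock(&reserve_mutex);  /* not freed */
            else
                    free(r);
    }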
cqr->expires = 10 * HZ; - cqr->buildclk = get_clock(); + cqr->buildclk = get_tod_clock(); cqr->status = DASD_CQR_FILLED; /* Build the ccws */ @@ -2956,10 +4035,12 @@ dasd_eckd_ioctl(struct dasd_block *block, unsigned int cmd, void __user *argp) return dasd_eckd_reserve(device); case BIODASDSLCK: return dasd_eckd_steal_lock(device); + case BIODASDSNID: + return dasd_eckd_snid(device, argp); case BIODASDSYMMIO: return dasd_symm_io(device, argp); default: - return -ENOIOCTLCMD; + return -ENOTTY; } } @@ -2975,7 +4056,7 @@ dasd_eckd_dump_ccw_range(struct ccw1 *from, struct ccw1 *to, char *page) len = 0; while (from <= to) { - len += sprintf(page + len, KERN_ERR PRINTK_HEADER + len += sprintf(page + len, PRINTK_HEADER " CCW %p: %08X %08X DAT:", from, ((int *) from)[0], ((int *) from)[1]); @@ -3002,19 +4083,19 @@ dasd_eckd_dump_sense_dbf(struct dasd_device *device, struct irb *irb, char *reason) { u64 *sense; + u64 *stat; sense = (u64 *) dasd_get_sense(irb); + stat = (u64 *) &irb->scsw; if (sense) { - DBF_DEV_EVENT(DBF_EMERG, device, - "%s: %s %02x%02x%02x %016llx %016llx %016llx " - "%016llx", reason, - scsw_is_tm(&irb->scsw) ? "t" : "c", - scsw_cc(&irb->scsw), scsw_cstat(&irb->scsw), - scsw_dstat(&irb->scsw), sense[0], sense[1], - sense[2], sense[3]); + DBF_DEV_EVENT(DBF_EMERG, device, "%s: %016llx %08x : " + "%016llx %016llx %016llx %016llx", + reason, *stat, *((u32 *) (stat + 1)), + sense[0], sense[1], sense[2], sense[3]); } else { - DBF_DEV_EVENT(DBF_EMERG, device, "%s", - "SORRY - NO VALID SENSE AVAILABLE\n"); + DBF_DEV_EVENT(DBF_EMERG, device, "%s: %016llx %08x : %s", + reason, *stat, *((u32 *) (stat + 1)), + "NO VALID SENSE"); } } @@ -3036,20 +4117,23 @@ static void dasd_eckd_dump_sense_ccw(struct dasd_device *device, return; } /* dump the sense data */ - len = sprintf(page, KERN_ERR PRINTK_HEADER + len = sprintf(page, PRINTK_HEADER " I/O status report for device %s:\n", dev_name(&device->cdev->dev)); - len += sprintf(page + len, KERN_ERR PRINTK_HEADER - " in req: %p CS: 0x%02X DS: 0x%02X CC: 0x%02X RC: %d\n", - req, scsw_cstat(&irb->scsw), scsw_dstat(&irb->scsw), - scsw_cc(&irb->scsw), req ? req->intrc : 0); - len += sprintf(page + len, KERN_ERR PRINTK_HEADER + len += sprintf(page + len, PRINTK_HEADER + " in req: %p CC:%02X FC:%02X AC:%02X SC:%02X DS:%02X " + "CS:%02X RC:%d\n", + req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw), + scsw_actl(&irb->scsw), scsw_stctl(&irb->scsw), + scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw), + req ? req->intrc : 0); + len += sprintf(page + len, PRINTK_HEADER " device %s: Failing CCW: %p\n", dev_name(&device->cdev->dev), (void *) (addr_t) irb->scsw.cmd.cpa); if (irb->esw.esw0.erw.cons) { for (sl = 0; sl < 4; sl++) { - len += sprintf(page + len, KERN_ERR PRINTK_HEADER + len += sprintf(page + len, PRINTK_HEADER " Sense(hex) %2d-%2d:", (8 * sl), ((8 * sl) + 7)); @@ -3062,23 +4146,23 @@ static void dasd_eckd_dump_sense_ccw(struct dasd_device *device, if (irb->ecw[27] & DASD_SENSE_BIT_0) { /* 24 Byte Sense Data */ - sprintf(page + len, KERN_ERR PRINTK_HEADER + sprintf(page + len, PRINTK_HEADER " 24 Byte: %x MSG %x, " "%s MSGb to SYSOP\n", irb->ecw[7] >> 4, irb->ecw[7] & 0x0f, irb->ecw[1] & 0x10 ? 
"" : "no"); } else { /* 32 Byte Sense Data */ - sprintf(page + len, KERN_ERR PRINTK_HEADER + sprintf(page + len, PRINTK_HEADER " 32 Byte: Format: %x " "Exception class %x\n", irb->ecw[6] & 0x0f, irb->ecw[22] >> 4); } } else { - sprintf(page + len, KERN_ERR PRINTK_HEADER + sprintf(page + len, PRINTK_HEADER " SORRY - NO VALID SENSE AVAILABLE\n"); } - printk("%s", page); + printk(KERN_ERR "%s", page); if (req) { /* req == NULL for unsolicited interrupts */ @@ -3087,10 +4171,10 @@ static void dasd_eckd_dump_sense_ccw(struct dasd_device *device, first = req->cpaddr; for (last = first; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++); to = min(first + 6, last); - len = sprintf(page, KERN_ERR PRINTK_HEADER + len = sprintf(page, PRINTK_HEADER " Related CP in req: %p\n", req); dasd_eckd_dump_ccw_range(first, to, page + len); - printk("%s", page); + printk(KERN_ERR "%s", page); /* print failing CCW area (maximum 4) */ /* scsw->cda is either valid or zero */ @@ -3100,7 +4184,7 @@ static void dasd_eckd_dump_sense_ccw(struct dasd_device *device, irb->scsw.cmd.cpa; /* failing CCW */ if (from < fail - 2) { from = fail - 2; /* there is a gap - print header */ - len += sprintf(page, KERN_ERR PRINTK_HEADER "......\n"); + len += sprintf(page, PRINTK_HEADER "......\n"); } to = min(fail + 1, last); len += dasd_eckd_dump_ccw_range(from, to, page + len); @@ -3109,11 +4193,11 @@ static void dasd_eckd_dump_sense_ccw(struct dasd_device *device, from = max(from, ++to); if (from < last - 1) { from = last - 1; /* there is a gap - print header */ - len += sprintf(page + len, KERN_ERR PRINTK_HEADER "......\n"); + len += sprintf(page + len, PRINTK_HEADER "......\n"); } len += dasd_eckd_dump_ccw_range(from, last, page + len); if (len > 0) - printk("%s", page); + printk(KERN_ERR "%s", page); } free_page((unsigned long) page); } @@ -3127,10 +4211,8 @@ static void dasd_eckd_dump_sense_tcw(struct dasd_device *device, { char *page; int len, sl, sct, residual; - struct tsb *tsb; - u8 *sense; - + u8 *sense, *rcq; page = (char *) get_zeroed_page(GFP_ATOMIC); if (page == NULL) { @@ -3139,82 +4221,85 @@ static void dasd_eckd_dump_sense_tcw(struct dasd_device *device, return; } /* dump the sense data */ - len = sprintf(page, KERN_ERR PRINTK_HEADER + len = sprintf(page, PRINTK_HEADER " I/O status report for device %s:\n", dev_name(&device->cdev->dev)); - len += sprintf(page + len, KERN_ERR PRINTK_HEADER - " in req: %p CS: 0x%02X DS: 0x%02X CC: 0x%02X RC: %d " - "fcxs: 0x%02X schxs: 0x%02X\n", req, - scsw_cstat(&irb->scsw), scsw_dstat(&irb->scsw), - scsw_cc(&irb->scsw), req->intrc, - irb->scsw.tm.fcxs, irb->scsw.tm.schxs); - len += sprintf(page + len, KERN_ERR PRINTK_HEADER + len += sprintf(page + len, PRINTK_HEADER + " in req: %p CC:%02X FC:%02X AC:%02X SC:%02X DS:%02X " + "CS:%02X fcxs:%02X schxs:%02X RC:%d\n", + req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw), + scsw_actl(&irb->scsw), scsw_stctl(&irb->scsw), + scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw), + irb->scsw.tm.fcxs, irb->scsw.tm.schxs, + req ? 
req->intrc : 0); + len += sprintf(page + len, PRINTK_HEADER " device %s: Failing TCW: %p\n", dev_name(&device->cdev->dev), (void *) (addr_t) irb->scsw.tm.tcw); tsb = NULL; sense = NULL; - if (irb->scsw.tm.tcw) + if (irb->scsw.tm.tcw && (irb->scsw.tm.fcxs & 0x01)) tsb = tcw_get_tsb( (struct tcw *)(unsigned long)irb->scsw.tm.tcw); - if (tsb && (irb->scsw.tm.fcxs == 0x01)) { - len += sprintf(page + len, KERN_ERR PRINTK_HEADER + if (tsb) { + len += sprintf(page + len, PRINTK_HEADER " tsb->length %d\n", tsb->length); - len += sprintf(page + len, KERN_ERR PRINTK_HEADER + len += sprintf(page + len, PRINTK_HEADER " tsb->flags %x\n", tsb->flags); - len += sprintf(page + len, KERN_ERR PRINTK_HEADER + len += sprintf(page + len, PRINTK_HEADER " tsb->dcw_offset %d\n", tsb->dcw_offset); - len += sprintf(page + len, KERN_ERR PRINTK_HEADER + len += sprintf(page + len, PRINTK_HEADER " tsb->count %d\n", tsb->count); residual = tsb->count - 28; - len += sprintf(page + len, KERN_ERR PRINTK_HEADER + len += sprintf(page + len, PRINTK_HEADER " residual %d\n", residual); switch (tsb->flags & 0x07) { case 1: /* tsa_iostat */ - len += sprintf(page + len, KERN_ERR PRINTK_HEADER + len += sprintf(page + len, PRINTK_HEADER " tsb->tsa.iostat.dev_time %d\n", tsb->tsa.iostat.dev_time); - len += sprintf(page + len, KERN_ERR PRINTK_HEADER + len += sprintf(page + len, PRINTK_HEADER " tsb->tsa.iostat.def_time %d\n", tsb->tsa.iostat.def_time); - len += sprintf(page + len, KERN_ERR PRINTK_HEADER + len += sprintf(page + len, PRINTK_HEADER " tsb->tsa.iostat.queue_time %d\n", tsb->tsa.iostat.queue_time); - len += sprintf(page + len, KERN_ERR PRINTK_HEADER + len += sprintf(page + len, PRINTK_HEADER " tsb->tsa.iostat.dev_busy_time %d\n", tsb->tsa.iostat.dev_busy_time); - len += sprintf(page + len, KERN_ERR PRINTK_HEADER + len += sprintf(page + len, PRINTK_HEADER " tsb->tsa.iostat.dev_act_time %d\n", tsb->tsa.iostat.dev_act_time); sense = tsb->tsa.iostat.sense; break; case 2: /* ts_ddpc */ - len += sprintf(page + len, KERN_ERR PRINTK_HEADER + len += sprintf(page + len, PRINTK_HEADER " tsb->tsa.ddpc.rc %d\n", tsb->tsa.ddpc.rc); - len += sprintf(page + len, KERN_ERR PRINTK_HEADER - " tsb->tsa.ddpc.rcq: "); - for (sl = 0; sl < 16; sl++) { + for (sl = 0; sl < 2; sl++) { + len += sprintf(page + len, PRINTK_HEADER + " tsb->tsa.ddpc.rcq %2d-%2d: ", + (8 * sl), ((8 * sl) + 7)); + rcq = tsb->tsa.ddpc.rcq; for (sct = 0; sct < 8; sct++) { len += sprintf(page + len, " %02x", - tsb->tsa.ddpc.rcq[sl]); + rcq[8 * sl + sct]); } len += sprintf(page + len, "\n"); } sense = tsb->tsa.ddpc.sense; break; case 3: /* tsa_intrg */ - len += sprintf(page + len, KERN_ERR PRINTK_HEADER - " tsb->tsa.intrg.: not supportet yet \n"); + len += sprintf(page + len, PRINTK_HEADER + " tsb->tsa.intrg.: not supported yet\n"); break; } if (sense) { for (sl = 0; sl < 4; sl++) { - len += sprintf(page + len, - KERN_ERR PRINTK_HEADER + len += sprintf(page + len, PRINTK_HEADER " Sense(hex) %2d-%2d:", (8 * sl), ((8 * sl) + 7)); for (sct = 0; sct < 8; sct++) { @@ -3226,40 +4311,40 @@ static void dasd_eckd_dump_sense_tcw(struct dasd_device *device, if (sense[27] & DASD_SENSE_BIT_0) { /* 24 Byte Sense Data */ - sprintf(page + len, KERN_ERR PRINTK_HEADER + sprintf(page + len, PRINTK_HEADER " 24 Byte: %x MSG %x, " "%s MSGb to SYSOP\n", sense[7] >> 4, sense[7] & 0x0f, sense[1] & 0x10 ?
"" : "no"); } else { /* 32 Byte Sense Data */ - sprintf(page + len, KERN_ERR PRINTK_HEADER + sprintf(page + len, PRINTK_HEADER " 32 Byte: Format: %x " "Exception class %x\n", sense[6] & 0x0f, sense[22] >> 4); } } else { - sprintf(page + len, KERN_ERR PRINTK_HEADER + sprintf(page + len, PRINTK_HEADER " SORRY - NO VALID SENSE AVAILABLE\n"); } } else { - sprintf(page + len, KERN_ERR PRINTK_HEADER + sprintf(page + len, PRINTK_HEADER " SORRY - NO TSB DATA AVAILABLE\n"); } - printk("%s", page); + printk(KERN_ERR "%s", page); free_page((unsigned long) page); } static void dasd_eckd_dump_sense(struct dasd_device *device, struct dasd_ccw_req *req, struct irb *irb) { - if (req && scsw_is_tm(&req->irb.scsw)) + if (scsw_is_tm(&irb->scsw)) dasd_eckd_dump_sense_tcw(device, req, irb); else dasd_eckd_dump_sense_ccw(device, req, irb); } -int dasd_eckd_pm_freeze(struct dasd_device *device) +static int dasd_eckd_pm_freeze(struct dasd_device *device) { /* * the device should be disconnected from our LCU structure @@ -3272,45 +4357,41 @@ int dasd_eckd_pm_freeze(struct dasd_device *device) return 0; } -int dasd_eckd_restore_device(struct dasd_device *device) +static int dasd_eckd_restore_device(struct dasd_device *device) { struct dasd_eckd_private *private; struct dasd_eckd_characteristics temp_rdc_data; - int is_known, rc; + int rc; struct dasd_uid temp_uid; unsigned long flags; + unsigned long cqr_flags = 0; private = (struct dasd_eckd_private *) device->private; /* Read Configuration Data */ - rc = dasd_eckd_read_conf(device); - if (rc) - goto out_err; + dasd_eckd_read_conf(device); - /* Generate device unique id and register in devmap */ - rc = dasd_eckd_generate_uid(device, &private->uid); - dasd_get_uid(device->cdev, &temp_uid); + dasd_eckd_get_uid(device, &temp_uid); + /* Generate device unique id */ + rc = dasd_eckd_generate_uid(device); + spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); if (memcmp(&private->uid, &temp_uid, sizeof(struct dasd_uid)) != 0) dev_err(&device->cdev->dev, "The UID of the DASD has " "changed\n"); + spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); if (rc) goto out_err; - dasd_set_uid(device->cdev, &private->uid); /* register lcu with alias handling, enable PAV if this is a new lcu */ - is_known = dasd_alias_make_device_known_to_lcu(device); - if (is_known < 0) - return is_known; - if (!is_known) { - dasd_eckd_validate_server(device); - dasd_alias_lcu_setup_complete(device); - } else - dasd_alias_wait_for_lcu_setup(device); + rc = dasd_alias_make_device_known_to_lcu(device); + if (rc) + return rc; + + set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr_flags); + dasd_eckd_validate_server(device, cqr_flags); /* RE-Read Configuration Data */ - rc = dasd_eckd_read_conf(device); - if (rc) - goto out_err; + dasd_eckd_read_conf(device); /* Read Feature Codes */ dasd_eckd_read_features(device); @@ -3336,18 +4417,74 @@ out_err: return -1; } +static int dasd_eckd_reload_device(struct dasd_device *device) +{ + struct dasd_eckd_private *private; + int rc, old_base; + char print_uid[60]; + struct dasd_uid uid; + unsigned long flags; + + private = (struct dasd_eckd_private *) device->private; + + spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); + old_base = private->uid.base_unit_addr; + spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); + + /* Read Configuration Data */ + rc = dasd_eckd_read_conf(device); + if (rc) + goto out_err; + + rc = dasd_eckd_generate_uid(device); + if (rc) + goto out_err; + /* + * update unit address configuration and + * add device to 
alias management + */ + dasd_alias_update_add_device(device); + + dasd_eckd_get_uid(device, &uid); + + if (old_base != uid.base_unit_addr) { + if (strlen(uid.vduit) > 0) + snprintf(print_uid, sizeof(print_uid), + "%s.%s.%04x.%02x.%s", uid.vendor, uid.serial, + uid.ssid, uid.base_unit_addr, uid.vduit); + else + snprintf(print_uid, sizeof(print_uid), + "%s.%s.%04x.%02x", uid.vendor, uid.serial, + uid.ssid, uid.base_unit_addr); + + dev_info(&device->cdev->dev, + "An Alias device was reassigned to a new base device " + "with UID: %s\n", print_uid); + } + return 0; + +out_err: + return -1; +} + static struct ccw_driver dasd_eckd_driver = { - .name = "dasd-eckd", - .owner = THIS_MODULE, + .driver = { + .name = "dasd-eckd", + .owner = THIS_MODULE, + }, .ids = dasd_eckd_ids, .probe = dasd_eckd_probe, .remove = dasd_generic_remove, .set_offline = dasd_generic_set_offline, .set_online = dasd_eckd_set_online, .notify = dasd_generic_notify, + .path_event = dasd_generic_path_event, + .shutdown = dasd_generic_shutdown, .freeze = dasd_generic_pm_freeze, .thaw = dasd_generic_restore_device, .restore = dasd_generic_restore_device, + .uc_handler = dasd_generic_uc_handler, + .int_class = IRQIO_DAS, }; /* @@ -3367,12 +4504,14 @@ static struct dasd_discipline dasd_eckd_discipline = { .owner = THIS_MODULE, .name = "ECKD", .ebcname = "ECKD", - .max_blocks = 240, + .max_blocks = 190, .check_device = dasd_eckd_check_characteristics, .uncheck_device = dasd_eckd_uncheck_device, .do_analysis = dasd_eckd_do_analysis, - .ready_to_online = dasd_eckd_ready_to_online, + .verify_path = dasd_eckd_verify_path, + .basic_to_ready = dasd_eckd_basic_to_ready, .online_to_ready = dasd_eckd_online_to_ready, + .ready_to_basic = dasd_eckd_ready_to_basic, .fill_geometry = dasd_eckd_fill_geometry, .start_IO = dasd_start_IO, .term_IO = dasd_term_IO, @@ -3380,7 +4519,7 @@ static struct dasd_discipline dasd_eckd_discipline = { .format_device = dasd_eckd_format_device, .erp_action = dasd_eckd_erp_action, .erp_postaction = dasd_eckd_erp_postaction, - .handle_unsolicited_interrupt = dasd_eckd_handle_unsolicited_interrupt, + .check_for_device_change = dasd_eckd_check_for_device_change, .build_cp = dasd_eckd_build_alias_cp, .free_cp = dasd_eckd_free_alias_cp, .dump_sense = dasd_eckd_dump_sense, @@ -3389,6 +4528,9 @@ static struct dasd_discipline dasd_eckd_discipline = { .ioctl = dasd_eckd_ioctl, .freeze = dasd_eckd_pm_freeze, .restore = dasd_eckd_restore_device, + .reload = dasd_eckd_reload_device, + .get_uid = dasd_eckd_get_uid, + .kick_validate = dasd_eckd_kick_validate_server, }; static int __init @@ -3397,10 +4539,30 @@ dasd_eckd_init(void) int ret; ASCEBC(dasd_eckd_discipline.ebcname, 4); + dasd_reserve_req = kmalloc(sizeof(*dasd_reserve_req), + GFP_KERNEL | GFP_DMA); + if (!dasd_reserve_req) + return -ENOMEM; + path_verification_worker = kmalloc(sizeof(*path_verification_worker), + GFP_KERNEL | GFP_DMA); + if (!path_verification_worker) { + kfree(dasd_reserve_req); + return -ENOMEM; + } + rawpadpage = (void *)__get_free_page(GFP_KERNEL); + if (!rawpadpage) { + kfree(path_verification_worker); + kfree(dasd_reserve_req); + return -ENOMEM; + } ret = ccw_driver_register(&dasd_eckd_driver); if (!ret) wait_for_device_probe(); - + else { + kfree(path_verification_worker); + kfree(dasd_reserve_req); + free_page((unsigned long)rawpadpage); + } return ret; } @@ -3408,6 +4570,9 @@ static void __exit dasd_eckd_cleanup(void) { ccw_driver_unregister(&dasd_eckd_driver); + kfree(path_verification_worker); + kfree(dasd_reserve_req); + free_page((unsigned 
long)rawpadpage); } module_init(dasd_eckd_init); diff --git a/drivers/s390/block/dasd_eckd.h b/drivers/s390/block/dasd_eckd.h index 864d53c0420..2555e494591 100644 --- a/drivers/s390/block/dasd_eckd.h +++ b/drivers/s390/block/dasd_eckd.h @@ -1,9 +1,8 @@ /* - * File...........: linux/drivers/s390/block/dasd_eckd.h * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com> * Horst Hummel <Horst.Hummel@de.ibm.com> * Bugreports.to..: <Linux390@de.ibm.com> - * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000 + * Copyright IBM Corp. 1999, 2000 * */ @@ -27,6 +26,7 @@ #define DASD_ECKD_CCW_WRITE_CKD 0x1d #define DASD_ECKD_CCW_READ_CKD 0x1e #define DASD_ECKD_CCW_PSF 0x27 +#define DASD_ECKD_CCW_SNID 0x34 #define DASD_ECKD_CCW_RSSD 0x3e #define DASD_ECKD_CCW_LOCATE_RECORD 0x47 #define DASD_ECKD_CCW_SNSS 0x54 @@ -36,14 +36,17 @@ #define DASD_ECKD_CCW_WRITE_KD_MT 0x8d #define DASD_ECKD_CCW_READ_KD_MT 0x8e #define DASD_ECKD_CCW_RELEASE 0x94 +#define DASD_ECKD_CCW_WRITE_FULL_TRACK 0x95 #define DASD_ECKD_CCW_READ_CKD_MT 0x9e #define DASD_ECKD_CCW_WRITE_CKD_MT 0x9d #define DASD_ECKD_CCW_WRITE_TRACK_DATA 0xA5 #define DASD_ECKD_CCW_READ_TRACK_DATA 0xA6 #define DASD_ECKD_CCW_RESERVE 0xB4 +#define DASD_ECKD_CCW_READ_TRACK 0xDE #define DASD_ECKD_CCW_PFX 0xE7 #define DASD_ECKD_CCW_PFX_READ 0xEA #define DASD_ECKD_CCW_RSCK 0xF9 +#define DASD_ECKD_CCW_RCD 0xFA /* * Perform Subsystem Function / Sub-Orders @@ -56,6 +59,11 @@ */ #define LV_COMPAT_CYL 0xFFFE + +#define FCX_MAX_DATA_FACTOR 65536 +#define DASD_ECKD_RCD_DATA_SIZE 256 + + /***************************************************************************** * SECTION: Type Definitions ****************************************************************************/ @@ -320,17 +328,16 @@ struct dasd_gneq { __u8 identifier:2; __u8 reserved:6; } __attribute__ ((packed)) flags; - __u8 reserved[7]; + __u8 reserved[5]; + struct { + __u8 value:2; + __u8 number:6; + } __attribute__ ((packed)) timeout; + __u8 reserved3; __u16 subsystemID; __u8 reserved2[22]; } __attribute__ ((packed)); -struct dasd_eckd_path { - __u8 opm; - __u8 ppm; - __u8 npm; -}; - struct dasd_rssd_features { char feature[256]; } __attribute__((packed)); @@ -426,7 +433,6 @@ struct alias_pav_group { struct dasd_device *next; }; - struct dasd_eckd_private { struct dasd_eckd_characteristics rdc_data; u8 *conf_data; @@ -437,7 +443,6 @@ struct dasd_eckd_private { struct vd_sneq *vdsneq; struct dasd_gneq *gneq; - struct dasd_eckd_path path_data; struct eckd_count count_area[5]; int init_cqr_status; int uses_cdl; @@ -450,6 +455,8 @@ struct dasd_eckd_private { struct alias_pav_group *pavgroup; struct alias_lcu *lcu; int count; + + u32 fcx_max_data; }; @@ -463,4 +470,5 @@ void dasd_alias_handle_summary_unit_check(struct dasd_device *, struct irb *); void dasd_eckd_reset_ccw_to_base_io(struct dasd_ccw_req *); void dasd_alias_lcu_setup_complete(struct dasd_device *); void dasd_alias_wait_for_lcu_setup(struct dasd_device *); +int dasd_alias_update_add_device(struct dasd_device *); #endif /* DASD_ECKD_H */ diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c index 1f3e967aaba..21ef63cf096 100644 --- a/drivers/s390/block/dasd_eer.c +++ b/drivers/s390/block/dasd_eer.c @@ -1,7 +1,7 @@ /* * Character device driver for extended error reporting. * - * Copyright (C) 2005 IBM Corporation + * Copyright IBM Corp. 
2005 * extended error reporting for DASD ECKD devices * Author(s): Stefan Weinhuber <wein@de.ibm.com> */ @@ -17,11 +17,11 @@ #include <linux/device.h> #include <linux/poll.h> #include <linux/mutex.h> -#include <linux/smp_lock.h> #include <linux/err.h> +#include <linux/slab.h> #include <asm/uaccess.h> -#include <asm/atomic.h> +#include <linux/atomic.h> #include <asm/ebcdic.h> #include "dasd_int.h" @@ -473,6 +473,7 @@ int dasd_eer_enable(struct dasd_device *device) cqr->retries = 255; cqr->expires = 10 * HZ; clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); + set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags); ccw = cqr->cpaddr; ccw->cmd_code = DASD_ECKD_CCW_SNSS; @@ -480,7 +481,7 @@ int dasd_eer_enable(struct dasd_device *device) ccw->flags = 0; ccw->cda = (__u32)(addr_t) cqr->data; - cqr->buildclk = get_clock(); + cqr->buildclk = get_tod_clock(); cqr->status = DASD_CQR_FILLED; cqr->callback = dasd_eer_snss_cb; @@ -669,6 +670,7 @@ static const struct file_operations dasd_eer_fops = { .read = &dasd_eer_read, .poll = &dasd_eer_poll, .owner = THIS_MODULE, + .llseek = noop_llseek, }; static struct miscdevice *dasd_eer_dev = NULL; @@ -700,7 +702,7 @@ int __init dasd_eer_init(void) void dasd_eer_exit(void) { if (dasd_eer_dev) { - WARN_ON(misc_deregister(dasd_eer_dev) != 0); + misc_deregister(dasd_eer_dev); kfree(dasd_eer_dev); dasd_eer_dev = NULL; } diff --git a/drivers/s390/block/dasd_erp.c b/drivers/s390/block/dasd_erp.c index 7656384a811..e1e88486b2b 100644 --- a/drivers/s390/block/dasd_erp.c +++ b/drivers/s390/block/dasd_erp.c @@ -1,11 +1,10 @@ /* - * File...........: linux/drivers/s390/block/dasd.c * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com> * Horst Hummel <Horst.Hummel@de.ibm.com> * Carsten Otte <Cotte@de.ibm.com> * Martin Schwidefsky <schwidefsky@de.ibm.com> * Bugreports.to..: <Linux390@de.ibm.com> - * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2001 + * Copyright IBM Corp. 
1999, 2001 * */ @@ -96,13 +95,14 @@ dasd_default_erp_action(struct dasd_ccw_req *cqr) DBF_DEV_EVENT(DBF_DEBUG, device, "default ERP called (%i retries left)", cqr->retries); - cqr->lpm = LPM_ANYPATH; + if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) + cqr->lpm = device->path_data.opm; cqr->status = DASD_CQR_FILLED; } else { pr_err("%s: default ERP has run out of retries and failed\n", dev_name(&device->cdev->dev)); cqr->status = DASD_CQR_FAILED; - cqr->stopclk = get_clock(); + cqr->stopclk = get_tod_clock(); } return cqr; } /* end dasd_default_erp_action */ @@ -124,10 +124,15 @@ dasd_default_erp_action(struct dasd_ccw_req *cqr) struct dasd_ccw_req *dasd_default_erp_postaction(struct dasd_ccw_req *cqr) { int success; + unsigned long long startclk, stopclk; + struct dasd_device *startdev; BUG_ON(cqr->refers == NULL || cqr->function == NULL); success = cqr->status == DASD_CQR_DONE; + startclk = cqr->startclk; + stopclk = cqr->stopclk; + startdev = cqr->startdev; /* free all ERPs - but NOT the original cqr */ while (cqr->refers != NULL) { @@ -142,11 +147,14 @@ struct dasd_ccw_req *dasd_default_erp_postaction(struct dasd_ccw_req *cqr) } /* set corresponding status to original cqr */ + cqr->startclk = startclk; + cqr->stopclk = stopclk; + cqr->startdev = startdev; if (success) cqr->status = DASD_CQR_DONE; else { cqr->status = DASD_CQR_FAILED; - cqr->stopclk = get_clock(); + cqr->stopclk = get_tod_clock(); } return cqr; @@ -159,6 +167,16 @@ dasd_log_sense(struct dasd_ccw_req *cqr, struct irb *irb) struct dasd_device *device; device = cqr->startdev; + if (cqr->intrc == -ETIMEDOUT) { + dev_err(&device->cdev->dev, + "A timeout error occurred for cqr %p", cqr); + return; + } + if (cqr->intrc == -ENOLINK) { + dev_err(&device->cdev->dev, + "A transport error occurred for cqr %p", cqr); + return; + } /* dump sense data */ if (device->discipline && device->discipline->dump_sense) device->discipline->dump_sense(device, cqr, irb); diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c index 37282b90eec..2c8e68bf9a1 100644 --- a/drivers/s390/block/dasd_fba.c +++ b/drivers/s390/block/dasd_fba.c @@ -1,5 +1,4 @@ /* - * File...........: linux/drivers/s390/block/dasd_fba.c * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com> * Bugreports.to..: <Linux390@de.ibm.com> * Copyright IBM Corp. 
1999, 2009 @@ -30,6 +29,8 @@ #endif /* PRINTK_HEADER */ #define PRINTK_HEADER "dasd(fba):" +#define FBA_DEFAULT_RETRIES 32 + #define DASD_FBA_CCW_WRITE 0x41 #define DASD_FBA_CCW_READ 0x42 #define DASD_FBA_CCW_LOCATE 0x43 @@ -65,17 +66,21 @@ dasd_fba_set_online(struct ccw_device *cdev) } static struct ccw_driver dasd_fba_driver = { - .name = "dasd-fba", - .owner = THIS_MODULE, + .driver = { + .name = "dasd-fba", + .owner = THIS_MODULE, + }, .ids = dasd_fba_ids, .probe = dasd_fba_probe, .remove = dasd_generic_remove, .set_offline = dasd_generic_set_offline, .set_online = dasd_fba_set_online, .notify = dasd_generic_notify, + .path_event = dasd_generic_path_event, .freeze = dasd_generic_pm_freeze, .thaw = dasd_generic_restore_device, .restore = dasd_generic_restore_device, + .int_class = IRQIO_DAS, }; static void @@ -163,6 +168,10 @@ dasd_fba_check_characteristics(struct dasd_device *device) return rc; } + device->default_expires = DASD_EXPIRES; + device->default_retries = FBA_DEFAULT_RETRIES; + device->path_data.opm = LPM_ANYPATH; + readonly = dasd_device_is_ro(device); if (readonly) set_bit(DASD_FLAG_DEVICE_RO, &device->flags); @@ -229,24 +238,16 @@ dasd_fba_erp_postaction(struct dasd_ccw_req * cqr) return NULL; } -static void dasd_fba_handle_unsolicited_interrupt(struct dasd_device *device, - struct irb *irb) +static void dasd_fba_check_for_device_change(struct dasd_device *device, + struct dasd_ccw_req *cqr, + struct irb *irb) { char mask; /* first of all check for state change pending interrupt */ mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP; - if ((irb->scsw.cmd.dstat & mask) == mask) { + if ((irb->scsw.cmd.dstat & mask) == mask) dasd_generic_handle_state_change(device); - return; - } - - /* check for unsolicited interrupts */ - DBF_DEV_EVENT(DBF_WARNING, device, "%s", - "unsolicited interrupt received"); - device->discipline->dump_sense_dbf(device, irb, "unsolicited"); - dasd_schedule_device_bh(device); - return; }; static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device * memdev, @@ -259,7 +260,7 @@ static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device * memdev, struct dasd_ccw_req *cqr; struct ccw1 *ccw; struct req_iterator iter; - struct bio_vec *bv; + struct bio_vec bv; char *dst; int count, cidaw, cplength, datasize; sector_t recid, first_rec, last_rec; @@ -282,13 +283,13 @@ static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device * memdev, count = 0; cidaw = 0; rq_for_each_segment(bv, req, iter) { - if (bv->bv_len & (blksize - 1)) + if (bv.bv_len & (blksize - 1)) /* Fba can only do full blocks. */ return ERR_PTR(-EINVAL); - count += bv->bv_len >> (block->s2b_shift + 9); + count += bv.bv_len >> (block->s2b_shift + 9); #if defined(CONFIG_64BIT) - if (idal_is_needed (page_address(bv->bv_page), bv->bv_len)) - cidaw += bv->bv_len / blksize; + if (idal_is_needed (page_address(bv.bv_page), bv.bv_len)) + cidaw += bv.bv_len / blksize; #endif } /* Paranoia. 
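The FBA segment scan above counts device blocks with a double shift: shifting by 9 converts bytes to 512-byte sectors, and shifting by s2b_shift converts sectors to blocks. A small worked example, assuming a 4096-byte block size (so s2b_shift is 3); the values are invented for illustration:

    #include <stdio.h>

    int main(void)
    {
            unsigned int s2b_shift = 3;        /* 512 << 3 == 4096 */
            unsigned int bv_len = 3 * 4096;    /* one 12 KiB bio segment */

            /* bv_len >> 9 = sectors; sectors >> s2b_shift = blocks */
            printf("%u blocks\n", bv_len >> (s2b_shift + 9));  /* prints 3 */
            return 0;
    }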
*/ @@ -325,16 +326,16 @@ static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device * memdev, } recid = first_rec; rq_for_each_segment(bv, req, iter) { - dst = page_address(bv->bv_page) + bv->bv_offset; + dst = page_address(bv.bv_page) + bv.bv_offset; if (dasd_page_cache) { char *copy = kmem_cache_alloc(dasd_page_cache, GFP_DMA | __GFP_NOWARN); if (copy && rq_data_dir(req) == WRITE) - memcpy(copy + bv->bv_offset, dst, bv->bv_len); + memcpy(copy + bv.bv_offset, dst, bv.bv_len); if (copy) - dst = copy + bv->bv_offset; + dst = copy + bv.bv_offset; } - for (off = 0; off < bv->bv_len; off += blksize) { + for (off = 0; off < bv.bv_len; off += blksize) { /* Locate record for stupid devices. */ if (private->rdc_data.mode.bits.data_chain == 0) { ccw[-1].flags |= CCW_FLAG_CC; @@ -370,9 +371,9 @@ static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device * memdev, cqr->startdev = memdev; cqr->memdev = memdev; cqr->block = block; - cqr->expires = 5 * 60 * HZ; /* 5 minutes */ - cqr->retries = 32; - cqr->buildclk = get_clock(); + cqr->expires = memdev->default_expires * HZ; /* default 5 minutes */ + cqr->retries = memdev->default_retries; + cqr->buildclk = get_tod_clock(); cqr->status = DASD_CQR_FILLED; return cqr; } @@ -383,7 +384,7 @@ dasd_fba_free_cp(struct dasd_ccw_req *cqr, struct request *req) struct dasd_fba_private *private; struct ccw1 *ccw; struct req_iterator iter; - struct bio_vec *bv; + struct bio_vec bv; char *dst, *cda; unsigned int blksize, off; int status; @@ -398,8 +399,8 @@ dasd_fba_free_cp(struct dasd_ccw_req *cqr, struct request *req) if (private->rdc_data.mode.bits.data_chain != 0) ccw++; rq_for_each_segment(bv, req, iter) { - dst = page_address(bv->bv_page) + bv->bv_offset; - for (off = 0; off < bv->bv_len; off += blksize) { + dst = page_address(bv.bv_page) + bv.bv_offset; + for (off = 0; off < bv.bv_len; off += blksize) { /* Skip locate record. 
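A mechanical change repeated throughout this diff is rq_for_each_segment handing out struct bio_vec by value instead of by pointer, so every bv-> access becomes bv. throughout the build_cp and free_cp functions. The two access styles side by side, as a stand-alone sketch with a reduced struct:

    struct bio_vec {
            void *bv_page;
            unsigned int bv_len;
            unsigned int bv_offset;
    };

    /* Old style: the iterator exposed a pointer into the bio's vector. */
    static unsigned int seg_len_old(const struct bio_vec *bv)
    {
            return bv->bv_len;
    }

    /* New style: the iterator yields a copy, which the block layer can
     * synthesize when splitting bios, so callers take it by value. */
    static unsigned int seg_len_new(struct bio_vec bv)
    {
            return bv.bv_len;
    }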
*/ if (private->rdc_data.mode.bits.data_chain == 0) ccw++; @@ -410,7 +411,7 @@ dasd_fba_free_cp(struct dasd_ccw_req *cqr, struct request *req) cda = (char *)((addr_t) ccw->cda); if (dst != cda) { if (rq_data_dir(req) == READ) - memcpy(dst, cda, bv->bv_len); + memcpy(dst, cda, bv.bv_len); kmem_cache_free(dasd_page_cache, (void *)((addr_t)cda & PAGE_MASK)); } @@ -427,7 +428,10 @@ out: static void dasd_fba_handle_terminated_request(struct dasd_ccw_req *cqr) { - cqr->status = DASD_CQR_FILLED; + if (cqr->retries < 0) + cqr->status = DASD_CQR_FAILED; + else + cqr->status = DASD_CQR_FILLED; }; static int @@ -481,19 +485,19 @@ dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req, "No memory to dump sense data"); return; } - len = sprintf(page, KERN_ERR PRINTK_HEADER + len = sprintf(page, PRINTK_HEADER " I/O status report for device %s:\n", dev_name(&device->cdev->dev)); - len += sprintf(page + len, KERN_ERR PRINTK_HEADER + len += sprintf(page + len, PRINTK_HEADER " in req: %p CS: 0x%02X DS: 0x%02X\n", req, irb->scsw.cmd.cstat, irb->scsw.cmd.dstat); - len += sprintf(page + len, KERN_ERR PRINTK_HEADER + len += sprintf(page + len, PRINTK_HEADER " device %s: Failing CCW: %p\n", dev_name(&device->cdev->dev), (void *) (addr_t) irb->scsw.cmd.cpa); if (irb->esw.esw0.erw.cons) { for (sl = 0; sl < 4; sl++) { - len += sprintf(page + len, KERN_ERR PRINTK_HEADER + len += sprintf(page + len, PRINTK_HEADER " Sense(hex) %2d-%2d:", (8 * sl), ((8 * sl) + 7)); @@ -504,7 +508,7 @@ dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req, len += sprintf(page + len, "\n"); } } else { - len += sprintf(page + len, KERN_ERR PRINTK_HEADER + len += sprintf(page + len, PRINTK_HEADER " SORRY - NO VALID SENSE AVAILABLE\n"); } printk(KERN_ERR "%s", page); @@ -514,10 +518,9 @@ dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req, act = req->cpaddr; for (last = act; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++); end = min(act + 8, last); - len = sprintf(page, KERN_ERR PRINTK_HEADER - " Related CP in req: %p\n", req); + len = sprintf(page, PRINTK_HEADER " Related CP in req: %p\n", req); while (act <= end) { - len += sprintf(page + len, KERN_ERR PRINTK_HEADER + len += sprintf(page + len, PRINTK_HEADER " CCW %p: %08X %08X DAT:", act, ((int *) act)[0], ((int *) act)[1]); for (count = 0; count < 32 && count < act->count; @@ -535,11 +538,11 @@ dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req, len = 0; if (act < ((struct ccw1 *)(addr_t) irb->scsw.cmd.cpa) - 2) { act = ((struct ccw1 *)(addr_t) irb->scsw.cmd.cpa) - 2; - len += sprintf(page + len, KERN_ERR PRINTK_HEADER "......\n"); + len += sprintf(page + len, PRINTK_HEADER "......\n"); } end = min((struct ccw1 *)(addr_t) irb->scsw.cmd.cpa + 2, last); while (act <= end) { - len += sprintf(page + len, KERN_ERR PRINTK_HEADER + len += sprintf(page + len, PRINTK_HEADER " CCW %p: %08X %08X DAT:", act, ((int *) act)[0], ((int *) act)[1]); for (count = 0; count < 32 && count < act->count; @@ -554,10 +557,10 @@ dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req, /* print last CCWs */ if (act < last - 2) { act = last - 2; - len += sprintf(page + len, KERN_ERR PRINTK_HEADER "......\n"); + len += sprintf(page + len, PRINTK_HEADER "......\n"); } while (act <= last) { - len += sprintf(page + len, KERN_ERR PRINTK_HEADER + len += sprintf(page + len, PRINTK_HEADER " CCW %p: %08X %08X DAT:", act, ((int *) act)[0], ((int *) act)[1]); for (count = 0; count < 32 && count < act->count; @@ -594,13 
+597,14 @@ static struct dasd_discipline dasd_fba_discipline = { .max_blocks = 96, .check_device = dasd_fba_check_characteristics, .do_analysis = dasd_fba_do_analysis, + .verify_path = dasd_generic_verify_path, .fill_geometry = dasd_fba_fill_geometry, .start_IO = dasd_start_IO, .term_IO = dasd_term_IO, .handle_terminated_request = dasd_fba_handle_terminated_request, .erp_action = dasd_fba_erp_action, .erp_postaction = dasd_fba_erp_postaction, - .handle_unsolicited_interrupt = dasd_fba_handle_unsolicited_interrupt, + .check_for_device_change = dasd_fba_check_for_device_change, .build_cp = dasd_fba_build_cp, .free_cp = dasd_fba_free_cp, .dump_sense = dasd_fba_dump_sense, diff --git a/drivers/s390/block/dasd_fba.h b/drivers/s390/block/dasd_fba.h index 14c910baa5f..b5d3db0e5ef 100644 --- a/drivers/s390/block/dasd_fba.h +++ b/drivers/s390/block/dasd_fba.h @@ -1,8 +1,7 @@ /* - * File...........: linux/drivers/s390/block/dasd_fba.h * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com> * Bugreports.to..: <Linux390@de.ibm.com> - * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000 + * Copyright IBM Corp. 1999, 2000 * */ diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c index 30a1ca3d08b..f224d59c4b6 100644 --- a/drivers/s390/block/dasd_genhd.c +++ b/drivers/s390/block/dasd_genhd.c @@ -1,11 +1,10 @@ /* - * File...........: linux/drivers/s390/block/dasd_genhd.c * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com> * Horst Hummel <Horst.Hummel@de.ibm.com> * Carsten Otte <Cotte@de.ibm.com> * Martin Schwidefsky <schwidefsky@de.ibm.com> * Bugreports.to..: <Linux390@de.ibm.com> - * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2001 + * Copyright IBM Corp. 1999, 2001 * * gendisk related functions for the dasd driver.
* @@ -73,7 +72,7 @@ int dasd_gendisk_alloc(struct dasd_block *block) if (base->features & DASD_FEATURE_READONLY || test_bit(DASD_FLAG_DEVICE_RO, &base->flags)) set_disk_ro(gdp, 1); - gdp->private_data = block; + dasd_add_link_to_gendisk(gdp, base); gdp->queue = block->request_queue; block->gdp = gdp; set_capacity(block->gdp, 0); @@ -88,7 +87,6 @@ void dasd_gendisk_free(struct dasd_block *block) { if (block->gdp) { del_gendisk(block->gdp); - block->gdp->queue = NULL; block->gdp->private_data = NULL; put_disk(block->gdp); block->gdp = NULL; @@ -103,7 +101,7 @@ int dasd_scan_partitions(struct dasd_block *block) struct block_device *bdev; bdev = bdget_disk(block->gdp, 0); - if (!bdev || blkdev_get(bdev, FMODE_READ) < 0) + if (!bdev || blkdev_get(bdev, FMODE_READ, NULL) < 0) return -ENODEV; /* * See fs/partition/check.c:register_disk,rescan_partitions diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h index a91d4a97d4f..690001af0d0 100644 --- a/drivers/s390/block/dasd_int.h +++ b/drivers/s390/block/dasd_int.h @@ -1,5 +1,4 @@ /* - * File...........: linux/drivers/s390/block/dasd_int.h * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com> * Horst Hummel <Horst.Hummel@de.ibm.com> * Martin Schwidefsky <schwidefsky@de.ibm.com> @@ -10,8 +9,6 @@ #ifndef DASD_INT_H #define DASD_INT_H -#ifdef __KERNEL__ - /* we keep old device allocation scheme; IOW, minors are still in 0..255 */ #define DASD_PER_MAJOR (1U << (MINORBITS - DASD_PARTN_BITS)) #define DASD_PARTN_MASK ((1 << DASD_PARTN_BITS) - 1) @@ -81,6 +78,10 @@ struct dasd_block; #define DASD_SIM_MSG_TO_OP 0x03 #define DASD_SIM_LOG 0x0C +/* lock class for nested cdev lock */ +#define CDEV_NESTED_FIRST 1 +#define CDEV_NESTED_SECOND 2 + /* * SECTION: MACROs for klogd and s390 debug feature (dbf) */ @@ -182,7 +183,7 @@ struct dasd_ccw_req { /* ... and how */ unsigned long starttime; /* jiffies time of request start */ - int expires; /* expiration period in jiffies */ + unsigned long expires; /* expiration period in jiffies */ char lpm; /* logical path mask */ void *data; /* pointer to data area */ @@ -220,15 +221,43 @@ struct dasd_ccw_req { #define DASD_CQR_CLEARED 0x84 /* request was cleared */ #define DASD_CQR_SUCCESS 0x85 /* request was successful */ +/* default expiration time */ +#define DASD_EXPIRES 300 +#define DASD_EXPIRES_MAX 40000000 +#define DASD_RETRIES 256 +#define DASD_RETRIES_MAX 32768 /* per dasd_ccw_req flags */ #define DASD_CQR_FLAGS_USE_ERP 0 /* use ERP for this request */ #define DASD_CQR_FLAGS_FAILFAST 1 /* FAILFAST */ +#define DASD_CQR_VERIFY_PATH 2 /* path verification request */ +#define DASD_CQR_ALLOW_SLOCK 3 /* Try this request even when lock was * stolen. Should not be combined with * DASD_CQR_FLAGS_USE_ERP */ /* Signature for error recovery functions. */ typedef struct dasd_ccw_req *(*dasd_erp_fn_t) (struct dasd_ccw_req *); /* + * Unique identifier for dasd device. */ +#define UA_NOT_CONFIGURED 0x00 +#define UA_BASE_DEVICE 0x01 +#define UA_BASE_PAV_ALIAS 0x02 +#define UA_HYPER_PAV_ALIAS 0x03 + +struct dasd_uid { + __u8 type; + char vendor[4]; + char serial[15]; + __u16 ssid; + __u8 real_unit_addr; + __u8 base_unit_addr; + char vduit[33]; +}; + +/* * the struct dasd_discipline is * sth like a table of virtual functions, if you think of dasd_eckd * inheriting dasd... @@ -262,13 +291,22 @@ struct dasd_discipline { int (*do_analysis) (struct dasd_block *); /* + * This function is called when new paths become available.
+ * Disciplines may use this callback to do the necessary setup work, + * e.g. verify that a new path is compatible with the current + * configuration. + */ + int (*verify_path)(struct dasd_device *, __u8); + + /* * Last things to do when a device is set online, and first things * when it is set offline. */ - int (*ready_to_online) (struct dasd_device *); + int (*basic_to_ready) (struct dasd_device *); int (*online_to_ready) (struct dasd_device *); + int (*ready_to_basic) (struct dasd_device *); /* * Device operation functions. build_cp creates a ccw chain for * a block device request, start_io starts the request and * term_IO cancels it (e.g. in case of a timeout). format_device @@ -282,8 +320,8 @@ struct dasd_discipline { int (*start_IO) (struct dasd_ccw_req *); int (*term_IO) (struct dasd_ccw_req *); void (*handle_terminated_request) (struct dasd_ccw_req *); - struct dasd_ccw_req *(*format_device) (struct dasd_device *, - struct format_data_t *); + int (*format_device) (struct dasd_device *, + struct format_data_t *); int (*free_cp) (struct dasd_ccw_req *, struct request *); /* @@ -300,9 +338,9 @@ void (*dump_sense) (struct dasd_device *, struct dasd_ccw_req *, struct irb *); void (*dump_sense_dbf) (struct dasd_device *, struct irb *, char *); - - void (*handle_unsolicited_interrupt) (struct dasd_device *, - struct irb *); + void (*check_for_device_change) (struct dasd_device *, + struct dasd_ccw_req *, + struct irb *); /* i/o control functions. */ int (*fill_geometry) (struct dasd_block *, struct hd_geometry *); @@ -312,28 +350,16 @@ /* suspend/resume functions */ int (*freeze) (struct dasd_device *); int (*restore) (struct dasd_device *); -}; -extern struct dasd_discipline *dasd_diag_discipline_pointer; - -/* - * Unique identifier for dasd device. - */ -#define UA_NOT_CONFIGURED 0x00 -#define UA_BASE_DEVICE 0x01 -#define UA_BASE_PAV_ALIAS 0x02 -#define UA_HYPER_PAV_ALIAS 0x03 + /* reload device after state change */ + int (*reload) (struct dasd_device *); -struct dasd_uid { - __u8 type; - char vendor[4]; - char serial[15]; - __u16 ssid; - __u8 real_unit_addr; - __u8 base_unit_addr; - char vduit[33]; + int (*get_uid) (struct dasd_device *, struct dasd_uid *); + void (*kick_validate) (struct dasd_device *); }; +extern struct dasd_discipline *dasd_diag_discipline_pointer; + /* * Notification numbers for extended error reporting notifications: * The DASD_EER_DISABLE notification is sent before a dasd_device (and it's @@ -350,6 +376,48 @@ struct dasd_uid { #define DASD_EER_STATECHANGE 3 #define DASD_EER_PPRCSUSPEND 4 +struct dasd_path { + __u8 opm; + __u8 tbvpm; + __u8 ppm; + __u8 npm; +}; + +struct dasd_profile_info { + /* legacy part of profile data, as in dasd_profile_info_t */ + unsigned int dasd_io_reqs; /* number of requests processed */ + unsigned int dasd_io_sects; /* number of sectors processed */ + unsigned int dasd_io_secs[32]; /* histogram of requests' sizes */ + unsigned int dasd_io_times[32]; /* histogram of requests' times */ + unsigned int dasd_io_timps[32]; /* h. of requests' times per sector */ + unsigned int dasd_io_time1[32]; /* hist. of time from build to start */ + unsigned int dasd_io_time2[32]; /* hist. of time from start to irq */ + unsigned int dasd_io_time2ps[32]; /* hist. of time from start to irq */ + unsigned int dasd_io_time3[32]; /* hist. of time from irq to end */ + unsigned int dasd_io_nr_req[32]; /* hist.
of # of requests in chanq */ + + /* new data */ + struct timespec starttod; /* time of start or last reset */ + unsigned int dasd_io_alias; /* requests using an alias */ + unsigned int dasd_io_tpm; /* requests using transport mode */ + unsigned int dasd_read_reqs; /* total number of read requests */ + unsigned int dasd_read_sects; /* total number of read sectors */ + unsigned int dasd_read_alias; /* read requests using an alias */ + unsigned int dasd_read_tpm; /* read requests in transport mode */ + unsigned int dasd_read_secs[32]; /* histogram of requests' sizes */ + unsigned int dasd_read_times[32]; /* histogram of requests' times */ + unsigned int dasd_read_time1[32]; /* hist. time from build to start */ + unsigned int dasd_read_time2[32]; /* hist. of time from start to irq */ + unsigned int dasd_read_time3[32]; /* hist. of time from irq to end */ + unsigned int dasd_read_nr_req[32]; /* hist. of # of requests in chanq */ +}; + +struct dasd_profile { + struct dentry *dentry; + struct dasd_profile_info *data; + spinlock_t lock; +}; + struct dasd_device { /* Block device stuff. */ struct dasd_block *block; @@ -365,6 +433,7 @@ struct dasd_device { struct dasd_discipline *discipline; struct dasd_discipline *base_discipline; char *private; + struct dasd_path path_data; /* Device state and target state. */ int state, target; @@ -386,6 +455,8 @@ struct dasd_device { struct tasklet_struct tasklet; struct work_struct kick_work; struct work_struct restore_device; + struct work_struct reload_device; + struct work_struct kick_validate; struct timer_list timer; debug_info_t *debug_area; @@ -394,6 +465,15 @@ struct dasd_device { /* hook for alias management */ struct list_head alias_list; + + /* default expiration time in s */ + unsigned long default_expires; + unsigned long default_retries; + + unsigned long blk_timeout; + + struct dentry *debugfs_dentry; + struct dasd_profile profile; }; struct dasd_block { @@ -416,9 +496,8 @@ struct dasd_block { struct tasklet_struct tasklet; struct timer_list timer; -#ifdef CONFIG_DASD_PROFILE - struct dasd_profile_info_t profile; -#endif + struct dentry *debugfs_dentry; + struct dasd_profile profile; }; @@ -440,6 +519,15 @@ struct dasd_block { * confuse this with the user specified * read-only feature.
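Each of the 32-entry arrays in dasd_profile_info above is a power-of-two histogram. A sketch of the bucketing such a layout implies; the exact indexing the driver uses is not shown in this hunk, so treat the helper as an assumption:

    #include <stdio.h>

    /* Map a value into one of 32 buckets; bucket n covers [2^n, 2^(n+1)). */
    static unsigned int log2_bucket(unsigned long value)
    {
            unsigned int idx = 0;

            while (value > 1 && idx < 31) {
                    value >>= 1;
                    idx++;
            }
            return idx;
    }

    int main(void)
    {
            unsigned int dasd_io_secs[32] = { 0 };

            dasd_io_secs[log2_bucket(24)]++;  /* a 24-sector request */
            printf("24 sectors -> bucket %u\n", log2_bucket(24)); /* 4 */
            return 0;
    }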
*/ +#define DASD_FLAG_IS_RESERVED 7 /* The device is reserved */ +#define DASD_FLAG_LOCK_STOLEN 8 /* The device lock was stolen */ +#define DASD_FLAG_SUSPENDED 9 /* The device was suspended */ +#define DASD_FLAG_SAFE_OFFLINE 10 /* safe offline processing requested*/ +#define DASD_FLAG_SAFE_OFFLINE_RUNNING 11 /* safe offline running */ +#define DASD_FLAG_ABORTALL 12 /* Abort all noretry requests */ + +#define DASD_SLEEPON_START_TAG ((void *) 1) +#define DASD_SLEEPON_END_TAG ((void *) 2) void dasd_put_device_wake(struct dasd_device *); @@ -549,12 +637,13 @@ dasd_check_blocksize(int bsize) } /* externals in dasd.c */ -#define DASD_PROFILE_ON 1 -#define DASD_PROFILE_OFF 0 +#define DASD_PROFILE_OFF 0 +#define DASD_PROFILE_ON 1 +#define DASD_PROFILE_GLOBAL_ONLY 2 extern debug_info_t *dasd_debug_area; -extern struct dasd_profile_info_t dasd_global_profile; -extern unsigned int dasd_profile_level; +extern struct dasd_profile_info dasd_global_profile_data; +extern unsigned int dasd_global_profile_level; extern const struct block_device_operations dasd_device_operations; extern struct kmem_cache *dasd_page_cache; @@ -565,6 +654,7 @@ struct dasd_ccw_req * dasd_smalloc_request(int , int, int, struct dasd_device *); void dasd_kfree_request(struct dasd_ccw_req *, struct dasd_device *); void dasd_sfree_request(struct dasd_ccw_req *, struct dasd_device *); +void dasd_wakeup_cb(struct dasd_ccw_req *, void *); static inline int dasd_kmalloc_set_cda(struct ccw1 *ccw, void *cda, struct dasd_device *device) @@ -578,10 +668,13 @@ void dasd_free_device(struct dasd_device *); struct dasd_block *dasd_alloc_block(void); void dasd_free_block(struct dasd_block *); +enum blk_eh_timer_return dasd_times_out(struct request *req); + void dasd_enable_device(struct dasd_device *); void dasd_set_target_state(struct dasd_device *, int); void dasd_kick_device(struct dasd_device *); void dasd_restore_device(struct dasd_device *); +void dasd_reload_device(struct dasd_device *); void dasd_add_request_head(struct dasd_ccw_req *); void dasd_add_request_tail(struct dasd_ccw_req *); @@ -590,6 +683,7 @@ int dasd_term_IO(struct dasd_ccw_req *); void dasd_schedule_device_bh(struct dasd_device *); void dasd_schedule_block_bh(struct dasd_block *); int dasd_sleep_on(struct dasd_ccw_req *); +int dasd_sleep_on_queue(struct list_head *); int dasd_sleep_on_immediatly(struct dasd_ccw_req *); int dasd_sleep_on_interruptible(struct dasd_ccw_req *); void dasd_device_set_timer(struct dasd_device *, int); @@ -603,9 +697,16 @@ void dasd_generic_remove (struct ccw_device *cdev); int dasd_generic_set_online(struct ccw_device *, struct dasd_discipline *); int dasd_generic_set_offline (struct ccw_device *cdev); int dasd_generic_notify(struct ccw_device *, int); +int dasd_generic_last_path_gone(struct dasd_device *); +int dasd_generic_path_operational(struct dasd_device *); +void dasd_generic_shutdown(struct ccw_device *); + void dasd_generic_handle_state_change(struct dasd_device *); int dasd_generic_pm_freeze(struct ccw_device *); int dasd_generic_restore_device(struct ccw_device *); +enum uc_todo dasd_generic_uc_handler(struct ccw_device *, struct irb *); +void dasd_generic_path_event(struct ccw_device *, int *); +int dasd_generic_verify_path(struct dasd_device *, __u8); int dasd_generic_read_dev_chars(struct dasd_device *, int, void *, int); char *dasd_get_sense(struct irb *); @@ -615,6 +716,11 @@ void dasd_device_remove_stop_bits(struct dasd_device *, int); int dasd_device_is_ro(struct dasd_device *); +void dasd_profile_reset(struct dasd_profile *); 
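+/* Note: dasd_profile.data may be NULL while profiling is off for the object in question; readers check it under dasd_profile.lock, as the BIODASDPRRD handler in dasd_ioctl.c below does. */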
+int dasd_profile_on(struct dasd_profile *); +void dasd_profile_off(struct dasd_profile *); +void dasd_global_profile_reset(void); +char *dasd_get_user_string(const char __user *, size_t); /* externals in dasd_devmap.c */ extern int dasd_max_devindex; @@ -629,8 +735,6 @@ void dasd_devmap_exit(void); struct dasd_device *dasd_create_device(struct ccw_device *); void dasd_delete_device(struct dasd_device *); -int dasd_get_uid(struct ccw_device *, struct dasd_uid *); -int dasd_set_uid(struct ccw_device *, struct dasd_uid *); int dasd_get_feature(struct ccw_device *, int); int dasd_set_feature(struct ccw_device *, int, int); @@ -641,6 +745,9 @@ struct dasd_device *dasd_device_from_cdev(struct ccw_device *); struct dasd_device *dasd_device_from_cdev_locked(struct ccw_device *); struct dasd_device *dasd_device_from_devindex(int); +void dasd_add_link_to_gendisk(struct gendisk *, struct dasd_device *); +struct dasd_device *dasd_device_from_gendisk(struct gendisk *); + int dasd_parse(void); int dasd_busid_known(const char *); @@ -696,6 +803,4 @@ static inline int dasd_eer_enabled(struct dasd_device *device) #define dasd_eer_enabled(d) (0) #endif /* CONFIG_DASD_ERR */ -#endif /* __KERNEL__ */ - #endif /* DASD_H */ diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c index 3479f8158a1..25a0f2f8b0b 100644 --- a/drivers/s390/block/dasd_ioctl.c +++ b/drivers/s390/block/dasd_ioctl.c @@ -1,11 +1,10 @@ /* - * File...........: linux/drivers/s390/block/dasd_ioctl.c * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com> * Horst Hummel <Horst.Hummel@de.ibm.com> * Carsten Otte <Cotte@de.ibm.com> * Martin Schwidefsky <schwidefsky@de.ibm.com> * Bugreports.to..: <Linux390@de.ibm.com> - * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2001 + * Copyright IBM Corp. 1999, 2001 * * i/o controls for the dasd driver. */ @@ -13,12 +12,14 @@ #define KMSG_COMPONENT "dasd" #include <linux/interrupt.h> +#include <linux/compat.h> #include <linux/major.h> #include <linux/fs.h> #include <linux/blkpg.h> -#include <linux/smp_lock.h> +#include <linux/slab.h> #include <asm/compat.h> #include <asm/ccwdev.h> +#include <asm/schid.h> #include <asm/cmb.h> #include <asm/uaccess.h> @@ -42,16 +43,22 @@ dasd_ioctl_api_version(void __user *argp) static int dasd_ioctl_enable(struct block_device *bdev) { - struct dasd_block *block = bdev->bd_disk->private_data; + struct dasd_device *base; if (!capable(CAP_SYS_ADMIN)) return -EACCES; - dasd_enable_device(block->base); + base = dasd_device_from_gendisk(bdev->bd_disk); + if (!base) + return -ENODEV; + + dasd_enable_device(base); /* Formatting the dasd device can change the capacity. */ mutex_lock(&bdev->bd_mutex); - i_size_write(bdev->bd_inode, (loff_t)get_capacity(block->gdp) << 9); + i_size_write(bdev->bd_inode, + (loff_t)get_capacity(base->block->gdp) << 9); mutex_unlock(&bdev->bd_mutex); + dasd_put_device(base); return 0; } @@ -62,11 +69,14 @@ dasd_ioctl_enable(struct block_device *bdev) static int dasd_ioctl_disable(struct block_device *bdev) { - struct dasd_block *block = bdev->bd_disk->private_data; + struct dasd_device *base; if (!capable(CAP_SYS_ADMIN)) return -EACCES; + base = dasd_device_from_gendisk(bdev->bd_disk); + if (!base) + return -ENODEV; /* * Man this is sick. We don't do a real disable but only downgrade * the device to DASD_STATE_BASIC. The reason is that dasdfmt uses @@ -75,7 +85,7 @@ dasd_ioctl_disable(struct block_device *bdev) * using the BIODASDFMT ioctl. 
Therefore the correct state for the * device is DASD_STATE_BASIC that allows to do basic i/o. */ - dasd_set_target_state(block->base, DASD_STATE_BASIC); + dasd_set_target_state(base, DASD_STATE_BASIC); /* * Set i_size to zero, since read, write, etc. check against this * value. @@ -83,6 +93,7 @@ dasd_ioctl_disable(struct block_device *bdev) mutex_lock(&bdev->bd_mutex); i_size_write(bdev->bd_inode, 0); mutex_unlock(&bdev->bd_mutex); + dasd_put_device(base); return 0; } @@ -130,14 +141,67 @@ static int dasd_ioctl_resume(struct dasd_block *block) } /* + * Abort all failfast I/O on a device. + */ +static int dasd_ioctl_abortio(struct dasd_block *block) +{ + unsigned long flags; + struct dasd_device *base; + struct dasd_ccw_req *cqr, *n; + + base = block->base; + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + + if (test_and_set_bit(DASD_FLAG_ABORTALL, &base->flags)) + return 0; + DBF_DEV_EVENT(DBF_NOTICE, base, "%s", "abortall flag set"); + + spin_lock_irqsave(&block->request_queue_lock, flags); + spin_lock(&block->queue_lock); + list_for_each_entry_safe(cqr, n, &block->ccw_queue, blocklist) { + if (test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) && + cqr->callback_data && + cqr->callback_data != DASD_SLEEPON_START_TAG && + cqr->callback_data != DASD_SLEEPON_END_TAG) { + spin_unlock(&block->queue_lock); + blk_abort_request(cqr->callback_data); + spin_lock(&block->queue_lock); + } + } + spin_unlock(&block->queue_lock); + spin_unlock_irqrestore(&block->request_queue_lock, flags); + + dasd_schedule_block_bh(block); + return 0; +} + +/* + * Allow I/O on a device + */ +static int dasd_ioctl_allowio(struct dasd_block *block) +{ + struct dasd_device *base; + + base = block->base; + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + + if (test_and_clear_bit(DASD_FLAG_ABORTALL, &base->flags)) + DBF_DEV_EVENT(DBF_NOTICE, base, "%s", "abortall flag unset"); + + return 0; +} + +/* * performs formatting of _device_ according to _fdata_ * Note: The discipline's format_function is assumed to deliver formatting - * commands to format a single unit of the device. In terms of the ECKD - * devices this means CCWs are generated to format a single track. + * commands to format multiple units of the device. In terms of the ECKD + * devices this means CCWs are generated to format multiple tracks. 
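+ * Note: dasd_format() below no longer loops calling format_device() once per unit; the discipline is handed the whole start_unit..stop_unit range from format_data_t in a single call (the old per-unit loop is removed further down).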
*/ -static int dasd_format(struct dasd_block *block, struct format_data_t *fdata) +static int +dasd_format(struct dasd_block *block, struct format_data_t *fdata) { - struct dasd_ccw_req *cqr; struct dasd_device *base; int rc; @@ -146,8 +210,8 @@ static int dasd_format(struct dasd_block *block, struct format_data_t *fdata) return -EPERM; if (base->state != DASD_STATE_BASIC) { - pr_warning("%s: The DASD cannot be formatted while it is " - "enabled\n", dev_name(&base->cdev->dev)); + pr_warn("%s: The DASD cannot be formatted while it is enabled\n", + dev_name(&base->cdev->dev)); return -EBUSY; } @@ -167,21 +231,10 @@ static int dasd_format(struct dasd_block *block, struct format_data_t *fdata) bdput(bdev); } - while (fdata->start_unit <= fdata->stop_unit) { - cqr = base->discipline->format_device(base, fdata); - if (IS_ERR(cqr)) - return PTR_ERR(cqr); - rc = dasd_sleep_on_interruptible(cqr); - dasd_sfree_request(cqr, cqr->memdev); - if (rc) { - if (rc != -ERESTARTSYS) - pr_err("%s: Formatting unit %d failed with " - "rc=%d\n", dev_name(&base->cdev->dev), - fdata->start_unit, rc); - return rc; - } - fdata->start_unit++; - } + rc = base->discipline->format_device(base, fdata); + if (rc) + return rc; + return 0; } @@ -191,26 +244,36 @@ static int dasd_format(struct dasd_block *block, struct format_data_t *fdata) static int dasd_ioctl_format(struct block_device *bdev, void __user *argp) { - struct dasd_block *block = bdev->bd_disk->private_data; + struct dasd_device *base; struct format_data_t fdata; + int rc; if (!capable(CAP_SYS_ADMIN)) return -EACCES; if (!argp) return -EINVAL; - - if (block->base->features & DASD_FEATURE_READONLY || - test_bit(DASD_FLAG_DEVICE_RO, &block->base->flags)) + base = dasd_device_from_gendisk(bdev->bd_disk); + if (!base) + return -ENODEV; + if (base->features & DASD_FEATURE_READONLY || + test_bit(DASD_FLAG_DEVICE_RO, &base->flags)) { + dasd_put_device(base); return -EROFS; - if (copy_from_user(&fdata, argp, sizeof(struct format_data_t))) + } + if (copy_from_user(&fdata, argp, sizeof(struct format_data_t))) { + dasd_put_device(base); return -EFAULT; + } if (bdev != bdev->bd_contains) { pr_warning("%s: The specified DASD is a partition and cannot " "be formatted\n", - dev_name(&block->base->cdev->dev)); + dev_name(&base->cdev->dev)); + dasd_put_device(base); return -EINVAL; } - return dasd_format(block, &fdata); + rc = dasd_format(base->block, &fdata); + dasd_put_device(base); + return rc; } #ifdef CONFIG_DASD_PROFILE @@ -219,7 +282,7 @@ dasd_ioctl_format(struct block_device *bdev, void __user *argp) */ static int dasd_ioctl_reset_profile(struct dasd_block *block) { - memset(&block->profile, 0, sizeof(struct dasd_profile_info_t)); + dasd_profile_reset(&block->profile); return 0; } @@ -228,22 +291,56 @@ static int dasd_ioctl_reset_profile(struct dasd_block *block) */ static int dasd_ioctl_read_profile(struct dasd_block *block, void __user *argp) { - if (dasd_profile_level == DASD_PROFILE_OFF) - return -EIO; - if (copy_to_user(argp, &block->profile, - sizeof(struct dasd_profile_info_t))) - return -EFAULT; - return 0; + struct dasd_profile_info_t *data; + int rc = 0; + + data = kmalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + spin_lock_bh(&block->profile.lock); + if (block->profile.data) { + data->dasd_io_reqs = block->profile.data->dasd_io_reqs; + data->dasd_io_sects = block->profile.data->dasd_io_sects; + memcpy(data->dasd_io_secs, block->profile.data->dasd_io_secs, + sizeof(data->dasd_io_secs)); + memcpy(data->dasd_io_times, 
block->profile.data->dasd_io_times, + sizeof(data->dasd_io_times)); + memcpy(data->dasd_io_timps, block->profile.data->dasd_io_timps, + sizeof(data->dasd_io_timps)); + memcpy(data->dasd_io_time1, block->profile.data->dasd_io_time1, + sizeof(data->dasd_io_time1)); + memcpy(data->dasd_io_time2, block->profile.data->dasd_io_time2, + sizeof(data->dasd_io_time2)); + memcpy(data->dasd_io_time2ps, + block->profile.data->dasd_io_time2ps, + sizeof(data->dasd_io_time2ps)); + memcpy(data->dasd_io_time3, block->profile.data->dasd_io_time3, + sizeof(data->dasd_io_time3)); + memcpy(data->dasd_io_nr_req, + block->profile.data->dasd_io_nr_req, + sizeof(data->dasd_io_nr_req)); + spin_unlock_bh(&block->profile.lock); + } else { + spin_unlock_bh(&block->profile.lock); + rc = -EIO; + goto out; + } + if (copy_to_user(argp, data, sizeof(*data))) + rc = -EFAULT; +out: + kfree(data); + return rc; } #else static int dasd_ioctl_reset_profile(struct dasd_block *block) { - return -ENOSYS; + return -ENOTTY; } static int dasd_ioctl_read_profile(struct dasd_block *block, void __user *argp) { - return -ENOSYS; + return -ENOTTY; } #endif @@ -254,11 +351,12 @@ static int dasd_ioctl_information(struct dasd_block *block, unsigned int cmd, void __user *argp) { struct dasd_information2_t *dasd_info; - unsigned long flags; - int rc; + struct subchannel_id sch_id; + struct ccw_dev_id dev_id; struct dasd_device *base; struct ccw_device *cdev; - struct ccw_dev_id dev_id; + unsigned long flags; + int rc; base = block->base; if (!base->discipline || !base->discipline->fill_info) @@ -276,9 +374,10 @@ static int dasd_ioctl_information(struct dasd_block *block, cdev = base->cdev; ccw_device_get_id(cdev, &dev_id); + ccw_device_get_schid(cdev, &sch_id); dasd_info->devno = dev_id.devno; - dasd_info->schid = _ccw_device_get_subchannel_number(base->cdev); + dasd_info->schid = sch_id.sch_no; dasd_info->cu_type = cdev->id.cu_type; dasd_info->cu_model = cdev->id.cu_model; dasd_info->dev_type = cdev->id.dev_type; @@ -340,8 +439,8 @@ static int dasd_ioctl_information(struct dasd_block *block, static int dasd_ioctl_set_ro(struct block_device *bdev, void __user *argp) { - struct dasd_block *block = bdev->bd_disk->private_data; - int intval; + struct dasd_device *base; + int intval, rc; if (!capable(CAP_SYS_ADMIN)) return -EACCES; @@ -350,10 +449,17 @@ dasd_ioctl_set_ro(struct block_device *bdev, void __user *argp) return -EINVAL; if (get_user(intval, (int __user *)argp)) return -EFAULT; - if (!intval && test_bit(DASD_FLAG_DEVICE_RO, &block->base->flags)) + base = dasd_device_from_gendisk(bdev->bd_disk); + if (!base) + return -ENODEV; + if (!intval && test_bit(DASD_FLAG_DEVICE_RO, &base->flags)) { + dasd_put_device(base); return -EROFS; + } set_disk_ro(bdev->bd_disk, intval); - return dasd_set_feature(block->base->cdev, DASD_FEATURE_READONLY, intval); + rc = dasd_set_feature(base->cdev, DASD_FEATURE_READONLY, intval); + dasd_put_device(base); + return rc; } static int dasd_ioctl_readall_cmb(struct dasd_block *block, unsigned int cmd, @@ -369,74 +475,84 @@ static int dasd_ioctl_readall_cmb(struct dasd_block *block, unsigned int cmd, return ret; } -static int -dasd_do_ioctl(struct block_device *bdev, fmode_t mode, - unsigned int cmd, unsigned long arg) +int dasd_ioctl(struct block_device *bdev, fmode_t mode, + unsigned int cmd, unsigned long arg) { - struct dasd_block *block = bdev->bd_disk->private_data; + struct dasd_block *block; + struct dasd_device *base; void __user *argp; + int rc; if (is_compat_task()) argp = compat_ptr(arg); else argp = 
(void __user *)arg; - if (!block) - return -ENODEV; - if ((_IOC_DIR(cmd) != _IOC_NONE) && !arg) { PRINT_DEBUG("empty data ptr"); return -EINVAL; } + base = dasd_device_from_gendisk(bdev->bd_disk); + if (!base) + return -ENODEV; + block = base->block; + rc = 0; switch (cmd) { case BIODASDDISABLE: - return dasd_ioctl_disable(bdev); + rc = dasd_ioctl_disable(bdev); + break; case BIODASDENABLE: - return dasd_ioctl_enable(bdev); + rc = dasd_ioctl_enable(bdev); + break; case BIODASDQUIESCE: - return dasd_ioctl_quiesce(block); + rc = dasd_ioctl_quiesce(block); + break; case BIODASDRESUME: - return dasd_ioctl_resume(block); + rc = dasd_ioctl_resume(block); + break; + case BIODASDABORTIO: + rc = dasd_ioctl_abortio(block); + break; + case BIODASDALLOWIO: + rc = dasd_ioctl_allowio(block); + break; case BIODASDFMT: - return dasd_ioctl_format(bdev, argp); + rc = dasd_ioctl_format(bdev, argp); + break; case BIODASDINFO: - return dasd_ioctl_information(block, cmd, argp); + rc = dasd_ioctl_information(block, cmd, argp); + break; case BIODASDINFO2: - return dasd_ioctl_information(block, cmd, argp); + rc = dasd_ioctl_information(block, cmd, argp); + break; case BIODASDPRRD: - return dasd_ioctl_read_profile(block, argp); + rc = dasd_ioctl_read_profile(block, argp); + break; case BIODASDPRRST: - return dasd_ioctl_reset_profile(block); + rc = dasd_ioctl_reset_profile(block); + break; case BLKROSET: - return dasd_ioctl_set_ro(bdev, argp); + rc = dasd_ioctl_set_ro(bdev, argp); + break; case DASDAPIVER: - return dasd_ioctl_api_version(argp); + rc = dasd_ioctl_api_version(argp); + break; case BIODASDCMFENABLE: - return enable_cmf(block->base->cdev); + rc = enable_cmf(base->cdev); + break; case BIODASDCMFDISABLE: - return disable_cmf(block->base->cdev); + rc = disable_cmf(base->cdev); + break; case BIODASDREADALLCMB: - return dasd_ioctl_readall_cmb(block, cmd, argp); + rc = dasd_ioctl_readall_cmb(block, cmd, argp); + break; default: /* if the discipline has an ioctl method try it. */ - if (block->base->discipline->ioctl) { - int rval = block->base->discipline->ioctl(block, cmd, argp); - if (rval != -ENOIOCTLCMD) - return rval; - } - - return -EINVAL; + rc = -ENOTTY; + if (base->discipline->ioctl) + rc = base->discipline->ioctl(block, cmd, argp); } -} - -int dasd_ioctl(struct block_device *bdev, fmode_t mode, - unsigned int cmd, unsigned long arg) -{ - int rc; - - lock_kernel(); - rc = dasd_do_ioctl(bdev, mode, cmd, arg); - unlock_kernel(); + dasd_put_device(base); return rc; } diff --git a/drivers/s390/block/dasd_proc.c b/drivers/s390/block/dasd_proc.c index f13a0bdd148..78ac905a5b7 100644 --- a/drivers/s390/block/dasd_proc.c +++ b/drivers/s390/block/dasd_proc.c @@ -1,11 +1,10 @@ /* - * File...........: linux/drivers/s390/block/dasd_proc.c * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com> * Horst Hummel <Horst.Hummel@de.ibm.com> * Carsten Otte <Cotte@de.ibm.com> * Martin Schwidefsky <schwidefsky@de.ibm.com> * Bugreports.to..: <Linux390@de.ibm.com> - * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2002 + * Coypright IBM Corp. 1999, 2002 * * /proc interface for the dasd driver. 
* @@ -14,6 +13,7 @@ #define KMSG_COMPONENT "dasd" #include <linux/ctype.h> +#include <linux/slab.h> #include <linux/string.h> #include <linux/seq_file.h> #include <linux/vmalloc.h> @@ -31,28 +31,6 @@ static struct proc_dir_entry *dasd_proc_root_entry = NULL; static struct proc_dir_entry *dasd_devices_entry = NULL; static struct proc_dir_entry *dasd_statistics_entry = NULL; -#ifdef CONFIG_DASD_PROFILE -static char * -dasd_get_user_string(const char __user *user_buf, size_t user_len) -{ - char *buffer; - - buffer = kmalloc(user_len + 1, GFP_KERNEL); - if (buffer == NULL) - return ERR_PTR(-ENOMEM); - if (copy_from_user(buffer, user_buf, user_len) != 0) { - kfree(buffer); - return ERR_PTR(-EFAULT); - } - /* got the string, now strip linefeed. */ - if (buffer[user_len - 1] == '\n') - buffer[user_len - 1] = 0; - else - buffer[user_len] = 0; - return buffer; -} -#endif /* CONFIG_DASD_PROFILE */ - static int dasd_devices_show(struct seq_file *m, void *v) { @@ -166,6 +144,55 @@ static const struct file_operations dasd_devices_file_ops = { }; #ifdef CONFIG_DASD_PROFILE +static int dasd_stats_all_block_on(void) +{ + int i, rc; + struct dasd_device *device; + + rc = 0; + for (i = 0; i < dasd_max_devindex; ++i) { + device = dasd_device_from_devindex(i); + if (IS_ERR(device)) + continue; + if (device->block) + rc = dasd_profile_on(&device->block->profile); + dasd_put_device(device); + if (rc) + return rc; + } + return 0; +} + +static void dasd_stats_all_block_off(void) +{ + int i; + struct dasd_device *device; + + for (i = 0; i < dasd_max_devindex; ++i) { + device = dasd_device_from_devindex(i); + if (IS_ERR(device)) + continue; + if (device->block) + dasd_profile_off(&device->block->profile); + dasd_put_device(device); + } +} + +static void dasd_stats_all_block_reset(void) +{ + int i; + struct dasd_device *device; + + for (i = 0; i < dasd_max_devindex; ++i) { + device = dasd_device_from_devindex(i); + if (IS_ERR(device)) + continue; + if (device->block) + dasd_profile_reset(&device->block->profile); + dasd_put_device(device); + } +} + static void dasd_statistics_array(struct seq_file *m, unsigned int *array, int factor) { int i; @@ -182,18 +209,18 @@ static void dasd_statistics_array(struct seq_file *m, unsigned int *array, int f static int dasd_stats_proc_show(struct seq_file *m, void *v) { #ifdef CONFIG_DASD_PROFILE - struct dasd_profile_info_t *prof; + struct dasd_profile_info *prof; int factor; /* check for active profiling */ - if (dasd_profile_level == DASD_PROFILE_OFF) { + if (!dasd_global_profile_level) { seq_printf(m, "Statistics are off - they might be " "switched on using 'echo set on > " "/proc/dasd/statistics'\n"); return 0; } + prof = &dasd_global_profile_data; - prof = &dasd_global_profile; /* prevent counter 'overflow' on output */ for (factor = 1; (prof->dasd_io_reqs / factor) > 9999999; factor *= 10); @@ -244,13 +271,13 @@ static ssize_t dasd_stats_proc_write(struct file *file, { #ifdef CONFIG_DASD_PROFILE char *buffer, *str; + int rc; if (user_len > 65536) user_len = 65536; buffer = dasd_get_user_string(user_buf, user_len); if (IS_ERR(buffer)) return PTR_ERR(buffer); - DBF_EVENT(DBF_DEBUG, "/proc/dasd/statictics: '%s'\n", buffer); /* check for valid verbs */ str = skip_spaces(buffer); @@ -259,32 +286,40 @@ static ssize_t dasd_stats_proc_write(struct file *file, str = skip_spaces(str + 4); if (strcmp(str, "on") == 0) { /* switch on statistics profiling */ - dasd_profile_level = DASD_PROFILE_ON; + rc = dasd_stats_all_block_on(); + if (rc) { + dasd_stats_all_block_off(); + goto 
out_error; + } + dasd_global_profile_reset(); + dasd_global_profile_level = DASD_PROFILE_ON; pr_info("The statistics feature has been switched " "on\n"); } else if (strcmp(str, "off") == 0) { /* switch off and reset statistics profiling */ - memset(&dasd_global_profile, - 0, sizeof (struct dasd_profile_info_t)); - dasd_profile_level = DASD_PROFILE_OFF; + dasd_global_profile_level = DASD_PROFILE_OFF; + dasd_global_profile_reset(); + dasd_stats_all_block_off(); pr_info("The statistics feature has been switched " "off\n"); } else - goto out_error; + goto out_parse_error; } else if (strncmp(str, "reset", 5) == 0) { /* reset the statistics */ - memset(&dasd_global_profile, 0, - sizeof (struct dasd_profile_info_t)); + dasd_global_profile_reset(); + dasd_stats_all_block_reset(); pr_info("The statistics have been reset\n"); } else - goto out_error; - kfree(buffer); + goto out_parse_error; + vfree(buffer); return user_len; -out_error: +out_parse_error: + rc = -EINVAL; pr_warning("%s is not a supported value for /proc/dasd/statistics\n", str); - kfree(buffer); - return -EINVAL; +out_error: + vfree(buffer); + return rc; #else pr_warning("/proc/dasd/statistics: is not activated in this kernel\n"); return user_len; diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c index 9b43ae94beb..0f471750327 100644 --- a/drivers/s390/block/dcssblk.c +++ b/drivers/s390/block/dcssblk.c @@ -26,8 +26,8 @@ #define DCSS_BUS_ID_SIZE 20 static int dcssblk_open(struct block_device *bdev, fmode_t mode); -static int dcssblk_release(struct gendisk *disk, fmode_t mode); -static int dcssblk_make_request(struct request_queue *q, struct bio *bio); +static void dcssblk_release(struct gendisk *disk, fmode_t mode); +static void dcssblk_make_request(struct request_queue *q, struct bio *bio); static int dcssblk_direct_access(struct block_device *bdev, sector_t secnum, void **kaddr, unsigned long *pfn); @@ -69,23 +69,9 @@ static ssize_t dcssblk_add_store(struct device * dev, struct device_attribute *a size_t count); static ssize_t dcssblk_remove_store(struct device * dev, struct device_attribute *attr, const char * buf, size_t count); -static ssize_t dcssblk_save_store(struct device * dev, struct device_attribute *attr, const char * buf, - size_t count); -static ssize_t dcssblk_save_show(struct device *dev, struct device_attribute *attr, char *buf); -static ssize_t dcssblk_shared_store(struct device * dev, struct device_attribute *attr, const char * buf, - size_t count); -static ssize_t dcssblk_shared_show(struct device *dev, struct device_attribute *attr, char *buf); -static ssize_t dcssblk_seglist_show(struct device *dev, - struct device_attribute *attr, - char *buf); static DEVICE_ATTR(add, S_IWUSR, NULL, dcssblk_add_store); static DEVICE_ATTR(remove, S_IWUSR, NULL, dcssblk_remove_store); -static DEVICE_ATTR(save, S_IWUSR | S_IRUSR, dcssblk_save_show, - dcssblk_save_store); -static DEVICE_ATTR(shared, S_IWUSR | S_IRUSR, dcssblk_shared_show, - dcssblk_shared_store); -static DEVICE_ATTR(seglist, S_IRUSR, dcssblk_seglist_show, NULL); static struct device *dcssblk_root_dev; @@ -318,12 +304,6 @@ dcssblk_load_segment(char *name, struct segment_info **seg_info) return rc; } -static void dcssblk_unregister_callback(struct device *dev) -{ - device_unregister(dev); - put_device(dev); -} - /* * device attribute for switching shared/nonshared (exclusive) * operation (show + store) @@ -411,11 +391,19 @@ removeseg: blk_cleanup_queue(dev_info->dcssblk_queue); dev_info->gd->queue = NULL; put_disk(dev_info->gd); - rc = 
device_schedule_callback(dev, dcssblk_unregister_callback); + up_write(&dcssblk_devices_sem); + + if (device_remove_file_self(dev, attr)) { + device_unregister(dev); + put_device(dev); + } + return rc; out: up_write(&dcssblk_devices_sem); return rc; } +static DEVICE_ATTR(shared, S_IWUSR | S_IRUSR, dcssblk_shared_show, + dcssblk_shared_store); /* * device attribute for save operation on current copy @@ -476,6 +464,8 @@ dcssblk_save_store(struct device *dev, struct device_attribute *attr, const char up_write(&dcssblk_devices_sem); return count; } +static DEVICE_ATTR(save, S_IWUSR | S_IRUSR, dcssblk_save_show, + dcssblk_save_store); /* * device attribute for showing all segments in a device @@ -502,6 +492,21 @@ dcssblk_seglist_show(struct device *dev, struct device_attribute *attr, up_read(&dcssblk_devices_sem); return i; } +static DEVICE_ATTR(seglist, S_IRUSR, dcssblk_seglist_show, NULL); + +static struct attribute *dcssblk_dev_attrs[] = { + &dev_attr_shared.attr, + &dev_attr_save.attr, + &dev_attr_seglist.attr, + NULL, +}; +static struct attribute_group dcssblk_dev_attr_group = { + .attrs = dcssblk_dev_attrs, +}; +static const struct attribute_group *dcssblk_dev_attr_groups[] = { + &dcssblk_dev_attr_group, + NULL, +}; /* * device attribute for adding devices @@ -588,8 +593,9 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char dev_info->start = dcssblk_find_lowest_addr(dev_info); dev_info->end = dcssblk_find_highest_addr(dev_info); - dev_set_name(&dev_info->dev, dev_info->segment_name); + dev_set_name(&dev_info->dev, "%s", dev_info->segment_name); dev_info->dev.release = dcssblk_release_segment; + dev_info->dev.groups = dcssblk_dev_attr_groups; INIT_LIST_HEAD(&dev_info->lh); dev_info->gd = alloc_disk(DCSSBLK_MINORS_PER_DISK); if (dev_info->gd == NULL) { @@ -637,21 +643,10 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char * register the device */ rc = device_register(&dev_info->dev); - if (rc) { - module_put(THIS_MODULE); - goto dev_list_del; - } - get_device(&dev_info->dev); - rc = device_create_file(&dev_info->dev, &dev_attr_shared); - if (rc) - goto unregister_dev; - rc = device_create_file(&dev_info->dev, &dev_attr_save); - if (rc) - goto unregister_dev; - rc = device_create_file(&dev_info->dev, &dev_attr_seglist); if (rc) - goto unregister_dev; + goto put_dev; + get_device(&dev_info->dev); add_disk(dev_info->gd); switch (dev_info->segment_type) { @@ -668,12 +663,11 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char rc = count; goto out; -unregister_dev: +put_dev: list_del(&dev_info->lh); blk_cleanup_queue(dev_info->dcssblk_queue); dev_info->gd->queue = NULL; put_disk(dev_info->gd); - device_unregister(&dev_info->dev); list_for_each_entry(seg_info, &dev_info->seg_list, lh) { segment_unload(seg_info->segment_name); } @@ -787,16 +781,15 @@ out: return rc; } -static int +static void dcssblk_release(struct gendisk *disk, fmode_t mode) { struct dcssblk_dev_info *dev_info = disk->private_data; struct segment_info *entry; - int rc; if (!dev_info) { - rc = -ENODEV; - goto out; + WARN_ON(1); + return; } down_write(&dcssblk_devices_sem); if (atomic_dec_and_test(&dev_info->use_count) @@ -809,31 +802,28 @@ dcssblk_release(struct gendisk *disk, fmode_t mode) dev_info->save_pending = 0; } up_write(&dcssblk_devices_sem); - rc = 0; -out: - return rc; } -static int +static void dcssblk_make_request(struct request_queue *q, struct bio *bio) { struct dcssblk_dev_info *dev_info; - struct bio_vec *bvec; + struct 
bio_vec bvec; + struct bvec_iter iter; unsigned long index; unsigned long page_addr; unsigned long source_addr; unsigned long bytes_done; - int i; bytes_done = 0; dev_info = bio->bi_bdev->bd_disk->private_data; if (dev_info == NULL) goto fail; - if ((bio->bi_sector & 7) != 0 || (bio->bi_size & 4095) != 0) + if ((bio->bi_iter.bi_sector & 7) != 0 || + (bio->bi_iter.bi_size & 4095) != 0) /* Request is not page-aligned. */ goto fail; - if (((bio->bi_size >> 9) + bio->bi_sector) - > get_capacity(bio->bi_bdev->bd_disk)) { + if (bio_end_sector(bio) > get_capacity(bio->bi_bdev->bd_disk)) { /* Request beyond end of DCSS segment. */ goto fail; } @@ -853,28 +843,27 @@ dcssblk_make_request(struct request_queue *q, struct bio *bio) } } - index = (bio->bi_sector >> 3); - bio_for_each_segment(bvec, bio, i) { + index = (bio->bi_iter.bi_sector >> 3); + bio_for_each_segment(bvec, bio, iter) { page_addr = (unsigned long) - page_address(bvec->bv_page) + bvec->bv_offset; + page_address(bvec.bv_page) + bvec.bv_offset; source_addr = dev_info->start + (index<<12) + bytes_done; - if (unlikely((page_addr & 4095) != 0) || (bvec->bv_len & 4095) != 0) + if (unlikely((page_addr & 4095) != 0) || (bvec.bv_len & 4095) != 0) // More paranoia. goto fail; if (bio_data_dir(bio) == READ) { memcpy((void*)page_addr, (void*)source_addr, - bvec->bv_len); + bvec.bv_len); } else { memcpy((void*)source_addr, (void*)page_addr, - bvec->bv_len); + bvec.bv_len); } - bytes_done += bvec->bv_len; + bytes_done += bvec.bv_len; } bio_endio(bio, 0); - return 0; + return; fail: bio_io_error(bio); - return 0; } static int diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c new file mode 100644 index 00000000000..76bed1743db --- /dev/null +++ b/drivers/s390/block/scm_blk.c @@ -0,0 +1,493 @@ +/* + * Block driver for s390 storage class memory. + * + * Copyright IBM Corp. 
2012 + * Author(s): Sebastian Ott <sebott@linux.vnet.ibm.com> + */ + +#define KMSG_COMPONENT "scm_block" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + +#include <linux/interrupt.h> +#include <linux/spinlock.h> +#include <linux/module.h> +#include <linux/blkdev.h> +#include <linux/genhd.h> +#include <linux/slab.h> +#include <linux/list.h> +#include <asm/eadm.h> +#include "scm_blk.h" + +debug_info_t *scm_debug; +static int scm_major; +static DEFINE_SPINLOCK(list_lock); +static LIST_HEAD(inactive_requests); +static unsigned int nr_requests = 64; +static atomic_t nr_devices = ATOMIC_INIT(0); +module_param(nr_requests, uint, S_IRUGO); +MODULE_PARM_DESC(nr_requests, "Number of parallel requests."); + +MODULE_DESCRIPTION("Block driver for s390 storage class memory."); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("scm:scmdev*"); + +static void __scm_free_rq(struct scm_request *scmrq) +{ + struct aob_rq_header *aobrq = to_aobrq(scmrq); + + free_page((unsigned long) scmrq->aob); + free_page((unsigned long) scmrq->aidaw); + __scm_free_rq_cluster(scmrq); + kfree(aobrq); +} + +static void scm_free_rqs(void) +{ + struct list_head *iter, *safe; + struct scm_request *scmrq; + + spin_lock_irq(&list_lock); + list_for_each_safe(iter, safe, &inactive_requests) { + scmrq = list_entry(iter, struct scm_request, list); + list_del(&scmrq->list); + __scm_free_rq(scmrq); + } + spin_unlock_irq(&list_lock); +} + +static int __scm_alloc_rq(void) +{ + struct aob_rq_header *aobrq; + struct scm_request *scmrq; + + aobrq = kzalloc(sizeof(*aobrq) + sizeof(*scmrq), GFP_KERNEL); + if (!aobrq) + return -ENOMEM; + + scmrq = (void *) aobrq->data; + scmrq->aidaw = (void *) get_zeroed_page(GFP_DMA); + scmrq->aob = (void *) get_zeroed_page(GFP_DMA); + if (!scmrq->aob || !scmrq->aidaw) { + __scm_free_rq(scmrq); + return -ENOMEM; + } + + if (__scm_alloc_rq_cluster(scmrq)) { + __scm_free_rq(scmrq); + return -ENOMEM; + } + + INIT_LIST_HEAD(&scmrq->list); + spin_lock_irq(&list_lock); + list_add(&scmrq->list, &inactive_requests); + spin_unlock_irq(&list_lock); + + return 0; +} + +static int scm_alloc_rqs(unsigned int nrqs) +{ + int ret = 0; + + while (nrqs-- && !ret) + ret = __scm_alloc_rq(); + + return ret; +} + +static struct scm_request *scm_request_fetch(void) +{ + struct scm_request *scmrq = NULL; + + spin_lock(&list_lock); + if (list_empty(&inactive_requests)) + goto out; + scmrq = list_first_entry(&inactive_requests, struct scm_request, list); + list_del(&scmrq->list); +out: + spin_unlock(&list_lock); + return scmrq; +} + +static void scm_request_done(struct scm_request *scmrq) +{ + unsigned long flags; + + spin_lock_irqsave(&list_lock, flags); + list_add(&scmrq->list, &inactive_requests); + spin_unlock_irqrestore(&list_lock, flags); +} + +static bool scm_permit_request(struct scm_blk_dev *bdev, struct request *req) +{ + return rq_data_dir(req) != WRITE || bdev->state != SCM_WR_PROHIBIT; +} + +static void scm_request_prepare(struct scm_request *scmrq) +{ + struct scm_blk_dev *bdev = scmrq->bdev; + struct scm_device *scmdev = bdev->gendisk->private_data; + struct aidaw *aidaw = scmrq->aidaw; + struct msb *msb = &scmrq->aob->msb[0]; + struct req_iterator iter; + struct bio_vec bv; + + msb->bs = MSB_BS_4K; + scmrq->aob->request.msb_count = 1; + msb->scm_addr = scmdev->address + + ((u64) blk_rq_pos(scmrq->request) << 9); + msb->oc = (rq_data_dir(scmrq->request) == READ) ? 
+ MSB_OC_READ : MSB_OC_WRITE; + msb->flags |= MSB_FLAG_IDA; + msb->data_addr = (u64) aidaw; + + rq_for_each_segment(bv, scmrq->request, iter) { + WARN_ON(bv.bv_offset); + msb->blk_count += bv.bv_len >> 12; + aidaw->data_addr = (u64) page_address(bv.bv_page); + aidaw++; + } +} + +static inline void scm_request_init(struct scm_blk_dev *bdev, + struct scm_request *scmrq, + struct request *req) +{ + struct aob_rq_header *aobrq = to_aobrq(scmrq); + struct aob *aob = scmrq->aob; + + memset(aob, 0, sizeof(*aob)); + memset(scmrq->aidaw, 0, PAGE_SIZE); + aobrq->scmdev = bdev->scmdev; + aob->request.cmd_code = ARQB_CMD_MOVE; + aob->request.data = (u64) aobrq; + scmrq->request = req; + scmrq->bdev = bdev; + scmrq->retries = 4; + scmrq->error = 0; + scm_request_cluster_init(scmrq); +} + +static void scm_ensure_queue_restart(struct scm_blk_dev *bdev) +{ + if (atomic_read(&bdev->queued_reqs)) { + /* Queue restart is triggered by the next interrupt. */ + return; + } + blk_delay_queue(bdev->rq, SCM_QUEUE_DELAY); +} + +void scm_request_requeue(struct scm_request *scmrq) +{ + struct scm_blk_dev *bdev = scmrq->bdev; + + scm_release_cluster(scmrq); + blk_requeue_request(bdev->rq, scmrq->request); + atomic_dec(&bdev->queued_reqs); + scm_request_done(scmrq); + scm_ensure_queue_restart(bdev); +} + +void scm_request_finish(struct scm_request *scmrq) +{ + struct scm_blk_dev *bdev = scmrq->bdev; + + scm_release_cluster(scmrq); + blk_end_request_all(scmrq->request, scmrq->error); + atomic_dec(&bdev->queued_reqs); + scm_request_done(scmrq); +} + +static void scm_blk_request(struct request_queue *rq) +{ + struct scm_device *scmdev = rq->queuedata; + struct scm_blk_dev *bdev = dev_get_drvdata(&scmdev->dev); + struct scm_request *scmrq; + struct request *req; + int ret; + + while ((req = blk_peek_request(rq))) { + if (req->cmd_type != REQ_TYPE_FS) { + blk_start_request(req); + blk_dump_rq_flags(req, KMSG_COMPONENT " bad request"); + blk_end_request_all(req, -EIO); + continue; + } + + if (!scm_permit_request(bdev, req)) { + scm_ensure_queue_restart(bdev); + return; + } + scmrq = scm_request_fetch(); + if (!scmrq) { + SCM_LOG(5, "no request"); + scm_ensure_queue_restart(bdev); + return; + } + scm_request_init(bdev, scmrq, req); + if (!scm_reserve_cluster(scmrq)) { + SCM_LOG(5, "cluster busy"); + scm_request_done(scmrq); + return; + } + if (scm_need_cluster_request(scmrq)) { + atomic_inc(&bdev->queued_reqs); + blk_start_request(req); + scm_initiate_cluster_request(scmrq); + return; + } + scm_request_prepare(scmrq); + atomic_inc(&bdev->queued_reqs); + blk_start_request(req); + + ret = eadm_start_aob(scmrq->aob); + if (ret) { + SCM_LOG(5, "no subchannel"); + scm_request_requeue(scmrq); + return; + } + } +} + +static void __scmrq_log_error(struct scm_request *scmrq) +{ + struct aob *aob = scmrq->aob; + + if (scmrq->error == -ETIMEDOUT) + SCM_LOG(1, "Request timeout"); + else { + SCM_LOG(1, "Request error"); + SCM_LOG_HEX(1, &aob->response, sizeof(aob->response)); + } + if (scmrq->retries) + SCM_LOG(1, "Retry request"); + else + pr_err("An I/O operation to SCM failed with rc=%d\n", + scmrq->error); +} + +void scm_blk_irq(struct scm_device *scmdev, void *data, int error) +{ + struct scm_request *scmrq = data; + struct scm_blk_dev *bdev = scmrq->bdev; + + scmrq->error = error; + if (error) + __scmrq_log_error(scmrq); + + spin_lock(&bdev->lock); + list_add_tail(&scmrq->list, &bdev->finished_requests); + spin_unlock(&bdev->lock); + tasklet_hi_schedule(&bdev->tasklet); +} + +static void scm_blk_handle_error(struct scm_request 
*scmrq) +{ + struct scm_blk_dev *bdev = scmrq->bdev; + unsigned long flags; + + if (scmrq->error != -EIO) + goto restart; + + /* For -EIO the response block is valid. */ + switch (scmrq->aob->response.eqc) { + case EQC_WR_PROHIBIT: + spin_lock_irqsave(&bdev->lock, flags); + if (bdev->state != SCM_WR_PROHIBIT) + pr_info("%lx: Write access to the SCM increment is suspended\n", + (unsigned long) bdev->scmdev->address); + bdev->state = SCM_WR_PROHIBIT; + spin_unlock_irqrestore(&bdev->lock, flags); + goto requeue; + default: + break; + } + +restart: + if (!eadm_start_aob(scmrq->aob)) + return; + +requeue: + spin_lock_irqsave(&bdev->rq_lock, flags); + scm_request_requeue(scmrq); + spin_unlock_irqrestore(&bdev->rq_lock, flags); +} + +static void scm_blk_tasklet(struct scm_blk_dev *bdev) +{ + struct scm_request *scmrq; + unsigned long flags; + + spin_lock_irqsave(&bdev->lock, flags); + while (!list_empty(&bdev->finished_requests)) { + scmrq = list_first_entry(&bdev->finished_requests, + struct scm_request, list); + list_del(&scmrq->list); + spin_unlock_irqrestore(&bdev->lock, flags); + + if (scmrq->error && scmrq->retries-- > 0) { + scm_blk_handle_error(scmrq); + + /* Request restarted or requeued, handle next. */ + spin_lock_irqsave(&bdev->lock, flags); + continue; + } + + if (scm_test_cluster_request(scmrq)) { + scm_cluster_request_irq(scmrq); + spin_lock_irqsave(&bdev->lock, flags); + continue; + } + + scm_request_finish(scmrq); + spin_lock_irqsave(&bdev->lock, flags); + } + spin_unlock_irqrestore(&bdev->lock, flags); + /* Look out for more requests. */ + blk_run_queue(bdev->rq); +} + +static const struct block_device_operations scm_blk_devops = { + .owner = THIS_MODULE, +}; + +int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev) +{ + struct request_queue *rq; + int len, ret = -ENOMEM; + unsigned int devindex, nr_max_blk; + + devindex = atomic_inc_return(&nr_devices) - 1; + /* scma..scmz + scmaa..scmzz */ + if (devindex > 701) { + ret = -ENODEV; + goto out; + } + + bdev->scmdev = scmdev; + bdev->state = SCM_OPER; + spin_lock_init(&bdev->rq_lock); + spin_lock_init(&bdev->lock); + INIT_LIST_HEAD(&bdev->finished_requests); + atomic_set(&bdev->queued_reqs, 0); + tasklet_init(&bdev->tasklet, + (void (*)(unsigned long)) scm_blk_tasklet, + (unsigned long) bdev); + + rq = blk_init_queue(scm_blk_request, &bdev->rq_lock); + if (!rq) + goto out; + + bdev->rq = rq; + nr_max_blk = min(scmdev->nr_max_block, + (unsigned int) (PAGE_SIZE / sizeof(struct aidaw))); + + blk_queue_logical_block_size(rq, 1 << 12); + blk_queue_max_hw_sectors(rq, nr_max_blk << 3); /* 8 * 512 = blk_size */ + blk_queue_max_segments(rq, nr_max_blk); + queue_flag_set_unlocked(QUEUE_FLAG_NONROT, rq); + scm_blk_dev_cluster_setup(bdev); + + bdev->gendisk = alloc_disk(SCM_NR_PARTS); + if (!bdev->gendisk) + goto out_queue; + + rq->queuedata = scmdev; + bdev->gendisk->driverfs_dev = &scmdev->dev; + bdev->gendisk->private_data = scmdev; + bdev->gendisk->fops = &scm_blk_devops; + bdev->gendisk->queue = rq; + bdev->gendisk->major = scm_major; + bdev->gendisk->first_minor = devindex * SCM_NR_PARTS; + + len = snprintf(bdev->gendisk->disk_name, DISK_NAME_LEN, "scm"); + if (devindex > 25) { + len += snprintf(bdev->gendisk->disk_name + len, + DISK_NAME_LEN - len, "%c", + 'a' + (devindex / 26) - 1); + devindex = devindex % 26; + } + snprintf(bdev->gendisk->disk_name + len, DISK_NAME_LEN - len, "%c", + 'a' + devindex); + + /* 512 byte sectors */ + set_capacity(bdev->gendisk, scmdev->size >> 9); + add_disk(bdev->gendisk); + return 0; 
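+ /* Naming note: the computation above maps devindex 0..25 to "scma".."scmz" and 26..701 to "scmaa".."scmzz", i.e. 26 + 26 * 26 = 702 possible names, which is why devindex > 701 fails with -ENODEV at the top of this function. */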
+ +out_queue: + blk_cleanup_queue(rq); +out: + atomic_dec(&nr_devices); + return ret; +} + +void scm_blk_dev_cleanup(struct scm_blk_dev *bdev) +{ + tasklet_kill(&bdev->tasklet); + del_gendisk(bdev->gendisk); + blk_cleanup_queue(bdev->gendisk->queue); + put_disk(bdev->gendisk); +} + +void scm_blk_set_available(struct scm_blk_dev *bdev) +{ + unsigned long flags; + + spin_lock_irqsave(&bdev->lock, flags); + if (bdev->state == SCM_WR_PROHIBIT) + pr_info("%lx: Write access to the SCM increment is restored\n", + (unsigned long) bdev->scmdev->address); + bdev->state = SCM_OPER; + spin_unlock_irqrestore(&bdev->lock, flags); +} + +static int __init scm_blk_init(void) +{ + int ret = -EINVAL; + + if (!scm_cluster_size_valid()) + goto out; + + ret = register_blkdev(0, "scm"); + if (ret < 0) + goto out; + + scm_major = ret; + ret = scm_alloc_rqs(nr_requests); + if (ret) + goto out_free; + + scm_debug = debug_register("scm_log", 16, 1, 16); + if (!scm_debug) { + ret = -ENOMEM; + goto out_free; + } + + debug_register_view(scm_debug, &debug_hex_ascii_view); + debug_set_level(scm_debug, 2); + + ret = scm_drv_init(); + if (ret) + goto out_dbf; + + return ret; + +out_dbf: + debug_unregister(scm_debug); +out_free: + scm_free_rqs(); + unregister_blkdev(scm_major, "scm"); +out: + return ret; +} +module_init(scm_blk_init); + +static void __exit scm_blk_cleanup(void) +{ + scm_drv_cleanup(); + debug_unregister(scm_debug); + scm_free_rqs(); + unregister_blkdev(scm_major, "scm"); +} +module_exit(scm_blk_cleanup); diff --git a/drivers/s390/block/scm_blk.h b/drivers/s390/block/scm_blk.h new file mode 100644 index 00000000000..e59331e6c2e --- /dev/null +++ b/drivers/s390/block/scm_blk.h @@ -0,0 +1,134 @@ +#ifndef SCM_BLK_H +#define SCM_BLK_H + +#include <linux/interrupt.h> +#include <linux/spinlock.h> +#include <linux/blkdev.h> +#include <linux/genhd.h> +#include <linux/list.h> + +#include <asm/debug.h> +#include <asm/eadm.h> + +#define SCM_NR_PARTS 8 +#define SCM_QUEUE_DELAY 5 + +struct scm_blk_dev { + struct tasklet_struct tasklet; + struct request_queue *rq; + struct gendisk *gendisk; + struct scm_device *scmdev; + spinlock_t rq_lock; /* guard the request queue */ + spinlock_t lock; /* guard the rest of the blockdev */ + atomic_t queued_reqs; + enum {SCM_OPER, SCM_WR_PROHIBIT} state; + struct list_head finished_requests; +#ifdef CONFIG_SCM_BLOCK_CLUSTER_WRITE + struct list_head cluster_list; +#endif +}; + +struct scm_request { + struct scm_blk_dev *bdev; + struct request *request; + struct aidaw *aidaw; + struct aob *aob; + struct list_head list; + u8 retries; + int error; +#ifdef CONFIG_SCM_BLOCK_CLUSTER_WRITE + struct { + enum {CLUSTER_NONE, CLUSTER_READ, CLUSTER_WRITE} state; + struct list_head list; + void **buf; + } cluster; +#endif +}; + +#define to_aobrq(rq) container_of((void *) rq, struct aob_rq_header, data) + +int scm_blk_dev_setup(struct scm_blk_dev *, struct scm_device *); +void scm_blk_dev_cleanup(struct scm_blk_dev *); +void scm_blk_set_available(struct scm_blk_dev *); +void scm_blk_irq(struct scm_device *, void *, int); + +void scm_request_finish(struct scm_request *); +void scm_request_requeue(struct scm_request *); + +int scm_drv_init(void); +void scm_drv_cleanup(void); + +#ifdef CONFIG_SCM_BLOCK_CLUSTER_WRITE +void __scm_free_rq_cluster(struct scm_request *); +int __scm_alloc_rq_cluster(struct scm_request *); +void scm_request_cluster_init(struct scm_request *); +bool scm_reserve_cluster(struct scm_request *); +void scm_release_cluster(struct scm_request *); +void scm_blk_dev_cluster_setup(struct 
scm_blk_dev *); +bool scm_need_cluster_request(struct scm_request *); +void scm_initiate_cluster_request(struct scm_request *); +void scm_cluster_request_irq(struct scm_request *); +bool scm_test_cluster_request(struct scm_request *); +bool scm_cluster_size_valid(void); +#else /* CONFIG_SCM_BLOCK_CLUSTER_WRITE */ +static inline void __scm_free_rq_cluster(struct scm_request *scmrq) {} +static inline int __scm_alloc_rq_cluster(struct scm_request *scmrq) +{ + return 0; +} +static inline void scm_request_cluster_init(struct scm_request *scmrq) {} +static inline bool scm_reserve_cluster(struct scm_request *scmrq) +{ + return true; +} +static inline void scm_release_cluster(struct scm_request *scmrq) {} +static inline void scm_blk_dev_cluster_setup(struct scm_blk_dev *bdev) {} +static inline bool scm_need_cluster_request(struct scm_request *scmrq) +{ + return false; +} +static inline void scm_initiate_cluster_request(struct scm_request *scmrq) {} +static inline void scm_cluster_request_irq(struct scm_request *scmrq) {} +static inline bool scm_test_cluster_request(struct scm_request *scmrq) +{ + return false; +} +static inline bool scm_cluster_size_valid(void) +{ + return true; +} +#endif /* CONFIG_SCM_BLOCK_CLUSTER_WRITE */ + +extern debug_info_t *scm_debug; + +#define SCM_LOG(imp, txt) do { \ + debug_text_event(scm_debug, imp, txt); \ + } while (0) + +static inline void SCM_LOG_HEX(int level, void *data, int length) +{ + if (!debug_level_enabled(scm_debug, level)) + return; + while (length > 0) { + debug_event(scm_debug, level, data, length); + length -= scm_debug->buf_size; + data += scm_debug->buf_size; + } +} + +static inline void SCM_LOG_STATE(int level, struct scm_device *scmdev) +{ + struct { + u64 address; + u8 oper_state; + u8 rank; + } __packed data = { + .address = scmdev->address, + .oper_state = scmdev->attrs.oper_state, + .rank = scmdev->attrs.rank, + }; + + SCM_LOG_HEX(level, &data, sizeof(data)); +} + +#endif /* SCM_BLK_H */ diff --git a/drivers/s390/block/scm_blk_cluster.c b/drivers/s390/block/scm_blk_cluster.c new file mode 100644 index 00000000000..9aae909d47a --- /dev/null +++ b/drivers/s390/block/scm_blk_cluster.c @@ -0,0 +1,230 @@ +/* + * Block driver for s390 storage class memory. + * + * Copyright IBM Corp. 
2012 + * Author(s): Sebastian Ott <sebott@linux.vnet.ibm.com> + */ + +#include <linux/spinlock.h> +#include <linux/module.h> +#include <linux/blkdev.h> +#include <linux/genhd.h> +#include <linux/slab.h> +#include <linux/list.h> +#include <asm/eadm.h> +#include "scm_blk.h" + +static unsigned int write_cluster_size = 64; +module_param(write_cluster_size, uint, S_IRUGO); +MODULE_PARM_DESC(write_cluster_size, + "Number of pages used for contiguous writes."); + +#define CLUSTER_SIZE (write_cluster_size * PAGE_SIZE) + +void __scm_free_rq_cluster(struct scm_request *scmrq) +{ + int i; + + if (!scmrq->cluster.buf) + return; + + for (i = 0; i < 2 * write_cluster_size; i++) + free_page((unsigned long) scmrq->cluster.buf[i]); + + kfree(scmrq->cluster.buf); +} + +int __scm_alloc_rq_cluster(struct scm_request *scmrq) +{ + int i; + + scmrq->cluster.buf = kzalloc(sizeof(void *) * 2 * write_cluster_size, + GFP_KERNEL); + if (!scmrq->cluster.buf) + return -ENOMEM; + + for (i = 0; i < 2 * write_cluster_size; i++) { + scmrq->cluster.buf[i] = (void *) get_zeroed_page(GFP_DMA); + if (!scmrq->cluster.buf[i]) + return -ENOMEM; + } + INIT_LIST_HEAD(&scmrq->cluster.list); + return 0; +} + +void scm_request_cluster_init(struct scm_request *scmrq) +{ + scmrq->cluster.state = CLUSTER_NONE; +} + +static bool clusters_intersect(struct scm_request *A, struct scm_request *B) +{ + unsigned long firstA, lastA, firstB, lastB; + + firstA = ((u64) blk_rq_pos(A->request) << 9) / CLUSTER_SIZE; + lastA = (((u64) blk_rq_pos(A->request) << 9) + + blk_rq_bytes(A->request) - 1) / CLUSTER_SIZE; + + firstB = ((u64) blk_rq_pos(B->request) << 9) / CLUSTER_SIZE; + lastB = (((u64) blk_rq_pos(B->request) << 9) + + blk_rq_bytes(B->request) - 1) / CLUSTER_SIZE; + + return (firstB <= lastA && firstA <= lastB); +} + +bool scm_reserve_cluster(struct scm_request *scmrq) +{ + struct scm_blk_dev *bdev = scmrq->bdev; + struct scm_request *iter; + + if (write_cluster_size == 0) + return true; + + spin_lock(&bdev->lock); + list_for_each_entry(iter, &bdev->cluster_list, cluster.list) { + if (clusters_intersect(scmrq, iter) && + (rq_data_dir(scmrq->request) == WRITE || + rq_data_dir(iter->request) == WRITE)) { + spin_unlock(&bdev->lock); + return false; + } + } + list_add(&scmrq->cluster.list, &bdev->cluster_list); + spin_unlock(&bdev->lock); + + return true; +} + +void scm_release_cluster(struct scm_request *scmrq) +{ + struct scm_blk_dev *bdev = scmrq->bdev; + unsigned long flags; + + if (write_cluster_size == 0) + return; + + spin_lock_irqsave(&bdev->lock, flags); + list_del(&scmrq->cluster.list); + spin_unlock_irqrestore(&bdev->lock, flags); +} + +void scm_blk_dev_cluster_setup(struct scm_blk_dev *bdev) +{ + INIT_LIST_HEAD(&bdev->cluster_list); + blk_queue_io_opt(bdev->rq, CLUSTER_SIZE); +} + +static void scm_prepare_cluster_request(struct scm_request *scmrq) +{ + struct scm_blk_dev *bdev = scmrq->bdev; + struct scm_device *scmdev = bdev->gendisk->private_data; + struct request *req = scmrq->request; + struct aidaw *aidaw = scmrq->aidaw; + struct msb *msb = &scmrq->aob->msb[0]; + struct req_iterator iter; + struct bio_vec bv; + int i = 0; + u64 addr; + + switch (scmrq->cluster.state) { + case CLUSTER_NONE: + scmrq->cluster.state = CLUSTER_READ; + /* fall through */ + case CLUSTER_READ: + scmrq->aob->request.msb_count = 1; + msb->bs = MSB_BS_4K; + msb->oc = MSB_OC_READ; + msb->flags = MSB_FLAG_IDA; + msb->data_addr = (u64) aidaw; + msb->blk_count = write_cluster_size; + + addr = scmdev->address + ((u64) blk_rq_pos(req) << 9); + msb->scm_addr = 
round_down(addr, CLUSTER_SIZE); + + if (msb->scm_addr != + round_down(addr + (u64) blk_rq_bytes(req) - 1, + CLUSTER_SIZE)) + msb->blk_count = 2 * write_cluster_size; + + for (i = 0; i < msb->blk_count; i++) { + aidaw->data_addr = (u64) scmrq->cluster.buf[i]; + aidaw++; + } + + break; + case CLUSTER_WRITE: + msb->oc = MSB_OC_WRITE; + + for (addr = msb->scm_addr; + addr < scmdev->address + ((u64) blk_rq_pos(req) << 9); + addr += PAGE_SIZE) { + aidaw->data_addr = (u64) scmrq->cluster.buf[i]; + aidaw++; + i++; + } + rq_for_each_segment(bv, req, iter) { + aidaw->data_addr = (u64) page_address(bv.bv_page); + aidaw++; + i++; + } + for (; i < msb->blk_count; i++) { + aidaw->data_addr = (u64) scmrq->cluster.buf[i]; + aidaw++; + } + break; + } +} + +bool scm_need_cluster_request(struct scm_request *scmrq) +{ + if (rq_data_dir(scmrq->request) == READ) + return false; + + return blk_rq_bytes(scmrq->request) < CLUSTER_SIZE; +} + +/* Called with queue lock held. */ +void scm_initiate_cluster_request(struct scm_request *scmrq) +{ + scm_prepare_cluster_request(scmrq); + if (eadm_start_aob(scmrq->aob)) + scm_request_requeue(scmrq); +} + +bool scm_test_cluster_request(struct scm_request *scmrq) +{ + return scmrq->cluster.state != CLUSTER_NONE; +} + +void scm_cluster_request_irq(struct scm_request *scmrq) +{ + struct scm_blk_dev *bdev = scmrq->bdev; + unsigned long flags; + + switch (scmrq->cluster.state) { + case CLUSTER_NONE: + BUG(); + break; + case CLUSTER_READ: + if (scmrq->error) { + scm_request_finish(scmrq); + break; + } + scmrq->cluster.state = CLUSTER_WRITE; + spin_lock_irqsave(&bdev->rq_lock, flags); + scm_initiate_cluster_request(scmrq); + spin_unlock_irqrestore(&bdev->rq_lock, flags); + break; + case CLUSTER_WRITE: + scm_request_finish(scmrq); + break; + } +} + +bool scm_cluster_size_valid(void) +{ + if (write_cluster_size == 1 || write_cluster_size > 128) + return false; + + return !(write_cluster_size & (write_cluster_size - 1)); +} diff --git a/drivers/s390/block/scm_drv.c b/drivers/s390/block/scm_drv.c new file mode 100644 index 00000000000..c98cf52d78d --- /dev/null +++ b/drivers/s390/block/scm_drv.c @@ -0,0 +1,92 @@ +/* + * Device driver for s390 storage class memory. + * + * Copyright IBM Corp. 
2012 + * Author(s): Sebastian Ott <sebott@linux.vnet.ibm.com> + */ + +#define KMSG_COMPONENT "scm_block" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + +#include <linux/module.h> +#include <linux/slab.h> +#include <asm/eadm.h> +#include "scm_blk.h" + +static void scm_notify(struct scm_device *scmdev, enum scm_event event) +{ + struct scm_blk_dev *bdev = dev_get_drvdata(&scmdev->dev); + + switch (event) { + case SCM_CHANGE: + pr_info("%lx: The capabilities of the SCM increment changed\n", + (unsigned long) scmdev->address); + SCM_LOG(2, "State changed"); + SCM_LOG_STATE(2, scmdev); + break; + case SCM_AVAIL: + SCM_LOG(2, "Increment available"); + SCM_LOG_STATE(2, scmdev); + scm_blk_set_available(bdev); + break; + } +} + +static int scm_probe(struct scm_device *scmdev) +{ + struct scm_blk_dev *bdev; + int ret; + + SCM_LOG(2, "probe"); + SCM_LOG_STATE(2, scmdev); + + if (scmdev->attrs.oper_state != OP_STATE_GOOD) + return -EINVAL; + + bdev = kzalloc(sizeof(*bdev), GFP_KERNEL); + if (!bdev) + return -ENOMEM; + + dev_set_drvdata(&scmdev->dev, bdev); + ret = scm_blk_dev_setup(bdev, scmdev); + if (ret) { + dev_set_drvdata(&scmdev->dev, NULL); + kfree(bdev); + goto out; + } + +out: + return ret; +} + +static int scm_remove(struct scm_device *scmdev) +{ + struct scm_blk_dev *bdev = dev_get_drvdata(&scmdev->dev); + + scm_blk_dev_cleanup(bdev); + dev_set_drvdata(&scmdev->dev, NULL); + kfree(bdev); + + return 0; +} + +static struct scm_driver scm_drv = { + .drv = { + .name = "scm_block", + .owner = THIS_MODULE, + }, + .notify = scm_notify, + .probe = scm_probe, + .remove = scm_remove, + .handler = scm_blk_irq, +}; + +int __init scm_drv_init(void) +{ + return scm_driver_register(&scm_drv); +} + +void scm_drv_cleanup(void) +{ + scm_driver_unregister(&scm_drv); +} diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c index 118de392af6..6969d39f1e2 100644 --- a/drivers/s390/block/xpram.c +++ b/drivers/s390/block/xpram.c @@ -33,14 +33,14 @@ #include <linux/ctype.h> /* isdigit, isxdigit */ #include <linux/errno.h> #include <linux/init.h> -#include <linux/slab.h> #include <linux/blkdev.h> #include <linux/blkpg.h> #include <linux/hdreg.h> /* HDIO_GETGEO */ -#include <linux/sysdev.h> +#include <linux/device.h> #include <linux/bio.h> #include <linux/suspend.h> #include <linux/platform_device.h> +#include <linux/gfp.h> #include <asm/uaccess.h> #define XPRAM_NAME "xpram" @@ -62,8 +62,8 @@ static int xpram_devs; /* * Parameter parsing functions. */ -static int __initdata devs = XPRAM_DEVS; -static char __initdata *sizes[XPRAM_MAX_DEVS]; +static int devs = XPRAM_DEVS; +static char *sizes[XPRAM_MAX_DEVS]; module_param(devs, int, 0); module_param_array(sizes, charp, NULL, 0); @@ -181,28 +181,29 @@ static unsigned long xpram_highest_page_index(void) /* * Block device make request function. */ -static int xpram_make_request(struct request_queue *q, struct bio *bio) +static void xpram_make_request(struct request_queue *q, struct bio *bio) { xpram_device_t *xdev = bio->bi_bdev->bd_disk->private_data; - struct bio_vec *bvec; + struct bio_vec bvec; + struct bvec_iter iter; unsigned int index; unsigned long page_addr; unsigned long bytes; - int i; - if ((bio->bi_sector & 7) != 0 || (bio->bi_size & 4095) != 0) + if ((bio->bi_iter.bi_sector & 7) != 0 || + (bio->bi_iter.bi_size & 4095) != 0) /* Request is not page-aligned. */ goto fail; - if ((bio->bi_size >> 12) > xdev->size) + if ((bio->bi_iter.bi_size >> 12) > xdev->size) /* Request size is no page-aligned. 
*/ goto fail; - if ((bio->bi_sector >> 3) > 0xffffffffU - xdev->offset) + if ((bio->bi_iter.bi_sector >> 3) > 0xffffffffU - xdev->offset) goto fail; - index = (bio->bi_sector >> 3) + xdev->offset; - bio_for_each_segment(bvec, bio, i) { + index = (bio->bi_iter.bi_sector >> 3) + xdev->offset; + bio_for_each_segment(bvec, bio, iter) { page_addr = (unsigned long) - kmap(bvec->bv_page) + bvec->bv_offset; - bytes = bvec->bv_len; + kmap(bvec.bv_page) + bvec.bv_offset; + bytes = bvec.bv_len; if ((page_addr & 4095) != 0 || (bytes & 4095) != 0) /* More paranoia. */ goto fail; @@ -221,10 +222,9 @@ static int xpram_make_request(struct request_queue *q, struct bio *bio) } set_bit(BIO_UPTODATE, &bio->bi_flags); bio_endio(bio, 0); - return 0; + return; fail: bio_io_error(bio); - return 0; } static int xpram_getgeo(struct block_device *bdev, struct hd_geometry *geo) @@ -258,6 +258,7 @@ static int __init xpram_setup_sizes(unsigned long pages) unsigned long mem_needed; unsigned long mem_auto; unsigned long long size; + char *sizes_end; int mem_auto_no; int i; @@ -276,8 +277,8 @@ static int __init xpram_setup_sizes(unsigned long pages) mem_auto_no = 0; for (i = 0; i < xpram_devs; i++) { if (sizes[i]) { - size = simple_strtoull(sizes[i], &sizes[i], 0); - switch (sizes[i][0]) { + size = simple_strtoull(sizes[i], &sizes_end, 0); + switch (*sizes_end) { case 'g': case 'G': size <<= 20; @@ -344,6 +345,7 @@ static int __init xpram_setup_blkdev(void) put_disk(xpram_disks[i]); goto out; } + queue_flag_set_unlocked(QUEUE_FLAG_NONROT, xpram_queues[i]); blk_queue_make_request(xpram_queues[i], xpram_make_request); blk_queue_logical_block_size(xpram_queues[i], 4096); } diff --git a/drivers/s390/char/Kconfig b/drivers/s390/char/Kconfig index 4e34d3686c2..71bf959732f 100644 --- a/drivers/s390/char/Kconfig +++ b/drivers/s390/char/Kconfig @@ -2,76 +2,85 @@ comment "S/390 character device drivers" depends on S390 config TN3270 - tristate "Support for locally attached 3270 terminals" + def_tristate y + prompt "Support for locally attached 3270 terminals" depends on CCW help Include support for IBM 3270 terminals. config TN3270_TTY - tristate "Support for tty input/output on 3270 terminals" - depends on TN3270 + def_tristate y + prompt "Support for tty input/output on 3270 terminals" + depends on TN3270 && TTY help Include support for using an IBM 3270 terminal as a Linux tty. config TN3270_FS - tristate "Support for fullscreen applications on 3270 terminals" + def_tristate m + prompt "Support for fullscreen applications on 3270 terminals" depends on TN3270 help Include support for fullscreen applications on an IBM 3270 terminal. config TN3270_CONSOLE - bool "Support for console on 3270 terminal" + def_bool y + prompt "Support for console on 3270 terminal" depends on TN3270=y && TN3270_TTY=y help Include support for using an IBM 3270 terminal as a Linux system console. Available only if 3270 support is compiled in statically. config TN3215 - bool "Support for 3215 line mode terminal" - depends on CCW + def_bool y + prompt "Support for 3215 line mode terminal" + depends on CCW && TTY help Include support for IBM 3215 line-mode terminals. config TN3215_CONSOLE - bool "Support for console on 3215 line mode terminal" + def_bool y + prompt "Support for console on 3215 line mode terminal" depends on TN3215 help Include support for using an IBM 3215 line-mode terminal as a Linux system console. 
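The xpram_make_request conversion above is the stock immutable-biovec migration: the make_request function returns void instead of int, the per-segment pointer plus integer index become a by-value struct bio_vec walked by a struct bvec_iter cursor, and the bi_sector/bi_size fields move under bio->bi_iter. A minimal sketch of the resulting iteration pattern, assuming the post-conversion block API; walk_bio_segments is an illustrative name, not part of the patch:

    #include <linux/bio.h>
    #include <linux/mm.h>

    static void walk_bio_segments(struct bio *bio)
    {
            struct bio_vec bvec;    /* filled in by value for each segment */
            struct bvec_iter iter;  /* cursor; replaces the old int index */

            bio_for_each_segment(bvec, bio, iter) {
                    void *addr = page_address(bvec.bv_page) + bvec.bv_offset;

                    /* bvec.bv_len bytes of this segment start at addr */
                    (void) addr;
            }
    }

Note that xpram itself goes through kmap() rather than page_address(), since the segment pages may live in highmem.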
config CCW_CONSOLE - bool - depends on TN3215_CONSOLE || TN3270_CONSOLE - default y + def_bool y if TN3215_CONSOLE || TN3270_CONSOLE config SCLP_TTY - bool "Support for SCLP line mode terminal" - depends on S390 + def_bool y + prompt "Support for SCLP line mode terminal" + depends on S390 && TTY help Include support for IBM SCLP line-mode terminals. config SCLP_CONSOLE - bool "Support for console on SCLP line mode terminal" + def_bool y + prompt "Support for console on SCLP line mode terminal" depends on SCLP_TTY help Include support for using an IBM HWC line-mode terminal as the Linux system console. config SCLP_VT220_TTY - bool "Support for SCLP VT220-compatible terminal" - depends on S390 + def_bool y + prompt "Support for SCLP VT220-compatible terminal" + depends on S390 && TTY help Include support for an IBM SCLP VT220-compatible terminal. config SCLP_VT220_CONSOLE - bool "Support for console on SCLP VT220-compatible terminal" + def_bool y + prompt "Support for console on SCLP VT220-compatible terminal" depends on SCLP_VT220_TTY help Include support for using an IBM SCLP VT220-compatible terminal as a Linux system console. config SCLP_CPI - tristate "Control-Program Identification" + def_tristate m + prompt "Control-Program Identification" depends on S390 help This option enables the hardware console interface for system @@ -83,7 +92,8 @@ config SCLP_CPI need this feature and intend to run your kernel in LPAR. config SCLP_ASYNC - tristate "Support for Call Home via Asynchronous SCLP Records" + def_tristate m + prompt "Support for Call Home via Asynchronous SCLP Records" depends on S390 help This option enables the call home function, which is able to inform @@ -93,7 +103,8 @@ config SCLP_ASYNC need this feature and intend to run your kernel in LPAR. config S390_TAPE - tristate "S/390 tape device support" + def_tristate m + prompt "S/390 tape device support" depends on CCW help Select this option if you want to access channel-attached tape @@ -105,25 +116,12 @@ config S390_TAPE called tape390 and include all selected interfaces and hardware drivers. -comment "S/390 tape interface support" - depends on S390_TAPE - -config S390_TAPE_BLOCK - bool "Support for tape block devices" - depends on S390_TAPE && BLOCK - help - Select this option if you want to access your channel-attached tape - devices using the block device interface. This interface is similar - to CD-ROM devices on other platforms. The tapes can only be - accessed read-only when using this interface. Have a look at - <file:Documentation/s390/TAPE> for further information about creating - volumes for and using this interface. It is safe to say "Y" here. - comment "S/390 tape hardware support" depends on S390_TAPE config S390_TAPE_34XX - tristate "Support for 3480/3490 tape hardware" + def_tristate m + prompt "Support for 3480/3490 tape hardware" depends on S390_TAPE help Select this option if you want to access IBM 3480/3490 magnetic @@ -131,7 +129,8 @@ config S390_TAPE_34XX It is safe to say "Y" here. config S390_TAPE_3590 - tristate "Support for 3590 tape hardware" + def_tristate m + prompt "Support for 3590 tape hardware" depends on S390_TAPE help Select this option if you want to access IBM 3590 magnetic @@ -139,7 +138,8 @@ config S390_TAPE_3590 It is safe to say "Y" here. 
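Every entry touched in this Kconfig file follows one mechanical pattern: the combined type-and-prompt line (tristate "..." or bool "...") becomes def_tristate/def_bool carrying an explicit default, with the user-visible text moved to a separate prompt line; standalone default lines (as under MONWRITER and S390_VMUR below) thereby become redundant and are dropped. A before/after sketch in Kconfig syntax, using EXAMPLE_DRV as a made-up symbol:

    # Before: type and prompt combined; default is "n" unless a
    # separate "default" line overrides it.
    config EXAMPLE_DRV
            tristate "Example driver"
            depends on S390

    # After: same prompt, same dependency, but the default ("m")
    # is stated in the definition itself.
    config EXAMPLE_DRV
            def_tristate m
            prompt "Example driver"
            depends on S390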
config VMLOGRDR - tristate "Support for the z/VM recording system services (VM only)" + def_tristate m + prompt "Support for the z/VM recording system services (VM only)" depends on IUCV help Select this option if you want to be able to receive records collected @@ -148,30 +148,31 @@ config VMLOGRDR This driver depends on the IUCV support driver. config VMCP - tristate "Support for the z/VM CP interface (VM only)" + def_bool y + prompt "Support for the z/VM CP interface" depends on S390 help Select this option if you want to be able to interact with the control program on z/VM - config MONREADER - tristate "API for reading z/VM monitor service records" + def_tristate m + prompt "API for reading z/VM monitor service records" depends on IUCV help Character device driver for reading z/VM monitor service records config MONWRITER - tristate "API for writing z/VM monitor service records" + def_tristate m + prompt "API for writing z/VM monitor service records" depends on S390 - default "m" help Character device driver for writing z/VM monitor service records config S390_VMUR - tristate "z/VM unit record device driver" + def_tristate m + prompt "z/VM unit record device driver" depends on S390 - default "m" help Character device driver for z/VM reader, puncher and printer. diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile index efb500ab66c..78b6ace7edc 100644 --- a/drivers/s390/char/Makefile +++ b/drivers/s390/char/Makefile @@ -3,7 +3,8 @@ # obj-y += ctrlchar.o keyboard.o defkeymap.o sclp.o sclp_rw.o sclp_quiesce.o \ - sclp_cmd.o sclp_config.o sclp_cpi_sys.o + sclp_cmd.o sclp_config.o sclp_cpi_sys.o sclp_ocf.o sclp_ctl.o \ + sclp_early.o obj-$(CONFIG_TN3270) += raw3270.o obj-$(CONFIG_TN3270_CONSOLE) += con3270.o @@ -18,11 +19,9 @@ obj-$(CONFIG_SCLP_VT220_TTY) += sclp_vt220.o obj-$(CONFIG_SCLP_CPI) += sclp_cpi.o obj-$(CONFIG_SCLP_ASYNC) += sclp_async.o -obj-$(CONFIG_ZVM_WATCHDOG) += vmwatchdog.o obj-$(CONFIG_VMLOGRDR) += vmlogrdr.o obj-$(CONFIG_VMCP) += vmcp.o -tape-$(CONFIG_S390_TAPE_BLOCK) += tape_block.o tape-$(CONFIG_PROC_FS) += tape_proc.o tape-objs := tape_core.o tape_std.o tape_char.o $(tape-y) obj-$(CONFIG_S390_TAPE) += tape.o tape_class.o @@ -33,4 +32,4 @@ obj-$(CONFIG_MONWRITER) += monwriter.o obj-$(CONFIG_S390_VMUR) += vmur.o zcore_mod-objs := sclp_sdias.o zcore.o -obj-$(CONFIG_ZFCPDUMP) += zcore_mod.o +obj-$(CONFIG_CRASH_DUMP) += zcore_mod.o diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c index 59ec073724b..5af7f0bd612 100644 --- a/drivers/s390/char/con3215.c +++ b/drivers/s390/char/con3215.c @@ -20,6 +20,7 @@ #include <linux/interrupt.h> #include <linux/err.h> #include <linux/reboot.h> +#include <linux/serial.h> /* ASYNC_* flags */ #include <linux/slab.h> #include <asm/ccwdev.h> #include <asm/cio.h> @@ -44,14 +45,11 @@ #define RAW3215_TIMEOUT HZ/10 /* time for delayed output */ #define RAW3215_FIXED 1 /* 3215 console device is not be freed */ -#define RAW3215_ACTIVE 2 /* set if the device is in use */ #define RAW3215_WORKING 4 /* set if a request is being worked on */ #define RAW3215_THROTTLED 8 /* set if reading is disabled */ #define RAW3215_STOPPED 16 /* set if writing is disabled */ -#define RAW3215_CLOSING 32 /* set while in close process */ #define RAW3215_TIMER_RUNS 64 /* set if the output delay timer is on */ #define RAW3215_FLUSHING 128 /* set to flush buffer (no delay) */ -#define RAW3215_FROZEN 256 /* set if 3215 is frozen for suspend */ #define TAB_STOP_SIZE 8 /* tab stop size */ @@ -76,6 +74,7 @@ struct raw3215_req { } __attribute__ 
((aligned(8))); struct raw3215_info { + struct tty_port port; struct ccw_device *cdev; /* device for tty driver */ spinlock_t *lock; /* pointer to irq lock */ int flags; /* state flags */ @@ -84,9 +83,9 @@ struct raw3215_info { int head; /* first free byte in output buffer */ int count; /* number of bytes in output buffer */ int written; /* number of bytes in write requests */ - struct tty_struct *tty; /* pointer to tty structure if present */ struct raw3215_req *queued_read; /* pointer to queued read requests */ struct raw3215_req *queued_write;/* pointer to queued write requests */ + struct tasklet_struct tlet; /* tasklet to invoke tty_wakeup */ wait_queue_head_t empty_wait; /* wait queue for flushing */ struct timer_list timer; /* timer for delayed output */ int line_pos; /* position on the line (for tabs) */ @@ -292,7 +291,7 @@ static void raw3215_timeout(unsigned long __data) if (raw->flags & RAW3215_TIMER_RUNS) { del_timer(&raw->timer); raw->flags &= ~RAW3215_TIMER_RUNS; - if (!(raw->flags & RAW3215_FROZEN)) { + if (!(raw->port.flags & ASYNC_SUSPENDED)) { raw3215_mk_write_req(raw); raw3215_start_io(raw); } @@ -308,7 +307,8 @@ static void raw3215_timeout(unsigned long __data) */ static inline void raw3215_try_io(struct raw3215_info *raw) { - if (!(raw->flags & RAW3215_ACTIVE) || (raw->flags & RAW3215_FROZEN)) + if (!(raw->port.flags & ASYNC_INITIALIZED) || + (raw->port.flags & ASYNC_SUSPENDED)) return; if (raw->queued_read != NULL) raw3215_start_io(raw); @@ -323,10 +323,7 @@ static inline void raw3215_try_io(struct raw3215_info *raw) } } else if (!(raw->flags & RAW3215_TIMER_RUNS)) { /* delay small writes */ - init_timer(&raw->timer); raw->timer.expires = RAW3215_TIMEOUT + jiffies; - raw->timer.data = (unsigned long) raw; - raw->timer.function = raw3215_timeout; add_timer(&raw->timer); raw->flags |= RAW3215_TIMER_RUNS; } @@ -334,19 +331,29 @@ static inline void raw3215_try_io(struct raw3215_info *raw) } /* - * Try to start the next IO and wake up processes waiting on the tty. + * Call tty_wakeup from tasklet context */ -static void raw3215_next_io(struct raw3215_info *raw) +static void raw3215_wakeup(unsigned long data) { + struct raw3215_info *raw = (struct raw3215_info *) data; struct tty_struct *tty; + tty = tty_port_tty_get(&raw->port); + if (tty) { + tty_wakeup(tty); + tty_kref_put(tty); + } +} + +/* + * Try to start the next IO and wake up processes waiting on the tty. + */ +static void raw3215_next_io(struct raw3215_info *raw, struct tty_struct *tty) +{ raw3215_mk_write_req(raw); raw3215_try_io(raw); - tty = raw->tty; - if (tty != NULL && - RAW3215_BUFFER_SIZE - raw->count >= RAW3215_MIN_SPACE) { - tty_wakeup(tty); - } + if (tty && RAW3215_BUFFER_SIZE - raw->count >= RAW3215_MIN_SPACE) + tasklet_schedule(&raw->tlet); } /* @@ -363,10 +370,11 @@ static void raw3215_irq(struct ccw_device *cdev, unsigned long intparm, raw = dev_get_drvdata(&cdev->dev); req = (struct raw3215_req *) intparm; + tty = tty_port_tty_get(&raw->port); cstat = irb->scsw.cmd.cstat; dstat = irb->scsw.cmd.dstat; if (cstat != 0) - raw3215_next_io(raw); + raw3215_next_io(raw, tty); if (dstat & 0x01) { /* we got a unit exception */ dstat &= ~0x01; /* we can ignore it */ } @@ -376,13 +384,13 @@ static void raw3215_irq(struct ccw_device *cdev, unsigned long intparm, break; /* Attention interrupt, someone hit the enter key */ raw3215_mk_read_req(raw); - raw3215_next_io(raw); + raw3215_next_io(raw, tty); break; case 0x08: case 0x0C: /* Channel end interrupt. 
*/ if ((raw = req->info) == NULL) - return; /* That shouldn't happen ... */ + goto put_tty; /* That shouldn't happen ... */ if (req->type == RAW3215_READ) { /* store residual count, then wait for device end */ req->residual = irb->scsw.cmd.count; @@ -392,11 +400,10 @@ static void raw3215_irq(struct ccw_device *cdev, unsigned long intparm, case 0x04: /* Device end interrupt. */ if ((raw = req->info) == NULL) - return; /* That shouldn't happen ... */ - if (req->type == RAW3215_READ && raw->tty != NULL) { + goto put_tty; /* That shouldn't happen ... */ + if (req->type == RAW3215_READ && tty != NULL) { unsigned int cchar; - tty = raw->tty; count = 160 - req->residual; EBCASC(raw->inbuf, count); cchar = ctrlchar_handle(raw->inbuf, count, tty); @@ -405,8 +412,9 @@ static void raw3215_irq(struct ccw_device *cdev, unsigned long intparm, break; case CTRLCHAR_CTRL: - tty_insert_flip_char(tty, cchar, TTY_NORMAL); - tty_flip_buffer_push(raw->tty); + tty_insert_flip_char(&raw->port, cchar, + TTY_NORMAL); + tty_flip_buffer_push(&raw->port); break; case CTRLCHAR_NONE: @@ -418,8 +426,9 @@ static void raw3215_irq(struct ccw_device *cdev, unsigned long intparm, count++; } else count -= 2; - tty_insert_flip_string(tty, raw->inbuf, count); - tty_flip_buffer_push(raw->tty); + tty_insert_flip_string(&raw->port, raw->inbuf, + count); + tty_flip_buffer_push(&raw->port); break; } } else if (req->type == RAW3215_WRITE) { @@ -434,7 +443,7 @@ static void raw3215_irq(struct ccw_device *cdev, unsigned long intparm, raw->queued_read == NULL) { wake_up_interruptible(&raw->empty_wait); } - raw3215_next_io(raw); + raw3215_next_io(raw, tty); break; default: /* Strange interrupt, I'll do my best to clean up */ @@ -446,9 +455,10 @@ static void raw3215_irq(struct ccw_device *cdev, unsigned long intparm, raw->flags &= ~RAW3215_WORKING; raw3215_free_req(req); } - raw3215_next_io(raw); + raw3215_next_io(raw, tty); } - return; +put_tty: + tty_kref_put(tty); } /* @@ -482,7 +492,7 @@ static void raw3215_make_room(struct raw3215_info *raw, unsigned int length) /* While console is frozen for suspend we have no other * choice but to drop message from the buffer to make * room for even more messages. */ - if (raw->flags & RAW3215_FROZEN) { + if (raw->port.flags & ASYNC_SUSPENDED) { raw3215_drop_line(raw); continue; } @@ -492,7 +502,7 @@ static void raw3215_make_room(struct raw3215_info *raw, unsigned int length) raw3215_try_io(raw); raw->flags &= ~RAW3215_FLUSHING; #ifdef CONFIG_TN3215_CONSOLE - wait_cons_dev(); + ccw_device_wait_idle(raw->cdev); #endif /* Enough room freed up ? 
*/ if (RAW3215_BUFFER_SIZE - raw->count >= length) @@ -604,10 +614,10 @@ static int raw3215_startup(struct raw3215_info *raw) { unsigned long flags; - if (raw->flags & RAW3215_ACTIVE) + if (raw->port.flags & ASYNC_INITIALIZED) return 0; raw->line_pos = 0; - raw->flags |= RAW3215_ACTIVE; + raw->port.flags |= ASYNC_INITIALIZED; spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags); raw3215_try_io(raw); spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags); @@ -623,14 +633,15 @@ static void raw3215_shutdown(struct raw3215_info *raw) DECLARE_WAITQUEUE(wait, current); unsigned long flags; - if (!(raw->flags & RAW3215_ACTIVE) || (raw->flags & RAW3215_FIXED)) + if (!(raw->port.flags & ASYNC_INITIALIZED) || + (raw->flags & RAW3215_FIXED)) return; /* Wait for outstanding requests, then free irq */ spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags); if ((raw->flags & RAW3215_WORKING) || raw->queued_write != NULL || raw->queued_read != NULL) { - raw->flags |= RAW3215_CLOSING; + raw->port.flags |= ASYNC_CLOSING; add_wait_queue(&raw->empty_wait, &wait); set_current_state(TASK_INTERRUPTIBLE); spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags); @@ -638,11 +649,42 @@ static void raw3215_shutdown(struct raw3215_info *raw) spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags); remove_wait_queue(&raw->empty_wait, &wait); set_current_state(TASK_RUNNING); - raw->flags &= ~(RAW3215_ACTIVE | RAW3215_CLOSING); + raw->port.flags &= ~(ASYNC_INITIALIZED | ASYNC_CLOSING); } spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags); } +static struct raw3215_info *raw3215_alloc_info(void) +{ + struct raw3215_info *info; + + info = kzalloc(sizeof(struct raw3215_info), GFP_KERNEL | GFP_DMA); + if (!info) + return NULL; + + info->buffer = kzalloc(RAW3215_BUFFER_SIZE, GFP_KERNEL | GFP_DMA); + info->inbuf = kzalloc(RAW3215_INBUF_SIZE, GFP_KERNEL | GFP_DMA); + if (!info->buffer || !info->inbuf) { + kfree(info); + return NULL; + } + + setup_timer(&info->timer, raw3215_timeout, (unsigned long)info); + init_waitqueue_head(&info->empty_wait); + tasklet_init(&info->tlet, raw3215_wakeup, (unsigned long)info); + tty_port_init(&info->port); + + return info; +} + +static void raw3215_free_info(struct raw3215_info *raw) +{ + kfree(raw->inbuf); + kfree(raw->buffer); + tty_port_destroy(&raw->port); + kfree(raw); +} + static int raw3215_probe (struct ccw_device *cdev) { struct raw3215_info *raw; @@ -651,11 +693,15 @@ static int raw3215_probe (struct ccw_device *cdev) /* Console is special. 
*/ if (raw3215[0] && (raw3215[0] == dev_get_drvdata(&cdev->dev))) return 0; - raw = kmalloc(sizeof(struct raw3215_info) + - RAW3215_INBUF_SIZE, GFP_KERNEL|GFP_DMA); + + raw = raw3215_alloc_info(); if (raw == NULL) return -ENOMEM; + raw->cdev = cdev; + dev_set_drvdata(&cdev->dev, raw); + cdev->handler = raw3215_irq; + spin_lock(&raw3215_device_lock); for (line = 0; line < NR_3215; line++) { if (!raw3215[line]) { @@ -665,40 +711,29 @@ static int raw3215_probe (struct ccw_device *cdev) } spin_unlock(&raw3215_device_lock); if (line == NR_3215) { - kfree(raw); + raw3215_free_info(raw); return -ENODEV; } - raw->cdev = cdev; - raw->inbuf = (char *) raw + sizeof(struct raw3215_info); - memset(raw, 0, sizeof(struct raw3215_info)); - raw->buffer = kmalloc(RAW3215_BUFFER_SIZE, - GFP_KERNEL|GFP_DMA); - if (raw->buffer == NULL) { - spin_lock(&raw3215_device_lock); - raw3215[line] = NULL; - spin_unlock(&raw3215_device_lock); - kfree(raw); - return -ENOMEM; - } - init_waitqueue_head(&raw->empty_wait); - - dev_set_drvdata(&cdev->dev, raw); - cdev->handler = raw3215_irq; - return 0; } static void raw3215_remove (struct ccw_device *cdev) { struct raw3215_info *raw; + unsigned int line; ccw_device_set_offline(cdev); raw = dev_get_drvdata(&cdev->dev); if (raw) { + spin_lock(&raw3215_device_lock); + for (line = 0; line < NR_3215; line++) + if (raw3215[line] == raw) + break; + raw3215[line] = NULL; + spin_unlock(&raw3215_device_lock); dev_set_drvdata(&cdev->dev, NULL); - kfree(raw->buffer); - kfree(raw); + raw3215_free_info(raw); } } @@ -735,7 +770,7 @@ static int raw3215_pm_stop(struct ccw_device *cdev) raw = dev_get_drvdata(&cdev->dev); spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags); raw3215_make_room(raw, RAW3215_BUFFER_SIZE); - raw->flags |= RAW3215_FROZEN; + raw->port.flags |= ASYNC_SUSPENDED; spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags); return 0; } @@ -748,7 +783,7 @@ static int raw3215_pm_start(struct ccw_device *cdev) /* Allow I/O again and flush output buffer. */ raw = dev_get_drvdata(&cdev->dev); spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags); - raw->flags &= ~RAW3215_FROZEN; + raw->port.flags &= ~ASYNC_SUSPENDED; raw->flags |= RAW3215_FLUSHING; raw3215_try_io(raw); raw->flags &= ~RAW3215_FLUSHING; @@ -762,8 +797,10 @@ static struct ccw_device_id raw3215_id[] = { }; static struct ccw_driver raw3215_ccw_driver = { - .name = "3215", - .owner = THIS_MODULE, + .driver = { + .name = "3215", + .owner = THIS_MODULE, + }, .ids = raw3215_id, .probe = &raw3215_probe, .remove = &raw3215_remove, @@ -772,6 +809,7 @@ static struct ccw_driver raw3215_ccw_driver = { .freeze = &raw3215_pm_stop, .thaw = &raw3215_pm_start, .restore = &raw3215_pm_start, + .int_class = IRQIO_C15, }; #ifdef CONFIG_TN3215_CONSOLE @@ -818,9 +856,9 @@ static void con3215_flush(void) unsigned long flags; raw = raw3215[0]; /* console 3215 is the first one */ - if (raw->flags & RAW3215_FROZEN) + if (raw->port.flags & ASYNC_SUSPENDED) /* The console is still frozen for suspend. */ - if (ccw_device_force_console()) + if (ccw_device_force_console(raw->cdev)) /* Forcing didn't work, no panic message .. 
*/ return; spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags); @@ -884,26 +922,26 @@ static int __init con3215_init(void) raw3215_freelist = req; } - cdev = ccw_device_probe_console(); + cdev = ccw_device_create_console(&raw3215_ccw_driver); if (IS_ERR(cdev)) return -ENODEV; - raw3215[0] = raw = (struct raw3215_info *) - kzalloc(sizeof(struct raw3215_info), GFP_KERNEL | GFP_DMA); - raw->buffer = kzalloc(RAW3215_BUFFER_SIZE, GFP_KERNEL | GFP_DMA); - raw->inbuf = kzalloc(RAW3215_INBUF_SIZE, GFP_KERNEL | GFP_DMA); + raw3215[0] = raw = raw3215_alloc_info(); raw->cdev = cdev; dev_set_drvdata(&cdev->dev, raw); cdev->handler = raw3215_irq; raw->flags |= RAW3215_FIXED; - init_waitqueue_head(&raw->empty_wait); + if (ccw_device_enable_console(cdev)) { + ccw_device_destroy_console(cdev); + raw3215_free_info(raw); + raw3215[0] = NULL; + return -ENODEV; + } /* Request the console irq */ if (raw3215_startup(raw) != 0) { - kfree(raw->inbuf); - kfree(raw->buffer); - kfree(raw); + raw3215_free_info(raw); raw3215[0] = NULL; return -ENODEV; } @@ -915,6 +953,19 @@ static int __init con3215_init(void) console_initcall(con3215_init); #endif +static int tty3215_install(struct tty_driver *driver, struct tty_struct *tty) +{ + struct raw3215_info *raw; + + raw = raw3215[tty->index]; + if (raw == NULL) + return -ENODEV; + + tty->driver_data = raw; + + return tty_port_install(&raw->port, driver, tty); +} + /* * tty3215_open * @@ -922,21 +973,12 @@ console_initcall(con3215_init); */ static int tty3215_open(struct tty_struct *tty, struct file * filp) { - struct raw3215_info *raw; - int retval, line; - - line = tty->index; - if ((line < 0) || (line >= NR_3215)) - return -ENODEV; + struct raw3215_info *raw = tty->driver_data; + int retval; - raw = raw3215[line]; - if (raw == NULL) - return -ENODEV; - - tty->driver_data = raw; - raw->tty = tty; + tty_port_tty_set(&raw->port, tty); - tty->low_latency = 0; /* don't use bottom half for pushing chars */ + raw->port.low_latency = 0; /* don't use bottom half for pushing chars */ /* * Start up 3215 device */ @@ -963,8 +1005,9 @@ static void tty3215_close(struct tty_struct *tty, struct file * filp) tty->closing = 1; /* Shutdown the terminal */ raw3215_shutdown(raw); + tasklet_kill(&raw->tlet); tty->closing = 0; - raw->tty = NULL; + tty_port_tty_set(&raw->port, NULL); } /* @@ -1093,6 +1136,7 @@ static void tty3215_start(struct tty_struct *tty) } static const struct tty_operations tty3215_ops = { + .install = tty3215_install, .open = tty3215_open, .close = tty3215_close, .write = tty3215_write, @@ -1134,7 +1178,6 @@ static int __init tty3215_init(void) * proc_entry, set_termios, flush_buffer, set_ldisc, write_proc */ - driver->owner = THIS_MODULE; driver->driver_name = "tty3215"; driver->name = "ttyS"; driver->major = TTY_MAJOR; diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c index 6bca81aea39..75ffe9980c3 100644 --- a/drivers/s390/char/con3270.c +++ b/drivers/s390/char/con3270.c @@ -7,11 +7,13 @@ * Copyright IBM Corp. 2003, 2009 */ +#include <linux/module.h> #include <linux/console.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/list.h> #include <linux/types.h> +#include <linux/slab.h> #include <linux/err.h> #include <linux/reboot.h> @@ -29,12 +31,14 @@ static struct raw3270_fn con3270_fn; +static bool auto_update = 1; +module_param(auto_update, bool, 0); + /* * Main 3270 console view data structure. */ struct con3270 { struct raw3270_view view; - spinlock_t lock; struct list_head freemem; /* list of free memory for strings. 
*/ /* Output stuff. */ @@ -204,6 +208,8 @@ con3270_update(struct con3270 *cp) struct string *s, *n; int rc; + if (!auto_update && !raw3270_view_active(&cp->view)) + return; if (cp->view.dev) raw3270_activate_view(&cp->view); @@ -529,6 +535,7 @@ con3270_flush(void) if (!cp->view.dev) return; raw3270_pm_unfreeze(&cp->view); + raw3270_activate_view(&cp->view); spin_lock_irqsave(&cp->view.lock, flags); con3270_wait_write(cp); cp->nr_up = 0; @@ -576,7 +583,6 @@ static struct console con3270 = { static int __init con3270_init(void) { - struct ccw_device *cdev; struct raw3270 *rp; void *cbuf; int i; @@ -591,10 +597,7 @@ con3270_init(void) cpcmd("TERM AUTOCR OFF", NULL, 0, NULL); } - cdev = ccw_device_probe_console(); - if (IS_ERR(cdev)) - return -ENODEV; - rp = raw3270_setup_console(cdev); + rp = raw3270_setup_console(); if (IS_ERR(rp)) return PTR_ERR(rp); diff --git a/drivers/s390/char/ctrlchar.c b/drivers/s390/char/ctrlchar.c index c6cbcb3f925..8de2deb176d 100644 --- a/drivers/s390/char/ctrlchar.c +++ b/drivers/s390/char/ctrlchar.c @@ -1,8 +1,7 @@ /* - * drivers/s390/char/ctrlchar.c * Unified handling of special chars. * - * Copyright (C) 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Copyright IBM Corp. 2001 * Author(s): Fritz Elfert <felfert@millenux.com> <elfert@de.ibm.com> * */ @@ -16,12 +15,11 @@ #ifdef CONFIG_MAGIC_SYSRQ static int ctrlchar_sysrq_key; -static struct tty_struct *sysrq_tty; static void ctrlchar_handle_sysrq(struct work_struct *work) { - handle_sysrq(ctrlchar_sysrq_key, sysrq_tty); + handle_sysrq(ctrlchar_sysrq_key); } static DECLARE_WORK(ctrlchar_work, ctrlchar_handle_sysrq); @@ -54,7 +52,6 @@ ctrlchar_handle(const unsigned char *buf, int len, struct tty_struct *tty) /* racy */ if (len == 3 && buf[1] == '-') { ctrlchar_sysrq_key = buf[2]; - sysrq_tty = tty; schedule_work(&ctrlchar_work); return CTRLCHAR_SYSRQ; } diff --git a/drivers/s390/char/ctrlchar.h b/drivers/s390/char/ctrlchar.h index 935ffa0ea7c..1a53552f498 100644 --- a/drivers/s390/char/ctrlchar.h +++ b/drivers/s390/char/ctrlchar.h @@ -1,8 +1,7 @@ /* - * drivers/s390/char/ctrlchar.c * Unified handling of special chars. * - * Copyright (C) 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Copyright IBM Corp. 2001 * Author(s): Fritz Elfert <felfert@millenux.com> <elfert@de.ibm.com> * */ diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c index 31c59b0d6df..71e97473801 100644 --- a/drivers/s390/char/fs3270.c +++ b/drivers/s390/char/fs3270.c @@ -11,9 +11,11 @@ #include <linux/console.h> #include <linux/init.h> #include <linux/interrupt.h> +#include <linux/compat.h> +#include <linux/module.h> #include <linux/list.h> +#include <linux/slab.h> #include <linux/types.h> -#include <linux/smp_lock.h> #include <asm/compat.h> #include <asm/ccwdev.h> @@ -431,9 +433,9 @@ fs3270_open(struct inode *inode, struct file *filp) struct idal_buffer *ib; int minor, rc = 0; - if (imajor(filp->f_path.dentry->d_inode) != IBM_FS3270_MAJOR) + if (imajor(file_inode(filp)) != IBM_FS3270_MAJOR) return -ENODEV; - minor = iminor(filp->f_path.dentry->d_inode); + minor = iminor(file_inode(filp)); /* Check for minor 0 multiplexer. 
*/ if (minor == 0) { struct tty_struct *tty = get_current_tty(); @@ -441,7 +443,7 @@ fs3270_open(struct inode *inode, struct file *filp) tty_kref_put(tty); return -ENODEV; } - minor = tty->index + RAW3270_FIRSTMINOR; + minor = tty->index; tty_kref_put(tty); } mutex_lock(&fs3270_mutex); @@ -483,6 +485,7 @@ fs3270_open(struct inode *inode, struct file *filp) raw3270_del_view(&fp->view); goto out; } + nonseekable_open(inode, filp); filp->private_data = fp; out: mutex_unlock(&fs3270_mutex); @@ -518,6 +521,26 @@ static const struct file_operations fs3270_fops = { .compat_ioctl = fs3270_ioctl, /* ioctl */ .open = fs3270_open, /* open */ .release = fs3270_close, /* release */ + .llseek = no_llseek, +}; + +static void fs3270_create_cb(int minor) +{ + __register_chrdev(IBM_FS3270_MAJOR, minor, 1, "tub", &fs3270_fops); + device_create(class3270, NULL, MKDEV(IBM_FS3270_MAJOR, minor), + NULL, "3270/tub%d", minor); +} + +static void fs3270_destroy_cb(int minor) +{ + device_destroy(class3270, MKDEV(IBM_FS3270_MAJOR, minor)); + __unregister_chrdev(IBM_FS3270_MAJOR, minor, 1, "tub"); +} + +static struct raw3270_notifier fs3270_notifier = +{ + .create = fs3270_create_cb, + .destroy = fs3270_destroy_cb, }; /* @@ -528,16 +551,21 @@ fs3270_init(void) { int rc; - rc = register_chrdev(IBM_FS3270_MAJOR, "fs3270", &fs3270_fops); + rc = __register_chrdev(IBM_FS3270_MAJOR, 0, 1, "fs3270", &fs3270_fops); if (rc) return rc; + device_create(class3270, NULL, MKDEV(IBM_FS3270_MAJOR, 0), + NULL, "3270/tub"); + raw3270_register_notifier(&fs3270_notifier); return 0; } static void __exit fs3270_exit(void) { - unregister_chrdev(IBM_FS3270_MAJOR, "fs3270"); + raw3270_unregister_notifier(&fs3270_notifier); + device_destroy(class3270, MKDEV(IBM_FS3270_MAJOR, 0)); + __unregister_chrdev(IBM_FS3270_MAJOR, 0, 1, "fs3270"); } MODULE_LICENSE("GPL"); diff --git a/drivers/s390/char/keyboard.c b/drivers/s390/char/keyboard.c index cee4d4e4242..01463b052ae 100644 --- a/drivers/s390/char/keyboard.c +++ b/drivers/s390/char/keyboard.c @@ -1,14 +1,14 @@ /* - * drivers/s390/char/keyboard.c * ebcdic keycode functions for s390 console drivers * * S390 version - * Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Copyright IBM Corp. 
2003 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), */ #include <linux/module.h> #include <linux/sched.h> +#include <linux/slab.h> #include <linux/sysrq.h> #include <linux/consolemap.h> @@ -48,7 +48,7 @@ static unsigned char ret_diacr[NR_DEAD] = { struct kbd_data * kbd_alloc(void) { struct kbd_data *kbd; - int i, len; + int i; kbd = kzalloc(sizeof(struct kbd_data), GFP_KERNEL); if (!kbd) @@ -58,12 +58,11 @@ kbd_alloc(void) { goto out_kbd; for (i = 0; i < ARRAY_SIZE(key_maps); i++) { if (key_maps[i]) { - kbd->key_maps[i] = - kmalloc(sizeof(u_short)*NR_KEYS, GFP_KERNEL); + kbd->key_maps[i] = kmemdup(key_maps[i], + sizeof(u_short) * NR_KEYS, + GFP_KERNEL); if (!kbd->key_maps[i]) goto out_maps; - memcpy(kbd->key_maps[i], key_maps[i], - sizeof(u_short)*NR_KEYS); } } kbd->func_table = kzalloc(sizeof(func_table), GFP_KERNEL); @@ -71,23 +70,21 @@ kbd_alloc(void) { goto out_maps; for (i = 0; i < ARRAY_SIZE(func_table); i++) { if (func_table[i]) { - len = strlen(func_table[i]) + 1; - kbd->func_table[i] = kmalloc(len, GFP_KERNEL); + kbd->func_table[i] = kstrdup(func_table[i], + GFP_KERNEL); if (!kbd->func_table[i]) goto out_func; - memcpy(kbd->func_table[i], func_table[i], len); } } kbd->fn_handler = kzalloc(sizeof(fn_handler_fn *) * NR_FN_HANDLER, GFP_KERNEL); if (!kbd->fn_handler) goto out_func; - kbd->accent_table = - kmalloc(sizeof(struct kbdiacruc)*MAX_DIACR, GFP_KERNEL); + kbd->accent_table = kmemdup(accent_table, + sizeof(struct kbdiacruc) * MAX_DIACR, + GFP_KERNEL); if (!kbd->accent_table) goto out_fn_handler; - memcpy(kbd->accent_table, accent_table, - sizeof(struct kbdiacruc)*MAX_DIACR); kbd->accent_table_size = accent_table_size; return kbd; @@ -201,7 +198,7 @@ handle_diacr(struct kbd_data *kbd, unsigned int ch) if (ch == ' ' || ch == d) return d; - kbd_put_queue(kbd->tty, d); + kbd_put_queue(kbd->port, d); return ch; } @@ -223,7 +220,7 @@ k_self(struct kbd_data *kbd, unsigned char value) { if (kbd->diacr) value = handle_diacr(kbd, value); - kbd_put_queue(kbd->tty, value); + kbd_put_queue(kbd->port, value); } /* @@ -241,7 +238,7 @@ static void k_fn(struct kbd_data *kbd, unsigned char value) { if (kbd->func_table[value]) - kbd_puts_queue(kbd->tty, kbd->func_table[value]); + kbd_puts_queue(kbd->port, kbd->func_table[value]); } static void @@ -259,20 +256,20 @@ k_spec(struct kbd_data *kbd, unsigned char value) * but we need only 16 bits here */ static void -to_utf8(struct tty_struct *tty, ushort c) +to_utf8(struct tty_port *port, ushort c) { if (c < 0x80) /* 0******* */ - kbd_put_queue(tty, c); + kbd_put_queue(port, c); else if (c < 0x800) { /* 110***** 10****** */ - kbd_put_queue(tty, 0xc0 | (c >> 6)); - kbd_put_queue(tty, 0x80 | (c & 0x3f)); + kbd_put_queue(port, 0xc0 | (c >> 6)); + kbd_put_queue(port, 0x80 | (c & 0x3f)); } else { /* 1110**** 10****** 10****** */ - kbd_put_queue(tty, 0xe0 | (c >> 12)); - kbd_put_queue(tty, 0x80 | ((c >> 6) & 0x3f)); - kbd_put_queue(tty, 0x80 | (c & 0x3f)); + kbd_put_queue(port, 0xe0 | (c >> 12)); + kbd_put_queue(port, 0x80 | ((c >> 6) & 0x3f)); + kbd_put_queue(port, 0x80 | (c & 0x3f)); } } @@ -285,7 +282,7 @@ kbd_keycode(struct kbd_data *kbd, unsigned int keycode) unsigned short keysym; unsigned char type, value; - if (!kbd || !kbd->tty) + if (!kbd) return; if (keycode >= 384) @@ -307,7 +304,7 @@ kbd_keycode(struct kbd_data *kbd, unsigned int keycode) if (kbd->sysrq) { if (kbd->sysrq == K(KT_LATIN, '-')) { kbd->sysrq = 0; - handle_sysrq(value, kbd->tty); + handle_sysrq(value); return; } if (value == '-') { @@ -325,7 +322,7 @@ kbd_keycode(struct 
kbd_data *kbd, unsigned int keycode) #endif (*k_handler[type])(kbd, value); } else - to_utf8(kbd->tty, keysym); + to_utf8(kbd->port, keysym); } /* @@ -457,12 +454,12 @@ do_kdgkb_ioctl(struct kbd_data *kbd, struct kbsentry __user *u_kbs, return 0; } -int -kbd_ioctl(struct kbd_data *kbd, struct file *file, - unsigned int cmd, unsigned long arg) +int kbd_ioctl(struct kbd_data *kbd, unsigned int cmd, unsigned long arg) { + struct tty_struct *tty; void __user *argp; - int ct, perm; + unsigned int ct; + int perm; argp = (void __user *)arg; @@ -470,7 +467,10 @@ kbd_ioctl(struct kbd_data *kbd, struct file *file, * To have permissions to do most of the vt ioctls, we either have * to be the owner of the tty, or have CAP_SYS_TTY_CONFIG. */ - perm = current->signal->tty == kbd->tty || capable(CAP_SYS_TTY_CONFIG); + tty = tty_port_tty_get(kbd->port); + /* FIXME this test is pretty racy */ + perm = current->signal->tty == tty || capable(CAP_SYS_TTY_CONFIG); + tty_kref_put(tty); switch (cmd) { case KDGKBTYPE: return put_user(KB_101, (char __user *)argp); diff --git a/drivers/s390/char/keyboard.h b/drivers/s390/char/keyboard.h index 5ccfe9cf126..a31f339211d 100644 --- a/drivers/s390/char/keyboard.h +++ b/drivers/s390/char/keyboard.h @@ -1,8 +1,7 @@ /* - * drivers/s390/char/keyboard.h * ebcdic keycode functions for s390 console drivers * - * Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Copyright IBM Corp. 2003 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), */ @@ -21,7 +20,7 @@ typedef void (fn_handler_fn)(struct kbd_data *); */ struct kbd_data { - struct tty_struct *tty; + struct tty_port *port; unsigned short **key_maps; char **func_table; fn_handler_fn **fn_handler; @@ -36,22 +35,22 @@ void kbd_free(struct kbd_data *); void kbd_ascebc(struct kbd_data *, unsigned char *); void kbd_keycode(struct kbd_data *, unsigned int); -int kbd_ioctl(struct kbd_data *, struct file *, unsigned int, unsigned long); +int kbd_ioctl(struct kbd_data *, unsigned int, unsigned long); /* * Helper Functions. 
*/ static inline void -kbd_put_queue(struct tty_struct *tty, int ch) +kbd_put_queue(struct tty_port *port, int ch) { - tty_insert_flip_char(tty, ch, 0); - tty_schedule_flip(tty); + tty_insert_flip_char(port, ch, 0); + tty_schedule_flip(port); } static inline void -kbd_puts_queue(struct tty_struct *tty, char *cp) +kbd_puts_queue(struct tty_port *port, char *cp) { while (*cp) - tty_insert_flip_char(tty, *cp++, 0); - tty_schedule_flip(tty); + tty_insert_flip_char(port, *cp++, 0); + tty_schedule_flip(port); } diff --git a/drivers/s390/char/monreader.c b/drivers/s390/char/monreader.c index 33e96484d54..0da3ae3cd63 100644 --- a/drivers/s390/char/monreader.c +++ b/drivers/s390/char/monreader.c @@ -21,6 +21,7 @@ #include <linux/interrupt.h> #include <linux/poll.h> #include <linux/device.h> +#include <linux/slab.h> #include <net/iucv/iucv.h> #include <asm/uaccess.h> #include <asm/ebcdic.h> @@ -173,8 +174,7 @@ static void mon_free_mem(struct mon_private *monpriv) int i; for (i = 0; i < MON_MSGLIM; i++) - if (monpriv->msg_array[i]) - kfree(monpriv->msg_array[i]); + kfree(monpriv->msg_array[i]); kfree(monpriv); } @@ -446,6 +446,7 @@ static const struct file_operations mon_fops = { .release = &mon_close, .read = &mon_read, .poll = &mon_poll, + .llseek = noop_llseek, }; static struct miscdevice mon_dev = { @@ -569,8 +570,11 @@ static int __init mon_init(void) if (rc) goto out_iucv; monreader_device = kzalloc(sizeof(struct device), GFP_KERNEL); - if (!monreader_device) + if (!monreader_device) { + rc = -ENOMEM; goto out_driver; + } + dev_set_name(monreader_device, "monreader-dev"); monreader_device->bus = &iucv_bus; monreader_device->parent = iucv_root; @@ -626,7 +630,7 @@ out_iucv: static void __exit mon_exit(void) { segment_unload(mon_dcss_name); - WARN_ON(misc_deregister(&mon_dev) != 0); + misc_deregister(&mon_dev); device_unregister(monreader_device); driver_unregister(&monreader_driver); iucv_unregister(&monreader_iucv_handler, 1); diff --git a/drivers/s390/char/monwriter.c b/drivers/s390/char/monwriter.c index 668a0579b26..668b32b0dc1 100644 --- a/drivers/s390/char/monwriter.c +++ b/drivers/s390/char/monwriter.c @@ -20,6 +20,7 @@ #include <linux/poll.h> #include <linux/mutex.h> #include <linux/platform_device.h> +#include <linux/slab.h> #include <asm/uaccess.h> #include <asm/ebcdic.h> #include <asm/io.h> @@ -59,7 +60,7 @@ static int monwrite_diag(struct monwrite_hdr *myhdr, char *buffer, int fcn) struct appldata_product_id id; int rc; - strcpy(id.prod_nr, "LNXAPPL"); + strncpy(id.prod_nr, "LNXAPPL", 7); id.prod_fn = myhdr->applid; id.record_nr = myhdr->record_num; id.version_nr = myhdr->version; @@ -96,7 +97,7 @@ static int monwrite_new_hdr(struct mon_private *monpriv) { struct monwrite_hdr *monhdr = &monpriv->hdr; struct mon_buf *monbuf; - int rc; + int rc = 0; if (monhdr->datalen > MONWRITE_MAX_DATALEN || monhdr->mon_function > MONWRITE_START_CONFIG || @@ -134,7 +135,7 @@ static int monwrite_new_hdr(struct mon_private *monpriv) mon_buf_count++; } monpriv->current_buf = monbuf; - return 0; + return rc; } static int monwrite_new_data(struct mon_private *monpriv) @@ -273,6 +274,7 @@ static const struct file_operations monwrite_fops = { .open = &monwrite_open, .release = &monwrite_close, .write = &monwrite_write, + .llseek = noop_llseek, }; static struct miscdevice mon_dev = { @@ -379,7 +381,7 @@ out_driver: static void __exit mon_exit(void) { - WARN_ON(misc_deregister(&mon_dev) != 0); + misc_deregister(&mon_dev); platform_device_unregister(monwriter_pdev); 
platform_driver_unregister(&monwriter_pdrv); } diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c index 2a4c566456e..220acb4cbee 100644 --- a/drivers/s390/char/raw3270.c +++ b/drivers/s390/char/raw3270.c @@ -28,7 +28,7 @@ #include <linux/device.h> #include <linux/mutex.h> -static struct class *class3270; +struct class *class3270; /* The main 3270 data structure. */ struct raw3270 { @@ -37,6 +37,7 @@ struct raw3270 { int minor; short model, rows, cols; + unsigned int state; unsigned long flags; struct list_head req_queue; /* Request queue. */ @@ -46,20 +47,26 @@ struct raw3270 { struct timer_list timer; /* Device timer. */ unsigned char *ascebc; /* ascii -> ebcdic table */ - struct device *clttydev; /* 3270-class tty device ptr */ - struct device *cltubdev; /* 3270-class tub device ptr */ - struct raw3270_request init_request; + struct raw3270_view init_view; + struct raw3270_request init_reset; + struct raw3270_request init_readpart; + struct raw3270_request init_readmod; unsigned char init_data[256]; }; +/* raw3270->state */ +#define RAW3270_STATE_INIT 0 /* Initial state */ +#define RAW3270_STATE_RESET 1 /* Reset command is pending */ +#define RAW3270_STATE_W4ATTN 2 /* Wait for attention interrupt */ +#define RAW3270_STATE_READMOD 3 /* Read partition is pending */ +#define RAW3270_STATE_READY 4 /* Device is usable by views */ + /* raw3270->flags */ #define RAW3270_FLAGS_14BITADDR 0 /* 14-bit buffer addresses */ #define RAW3270_FLAGS_BUSY 1 /* Device busy, leave it alone */ -#define RAW3270_FLAGS_ATTN 2 /* Device sent an ATTN interrupt */ -#define RAW3270_FLAGS_READY 4 /* Device is useable by views */ -#define RAW3270_FLAGS_CONSOLE 8 /* Device is the console. */ -#define RAW3270_FLAGS_FROZEN 16 /* set if 3270 is frozen for suspend */ +#define RAW3270_FLAGS_CONSOLE 2 /* Device is the console. */ +#define RAW3270_FLAGS_FROZEN 3 /* set if 3270 is frozen for suspend */ /* Semaphore to protect global data of raw3270 (devices, views, etc). */ static DEFINE_MUTEX(raw3270_mutex); @@ -75,7 +82,7 @@ static LIST_HEAD(raw3270_devices); static int raw3270_registered; /* Module parameters */ -static int tubxcorrect = 0; +static bool tubxcorrect = 0; module_param(tubxcorrect, bool, 0); /* @@ -97,6 +104,17 @@ static unsigned char raw3270_ebcgraf[64] = { 0xf8, 0xf9, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f }; +static inline int raw3270_state_ready(struct raw3270 *rp) +{ + return rp->state == RAW3270_STATE_READY; +} + +static inline int raw3270_state_final(struct raw3270 *rp) +{ + return rp->state == RAW3270_STATE_INIT || + rp->state == RAW3270_STATE_READY; +} + void raw3270_buffer_address(struct raw3270 *rp, char *cp, unsigned short addr) { @@ -214,7 +232,7 @@ raw3270_request_set_idal(struct raw3270_request *rq, struct idal_buffer *ib) * Stop running ccw. */ static int -raw3270_halt_io_nolock(struct raw3270 *rp, struct raw3270_request *rq) +__raw3270_halt_io(struct raw3270 *rp, struct raw3270_request *rq) { int retries; int rc; @@ -233,18 +251,6 @@ raw3270_halt_io_nolock(struct raw3270 *rp, struct raw3270_request *rq) return rc; } -static int -raw3270_halt_io(struct raw3270 *rp, struct raw3270_request *rq) -{ - unsigned long flags; - int rc; - - spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags); - rc = raw3270_halt_io_nolock(rp, rq); - spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags); - return rc; -} - /* * Add the request to the request queue, try to start it if the * 3270 device is idle. Return without waiting for end of i/o. 
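With the explicit raw3270 state machine above replacing the old READY flag bit, request submission now distinguishes two failure modes: -EACCES when the view is not the active one or the device is frozen for suspend, and -EBUSY while the reset/size-detection sequence is still in flight. Condensed from the raw3270_start change in the following hunk; a sketch only, omitting the ccw device lock the real function takes around these checks:

    static int start_request_sketch(struct raw3270 *rp,
                                    struct raw3270_view *view,
                                    struct raw3270_request *rq)
    {
            if (!rp || rp->view != view ||
                test_bit(RAW3270_FLAGS_FROZEN, &rp->flags))
                    return -EACCES; /* view inactive or device suspended */
            if (!raw3270_state_ready(rp))
                    return -EBUSY;  /* init/reset state machine still running */
            return __raw3270_start(rp, view, rq); /* queue, start if idle */
    }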
@@ -270,6 +276,15 @@ __raw3270_start(struct raw3270 *rp, struct raw3270_view *view, } int +raw3270_view_active(struct raw3270_view *view) +{ + struct raw3270 *rp = view->dev; + + return rp && rp->view == view && + !test_bit(RAW3270_FLAGS_FROZEN, &rp->flags); +} + +int raw3270_start(struct raw3270_view *view, struct raw3270_request *rq) { unsigned long flags; @@ -281,8 +296,8 @@ raw3270_start(struct raw3270_view *view, struct raw3270_request *rq) if (!rp || rp->view != view || test_bit(RAW3270_FLAGS_FROZEN, &rp->flags)) rc = -EACCES; - else if (!test_bit(RAW3270_FLAGS_READY, &rp->flags)) - rc = -ENODEV; + else if (!raw3270_state_ready(rp)) + rc = -EBUSY; else rc = __raw3270_start(rp, view, rq); spin_unlock_irqrestore(get_ccwdev_lock(view->dev->cdev), flags); @@ -299,8 +314,8 @@ raw3270_start_locked(struct raw3270_view *view, struct raw3270_request *rq) if (!rp || rp->view != view || test_bit(RAW3270_FLAGS_FROZEN, &rp->flags)) rc = -EACCES; - else if (!test_bit(RAW3270_FLAGS_READY, &rp->flags)) - rc = -ENODEV; + else if (!raw3270_state_ready(rp)) + rc = -EBUSY; else rc = __raw3270_start(rp, view, rq); return rc; @@ -378,7 +393,7 @@ raw3270_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb) case RAW3270_IO_STOP: if (!rq) break; - raw3270_halt_io_nolock(rp, rq); + __raw3270_halt_io(rp, rq); rq->rc = -EIO; break; default: @@ -413,9 +428,14 @@ raw3270_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb) } /* - * Size sensing. + * To determine the size of the 3270 device we need to do: + * 1) send a 'read partition' data stream to the device + * 2) wait for the attn interrupt that precedes the query reply + * 3) do a read modified to get the query reply + * To make things worse we have to cope with intervention + * required (3270 device switched to 'stand-by') and command + * rejects (old devices that can't do 'read partition'). */ - struct raw3270_ua { /* Query Reply structure for Usable Area */ struct { /* Usable Area Query Reply Base */ short l; /* Length of this structured field */ @@ -451,117 +471,21 @@ struct raw3270_ua { /* Query Reply structure for Usable Area */ } __attribute__ ((packed)) aua; } __attribute__ ((packed)); -static struct diag210 raw3270_init_diag210; -static DEFINE_MUTEX(raw3270_init_mutex); - -static int -raw3270_init_irq(struct raw3270_view *view, struct raw3270_request *rq, - struct irb *irb) -{ - /* - * Unit-Check Processing: - * Expect Command Reject or Intervention Required. - */ - if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) { - /* Request finished abnormally. */ - if (irb->ecw[0] & SNS0_INTERVENTION_REQ) { - set_bit(RAW3270_FLAGS_BUSY, &view->dev->flags); - return RAW3270_IO_BUSY; - } - } - if (rq) { - if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) { - if (irb->ecw[0] & SNS0_CMD_REJECT) - rq->rc = -EOPNOTSUPP; - else - rq->rc = -EIO; - } else - /* Request finished normally. Copy residual count. */ - rq->rescnt = irb->scsw.cmd.count; - } - if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) { - set_bit(RAW3270_FLAGS_ATTN, &view->dev->flags); - wake_up(&raw3270_wait_queue); - } - return RAW3270_IO_DONE; -} - -static struct raw3270_fn raw3270_init_fn = { - .intv = raw3270_init_irq -}; - -static struct raw3270_view raw3270_init_view = { - .fn = &raw3270_init_fn -}; - -/* - * raw3270_wait/raw3270_wait_interruptible/__raw3270_wakeup - * Wait for end of request. The request must have been started - * with raw3270_start, rc = 0. The device lock may NOT have been - * released between calling raw3270_start and raw3270_wait. 
- */ static void -raw3270_wake_init(struct raw3270_request *rq, void *data) -{ - wake_up((wait_queue_head_t *) data); -} - -/* - * Special wait function that can cope with console initialization. - */ -static int -raw3270_start_init(struct raw3270 *rp, struct raw3270_view *view, - struct raw3270_request *rq) -{ - unsigned long flags; - int rc; - -#ifdef CONFIG_TN3270_CONSOLE - if (raw3270_registered == 0) { - spin_lock_irqsave(get_ccwdev_lock(view->dev->cdev), flags); - rq->callback = NULL; - rc = __raw3270_start(rp, view, rq); - if (rc == 0) - while (!raw3270_request_final(rq)) { - wait_cons_dev(); - barrier(); - } - spin_unlock_irqrestore(get_ccwdev_lock(view->dev->cdev), flags); - return rq->rc; - } -#endif - rq->callback = raw3270_wake_init; - rq->callback_data = &raw3270_wait_queue; - spin_lock_irqsave(get_ccwdev_lock(view->dev->cdev), flags); - rc = __raw3270_start(rp, view, rq); - spin_unlock_irqrestore(get_ccwdev_lock(view->dev->cdev), flags); - if (rc) - return rc; - /* Now wait for the completion. */ - rc = wait_event_interruptible(raw3270_wait_queue, - raw3270_request_final(rq)); - if (rc == -ERESTARTSYS) { /* Interrupted by a signal. */ - raw3270_halt_io(view->dev, rq); - /* No wait for the halt to complete. */ - wait_event(raw3270_wait_queue, raw3270_request_final(rq)); - return -ERESTARTSYS; - } - return rq->rc; -} - -static int -__raw3270_size_device_vm(struct raw3270 *rp) +raw3270_size_device_vm(struct raw3270 *rp) { int rc, model; struct ccw_dev_id dev_id; + struct diag210 diag_data; ccw_device_get_id(rp->cdev, &dev_id); - raw3270_init_diag210.vrdcdvno = dev_id.devno; - raw3270_init_diag210.vrdclen = sizeof(struct diag210); - rc = diag210(&raw3270_init_diag210); - if (rc) - return rc; - model = raw3270_init_diag210.vrdccrmd; + diag_data.vrdcdvno = dev_id.devno; + diag_data.vrdclen = sizeof(struct diag210); + rc = diag210(&diag_data); + model = diag_data.vrdccrmd; + /* Use default model 2 if the size could not be detected */ + if (rc || model < 2 || model > 5) + model = 2; switch (model) { case 2: rp->model = model; @@ -583,79 +507,25 @@ __raw3270_size_device_vm(struct raw3270 *rp) rp->rows = 27; rp->cols = 132; break; - default: - rc = -EOPNOTSUPP; - break; } - return rc; } -static int -__raw3270_size_device(struct raw3270 *rp) +static void +raw3270_size_device(struct raw3270 *rp) { - static const unsigned char wbuf[] = - { 0x00, 0x07, 0x01, 0xff, 0x03, 0x00, 0x81 }; struct raw3270_ua *uap; - unsigned short count; - int rc; - - /* - * To determine the size of the 3270 device we need to do: - * 1) send a 'read partition' data stream to the device - * 2) wait for the attn interrupt that preceeds the query reply - * 3) do a read modified to get the query reply - * To make things worse we have to cope with intervention - * required (3270 device switched to 'stand-by') and command - * rejects (old devices that can't do 'read partition'). - */ - memset(&rp->init_request, 0, sizeof(rp->init_request)); - memset(&rp->init_data, 0, 256); - /* Store 'read partition' data stream to init_data */ - memcpy(&rp->init_data, wbuf, sizeof(wbuf)); - INIT_LIST_HEAD(&rp->init_request.list); - rp->init_request.ccw.cmd_code = TC_WRITESF; - rp->init_request.ccw.flags = CCW_FLAG_SLI; - rp->init_request.ccw.count = sizeof(wbuf); - rp->init_request.ccw.cda = (__u32) __pa(&rp->init_data); - - rc = raw3270_start_init(rp, &raw3270_init_view, &rp->init_request); - if (rc) - /* Check error cases: -ERESTARTSYS, -EIO and -EOPNOTSUPP */ - return rc; - /* Wait for attention interrupt. 
*/ -#ifdef CONFIG_TN3270_CONSOLE - if (raw3270_registered == 0) { - unsigned long flags; - - spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags); - while (!test_and_clear_bit(RAW3270_FLAGS_ATTN, &rp->flags)) - wait_cons_dev(); - spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags); - } else -#endif - rc = wait_event_interruptible(raw3270_wait_queue, - test_and_clear_bit(RAW3270_FLAGS_ATTN, &rp->flags)); - if (rc) - return rc; - - /* - * The device accepted the 'read partition' command. Now - * set up a read ccw and issue it. - */ - rp->init_request.ccw.cmd_code = TC_READMOD; - rp->init_request.ccw.flags = CCW_FLAG_SLI; - rp->init_request.ccw.count = sizeof(rp->init_data); - rp->init_request.ccw.cda = (__u32) __pa(rp->init_data); - rc = raw3270_start_init(rp, &raw3270_init_view, &rp->init_request); - if (rc) - return rc; /* Got a Query Reply */ - count = sizeof(rp->init_data) - rp->init_request.rescnt; uap = (struct raw3270_ua *) (rp->init_data + 1); /* Paranoia check. */ - if (rp->init_data[0] != 0x88 || uap->uab.qcode != 0x81) - return -EOPNOTSUPP; + if (rp->init_readmod.rc || rp->init_data[0] != 0x88 || + uap->uab.qcode != 0x81) { + /* Couldn't detect size. Use default model 2. */ + rp->model = 2; + rp->rows = 24; + rp->cols = 80; + return; + } /* Copy rows/columns of default Usable Area */ rp->rows = uap->uab.h; rp->cols = uap->uab.w; @@ -668,66 +538,133 @@ __raw3270_size_device(struct raw3270 *rp) rp->rows = uap->aua.hauai; rp->cols = uap->aua.wauai; } - return 0; + /* Try to find a model. */ + rp->model = 0; + if (rp->rows == 24 && rp->cols == 80) + rp->model = 2; + if (rp->rows == 32 && rp->cols == 80) + rp->model = 3; + if (rp->rows == 43 && rp->cols == 80) + rp->model = 4; + if (rp->rows == 27 && rp->cols == 132) + rp->model = 5; } -static int -raw3270_size_device(struct raw3270 *rp) +static void +raw3270_size_device_done(struct raw3270 *rp) { - int rc; + struct raw3270_view *view; - mutex_lock(&raw3270_init_mutex); - rp->view = &raw3270_init_view; - raw3270_init_view.dev = rp; - if (MACHINE_IS_VM) - rc = __raw3270_size_device_vm(rp); - else - rc = __raw3270_size_device(rp); - raw3270_init_view.dev = NULL; rp->view = NULL; - mutex_unlock(&raw3270_init_mutex); - if (rc == 0) { /* Found something. */ - /* Try to find a model. */ - rp->model = 0; - if (rp->rows == 24 && rp->cols == 80) - rp->model = 2; - if (rp->rows == 32 && rp->cols == 80) - rp->model = 3; - if (rp->rows == 43 && rp->cols == 80) - rp->model = 4; - if (rp->rows == 27 && rp->cols == 132) - rp->model = 5; - } else { - /* Couldn't detect size. Use default model 2. */ - rp->model = 2; - rp->rows = 24; - rp->cols = 80; - return 0; + rp->state = RAW3270_STATE_READY; + /* Notify views about new size */ + list_for_each_entry(view, &rp->view_list, list) + if (view->fn->resize) + view->fn->resize(view, rp->model, rp->rows, rp->cols); + /* Setup processing done, now activate a view */ + list_for_each_entry(view, &rp->view_list, list) { + rp->view = view; + if (view->fn->activate(view) == 0) + break; + rp->view = NULL; } +} + +static void +raw3270_read_modified_cb(struct raw3270_request *rq, void *data) +{ + struct raw3270 *rp = rq->view->dev; + + raw3270_size_device(rp); + raw3270_size_device_done(rp); +} + +static void +raw3270_read_modified(struct raw3270 *rp) +{ + if (rp->state != RAW3270_STATE_W4ATTN) + return; + /* Use 'read modified' to get the result of a read partition. 
*/ + memset(&rp->init_readmod, 0, sizeof(rp->init_readmod)); + memset(&rp->init_data, 0, sizeof(rp->init_data)); + rp->init_readmod.ccw.cmd_code = TC_READMOD; + rp->init_readmod.ccw.flags = CCW_FLAG_SLI; + rp->init_readmod.ccw.count = sizeof(rp->init_data); + rp->init_readmod.ccw.cda = (__u32) __pa(rp->init_data); + rp->init_readmod.callback = raw3270_read_modified_cb; + rp->state = RAW3270_STATE_READMOD; + raw3270_start_irq(&rp->init_view, &rp->init_readmod); +} + +static void +raw3270_writesf_readpart(struct raw3270 *rp) +{ + static const unsigned char wbuf[] = + { 0x00, 0x07, 0x01, 0xff, 0x03, 0x00, 0x81 }; + + /* Store 'read partition' data stream to init_data */ + memset(&rp->init_readpart, 0, sizeof(rp->init_readpart)); + memset(&rp->init_data, 0, sizeof(rp->init_data)); + memcpy(&rp->init_data, wbuf, sizeof(wbuf)); + rp->init_readpart.ccw.cmd_code = TC_WRITESF; + rp->init_readpart.ccw.flags = CCW_FLAG_SLI; + rp->init_readpart.ccw.count = sizeof(wbuf); + rp->init_readpart.ccw.cda = (__u32) __pa(&rp->init_data); + rp->state = RAW3270_STATE_W4ATTN; + raw3270_start_irq(&rp->init_view, &rp->init_readpart); +} + +/* + * Device reset + */ +static void +raw3270_reset_device_cb(struct raw3270_request *rq, void *data) +{ + struct raw3270 *rp = rq->view->dev; + + if (rp->state != RAW3270_STATE_RESET) + return; + if (rq->rc) { + /* Reset command failed. */ + rp->state = RAW3270_STATE_INIT; + } else if (MACHINE_IS_VM) { + raw3270_size_device_vm(rp); + raw3270_size_device_done(rp); + } else + raw3270_writesf_readpart(rp); + memset(&rp->init_reset, 0, sizeof(rp->init_reset)); +} + +static int +__raw3270_reset_device(struct raw3270 *rp) +{ + int rc; + + /* Check if reset is already pending */ + if (rp->init_reset.view) + return -EBUSY; + /* Store reset data stream to init_data/init_reset */ + rp->init_data[0] = TW_KR; + rp->init_reset.ccw.cmd_code = TC_EWRITEA; + rp->init_reset.ccw.flags = CCW_FLAG_SLI; + rp->init_reset.ccw.count = 1; + rp->init_reset.ccw.cda = (__u32) __pa(rp->init_data); + rp->init_reset.callback = raw3270_reset_device_cb; + rc = __raw3270_start(rp, &rp->init_view, &rp->init_reset); + if (rc == 0 && rp->state == RAW3270_STATE_INIT) + rp->state = RAW3270_STATE_RESET; return rc; } static int raw3270_reset_device(struct raw3270 *rp) { + unsigned long flags; int rc; - mutex_lock(&raw3270_init_mutex); - memset(&rp->init_request, 0, sizeof(rp->init_request)); - memset(&rp->init_data, 0, sizeof(rp->init_data)); - /* Store reset data stream to init_data/init_request */ - rp->init_data[0] = TW_KR; - INIT_LIST_HEAD(&rp->init_request.list); - rp->init_request.ccw.cmd_code = TC_EWRITEA; - rp->init_request.ccw.flags = CCW_FLAG_SLI; - rp->init_request.ccw.count = 1; - rp->init_request.ccw.cda = (__u32) __pa(rp->init_data); - rp->view = &raw3270_init_view; - raw3270_init_view.dev = rp; - rc = raw3270_start_init(rp, &raw3270_init_view, &rp->init_request); - raw3270_init_view.dev = NULL; - rp->view = NULL; - mutex_unlock(&raw3270_init_mutex); + spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags); + rc = __raw3270_reset_device(rp); + spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags); return rc; } @@ -741,13 +678,50 @@ raw3270_reset(struct raw3270_view *view) if (!rp || rp->view != view || test_bit(RAW3270_FLAGS_FROZEN, &rp->flags)) rc = -EACCES; - else if (!test_bit(RAW3270_FLAGS_READY, &rp->flags)) - rc = -ENODEV; + else if (!raw3270_state_ready(rp)) + rc = -EBUSY; else rc = raw3270_reset_device(view->dev); return rc; } +static int +raw3270_init_irq(struct raw3270_view *view, struct 
raw3270_request *rq, + struct irb *irb) +{ + struct raw3270 *rp; + + /* + * Unit-Check Processing: + * Expect Command Reject or Intervention Required. + */ + if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) { + /* Request finished abnormally. */ + if (irb->ecw[0] & SNS0_INTERVENTION_REQ) { + set_bit(RAW3270_FLAGS_BUSY, &view->dev->flags); + return RAW3270_IO_BUSY; + } + } + if (rq) { + if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) { + if (irb->ecw[0] & SNS0_CMD_REJECT) + rq->rc = -EOPNOTSUPP; + else + rq->rc = -EIO; + } + } + if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) { + /* Queue read modified after attention interrupt */ + rp = view->dev; + raw3270_read_modified(rp); + } + return RAW3270_IO_DONE; +} + +static struct raw3270_fn raw3270_init_fn = { + .intv = raw3270_init_irq +}; + /* * Setup new 3270 device. */ @@ -776,6 +750,10 @@ raw3270_setup_device(struct ccw_device *cdev, struct raw3270 *rp, char *ascebc) INIT_LIST_HEAD(&rp->req_queue); INIT_LIST_HEAD(&rp->view_list); + rp->init_view.dev = rp; + rp->init_view.fn = &raw3270_init_fn; + rp->view = &rp->init_view; + /* * Add device to list and find the smallest unused minor * number for it. Note: there is no device with minor 0, @@ -809,31 +787,46 @@ raw3270_setup_device(struct ccw_device *cdev, struct raw3270 *rp, char *ascebc) } #ifdef CONFIG_TN3270_CONSOLE +/* Tentative definition - see below for actual definition. */ +static struct ccw_driver raw3270_ccw_driver; + /* * Setup 3270 device configured as console. */ -struct raw3270 __init *raw3270_setup_console(struct ccw_device *cdev) +struct raw3270 __init *raw3270_setup_console(void) { + struct ccw_device *cdev; + unsigned long flags; struct raw3270 *rp; char *ascebc; int rc; + cdev = ccw_device_create_console(&raw3270_ccw_driver); + if (IS_ERR(cdev)) + return ERR_CAST(cdev); + rp = kzalloc(sizeof(struct raw3270), GFP_KERNEL | GFP_DMA); ascebc = kzalloc(256, GFP_KERNEL); rc = raw3270_setup_device(cdev, rp, ascebc); if (rc) return ERR_PTR(rc); set_bit(RAW3270_FLAGS_CONSOLE, &rp->flags); - rc = raw3270_reset_device(rp); - if (rc) - return ERR_PTR(rc); - rc = raw3270_size_device(rp); - if (rc) - return ERR_PTR(rc); - rc = raw3270_reset_device(rp); - if (rc) + + rc = ccw_device_enable_console(cdev); + if (rc) { + ccw_device_destroy_console(cdev); return ERR_PTR(rc); - set_bit(RAW3270_FLAGS_READY, &rp->flags); + } + + spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags); + do { + __raw3270_reset_device(rp); + while (!raw3270_state_final(rp)) { + ccw_device_wait_idle(rp->cdev); + barrier(); + } + } while (rp->state != RAW3270_STATE_READY); + spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags); return rp; } @@ -843,7 +836,7 @@ raw3270_wait_cons_dev(struct raw3270 *rp) unsigned long flags; spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags); - wait_cons_dev(); + ccw_device_wait_idle(rp->cdev); spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags); } @@ -859,7 +852,7 @@ raw3270_create_device(struct ccw_device *cdev) char *ascebc; int rc; - rp = kmalloc(sizeof(struct raw3270), GFP_KERNEL | GFP_DMA); + rp = kzalloc(sizeof(struct raw3270), GFP_KERNEL | GFP_DMA); if (!rp) return ERR_PTR(-ENOMEM); ascebc = kmalloc(256, GFP_KERNEL); @@ -895,13 +888,13 @@ raw3270_activate_view(struct raw3270_view *view) spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags); if (rp->view == view) rc = 0; - else if (!test_bit(RAW3270_FLAGS_READY, &rp->flags)) - rc = -ENODEV; + else if (!raw3270_state_ready(rp)) + rc = -EBUSY; else if (test_bit(RAW3270_FLAGS_FROZEN, &rp->flags)) rc = -EACCES; else { oldview = 
NULL; - if (rp->view) { + if (rp->view && rp->view->fn->deactivate) { oldview = rp->view; oldview->fn->deactivate(oldview); } @@ -946,7 +939,7 @@ raw3270_deactivate_view(struct raw3270_view *view) list_del_init(&view->list); list_add_tail(&view->list, &rp->view_list); /* Try to activate another view. */ - if (test_bit(RAW3270_FLAGS_READY, &rp->flags) && + if (raw3270_state_ready(rp) && !test_bit(RAW3270_FLAGS_FROZEN, &rp->flags)) { list_for_each_entry(view, &rp->view_list, list) { rp->view = view; @@ -977,18 +970,16 @@ raw3270_add_view(struct raw3270_view *view, struct raw3270_fn *fn, int minor) if (rp->minor != minor) continue; spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags); - if (test_bit(RAW3270_FLAGS_READY, &rp->flags)) { - atomic_set(&view->ref_count, 2); - view->dev = rp; - view->fn = fn; - view->model = rp->model; - view->rows = rp->rows; - view->cols = rp->cols; - view->ascebc = rp->ascebc; - spin_lock_init(&view->lock); - list_add(&view->list, &rp->view_list); - rc = 0; - } + atomic_set(&view->ref_count, 2); + view->dev = rp; + view->fn = fn; + view->model = rp->model; + view->rows = rp->rows; + view->cols = rp->cols; + view->ascebc = rp->ascebc; + spin_lock_init(&view->lock); + list_add(&view->list, &rp->view_list); + rc = 0; spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags); break; } @@ -1012,14 +1003,11 @@ raw3270_find_view(struct raw3270_fn *fn, int minor) if (rp->minor != minor) continue; spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags); - if (test_bit(RAW3270_FLAGS_READY, &rp->flags)) { - view = ERR_PTR(-ENOENT); - list_for_each_entry(tmp, &rp->view_list, list) { - if (tmp->fn == fn) { - raw3270_get_view(tmp); - view = tmp; - break; - } + list_for_each_entry(tmp, &rp->view_list, list) { + if (tmp->fn == fn) { + raw3270_get_view(tmp); + view = tmp; + break; } } spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags); @@ -1046,7 +1034,7 @@ raw3270_del_view(struct raw3270_view *view) rp->view = NULL; } list_del_init(&view->list); - if (!rp->view && test_bit(RAW3270_FLAGS_READY, &rp->flags) && + if (!rp->view && raw3270_state_ready(rp) && !test_bit(RAW3270_FLAGS_FROZEN, &rp->flags)) { /* Try to activate another view. */ list_for_each_entry(nv, &rp->view_list, list) { @@ -1074,10 +1062,6 @@ raw3270_delete_device(struct raw3270 *rp) /* Remove from device chain. 
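A pattern worth noting in the view-handling hunks above: with the internal init view in place, rp->view can point at a raw3270_fn that supplies only an interrupt handler, so deactivate (and the new resize) must be treated as optional callbacks. A minimal sketch of the guard, with invented wrapper names:

static void view_try_deactivate(struct raw3270_view *view)
{
    /* the init view has no deactivate; skip instead of oopsing */
    if (view->fn->deactivate)
        view->fn->deactivate(view);
}

static void view_try_resize(struct raw3270_view *view, int model,
                            int rows, int cols)
{
    /* resize is a new, optional member of struct raw3270_fn */
    if (view->fn->resize)
        view->fn->resize(view, model, rows, cols);
}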
*/ mutex_lock(&raw3270_mutex); - if (rp->clttydev && !IS_ERR(rp->clttydev)) - device_destroy(class3270, MKDEV(IBM_TTY3270_MAJOR, rp->minor)); - if (rp->cltubdev && !IS_ERR(rp->cltubdev)) - device_destroy(class3270, MKDEV(IBM_FS3270_MAJOR, rp->minor)); list_del_init(&rp->list); mutex_unlock(&raw3270_mutex); @@ -1141,75 +1125,34 @@ static struct attribute_group raw3270_attr_group = { static int raw3270_create_attributes(struct raw3270 *rp) { - int rc; - - rc = sysfs_create_group(&rp->cdev->dev.kobj, &raw3270_attr_group); - if (rc) - goto out; - - rp->clttydev = device_create(class3270, &rp->cdev->dev, - MKDEV(IBM_TTY3270_MAJOR, rp->minor), NULL, - "tty%s", dev_name(&rp->cdev->dev)); - if (IS_ERR(rp->clttydev)) { - rc = PTR_ERR(rp->clttydev); - goto out_ttydev; - } - - rp->cltubdev = device_create(class3270, &rp->cdev->dev, - MKDEV(IBM_FS3270_MAJOR, rp->minor), NULL, - "tub%s", dev_name(&rp->cdev->dev)); - if (!IS_ERR(rp->cltubdev)) - goto out; - - rc = PTR_ERR(rp->cltubdev); - device_destroy(class3270, MKDEV(IBM_TTY3270_MAJOR, rp->minor)); - -out_ttydev: - sysfs_remove_group(&rp->cdev->dev.kobj, &raw3270_attr_group); -out: - return rc; + return sysfs_create_group(&rp->cdev->dev.kobj, &raw3270_attr_group); } /* * Notifier for device addition/removal */ -struct raw3270_notifier { - struct list_head list; - void (*notifier)(int, int); -}; - static LIST_HEAD(raw3270_notifier); -int raw3270_register_notifier(void (*notifier)(int, int)) +int raw3270_register_notifier(struct raw3270_notifier *notifier) { - struct raw3270_notifier *np; struct raw3270 *rp; - np = kmalloc(sizeof(struct raw3270_notifier), GFP_KERNEL); - if (!np) - return -ENOMEM; - np->notifier = notifier; mutex_lock(&raw3270_mutex); - list_add_tail(&np->list, &raw3270_notifier); - list_for_each_entry(rp, &raw3270_devices, list) { - get_device(&rp->cdev->dev); - notifier(rp->minor, 1); - } + list_add_tail(¬ifier->list, &raw3270_notifier); + list_for_each_entry(rp, &raw3270_devices, list) + notifier->create(rp->minor); mutex_unlock(&raw3270_mutex); return 0; } -void raw3270_unregister_notifier(void (*notifier)(int, int)) +void raw3270_unregister_notifier(struct raw3270_notifier *notifier) { - struct raw3270_notifier *np; + struct raw3270 *rp; mutex_lock(&raw3270_mutex); - list_for_each_entry(np, &raw3270_notifier, list) - if (np->notifier == notifier) { - list_del(&np->list); - kfree(np); - break; - } + list_for_each_entry(rp, &raw3270_devices, list) + notifier->destroy(rp->minor); + list_del(¬ifier->list); mutex_unlock(&raw3270_mutex); } @@ -1219,29 +1162,20 @@ void raw3270_unregister_notifier(void (*notifier)(int, int)) static int raw3270_set_online (struct ccw_device *cdev) { - struct raw3270 *rp; struct raw3270_notifier *np; + struct raw3270 *rp; int rc; rp = raw3270_create_device(cdev); if (IS_ERR(rp)) return PTR_ERR(rp); - rc = raw3270_reset_device(rp); - if (rc) - goto failure; - rc = raw3270_size_device(rp); - if (rc) - goto failure; - rc = raw3270_reset_device(rp); - if (rc) - goto failure; rc = raw3270_create_attributes(rp); if (rc) goto failure; - set_bit(RAW3270_FLAGS_READY, &rp->flags); + raw3270_reset_device(rp); mutex_lock(&raw3270_mutex); list_for_each_entry(np, &raw3270_notifier, list) - np->notifier(rp->minor, 1); + np->create(rp->minor); mutex_unlock(&raw3270_mutex); return 0; @@ -1270,14 +1204,14 @@ raw3270_remove (struct ccw_device *cdev) */ if (rp == NULL) return; - clear_bit(RAW3270_FLAGS_READY, &rp->flags); sysfs_remove_group(&cdev->dev.kobj, &raw3270_attr_group); /* Deactivate current view and remove all views. 
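The notifier rework above trades the old (int, int) function pointer for a caller-owned structure with separate create/destroy hooks, which removes the kmalloc() and its failure path from registration; note how raw3270_register_notifier() replays create() for every device already on the list, so a late-loading client does not miss existing minors. A hypothetical client would look roughly like this sketch (con3270/fs3270 are the real users; names invented):

static void my_create(int minor)
{
    pr_info("3270 minor %d available\n", minor);
}

static void my_destroy(int minor)
{
    pr_info("3270 minor %d removed\n", minor);
}

static struct raw3270_notifier my_notifier = {
    .create  = my_create,
    .destroy = my_destroy,
};

static int __init my_client_init(void)
{
    /* replays ->create() for every device already registered */
    return raw3270_register_notifier(&my_notifier);
}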
*/ spin_lock_irqsave(get_ccwdev_lock(cdev), flags); if (rp->view) { - rp->view->fn->deactivate(rp->view); + if (rp->view->fn->deactivate) + rp->view->fn->deactivate(rp->view); rp->view = NULL; } while (!list_empty(&rp->view_list)) { @@ -1292,7 +1226,7 @@ raw3270_remove (struct ccw_device *cdev) mutex_lock(&raw3270_mutex); list_for_each_entry(np, &raw3270_notifier, list) - np->notifier(rp->minor, 0); + np->destroy(rp->minor); mutex_unlock(&raw3270_mutex); /* Reset 3270 device. */ @@ -1326,7 +1260,7 @@ static int raw3270_pm_stop(struct ccw_device *cdev) if (!rp) return 0; spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags); - if (rp->view) + if (rp->view && rp->view->fn->deactivate) rp->view->fn->deactivate(rp->view); if (!test_bit(RAW3270_FLAGS_CONSOLE, &rp->flags)) { /* @@ -1353,7 +1287,7 @@ static int raw3270_pm_start(struct ccw_device *cdev) return 0; spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags); clear_bit(RAW3270_FLAGS_FROZEN, &rp->flags); - if (rp->view) + if (rp->view && rp->view->fn->activate) rp->view->fn->activate(rp->view); spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags); return 0; @@ -1366,7 +1300,7 @@ void raw3270_pm_unfreeze(struct raw3270_view *view) rp = view->dev; if (rp && test_bit(RAW3270_FLAGS_FROZEN, &rp->flags)) - ccw_device_force_console(); + ccw_device_force_console(rp->cdev); #endif } @@ -1386,8 +1320,10 @@ static struct ccw_device_id raw3270_id[] = { }; static struct ccw_driver raw3270_ccw_driver = { - .name = "3270", - .owner = THIS_MODULE, + .driver = { + .name = "3270", + .owner = THIS_MODULE, + }, .ids = raw3270_id, .probe = &raw3270_probe, .remove = &raw3270_remove, @@ -1396,6 +1332,7 @@ static struct ccw_driver raw3270_ccw_driver = { .freeze = &raw3270_pm_stop, .thaw = &raw3270_pm_start, .restore = &raw3270_pm_start, + .int_class = IRQIO_C70, }; static int @@ -1433,6 +1370,7 @@ MODULE_LICENSE("GPL"); module_init(raw3270_init); module_exit(raw3270_exit); +EXPORT_SYMBOL(class3270); EXPORT_SYMBOL(raw3270_request_alloc); EXPORT_SYMBOL(raw3270_request_free); EXPORT_SYMBOL(raw3270_request_reset); diff --git a/drivers/s390/char/raw3270.h b/drivers/s390/char/raw3270.h index ed34eb2199c..e1e41c2861f 100644 --- a/drivers/s390/char/raw3270.h +++ b/drivers/s390/char/raw3270.h @@ -91,6 +91,7 @@ struct raw3270_iocb { struct raw3270; struct raw3270_view; +extern struct class *class3270; /* 3270 CCW request */ struct raw3270_request { @@ -140,6 +141,7 @@ struct raw3270_fn { struct raw3270_request *, struct irb *); void (*release)(struct raw3270_view *); void (*free)(struct raw3270_view *); + void (*resize)(struct raw3270_view *, int, int, int); }; /* @@ -171,6 +173,7 @@ int raw3270_start_locked(struct raw3270_view *, struct raw3270_request *); int raw3270_start_irq(struct raw3270_view *, struct raw3270_request *); int raw3270_reset(struct raw3270_view *); struct raw3270_view *raw3270_view(struct raw3270_view *); +int raw3270_view_active(struct raw3270_view *); /* Reference count inliner for view structures. 
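Two mechanical conversions show up in the raw3270_ccw_driver hunk above: name and owner moved into the embedded struct device_driver, and the new .int_class member routes this driver's I/O interrupts to the IRQIO_C70 bucket in /proc/interrupts. A skeleton of the updated shape, assuming <asm/ccwdev.h>, with hypothetical device IDs and stub callbacks:

static int my_probe(struct ccw_device *cdev)
{
    return 0;    /* stub */
}

static void my_remove(struct ccw_device *cdev)
{
}

static struct ccw_device_id my_ids[] = {
    { CCW_DEVICE_DEVTYPE(0x3270, 0, 0x3278, 0) },    /* hypothetical */
    { /* end of list */ },
};

static struct ccw_driver my_ccw_driver = {
    .driver = {
        .name  = "my3270",    /* was a top-level .name before */
        .owner = THIS_MODULE,
    },
    .ids       = my_ids,
    .probe     = my_probe,
    .remove    = my_remove,
    .int_class = IRQIO_C70,    /* accounted in /proc/interrupts */
};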
*/ static inline void @@ -188,12 +191,18 @@ raw3270_put_view(struct raw3270_view *view) wake_up(&raw3270_wait_queue); } -struct raw3270 *raw3270_setup_console(struct ccw_device *cdev); +struct raw3270 *raw3270_setup_console(void); void raw3270_wait_cons_dev(struct raw3270 *); /* Notifier for device addition/removal */ -int raw3270_register_notifier(void (*notifier)(int, int)); -void raw3270_unregister_notifier(void (*notifier)(int, int)); +struct raw3270_notifier { + struct list_head list; + void (*create)(int minor); + void (*destroy)(int minor); +}; + +int raw3270_register_notifier(struct raw3270_notifier *); +void raw3270_unregister_notifier(struct raw3270_notifier *); void raw3270_pm_unfreeze(struct raw3270_view *); /* diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c index f6d72e1f2a3..c316051d9bd 100644 --- a/drivers/s390/char/sclp.c +++ b/drivers/s390/char/sclp.c @@ -7,6 +7,7 @@ * Martin Schwidefsky <schwidefsky@de.ibm.com> */ +#include <linux/kernel_stat.h> #include <linux/module.h> #include <linux/err.h> #include <linux/spinlock.h> @@ -19,15 +20,12 @@ #include <linux/completion.h> #include <linux/platform_device.h> #include <asm/types.h> -#include <asm/s390_ext.h> +#include <asm/irq.h> #include "sclp.h" #define SCLP_HEADER "sclp: " -/* Structure for register_early_external_interrupt. */ -static ext_int_info_t ext_int_info_hwc; - /* Lock to protect internal data consistency. */ static DEFINE_SPINLOCK(sclp_lock); @@ -52,16 +50,50 @@ static char sclp_init_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE))); /* Suspend request */ static DECLARE_COMPLETION(sclp_request_queue_flushed); +/* Number of console pages to allocate, used by sclp_con.c and sclp_vt220.c */ +int sclp_console_pages = SCLP_CONSOLE_PAGES; +/* Flag to indicate if buffer pages are dropped on buffer full condition */ +int sclp_console_drop = 0; +/* Number of times the console dropped buffer pages */ +unsigned long sclp_console_full; + static void sclp_suspend_req_cb(struct sclp_req *req, void *data) { complete(&sclp_request_queue_flushed); } +static int __init sclp_setup_console_pages(char *str) +{ + int pages, rc; + + rc = kstrtoint(str, 0, &pages); + if (!rc && pages >= SCLP_CONSOLE_PAGES) + sclp_console_pages = pages; + return 1; +} + +__setup("sclp_con_pages=", sclp_setup_console_pages); + +static int __init sclp_setup_console_drop(char *str) +{ + int drop, rc; + + rc = kstrtoint(str, 0, &drop); + if (!rc && drop) + sclp_console_drop = 1; + return 1; +} + +__setup("sclp_con_drop=", sclp_setup_console_drop); + static struct sclp_req sclp_suspend_req; /* Timer for request retries. */ static struct timer_list sclp_request_timer; +/* Timer for queued requests. */ +static struct timer_list sclp_queue_timer; + /* Internal state: is the driver initialized? 
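Both __setup() handlers above follow the same defensive shape: parse with kstrtoint(), keep the compiled-in default on malformed or out-of-range input, and return 1 so the option is consumed either way. The same pattern for a hypothetical third parameter (option name and variable invented for illustration):

static int sclp_console_retry = 3;    /* hypothetical default */

static int __init sclp_setup_console_retry(char *str)
{
    int retry, rc;

    rc = kstrtoint(str, 0, &retry);
    if (!rc && retry >= 0)    /* silently ignore garbage input */
        sclp_console_retry = retry;
    return 1;    /* option consumed either way */
}

__setup("sclp_con_retry=", sclp_setup_console_retry);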
*/ static volatile enum sclp_init_state_t { sclp_init_state_uninitialized, @@ -119,14 +151,19 @@ static int sclp_init(void); int sclp_service_call(sclp_cmdw_t command, void *sccb) { - int cc; + int cc = 4; /* Initialize for program check handling */ asm volatile( - " .insn rre,0xb2200000,%1,%2\n" /* servc %1,%2 */ - " ipm %0\n" - " srl %0,28" - : "=&d" (cc) : "d" (command), "a" (__pa(sccb)) + "0: .insn rre,0xb2200000,%1,%2\n" /* servc %1,%2 */ + "1: ipm %0\n" + " srl %0,28\n" + "2:\n" + EX_TABLE(0b, 2b) + EX_TABLE(1b, 2b) + : "+&d" (cc) : "d" (command), "a" (__pa(sccb)) : "cc", "memory"); + if (cc == 4) + return -EINVAL; if (cc == 3) return -EIO; if (cc == 2) @@ -181,6 +218,76 @@ sclp_request_timeout(unsigned long data) sclp_process_queue(); } +/* + * Returns the expire value in jiffies of the next pending request timeout, + * if any. Needs to be called with sclp_lock. + */ +static unsigned long __sclp_req_queue_find_next_timeout(void) +{ + unsigned long expires_next = 0; + struct sclp_req *req; + + list_for_each_entry(req, &sclp_req_queue, list) { + if (!req->queue_expires) + continue; + if (!expires_next || + (time_before(req->queue_expires, expires_next))) + expires_next = req->queue_expires; + } + return expires_next; +} + +/* + * Returns expired request, if any, and removes it from the list. + */ +static struct sclp_req *__sclp_req_queue_remove_expired_req(void) +{ + unsigned long flags, now; + struct sclp_req *req; + + spin_lock_irqsave(&sclp_lock, flags); + now = jiffies; + /* Don't need list_for_each_safe because we break out after list_del */ + list_for_each_entry(req, &sclp_req_queue, list) { + if (!req->queue_expires) + continue; + if (time_before_eq(req->queue_expires, now)) { + if (req->status == SCLP_REQ_QUEUED) { + req->status = SCLP_REQ_QUEUED_TIMEOUT; + list_del(&req->list); + goto out; + } + } + } + req = NULL; +out: + spin_unlock_irqrestore(&sclp_lock, flags); + return req; +} + +/* + * Timeout handler for queued requests. Removes request from list and + * invokes callback. This timer can be set per request in situations where + * waiting too long would be harmful to the system, e.g. during SE reboot. + */ +static void sclp_req_queue_timeout(unsigned long data) +{ + unsigned long flags, expires_next; + struct sclp_req *req; + + do { + req = __sclp_req_queue_remove_expired_req(); + if (req && req->callback) + req->callback(req, req->callback_data); + } while (req); + + spin_lock_irqsave(&sclp_lock, flags); + expires_next = __sclp_req_queue_find_next_timeout(); + if (expires_next) + mod_timer(&sclp_queue_timer, expires_next); + spin_unlock_irqrestore(&sclp_lock, flags); +} + /* Try to start a request. Return zero if the request was successfully * started or if it will be started at a later time. Return non-zero otherwise. * Called while sclp_lock is locked. 
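One subtlety in the sclp_service_call() change above: cc is preloaded with 4, a value the two-bit condition code can never take, and both the SERVC and the following IPM get EX_TABLE entries pointing past the IPM. If either instruction program-checks (for example on a malformed command word), the sentinel survives and the caller maps it to -EINVAL. The resulting cc-to-errno mapping, factored into an illustrative helper (the patch open-codes these tests):

static int sclp_cc_to_errno(int cc)
{
    switch (cc) {
    case 4:
        return -EINVAL;    /* sentinel: SERVC program-checked */
    case 3:
        return -EIO;       /* SCLP not operational */
    case 2:
        return -EBUSY;     /* SCLP busy, caller may retry */
    default:
        return 0;          /* cc 0: command initiated */
    }
}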
*/ @@ -283,6 +390,13 @@ sclp_add_request(struct sclp_req *req) req->start_count = 0; list_add_tail(&req->list, &sclp_req_queue); rc = 0; + if (req->queue_timeout) { + req->queue_expires = jiffies + req->queue_timeout * HZ; + if (!timer_pending(&sclp_queue_timer) || + time_after(sclp_queue_timer.expires, req->queue_expires)) + mod_timer(&sclp_queue_timer, req->queue_expires); + } else + req->queue_expires = 0; /* Start if request is first in list */ if (sclp_running_state == sclp_running_state_idle && req->list.prev == &sclp_req_queue) { @@ -336,7 +450,7 @@ sclp_dispatch_evbufs(struct sccb_header *sccb) reg->receiver_fn(evbuf); spin_lock_irqsave(&sclp_lock, flags); } else if (reg == NULL) - rc = -ENOSYS; + rc = -EOPNOTSUPP; } spin_unlock_irqrestore(&sclp_lock, flags); return rc; @@ -395,16 +509,17 @@ __sclp_find_req(u32 sccb) /* Handler for external interruption. Perform request post-processing. * Prepare read event data request if necessary. Start processing of next * request on queue. */ -static void -sclp_interrupt_handler(__u16 code) +static void sclp_interrupt_handler(struct ext_code ext_code, + unsigned int param32, unsigned long param64) { struct sclp_req *req; u32 finished_sccb; u32 evbuf_pending; + inc_irq_stat(IRQEXT_SCP); spin_lock(&sclp_lock); - finished_sccb = S390_lowcore.ext_params & 0xfffffff8; - evbuf_pending = S390_lowcore.ext_params & 0x3; + finished_sccb = param32 & 0xfffffff8; + evbuf_pending = param32 & 0x3; if (finished_sccb) { del_timer(&sclp_request_timer); sclp_running_state = sclp_running_state_reset_pending; @@ -451,7 +566,7 @@ sclp_sync_wait(void) timeout = 0; if (timer_pending(&sclp_request_timer)) { /* Get timeout TOD value */ - timeout = get_clock() + + timeout = get_tod_clock_fast() + sclp_tod_from_jiffies(sclp_request_timer.expires - jiffies); } @@ -468,12 +583,12 @@ sclp_sync_wait(void) cr0_sync &= 0xffff00a0; cr0_sync |= 0x00000200; __ctl_load(cr0_sync, 0, 0); - __raw_local_irq_stosm(0x01); + __arch_local_irq_stosm(0x01); /* Loop until driver state indicates finished request */ while (sclp_running_state != sclp_running_state_idle) { /* Check for expired request timer */ if (timer_pending(&sclp_request_timer) && - get_clock() > timeout && + get_tod_clock_fast() > timeout && del_timer(&sclp_request_timer)) sclp_request_timer.function(sclp_request_timer.data); cpu_relax(); @@ -655,16 +770,6 @@ sclp_remove_processed(struct sccb_header *sccb) EXPORT_SYMBOL(sclp_remove_processed); -struct init_sccb { - struct sccb_header header; - u16 _reserved; - u16 mask_length; - sccb_mask_t receive_mask; - sccb_mask_t send_mask; - sccb_mask_t sclp_receive_mask; - sccb_mask_t sclp_send_mask; -} __attribute__((packed)); - /* Prepare init mask request. Called while sclp_lock is locked. */ static inline void __sclp_make_init_req(u32 receive_mask, u32 send_mask) @@ -819,12 +924,13 @@ EXPORT_SYMBOL(sclp_reactivate); /* Handler for external interruption used during initialization. Modify * request state to done. */ -static void -sclp_check_handler(__u16 code) +static void sclp_check_handler(struct ext_code ext_code, + unsigned int param32, unsigned long param64) { u32 finished_sccb; - finished_sccb = S390_lowcore.ext_params & 0xfffffff8; + inc_irq_stat(IRQEXT_SCP); + finished_sccb = param32 & 0xfffffff8; /* Is this the interrupt we are waiting for? 
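On the submission side, the deadline support added to sclp_add_request() above is opt-in: a request with a non-zero queue_timeout is taken off the queue with status SCLP_REQ_QUEUED_TIMEOUT once it has waited that many seconds without being started, which matters in situations such as an SE reboot. A hedged usage sketch (callback and wrapper names invented):

static void my_done_cb(struct sclp_req *req, void *data)
{
    if (req->status == SCLP_REQ_QUEUED_TIMEOUT)
        pr_warn("SCLP request expired on the queue\n");
}

static int submit_bounded(struct sclp_req *req, sclp_cmdw_t cmd, void *sccb)
{
    req->command = cmd;
    req->sccb = sccb;
    req->status = SCLP_REQ_FILLED;
    req->queue_timeout = SCLP_QUEUE_INTERVAL;    /* seconds; 0 = unbounded */
    req->callback = my_done_cb;
    req->callback_data = NULL;
    return sclp_add_request(req);
}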
*/ if (finished_sccb == 0) return; @@ -866,8 +972,7 @@ sclp_check_interface(void) spin_lock_irqsave(&sclp_lock, flags); /* Prepare init mask command */ - rc = register_early_external_interrupt(0x2401, sclp_check_handler, - &ext_int_info_hwc); + rc = register_external_irq(EXT_IRQ_SERVICE_SIG, sclp_check_handler); if (rc) { spin_unlock_irqrestore(&sclp_lock, flags); return rc; @@ -885,12 +990,12 @@ sclp_check_interface(void) spin_unlock_irqrestore(&sclp_lock, flags); /* Enable service-signal interruption - needs to happen * with IRQs enabled. */ - ctl_set_bit(0, 9); + irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL); /* Wait for signal from interrupt or timeout */ sclp_sync_wait(); /* Disable service-signal interruption - needs to happen * with IRQs enabled. */ - ctl_clear_bit(0,9); + irq_subclass_unregister(IRQ_SUBCLASS_SERVICE_SIGNAL); spin_lock_irqsave(&sclp_lock, flags); del_timer(&sclp_request_timer); if (sclp_init_req.status == SCLP_REQ_DONE && @@ -900,8 +1005,7 @@ sclp_check_interface(void) } else rc = -EBUSY; } - unregister_early_external_interrupt(0x2401, sclp_check_handler, - &ext_int_info_hwc); + unregister_external_irq(EXT_IRQ_SERVICE_SIG, sclp_check_handler); spin_unlock_irqrestore(&sclp_lock, flags); return rc; } @@ -1025,11 +1129,47 @@ static const struct dev_pm_ops sclp_pm_ops = { .restore = sclp_restore, }; +static ssize_t sclp_show_console_pages(struct device_driver *dev, char *buf) +{ + return sprintf(buf, "%i\n", sclp_console_pages); +} + +static DRIVER_ATTR(con_pages, S_IRUSR, sclp_show_console_pages, NULL); + +static ssize_t sclp_show_con_drop(struct device_driver *dev, char *buf) +{ + return sprintf(buf, "%i\n", sclp_console_drop); +} + +static DRIVER_ATTR(con_drop, S_IRUSR, sclp_show_con_drop, NULL); + +static ssize_t sclp_show_console_full(struct device_driver *dev, char *buf) +{ + return sprintf(buf, "%lu\n", sclp_console_full); +} + +static DRIVER_ATTR(con_full, S_IRUSR, sclp_show_console_full, NULL); + +static struct attribute *sclp_drv_attrs[] = { + &driver_attr_con_pages.attr, + &driver_attr_con_drop.attr, + &driver_attr_con_full.attr, + NULL, +}; +static struct attribute_group sclp_drv_attr_group = { + .attrs = sclp_drv_attrs, +}; +static const struct attribute_group *sclp_drv_attr_groups[] = { + &sclp_drv_attr_group, + NULL, +}; + static struct platform_driver sclp_pdrv = { .driver = { .name = "sclp", .owner = THIS_MODULE, .pm = &sclp_pm_ops, + .groups = sclp_drv_attr_groups, }, }; @@ -1053,6 +1193,8 @@ sclp_init(void) INIT_LIST_HEAD(&sclp_reg_list); list_add(&sclp_state_change_event.list, &sclp_reg_list); init_timer(&sclp_request_timer); + init_timer(&sclp_queue_timer); + sclp_queue_timer.function = sclp_req_queue_timeout; /* Check interface */ spin_unlock_irqrestore(&sclp_lock, flags); rc = sclp_check_interface(); @@ -1064,15 +1206,14 @@ sclp_init(void) if (rc) goto fail_init_state_uninitialized; /* Register interrupt handler */ - rc = register_early_external_interrupt(0x2401, sclp_interrupt_handler, - &ext_int_info_hwc); + rc = register_external_irq(EXT_IRQ_SERVICE_SIG, sclp_interrupt_handler); if (rc) goto fail_unregister_reboot_notifier; sclp_init_state = sclp_init_state_initialized; spin_unlock_irqrestore(&sclp_lock, flags); /* Enable service-signal external interruption - needs to happen with * IRQs enabled. 
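A note on the sysfs hunks above: the three counters are plain read-only driver attributes, and hooking the attribute group into the platform driver's .groups pointer makes them appear under /sys/bus/platform/drivers/sclp/ without explicit create/remove calls. Extending the set would take one more attribute plus an array slot, sketched here with the hypothetical sclp_console_retry from the earlier example:

static ssize_t sclp_show_con_retry(struct device_driver *dev, char *buf)
{
    return sprintf(buf, "%i\n", sclp_console_retry);    /* hypothetical */
}

static DRIVER_ATTR(con_retry, S_IRUSR, sclp_show_con_retry, NULL);

/* ...and add &driver_attr_con_retry.attr in front of the NULL
 * terminator of sclp_drv_attrs[]. */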
*/ - ctl_set_bit(0, 9); + irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL); sclp_init_mask(1); return 0; @@ -1109,10 +1250,12 @@ static __init int sclp_initcall(void) rc = platform_driver_register(&sclp_pdrv); if (rc) return rc; + sclp_pdev = platform_device_register_simple("sclp", -1, NULL, 0); - rc = IS_ERR(sclp_pdev) ? PTR_ERR(sclp_pdev) : 0; + rc = PTR_ERR_OR_ZERO(sclp_pdev); if (rc) goto fail_platform_driver_unregister; + rc = atomic_notifier_chain_register(&panic_notifier_list, &sclp_on_panic_nb); if (rc) diff --git a/drivers/s390/char/sclp.h b/drivers/s390/char/sclp.h index 6bb5a6bdfab..a68b5ec7d04 100644 --- a/drivers/s390/char/sclp.h +++ b/drivers/s390/char/sclp.h @@ -1,5 +1,5 @@ /* - * Copyright IBM Corp. 1999, 2009 + * Copyright IBM Corp. 1999,2012 * * Author(s): Martin Peschke <mpeschke@de.ibm.com> * Martin Schwidefsky <schwidefsky@de.ibm.com> @@ -15,7 +15,7 @@ /* maximum number of pages concerning our own memory management */ #define MAX_KMEM_PAGES (sizeof(unsigned long) << 3) -#define MAX_CONSOLE_PAGES 6 +#define SCLP_CONSOLE_PAGES 6 #define EVTYP_OPCMD 0x01 #define EVTYP_MSG 0x02 @@ -28,6 +28,7 @@ #define EVTYP_CONFMGMDATA 0x04 #define EVTYP_SDIAS 0x1C #define EVTYP_ASYNC 0x0A +#define EVTYP_OCF 0x1E #define EVTYP_OPCMD_MASK 0x80000000 #define EVTYP_MSG_MASK 0x40000000 @@ -40,6 +41,7 @@ #define EVTYP_CONFMGMDATA_MASK 0x10000000 #define EVTYP_SDIAS_MASK 0x00000010 #define EVTYP_ASYNC_MASK 0x00400000 +#define EVTYP_OCF_MASK 0x00000004 #define GNRLMSGFLGS_DOM 0x8000 #define GNRLMSGFLGS_SNDALRM 0x4000 @@ -86,11 +88,23 @@ struct sccb_header { u16 response_code; } __attribute__((packed)); +struct init_sccb { + struct sccb_header header; + u16 _reserved; + u16 mask_length; + sccb_mask_t receive_mask; + sccb_mask_t send_mask; + sccb_mask_t sclp_receive_mask; + sccb_mask_t sclp_send_mask; +} __attribute__((packed)); + extern u64 sclp_facilities; + #define SCLP_HAS_CHP_INFO (sclp_facilities & 0x8000000000000000ULL) #define SCLP_HAS_CHP_RECONFIG (sclp_facilities & 0x2000000000000000ULL) #define SCLP_HAS_CPU_INFO (sclp_facilities & 0x0800000000000000ULL) #define SCLP_HAS_CPU_RECONFIG (sclp_facilities & 0x0400000000000000ULL) +#define SCLP_HAS_PCI_RECONFIG (sclp_facilities & 0x0000000040000000ULL) struct gds_subvector { @@ -119,6 +133,11 @@ struct sclp_req { /* Callback that is called after reaching final status. 
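Among the sclp.h additions above, SCLP_HAS_PCI_RECONFIG follows the established facility-bit convention: each macro tests one bit of the sclp_facilities doubleword captured from the read-info SCCB at boot, so callers can gate optional function at run time. A usage sketch (the real guard sits in do_pci_configure() later in this patch; wrapper name invented):

static int try_pci_reconfigure(u32 fid)
{
    if (!SCLP_HAS_PCI_RECONFIG)
        return -EOPNOTSUPP;    /* firmware lacks the facility */
    return sclp_pci_configure(fid);
}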
*/ void (*callback)(struct sclp_req *, void *data); void *callback_data; + int queue_timeout; /* request queue timeout (sec), set by + caller of sclp_add_request(), if + needed */ + /* Internal fields */ + unsigned long queue_expires; /* request queue timeout (jiffies) */ }; #define SCLP_REQ_FILLED 0x00 /* request is ready to be processed */ @@ -126,6 +145,9 @@ struct sclp_req { #define SCLP_REQ_RUNNING 0x02 /* request is currently running */ #define SCLP_REQ_DONE 0x03 /* request is completed successfully */ #define SCLP_REQ_FAILED 0x05 /* request is finally failed */ +#define SCLP_REQ_QUEUED_TIMEOUT 0x06 /* request on queue timed out */ + +#define SCLP_QUEUE_INTERVAL 5 /* timeout interval for request queue */ /* function pointers that a high level driver has to use for registration */ /* of some routines it wants to be called from the low level driver */ @@ -158,10 +180,19 @@ int sclp_remove_processed(struct sccb_header *sccb); int sclp_deactivate(void); int sclp_reactivate(void); int sclp_service_call(sclp_cmdw_t command, void *sccb); +int sclp_sync_request(sclp_cmdw_t command, void *sccb); +int sclp_sync_request_timeout(sclp_cmdw_t command, void *sccb, int timeout); int sclp_sdias_init(void); void sclp_sdias_exit(void); +extern int sclp_console_pages; +extern int sclp_console_drop; +extern unsigned long sclp_console_full; +extern u8 sclp_fac84; +extern unsigned long long sclp_rzm; +extern unsigned long long sclp_rnmax; + /* useful inlines */ /* VM uses EBCDIC 037, LPAR+native(SE+HMC) use EBCDIC 500 */ @@ -186,4 +217,26 @@ sclp_ascebc_str(unsigned char *str, int nr) (MACHINE_IS_VM) ? ASCEBC(str, nr) : ASCEBC_500(str, nr); } +static inline struct gds_vector * +sclp_find_gds_vector(void *start, void *end, u16 id) +{ + struct gds_vector *v; + + for (v = start; (void *) v < end; v = (void *) v + v->length) + if (v->gds_id == id) + return v; + return NULL; +} + +static inline struct gds_subvector * +sclp_find_gds_subvector(void *start, void *end, u8 key) +{ + struct gds_subvector *sv; + + for (sv = start; (void *) sv < end; sv = (void *) sv + sv->length) + if (sv->key == key) + return sv; + return NULL; +} + #endif /* __SCLP_H__ */ diff --git a/drivers/s390/char/sclp_async.c b/drivers/s390/char/sclp_async.c index 740fe405c39..5f9f929e891 100644 --- a/drivers/s390/char/sclp_async.c +++ b/drivers/s390/char/sclp_async.c @@ -11,6 +11,7 @@ #include <linux/device.h> #include <linux/stat.h> #include <linux/string.h> +#include <linux/slab.h> #include <linux/ctype.h> #include <linux/kmod.h> #include <linux/err.h> @@ -81,11 +82,9 @@ static int proc_handler_callhome(struct ctl_table *ctl, int write, return -EFAULT; } else { len = *count; - rc = copy_from_user(buf, buffer, sizeof(buf)); - if (rc != 0) - return -EFAULT; - if (strict_strtoul(buf, 0, &val) != 0) - return -EINVAL; + rc = kstrtoul_from_user(buffer, len, 0, &val); + if (rc) + return rc; if (val != 0 && val != 1) return -EINVAL; callhome_enabled = val; diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c index fc7ae05ce48..6e14999f9e8 100644 --- a/drivers/s390/char/sclp_cmd.c +++ b/drivers/s390/char/sclp_cmd.c @@ -1,5 +1,5 @@ /* - * Copyright IBM Corp. 2007, 2009 + * Copyright IBM Corp. 
2007,2012 * * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>, * Peter Oberparleiter <peter.oberparleiter@de.ibm.com> @@ -12,137 +12,22 @@ #include <linux/init.h> #include <linux/errno.h> #include <linux/err.h> +#include <linux/export.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/mmzone.h> #include <linux/memory.h> +#include <linux/module.h> #include <linux/platform_device.h> +#include <asm/ctl_reg.h> #include <asm/chpid.h> -#include <asm/sclp.h> #include <asm/setup.h> +#include <asm/page.h> +#include <asm/sclp.h> #include "sclp.h" -#define SCLP_CMDW_READ_SCP_INFO 0x00020001 -#define SCLP_CMDW_READ_SCP_INFO_FORCED 0x00120001 - -struct read_info_sccb { - struct sccb_header header; /* 0-7 */ - u16 rnmax; /* 8-9 */ - u8 rnsize; /* 10 */ - u8 _reserved0[24 - 11]; /* 11-15 */ - u8 loadparm[8]; /* 24-31 */ - u8 _reserved1[48 - 32]; /* 32-47 */ - u64 facilities; /* 48-55 */ - u8 _reserved2[84 - 56]; /* 56-83 */ - u8 fac84; /* 84 */ - u8 _reserved3[91 - 85]; /* 85-90 */ - u8 flags; /* 91 */ - u8 _reserved4[100 - 92]; /* 92-99 */ - u32 rnsize2; /* 100-103 */ - u64 rnmax2; /* 104-111 */ - u8 _reserved5[4096 - 112]; /* 112-4095 */ -} __attribute__((packed, aligned(PAGE_SIZE))); - -static struct read_info_sccb __initdata early_read_info_sccb; -static int __initdata early_read_info_sccb_valid; - -u64 sclp_facilities; -static u8 sclp_fac84; -static unsigned long long rzm; -static unsigned long long rnmax; - -static int __init sclp_cmd_sync_early(sclp_cmdw_t cmd, void *sccb) -{ - int rc; - - __ctl_set_bit(0, 9); - rc = sclp_service_call(cmd, sccb); - if (rc) - goto out; - __load_psw_mask(PSW_BASE_BITS | PSW_MASK_EXT | - PSW_MASK_WAIT | PSW_DEFAULT_KEY); - local_irq_disable(); -out: - /* Contents of the sccb might have changed. */ - barrier(); - __ctl_clear_bit(0, 9); - return rc; -} - -static void __init sclp_read_info_early(void) -{ - int rc; - int i; - struct read_info_sccb *sccb; - sclp_cmdw_t commands[] = {SCLP_CMDW_READ_SCP_INFO_FORCED, - SCLP_CMDW_READ_SCP_INFO}; - - sccb = &early_read_info_sccb; - for (i = 0; i < ARRAY_SIZE(commands); i++) { - do { - memset(sccb, 0, sizeof(*sccb)); - sccb->header.length = sizeof(*sccb); - sccb->header.function_code = 0x80; - sccb->header.control_mask[2] = 0x80; - rc = sclp_cmd_sync_early(commands[i], sccb); - } while (rc == -EBUSY); - - if (rc) - break; - if (sccb->header.response_code == 0x10) { - early_read_info_sccb_valid = 1; - break; - } - if (sccb->header.response_code != 0x1f0) - break; - } -} - -void __init sclp_facilities_detect(void) -{ - struct read_info_sccb *sccb; - - sclp_read_info_early(); - if (!early_read_info_sccb_valid) - return; - - sccb = &early_read_info_sccb; - sclp_facilities = sccb->facilities; - sclp_fac84 = sccb->fac84; - rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2; - rzm = sccb->rnsize ? sccb->rnsize : sccb->rnsize2; - rzm <<= 20; -} - -unsigned long long sclp_get_rnmax(void) -{ - return rnmax; -} - -unsigned long long sclp_get_rzm(void) -{ - return rzm; -} - -/* - * This function will be called after sclp_facilities_detect(), which gets - * called from early.c code. Therefore the sccb should have valid contents. 
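The early-detection code removed here is not lost; it reappears in the new sclp_early.c further down. What remains in sclp_cmd.c is the synchronous request API, now split so that sclp_sync_request() is simply sclp_sync_request_timeout() with no deadline. Two usage shapes as a sketch (wrapper names invented):

/* unbounded: fine for paths that may legitimately take long */
static int read_chp_info(void *sccb)
{
    return sclp_sync_request(SCLP_CMDW_READ_CHPATH_INFORMATION, sccb);
}

/* bounded: CPU/memory reconfiguration must not hang forever, so it
 * passes SCLP_QUEUE_INTERVAL (5s) and the request fails with
 * SCLP_REQ_QUEUED_TIMEOUT if it never left the queue */
static int read_cpu_info(void *sccb)
{
    return sclp_sync_request_timeout(SCLP_CMDW_READ_CPU_INFO, sccb,
                                     SCLP_QUEUE_INTERVAL);
}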
- */ -void __init sclp_get_ipl_info(struct sclp_ipl_info *info) -{ - struct read_info_sccb *sccb; - - if (!early_read_info_sccb_valid) - return; - sccb = &early_read_info_sccb; - info->is_valid = 1; - if (sccb->flags & 0x2) - info->has_dump = 1; - memcpy(&info->loadparm, &sccb->loadparm, LOADPARM_LEN); -} - static void sclp_sync_callback(struct sclp_req *req, void *data) { struct completion *completion = data; @@ -150,7 +35,12 @@ static void sclp_sync_callback(struct sclp_req *req, void *data) complete(completion); } -static int do_sync_request(sclp_cmdw_t cmd, void *sccb) +int sclp_sync_request(sclp_cmdw_t cmd, void *sccb) +{ + return sclp_sync_request_timeout(cmd, sccb, 0); +} + +int sclp_sync_request_timeout(sclp_cmdw_t cmd, void *sccb, int timeout) { struct completion completion; struct sclp_req *request; @@ -159,6 +49,8 @@ static int do_sync_request(sclp_cmdw_t cmd, void *sccb) request = kzalloc(sizeof(*request), GFP_KERNEL); if (!request) return -ENOMEM; + if (timeout) + request->queue_timeout = timeout; request->command = cmd; request->sccb = sccb; request->status = SCLP_REQ_FILLED; @@ -225,7 +117,8 @@ int sclp_get_cpu_info(struct sclp_cpu_info *info) if (!sccb) return -ENOMEM; sccb->header.length = sizeof(*sccb); - rc = do_sync_request(SCLP_CMDW_READ_CPU_INFO, sccb); + rc = sclp_sync_request_timeout(SCLP_CMDW_READ_CPU_INFO, sccb, + SCLP_QUEUE_INTERVAL); if (rc) goto out; if (sccb->header.response_code != 0x0010) { @@ -259,7 +152,7 @@ static int do_cpu_configure(sclp_cmdw_t cmd) if (!sccb) return -ENOMEM; sccb->header.length = sizeof(*sccb); - rc = do_sync_request(cmd, sccb); + rc = sclp_sync_request_timeout(cmd, sccb, SCLP_QUEUE_INTERVAL); if (rc) goto out; switch (sccb->header.response_code) { @@ -300,7 +193,6 @@ struct memory_increment { struct list_head list; u16 rn; int standby; - int usecount; }; struct assign_storage_sccb { @@ -308,9 +200,16 @@ struct assign_storage_sccb { u16 rn; } __packed; +int arch_get_memory_phys_device(unsigned long start_pfn) +{ + if (!sclp_rzm) + return 0; + return PFN_PHYS(start_pfn) >> ilog2(sclp_rzm); +} + static unsigned long long rn2addr(u16 rn) { - return (unsigned long long) (rn - 1) * rzm; + return (unsigned long long) (rn - 1) * sclp_rzm; } static int do_assign_storage(sclp_cmdw_t cmd, u16 rn) @@ -323,7 +222,7 @@ static int do_assign_storage(sclp_cmdw_t cmd, u16 rn) return -ENOMEM; sccb->header.length = PAGE_SIZE; sccb->rn = rn; - rc = do_sync_request(cmd, sccb); + rc = sclp_sync_request_timeout(cmd, sccb, SCLP_QUEUE_INTERVAL); if (rc) goto out; switch (sccb->header.response_code) { @@ -344,7 +243,15 @@ out: static int sclp_assign_storage(u16 rn) { - return do_assign_storage(0x000d0001, rn); + unsigned long long start; + int rc; + + rc = do_assign_storage(0x000d0001, rn); + if (rc) + return rc; + start = rn2addr(rn); + storage_key_init_range(start, start + sclp_rzm); + return 0; } static int sclp_unassign_storage(u16 rn) @@ -370,14 +277,17 @@ static int sclp_attach_storage(u8 id) if (!sccb) return -ENOMEM; sccb->header.length = PAGE_SIZE; - rc = do_sync_request(0x00080001 | id << 8, sccb); + rc = sclp_sync_request_timeout(0x00080001 | id << 8, sccb, + SCLP_QUEUE_INTERVAL); if (rc) goto out; switch (sccb->header.response_code) { case 0x0020: set_bit(id, sclp_storage_ids); - for (i = 0; i < sccb->assigned; i++) - sclp_unassign_storage(sccb->entries[i] >> 16); + for (i = 0; i < sccb->assigned; i++) { + if (sccb->entries[i]) + sclp_unassign_storage(sccb->entries[i] >> 16); + } break; default: rc = -EIO; @@ -399,23 +309,12 @@ static int 
sclp_mem_change_state(unsigned long start, unsigned long size, istart = rn2addr(incr->rn); if (start + size - 1 < istart) break; - if (start > istart + rzm - 1) + if (start > istart + sclp_rzm - 1) continue; - if (online) { - if (incr->usecount++) - continue; - /* - * Don't break the loop if one assign fails. Loop may - * be walked again on CANCEL and we can't save - * information if state changed before or not. - * So continue and increase usecount for all increments. - */ + if (online) rc |= sclp_assign_storage(incr->rn); - } else { - if (--incr->usecount) - continue; + else sclp_unassign_storage(incr->rn); - } } return rc ? -EIO : 0; } @@ -432,9 +331,8 @@ static int sclp_mem_notifier(struct notifier_block *nb, start = arg->start_pfn << PAGE_SHIFT; size = arg->nr_pages << PAGE_SHIFT; mutex_lock(&sclp_mem_mutex); - for (id = 0; id <= sclp_max_storage_id; id++) - if (!test_bit(id, sclp_storage_ids)) - sclp_attach_storage(id); + for_each_clear_bit(id, sclp_storage_ids, sclp_max_storage_id + 1) + sclp_attach_storage(id); switch (action) { case MEM_ONLINE: case MEM_GOING_OFFLINE: @@ -475,7 +373,7 @@ static void __init add_memory_merged(u16 rn) if (!first_rn) goto skip_add; start = rn2addr(first_rn); - size = (unsigned long long ) num * rzm; + size = (unsigned long long) num * sclp_rzm; if (start >= VMEM_MAX_PHYS) goto skip_add; if (start + size > VMEM_MAX_PHYS) @@ -523,7 +421,7 @@ static void __init insert_increment(u16 rn, int standby, int assigned) } if (!assigned) new_incr->rn = last_rn + 1; - if (new_incr->rn > rnmax) { + if (new_incr->rn > sclp_rnmax) { kfree(new_incr); return; } @@ -564,7 +462,7 @@ static int __init sclp_detect_standby_memory(void) struct read_storage_sccb *sccb; int i, id, assigned, rc; - if (!early_read_info_sccb_valid) + if (OLDMEM_BASE) /* No standby memory in kdump mode */ return 0; if ((sclp_facilities & 0xe00000000000ULL) != 0xe00000000000ULL) return 0; @@ -576,7 +474,7 @@ static int __init sclp_detect_standby_memory(void) for (id = 0; id <= sclp_max_storage_id; id++) { memset(sccb, 0, PAGE_SIZE); sccb->header.length = PAGE_SIZE; - rc = do_sync_request(0x00040001 | id << 8, sccb); + rc = sclp_sync_request(0x00040001 | id << 8, sccb); if (rc) goto out; switch (sccb->header.response_code) { @@ -608,7 +506,7 @@ static int __init sclp_detect_standby_memory(void) } if (rc || list_empty(&sclp_mem_list)) goto out; - for (i = 1; i <= rnmax - assigned; i++) + for (i = 1; i <= sclp_rnmax - assigned; i++) insert_increment(0, 1, 0); rc = register_memory_notifier(&sclp_mem_nb); if (rc) @@ -617,7 +515,7 @@ static int __init sclp_detect_standby_memory(void) if (rc) goto out; sclp_pdev = platform_device_register_simple("sclp_mem", -1, NULL, 0); - rc = IS_ERR(sclp_pdev) ? PTR_ERR(sclp_pdev) : 0; + rc = PTR_ERR_OR_ZERO(sclp_pdev); if (rc) goto out_driver; sclp_add_standby_memory(); @@ -633,6 +531,67 @@ __initcall(sclp_detect_standby_memory); #endif /* CONFIG_MEMORY_HOTPLUG */ /* + * PCI I/O adapter configuration related functions. 
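One detail from the memory-hotplug hunks above: the open-coded scan for still-unattached storage IDs became a for_each_clear_bit() walk, which visits exactly the indices whose bit is zero. The replacement shape on its own, as a sketch assuming <linux/bitops.h>:

static void attach_unassigned_storage(void)
{
    int id;

    /* visits exactly the IDs whose bit in sclp_storage_ids is 0,
     * i.e. the storage elements not yet attached */
    for_each_clear_bit(id, sclp_storage_ids, sclp_max_storage_id + 1)
        sclp_attach_storage(id);
}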
+ */ +#define SCLP_CMDW_CONFIGURE_PCI 0x001a0001 +#define SCLP_CMDW_DECONFIGURE_PCI 0x001b0001 + +#define SCLP_RECONFIG_PCI_ATPYE 2 + +struct pci_cfg_sccb { + struct sccb_header header; + u8 atype; /* adapter type */ + u8 reserved1; + u16 reserved2; + u32 aid; /* adapter identifier */ +} __packed; + +static int do_pci_configure(sclp_cmdw_t cmd, u32 fid) +{ + struct pci_cfg_sccb *sccb; + int rc; + + if (!SCLP_HAS_PCI_RECONFIG) + return -EOPNOTSUPP; + + sccb = (struct pci_cfg_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA); + if (!sccb) + return -ENOMEM; + + sccb->header.length = PAGE_SIZE; + sccb->atype = SCLP_RECONFIG_PCI_ATPYE; + sccb->aid = fid; + rc = sclp_sync_request(cmd, sccb); + if (rc) + goto out; + switch (sccb->header.response_code) { + case 0x0020: + case 0x0120: + break; + default: + pr_warn("configure PCI I/O adapter failed: cmd=0x%08x response=0x%04x\n", + cmd, sccb->header.response_code); + rc = -EIO; + break; + } +out: + free_page((unsigned long) sccb); + return rc; +} + +int sclp_pci_configure(u32 fid) +{ + return do_pci_configure(SCLP_CMDW_CONFIGURE_PCI, fid); +} +EXPORT_SYMBOL(sclp_pci_configure); + +int sclp_pci_deconfigure(u32 fid) +{ + return do_pci_configure(SCLP_CMDW_DECONFIGURE_PCI, fid); +} +EXPORT_SYMBOL(sclp_pci_deconfigure); + +/* * Channel path configuration related functions. */ @@ -659,7 +618,7 @@ static int do_chp_configure(sclp_cmdw_t cmd) if (!sccb) return -ENOMEM; sccb->header.length = sizeof(*sccb); - rc = do_sync_request(cmd, sccb); + rc = sclp_sync_request(cmd, sccb); if (rc) goto out; switch (sccb->header.response_code) { @@ -704,13 +663,6 @@ int sclp_chp_deconfigure(struct chp_id chpid) return do_chp_configure(SCLP_CMDW_DECONFIGURE_CHPATH | chpid.id << 8); } -int arch_get_memory_phys_device(unsigned long start_pfn) -{ - if (!rzm) - return 0; - return PFN_PHYS(start_pfn) / rzm; -} - struct chp_info_sccb { struct sccb_header header; u8 recognized[SCLP_CHP_INFO_MASK_SIZE]; @@ -741,7 +693,7 @@ int sclp_chp_read_info(struct sclp_chp_info *info) if (!sccb) return -ENOMEM; sccb->header.length = sizeof(*sccb); - rc = do_sync_request(SCLP_CMDW_READ_CHPATH_INFORMATION, sccb); + rc = sclp_sync_request(SCLP_CMDW_READ_CHPATH_INFORMATION, sccb); if (rc) goto out; if (sccb->header.response_code != 0x0010) { @@ -757,3 +709,8 @@ out: free_page((unsigned long) sccb); return rc; } + +bool sclp_has_sprp(void) +{ + return !!(sclp_fac84 & 0x2); +} diff --git a/drivers/s390/char/sclp_con.c b/drivers/s390/char/sclp_con.c index ad698d30cb3..5880def98fc 100644 --- a/drivers/s390/char/sclp_con.c +++ b/drivers/s390/char/sclp_con.c @@ -14,6 +14,7 @@ #include <linux/termios.h> #include <linux/err.h> #include <linux/reboot.h> +#include <linux/gfp.h> #include "sclp.h" #include "sclp_rw.h" @@ -129,6 +130,31 @@ sclp_console_timeout(unsigned long data) } /* + * Drop oldest console buffer if sclp_con_drop is set + */ +static int +sclp_console_drop_buffer(void) +{ + struct list_head *list; + struct sclp_buffer *buffer; + void *page; + + if (!sclp_console_drop) + return 0; + list = sclp_con_outqueue.next; + if (sclp_con_queue_running) + /* The first element is in I/O */ + list = list->next; + if (list == &sclp_con_outqueue) + return 0; + list_del(list); + buffer = list_entry(list, struct sclp_buffer, list); + page = sclp_unmake_buffer(buffer); + list_add_tail((struct list_head *) page, &sclp_con_pages); + return 1; +} + +/* * Writes the given message to S390 system console */ static void @@ -149,9 +175,13 @@ sclp_console_write(struct console *console, const char *message, do { /* make 
sure we have a console output buffer */ if (sclp_conbuf == NULL) { + if (list_empty(&sclp_con_pages)) + sclp_console_full++; while (list_empty(&sclp_con_pages)) { if (sclp_con_suspended) goto out; + if (sclp_console_drop_buffer()) + break; spin_unlock_irqrestore(&sclp_con_lock, flags); sclp_sync_wait(); spin_lock_irqsave(&sclp_con_lock, flags); @@ -296,7 +326,7 @@ sclp_console_init(void) return rc; /* Allocate pages for output buffering */ INIT_LIST_HEAD(&sclp_con_pages); - for (i = 0; i < MAX_CONSOLE_PAGES; i++) { + for (i = 0; i < sclp_console_pages; i++) { page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA); list_add_tail(page, &sclp_con_pages); } diff --git a/drivers/s390/char/sclp_config.c b/drivers/s390/char/sclp_config.c index b497afe061c..94415620747 100644 --- a/drivers/s390/char/sclp_config.c +++ b/drivers/s390/char/sclp_config.c @@ -1,6 +1,4 @@ /* - * drivers/s390/char/sclp_config.c - * * Copyright IBM Corp. 2007 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com> */ @@ -11,7 +9,7 @@ #include <linux/init.h> #include <linux/errno.h> #include <linux/cpu.h> -#include <linux/sysdev.h> +#include <linux/device.h> #include <linux/workqueue.h> #include <asm/smp.h> @@ -31,13 +29,14 @@ static struct work_struct sclp_cpu_change_work; static void sclp_cpu_capability_notify(struct work_struct *work) { int cpu; - struct sys_device *sysdev; + struct device *dev; - pr_warning("cpu capability changed.\n"); + s390_adjust_jiffies(); + pr_info("CPU capability may have changed\n"); get_online_cpus(); for_each_online_cpu(cpu) { - sysdev = get_cpu_sysdev(cpu); - kobject_uevent(&sysdev->kobj, KOBJ_CHANGE); + dev = get_cpu_device(cpu); + kobject_uevent(&dev->kobj, KOBJ_CHANGE); } put_online_cpus(); } @@ -70,21 +69,9 @@ static struct sclp_register sclp_conf_register = static int __init sclp_conf_init(void) { - int rc; - INIT_WORK(&sclp_cpu_capability_work, sclp_cpu_capability_notify); INIT_WORK(&sclp_cpu_change_work, sclp_cpu_change_notify); - - rc = sclp_register(&sclp_conf_register); - if (rc) - return rc; - - if (!(sclp_conf_register.sclp_send_mask & EVTYP_CONFMGMDATA_MASK)) { - pr_warning("no configuration management.\n"); - sclp_unregister(&sclp_conf_register); - rc = -ENOSYS; - } - return rc; + return sclp_register(&sclp_conf_register); } __initcall(sclp_conf_init); diff --git a/drivers/s390/char/sclp_cpi.c b/drivers/s390/char/sclp_cpi.c index 5716487b8c9..d70d8c20229 100644 --- a/drivers/s390/char/sclp_cpi.c +++ b/drivers/s390/char/sclp_cpi.c @@ -1,5 +1,4 @@ /* - * drivers/s390/char/sclp_cpi.c * SCLP control program identification * * Copyright IBM Corp. 2001, 2007 diff --git a/drivers/s390/char/sclp_cpi_sys.c b/drivers/s390/char/sclp_cpi_sys.c index 62c2647f37f..2acea809e2a 100644 --- a/drivers/s390/char/sclp_cpi_sys.c +++ b/drivers/s390/char/sclp_cpi_sys.c @@ -1,5 +1,4 @@ /* - * drivers/s390/char/sclp_cpi_sys.c * SCLP control program identification sysfs interface * * Copyright IBM Corp.
2001, 2007 @@ -21,6 +20,7 @@ #include <linux/err.h> #include <linux/slab.h> #include <linux/completion.h> +#include <linux/export.h> #include <asm/ebcdic.h> #include <asm/sclp.h> @@ -102,7 +102,7 @@ static struct sclp_req *cpi_prepare_req(void) /* set system name */ set_data(evb->system_name, system_name); - /* set sytem level */ + /* set system level */ evb->system_level = system_level; /* set sysplex name */ diff --git a/drivers/s390/char/sclp_cpi_sys.h b/drivers/s390/char/sclp_cpi_sys.h index deef3e6ff49..65bb6a99c97 100644 --- a/drivers/s390/char/sclp_cpi_sys.h +++ b/drivers/s390/char/sclp_cpi_sys.h @@ -1,5 +1,4 @@ /* - * drivers/s390/char/sclp_cpi_sys.h * SCLP control program identification sysfs interface * * Copyright IBM Corp. 2007 diff --git a/drivers/s390/char/sclp_ctl.c b/drivers/s390/char/sclp_ctl.c new file mode 100644 index 00000000000..648cb86afd4 --- /dev/null +++ b/drivers/s390/char/sclp_ctl.c @@ -0,0 +1,147 @@ +/* + * IOCTL interface for SCLP + * + * Copyright IBM Corp. 2012 + * + * Author: Michael Holzheu <holzheu@linux.vnet.ibm.com> + */ + +#include <linux/compat.h> +#include <linux/uaccess.h> +#include <linux/miscdevice.h> +#include <linux/gfp.h> +#include <linux/module.h> +#include <linux/ioctl.h> +#include <linux/fs.h> +#include <asm/compat.h> +#include <asm/sclp_ctl.h> +#include <asm/sclp.h> + +#include "sclp.h" + +/* + * Supported command words + */ +static unsigned int sclp_ctl_sccb_wlist[] = { + 0x00400002, + 0x00410002, +}; + +/* + * Check if command word is supported + */ +static int sclp_ctl_cmdw_supported(unsigned int cmdw) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(sclp_ctl_sccb_wlist); i++) { + if (cmdw == sclp_ctl_sccb_wlist[i]) + return 1; + } + return 0; } + +static void __user *u64_to_uptr(u64 value) +{ + if (is_compat_task()) + return compat_ptr(value); + else + return (void __user *)(unsigned long)value; +} + +/* + * Start SCLP request + */ +static int sclp_ctl_ioctl_sccb(void __user *user_area) +{ + struct sclp_ctl_sccb ctl_sccb; + struct sccb_header *sccb; + int rc; + + if (copy_from_user(&ctl_sccb, user_area, sizeof(ctl_sccb))) + return -EFAULT; + if (!sclp_ctl_cmdw_supported(ctl_sccb.cmdw)) + return -EOPNOTSUPP; + sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA); + if (!sccb) + return -ENOMEM; + if (copy_from_user(sccb, u64_to_uptr(ctl_sccb.sccb), sizeof(*sccb))) { + rc = -EFAULT; + goto out_free; + } + if (sccb->length > PAGE_SIZE || sccb->length < 8) { + rc = -EINVAL; + goto out_free; + } + if (copy_from_user(sccb, u64_to_uptr(ctl_sccb.sccb), sccb->length)) { + rc = -EFAULT; + goto out_free; + } + rc = sclp_sync_request(ctl_sccb.cmdw, sccb); + if (rc) + goto out_free; + if (copy_to_user(u64_to_uptr(ctl_sccb.sccb), sccb, sccb->length)) + rc = -EFAULT; +out_free: + free_page((unsigned long) sccb); + return rc; +} + +/* + * SCLP SCCB ioctl function + */ +static long sclp_ctl_ioctl(struct file *filp, unsigned int cmd, + unsigned long arg) +{ + void __user *argp; + + if (is_compat_task()) + argp = compat_ptr(arg); + else + argp = (void __user *) arg; + switch (cmd) { + case SCLP_CTL_SCCB: + return sclp_ctl_ioctl_sccb(argp); + default: /* unknown ioctl number */ + return -ENOTTY; + } } + +/* + * File operations + */ +static const struct file_operations sclp_ctl_fops = { + .owner = THIS_MODULE, + .open = nonseekable_open, + .unlocked_ioctl = sclp_ctl_ioctl, + .compat_ioctl = sclp_ctl_ioctl, + .llseek = no_llseek, +}; + +/* + * Misc device definition + */ +static struct miscdevice sclp_ctl_device = { + .minor = MISC_DYNAMIC_MINOR, + .name = "sclp", + .fops =
&sclp_ctl_fops, +}; + +/* + * Register sclp_ctl misc device + */ +static int __init sclp_ctl_init(void) +{ + return misc_register(&sclp_ctl_device); +} +module_init(sclp_ctl_init); + +/* + * Deregister sclp_ctl misc device + */ +static void __exit sclp_ctl_exit(void) +{ + misc_deregister(&sclp_ctl_device); +} +module_exit(sclp_ctl_exit); diff --git a/drivers/s390/char/sclp_early.c b/drivers/s390/char/sclp_early.c new file mode 100644 index 00000000000..1918d9dff45 --- /dev/null +++ b/drivers/s390/char/sclp_early.c @@ -0,0 +1,315 @@ +/* + * SCLP early driver + * + * Copyright IBM Corp. 2013 + */ + +#define KMSG_COMPONENT "sclp_early" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + +#include <asm/ctl_reg.h> +#include <asm/sclp.h> +#include <asm/ipl.h> +#include "sclp_sdias.h" +#include "sclp.h" + +#define SCLP_CMDW_READ_SCP_INFO 0x00020001 +#define SCLP_CMDW_READ_SCP_INFO_FORCED 0x00120001 + +struct read_info_sccb { + struct sccb_header header; /* 0-7 */ + u16 rnmax; /* 8-9 */ + u8 rnsize; /* 10 */ + u8 _reserved0[16 - 11]; /* 11-15 */ + u16 ncpurl; /* 16-17 */ + u16 cpuoff; /* 18-19 */ + u8 _reserved7[24 - 20]; /* 20-23 */ + u8 loadparm[8]; /* 24-31 */ + u8 _reserved1[48 - 32]; /* 32-47 */ + u64 facilities; /* 48-55 */ + u8 _reserved2a[76 - 56]; /* 56-75 */ + u32 ibc; /* 76-79 */ + u8 _reserved2b[84 - 80]; /* 80-83 */ + u8 fac84; /* 84 */ + u8 fac85; /* 85 */ + u8 _reserved3[91 - 86]; /* 86-90 */ + u8 flags; /* 91 */ + u8 _reserved4[100 - 92]; /* 92-99 */ + u32 rnsize2; /* 100-103 */ + u64 rnmax2; /* 104-111 */ + u8 _reserved5[120 - 112]; /* 112-119 */ + u16 hcpua; /* 120-121 */ + u8 _reserved6[4096 - 122]; /* 122-4095 */ +} __packed __aligned(PAGE_SIZE); + +static char sccb_early[PAGE_SIZE] __aligned(PAGE_SIZE) __initdata; +static unsigned int sclp_con_has_vt220 __initdata; +static unsigned int sclp_con_has_linemode __initdata; +static unsigned long sclp_hsa_size; +static unsigned int sclp_max_cpu; +static struct sclp_ipl_info sclp_ipl_info; +static unsigned char sclp_siif; +static u32 sclp_ibc; + +u64 sclp_facilities; +u8 sclp_fac84; +unsigned long long sclp_rzm; +unsigned long long sclp_rnmax; + +static int __init sclp_cmd_sync_early(sclp_cmdw_t cmd, void *sccb) +{ + int rc; + + __ctl_set_bit(0, 9); + rc = sclp_service_call(cmd, sccb); + if (rc) + goto out; + __load_psw_mask(PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_EA | + PSW_MASK_BA | PSW_MASK_EXT | PSW_MASK_WAIT); + local_irq_disable(); +out: + /* Contents of the sccb might have changed. */ + barrier(); + __ctl_clear_bit(0, 9); + return rc; +} + +static int __init sclp_read_info_early(struct read_info_sccb *sccb) +{ + int rc, i; + sclp_cmdw_t commands[] = {SCLP_CMDW_READ_SCP_INFO_FORCED, + SCLP_CMDW_READ_SCP_INFO}; + + for (i = 0; i < ARRAY_SIZE(commands); i++) { + do { + memset(sccb, 0, sizeof(*sccb)); + sccb->header.length = sizeof(*sccb); + sccb->header.function_code = 0x80; + sccb->header.control_mask[2] = 0x80; + rc = sclp_cmd_sync_early(commands[i], sccb); + } while (rc == -EBUSY); + + if (rc) + break; + if (sccb->header.response_code == 0x10) + return 0; + if (sccb->header.response_code != 0x1f0) + break; + } + return -EIO; +} + +static void __init sclp_facilities_detect(struct read_info_sccb *sccb) +{ + struct sclp_cpu_entry *cpue; + u16 boot_cpu_address, cpu; + + if (sclp_read_info_early(sccb)) + return; + + sclp_facilities = sccb->facilities; + sclp_fac84 = sccb->fac84; + if (sccb->fac85 & 0x02) + S390_lowcore.machine_flags |= MACHINE_FLAG_ESOP; + sclp_rnmax = sccb->rnmax ? 
sccb->rnmax : sccb->rnmax2; + sclp_rzm = sccb->rnsize ? sccb->rnsize : sccb->rnsize2; + sclp_rzm <<= 20; + sclp_ibc = sccb->ibc; + + if (!sccb->hcpua) { + if (MACHINE_IS_VM) + sclp_max_cpu = 64; + else + sclp_max_cpu = sccb->ncpurl; + } else { + sclp_max_cpu = sccb->hcpua + 1; + } + + boot_cpu_address = stap(); + cpue = (void *)sccb + sccb->cpuoff; + for (cpu = 0; cpu < sccb->ncpurl; cpue++, cpu++) { + if (boot_cpu_address != cpue->address) + continue; + sclp_siif = cpue->siif; + break; + } + + /* Save IPL information */ + sclp_ipl_info.is_valid = 1; + if (sccb->flags & 0x2) + sclp_ipl_info.has_dump = 1; + memcpy(&sclp_ipl_info.loadparm, &sccb->loadparm, LOADPARM_LEN); +} + +bool __init sclp_has_linemode(void) +{ + return !!sclp_con_has_linemode; +} + +bool __init sclp_has_vt220(void) +{ + return !!sclp_con_has_vt220; +} + +unsigned long long sclp_get_rnmax(void) +{ + return sclp_rnmax; +} + +unsigned long long sclp_get_rzm(void) +{ + return sclp_rzm; +} + +unsigned int sclp_get_max_cpu(void) +{ + return sclp_max_cpu; +} + +int sclp_has_siif(void) +{ + return sclp_siif; +} +EXPORT_SYMBOL(sclp_has_siif); + +unsigned int sclp_get_ibc(void) +{ + return sclp_ibc; +} +EXPORT_SYMBOL(sclp_get_ibc); + +/* + * This function will be called after sclp_facilities_detect(), which gets + * called from early.c code. The sclp_facilities_detect() function retrieves + * and saves the IPL information. + */ +void __init sclp_get_ipl_info(struct sclp_ipl_info *info) +{ + *info = sclp_ipl_info; +} + +static int __init sclp_cmd_early(sclp_cmdw_t cmd, void *sccb) +{ + int rc; + + do { + rc = sclp_cmd_sync_early(cmd, sccb); + } while (rc == -EBUSY); + + if (rc) + return -EIO; + if (((struct sccb_header *) sccb)->response_code != 0x0020) + return -EIO; + return 0; +} + +static void __init sccb_init_eq_size(struct sdias_sccb *sccb) +{ + memset(sccb, 0, sizeof(*sccb)); + + sccb->hdr.length = sizeof(*sccb); + sccb->evbuf.hdr.length = sizeof(struct sdias_evbuf); + sccb->evbuf.hdr.type = EVTYP_SDIAS; + sccb->evbuf.event_qual = SDIAS_EQ_SIZE; + sccb->evbuf.data_id = SDIAS_DI_FCP_DUMP; + sccb->evbuf.event_id = 4712; + sccb->evbuf.dbs = 1; +} + +static int __init sclp_set_event_mask(struct init_sccb *sccb, + unsigned long receive_mask, + unsigned long send_mask) +{ + memset(sccb, 0, sizeof(*sccb)); + sccb->header.length = sizeof(*sccb); + sccb->mask_length = sizeof(sccb_mask_t); + sccb->receive_mask = receive_mask; + sccb->send_mask = send_mask; + return sclp_cmd_early(SCLP_CMDW_WRITE_EVENT_MASK, sccb); +} + +static long __init sclp_hsa_size_init(struct sdias_sccb *sccb) +{ + sccb_init_eq_size(sccb); + if (sclp_cmd_early(SCLP_CMDW_WRITE_EVENT_DATA, sccb)) + return -EIO; + if (sccb->evbuf.blk_cnt == 0) + return 0; + return (sccb->evbuf.blk_cnt - 1) * PAGE_SIZE; +} + +static long __init sclp_hsa_copy_wait(struct sccb_header *sccb) +{ + memset(sccb, 0, PAGE_SIZE); + sccb->length = PAGE_SIZE; + if (sclp_cmd_early(SCLP_CMDW_READ_EVENT_DATA, sccb)) + return -EIO; + if (((struct sdias_sccb *) sccb)->evbuf.blk_cnt == 0) + return 0; + return (((struct sdias_sccb *) sccb)->evbuf.blk_cnt - 1) * PAGE_SIZE; +} + +unsigned long sclp_get_hsa_size(void) +{ + return sclp_hsa_size; +} + +static void __init sclp_hsa_size_detect(void *sccb) +{ + long size; + + /* First try synchronous interface (LPAR) */ + if (sclp_set_event_mask(sccb, 0, 0x40000010)) + return; + size = sclp_hsa_size_init(sccb); + if (size < 0) + return; + if (size != 0) + goto out; + /* Then try asynchronous interface (z/VM) */ + if (sclp_set_event_mask(sccb, 0x00000010, 
0x40000010)) + return; + size = sclp_hsa_size_init(sccb); + if (size < 0) + return; + size = sclp_hsa_copy_wait(sccb); + if (size < 0) + return; +out: + sclp_hsa_size = size; +} + +static unsigned int __init sclp_con_check_linemode(struct init_sccb *sccb) +{ + if (!(sccb->sclp_send_mask & (EVTYP_OPCMD_MASK | EVTYP_PMSGCMD_MASK))) + return 0; + if (!(sccb->sclp_receive_mask & (EVTYP_MSG_MASK | EVTYP_PMSGCMD_MASK))) + return 0; + return 1; +} + +static void __init sclp_console_detect(struct init_sccb *sccb) +{ + if (sccb->header.response_code != 0x20) + return; + + if (sccb->sclp_send_mask & EVTYP_VT220MSG_MASK) + sclp_con_has_vt220 = 1; + + if (sclp_con_check_linemode(sccb)) + sclp_con_has_linemode = 1; +} + +void __init sclp_early_detect(void) +{ + void *sccb = &sccb_early; + + sclp_facilities_detect(sccb); + sclp_hsa_size_detect(sccb); + + /* Turn off SCLP event notifications. Also save remote masks in the + * sccb. These are sufficient to detect sclp console capabilities. + */ + sclp_set_event_mask(sccb, 0, 0); + sclp_console_detect(sccb); +} diff --git a/drivers/s390/char/sclp_ocf.c b/drivers/s390/char/sclp_ocf.c new file mode 100644 index 00000000000..2553db0fdb5 --- /dev/null +++ b/drivers/s390/char/sclp_ocf.c @@ -0,0 +1,144 @@ +/* + * SCLP OCF communication parameters sysfs interface + * + * Copyright IBM Corp. 2011 + * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com> + */ + +#define KMSG_COMPONENT "sclp_ocf" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/stat.h> +#include <linux/device.h> +#include <linux/string.h> +#include <linux/ctype.h> +#include <linux/kmod.h> +#include <linux/timer.h> +#include <linux/err.h> +#include <asm/ebcdic.h> +#include <asm/sclp.h> + +#include "sclp.h" + +#define OCF_LENGTH_HMC_NETWORK 8UL +#define OCF_LENGTH_CPC_NAME 8UL + +static char hmc_network[OCF_LENGTH_HMC_NETWORK + 1]; +static char cpc_name[OCF_LENGTH_CPC_NAME + 1]; + +static DEFINE_SPINLOCK(sclp_ocf_lock); +static struct work_struct sclp_ocf_change_work; + +static struct kset *ocf_kset; + +static void sclp_ocf_change_notify(struct work_struct *work) +{ + kobject_uevent(&ocf_kset->kobj, KOBJ_CHANGE); +} + +/* Handler for OCF event. Look for the CPC image name. */ +static void sclp_ocf_handler(struct evbuf_header *evbuf) +{ + struct gds_vector *v; + struct gds_subvector *sv, *netid, *cpc; + size_t size; + + /* Find the 0x9f00 block. */ + v = sclp_find_gds_vector(evbuf + 1, (void *) evbuf + evbuf->length, + 0x9f00); + if (!v) + return; + /* Find the 0x9f22 block inside the 0x9f00 block. */ + v = sclp_find_gds_vector(v + 1, (void *) v + v->length, 0x9f22); + if (!v) + return; + /* Find the 0x81 block inside the 0x9f22 block. */ + sv = sclp_find_gds_subvector(v + 1, (void *) v + v->length, 0x81); + if (!sv) + return; + /* Find the 0x01 block inside the 0x81 block. */ + netid = sclp_find_gds_subvector(sv + 1, (void *) sv + sv->length, 1); + /* Find the 0x02 block inside the 0x81 block. */ + cpc = sclp_find_gds_subvector(sv + 1, (void *) sv + sv->length, 2); + /* Copy network name and cpc name. 
*/ + spin_lock(&sclp_ocf_lock); + if (netid) { + size = min(OCF_LENGTH_HMC_NETWORK, (size_t) netid->length); + memcpy(hmc_network, netid + 1, size); + EBCASC(hmc_network, size); + hmc_network[size] = 0; + } + if (cpc) { + size = min(OCF_LENGTH_CPC_NAME, (size_t) cpc->length); + memcpy(cpc_name, cpc + 1, size); + EBCASC(cpc_name, size); + cpc_name[size] = 0; + } + spin_unlock(&sclp_ocf_lock); + schedule_work(&sclp_ocf_change_work); +} + +static struct sclp_register sclp_ocf_event = { + .receive_mask = EVTYP_OCF_MASK, + .receiver_fn = sclp_ocf_handler, +}; + +static ssize_t cpc_name_show(struct kobject *kobj, + struct kobj_attribute *attr, char *page) +{ + int rc; + + spin_lock_irq(&sclp_ocf_lock); + rc = snprintf(page, PAGE_SIZE, "%s\n", cpc_name); + spin_unlock_irq(&sclp_ocf_lock); + return rc; +} + +static struct kobj_attribute cpc_name_attr = + __ATTR(cpc_name, 0444, cpc_name_show, NULL); + +static ssize_t hmc_network_show(struct kobject *kobj, + struct kobj_attribute *attr, char *page) +{ + int rc; + + spin_lock_irq(&sclp_ocf_lock); + rc = snprintf(page, PAGE_SIZE, "%s\n", hmc_network); + spin_unlock_irq(&sclp_ocf_lock); + return rc; +} + +static struct kobj_attribute hmc_network_attr = + __ATTR(hmc_network, 0444, hmc_network_show, NULL); + +static struct attribute *ocf_attrs[] = { + &cpc_name_attr.attr, + &hmc_network_attr.attr, + NULL, +}; + +static struct attribute_group ocf_attr_group = { + .attrs = ocf_attrs, +}; + +static int __init ocf_init(void) +{ + int rc; + + INIT_WORK(&sclp_ocf_change_work, sclp_ocf_change_notify); + ocf_kset = kset_create_and_add("ocf", NULL, firmware_kobj); + if (!ocf_kset) + return -ENOMEM; + + rc = sysfs_create_group(&ocf_kset->kobj, &ocf_attr_group); + if (rc) { + kset_unregister(ocf_kset); + return rc; + } + + return sclp_register(&sclp_ocf_event); +} + +device_initcall(ocf_init); diff --git a/drivers/s390/char/sclp_quiesce.c b/drivers/s390/char/sclp_quiesce.c index 05909a7df8b..475e470d976 100644 --- a/drivers/s390/char/sclp_quiesce.c +++ b/drivers/s390/char/sclp_quiesce.c @@ -1,8 +1,7 @@ /* - * drivers/s390/char/sclp_quiesce.c * signal quiesce handler * - * (C) Copyright IBM Corp. 1999,2004 + * Copyright IBM Corp. 
1999, 2004 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com> * Peter Oberparleiter <peter.oberparleiter@de.ibm.com> */ @@ -13,9 +12,8 @@ #include <linux/smp.h> #include <linux/init.h> #include <linux/reboot.h> -#include <asm/atomic.h> +#include <linux/atomic.h> #include <asm/ptrace.h> -#include <asm/sigp.h> #include <asm/smp.h> #include "sclp.h" @@ -30,7 +28,8 @@ static void do_machine_quiesce(void) psw_t quiesce_psw; smp_send_stop(); - quiesce_psw.mask = PSW_BASE_BITS | PSW_MASK_WAIT; + quiesce_psw.mask = + PSW_MASK_BASE | PSW_MASK_EA | PSW_MASK_BA | PSW_MASK_WAIT; quiesce_psw.addr = 0xfff; __load_psw(quiesce_psw); } diff --git a/drivers/s390/char/sclp_rw.c b/drivers/s390/char/sclp_rw.c index 4be63be7344..3b13d58fe87 100644 --- a/drivers/s390/char/sclp_rw.c +++ b/drivers/s390/char/sclp_rw.c @@ -463,7 +463,7 @@ sclp_emit_buffer(struct sclp_buffer *buffer, /* Use write priority message */ sccb->msg_buf.header.type = EVTYP_PMSGCMD; else - return -ENOSYS; + return -EOPNOTSUPP; buffer->request.command = SCLP_CMDW_WRITE_EVENT_DATA; buffer->request.status = SCLP_REQ_FILLED; buffer->request.callback = sclp_writedata_callback; diff --git a/drivers/s390/char/sclp_sdias.c b/drivers/s390/char/sclp_sdias.c index 6a1c58dc61a..561a0414b35 100644 --- a/drivers/s390/char/sclp_sdias.c +++ b/drivers/s390/char/sclp_sdias.c @@ -1,18 +1,20 @@ /* - * Sclp "store data in absolut storage" + * SCLP "store data in absolute storage" * - * Copyright IBM Corp. 2003,2007 + * Copyright IBM Corp. 2003, 2013 * Author(s): Michael Holzheu */ #define KMSG_COMPONENT "sclp_sdias" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt +#include <linux/completion.h> #include <linux/sched.h> #include <asm/sclp.h> #include <asm/debug.h> #include <asm/ipl.h> +#include "sclp_sdias.h" #include "sclp.h" #include "sclp_rw.h" @@ -21,59 +23,36 @@ #define SDIAS_RETRIES 300 #define SDIAS_SLEEP_TICKS 50 -#define EQ_STORE_DATA 0x0 -#define EQ_SIZE 0x1 -#define DI_FCP_DUMP 0x0 -#define ASA_SIZE_32 0x0 -#define ASA_SIZE_64 0x1 -#define EVSTATE_ALL_STORED 0x0 -#define EVSTATE_NO_DATA 0x3 -#define EVSTATE_PART_STORED 0x10 - static struct debug_info *sdias_dbf; static struct sclp_register sclp_sdias_register = { .send_mask = EVTYP_SDIAS_MASK, }; -struct sdias_evbuf { - struct evbuf_header hdr; - u8 event_qual; - u8 data_id; - u64 reserved2; - u32 event_id; - u16 reserved3; - u8 asa_size; - u8 event_status; - u32 reserved4; - u32 blk_cnt; - u64 asa; - u32 reserved5; - u32 fbn; - u32 reserved6; - u32 lbn; - u16 reserved7; - u16 dbs; -} __attribute__((packed)); - -struct sdias_sccb { - struct sccb_header hdr; - struct sdias_evbuf evbuf; -} __attribute__((packed)); - static struct sdias_sccb sccb __attribute__((aligned(4096))); +static struct sdias_evbuf sdias_evbuf; -static int sclp_req_done; -static wait_queue_head_t sdias_wq; +static DECLARE_COMPLETION(evbuf_accepted); +static DECLARE_COMPLETION(evbuf_done); static DEFINE_MUTEX(sdias_mutex); -static void sdias_callback(struct sclp_req *request, void *data) +/* + * Called by SCLP base when read event data has been completed (async mode only) + */ +static void sclp_sdias_receiver_fn(struct evbuf_header *evbuf) { - struct sdias_sccb *cbsccb; + memcpy(&sdias_evbuf, evbuf, + min_t(unsigned long, sizeof(sdias_evbuf), evbuf->length)); + complete(&evbuf_done); + TRACE("sclp_sdias_receiver_fn done\n"); +} - cbsccb = (struct sdias_sccb *) request->sccb; - sclp_req_done = 1; - wake_up(&sdias_wq); /* Inform caller, that request is complete */ +/* + * Called by SCLP base when sdias event has been accepted + */ 
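The completions declared above replace the old sclp_req_done flag and its wait queue: evbuf_accepted is signalled once the SCLP base has accepted the request, and evbuf_done once the asynchronous read event carrying the answer arrives. A minimal self-contained sketch of the same two-stage handshake; the demo_* names are hypothetical and not part of this driver:

	#include <linux/completion.h>

	static DECLARE_COMPLETION(demo_accepted);	/* request accepted */
	static DECLARE_COMPLETION(demo_done);		/* answer arrived */

	static void demo_accept_cb(void)		/* acceptance callback */
	{
		complete(&demo_accepted);
	}

	static void demo_receive_cb(void)		/* receive callback */
	{
		complete(&demo_done);
	}

	static void demo_send_and_wait(void)
	{
		/* ...submit the request here... */
		wait_for_completion(&demo_accepted);	/* sleep until accepted */
		wait_for_completion(&demo_done);	/* then until answered */
	}

The callback below signals the first of these completions.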
+static void sdias_callback(struct sclp_req *request, void *data) +{ + complete(&evbuf_accepted); TRACE("callback done\n"); } @@ -83,7 +62,6 @@ static int sdias_sclp_send(struct sclp_req *req) int rc; for (retries = SDIAS_RETRIES; retries; retries--) { - sclp_req_done = 0; TRACE("add request\n"); rc = sclp_add_request(req); if (rc) { @@ -94,16 +72,31 @@ static int sdias_sclp_send(struct sclp_req *req) continue; } /* initiated, wait for completion of service call */ - wait_event(sdias_wq, (sclp_req_done == 1)); + wait_for_completion(&evbuf_accepted); if (req->status == SCLP_REQ_FAILED) { TRACE("sclp request failed\n"); - rc = -EIO; continue; } + /* if not accepted, retry */ + if (!(sccb.evbuf.hdr.flags & 0x80)) { + TRACE("sclp request failed: flags=%x\n", + sccb.evbuf.hdr.flags); + continue; + } + /* + * for the sync interface the response is in the initial sccb + */ + if (!sclp_sdias_register.receiver_fn) { + memcpy(&sdias_evbuf, &sccb.evbuf, sizeof(sdias_evbuf)); + TRACE("sync request done\n"); + return 0; + } + /* otherwise we wait for completion */ + wait_for_completion(&evbuf_done); TRACE("request done\n"); - break; + return 0; } - return rc; + return -EIO; } /* @@ -122,8 +115,8 @@ int sclp_sdias_blk_count(void) sccb.hdr.length = sizeof(sccb); sccb.evbuf.hdr.length = sizeof(struct sdias_evbuf); sccb.evbuf.hdr.type = EVTYP_SDIAS; - sccb.evbuf.event_qual = EQ_SIZE; - sccb.evbuf.data_id = DI_FCP_DUMP; + sccb.evbuf.event_qual = SDIAS_EQ_SIZE; + sccb.evbuf.data_id = SDIAS_DI_FCP_DUMP; sccb.evbuf.event_id = 4712; sccb.evbuf.dbs = 1; @@ -143,13 +136,12 @@ int sclp_sdias_blk_count(void) goto out; } - switch (sccb.evbuf.event_status) { + switch (sdias_evbuf.event_status) { case 0: - rc = sccb.evbuf.blk_cnt; + rc = sdias_evbuf.blk_cnt; break; default: - pr_err("SCLP error: %x\n", - sccb.evbuf.event_status); + pr_err("SCLP error: %x\n", sdias_evbuf.event_status); rc = -EIO; goto out; } @@ -183,13 +175,13 @@ int sclp_sdias_copy(void *dest, int start_blk, int nr_blks) sccb.evbuf.hdr.length = sizeof(struct sdias_evbuf); sccb.evbuf.hdr.type = EVTYP_SDIAS; sccb.evbuf.hdr.flags = 0; - sccb.evbuf.event_qual = EQ_STORE_DATA; - sccb.evbuf.data_id = DI_FCP_DUMP; + sccb.evbuf.event_qual = SDIAS_EQ_STORE_DATA; + sccb.evbuf.data_id = SDIAS_DI_FCP_DUMP; sccb.evbuf.event_id = 4712; -#ifdef __s390x__ - sccb.evbuf.asa_size = ASA_SIZE_64; +#ifdef CONFIG_64BIT + sccb.evbuf.asa_size = SDIAS_ASA_SIZE_64; #else - sccb.evbuf.asa_size = ASA_SIZE_32; + sccb.evbuf.asa_size = SDIAS_ASA_SIZE_32; #endif sccb.evbuf.event_status = 0; sccb.evbuf.blk_cnt = nr_blks; @@ -214,38 +206,70 @@ int sclp_sdias_copy(void *dest, int start_blk, int nr_blks) goto out; } - switch (sccb.evbuf.event_status) { - case EVSTATE_ALL_STORED: - TRACE("all stored\n"); - case EVSTATE_PART_STORED: - TRACE("part stored: %i\n", sccb.evbuf.blk_cnt); - break; - case EVSTATE_NO_DATA: - TRACE("no data\n"); - default: - pr_err("Error from SCLP while copying hsa. " - "Event status = %x\n", - sccb.evbuf.event_status); - rc = -EIO; + switch (sdias_evbuf.event_status) { + case SDIAS_EVSTATE_ALL_STORED: + TRACE("all stored\n"); + break; + case SDIAS_EVSTATE_PART_STORED: + TRACE("part stored: %i\n", sdias_evbuf.blk_cnt); + break; + case SDIAS_EVSTATE_NO_DATA: + TRACE("no data\n"); + /* fall through */ + default: + pr_err("Error from SCLP while copying hsa. 
Event status = %x\n", + sdias_evbuf.event_status); + rc = -EIO; } out: mutex_unlock(&sdias_mutex); return rc; } -int __init sclp_sdias_init(void) +static int __init sclp_sdias_register_check(void) { int rc; + rc = sclp_register(&sclp_sdias_register); + if (rc) + return rc; + if (sclp_sdias_blk_count() == 0) { + sclp_unregister(&sclp_sdias_register); + return -ENODEV; + } + return 0; +} + +static int __init sclp_sdias_init_sync(void) +{ + TRACE("Try synchronous mode\n"); + sclp_sdias_register.receive_mask = 0; + sclp_sdias_register.receiver_fn = NULL; + return sclp_sdias_register_check(); +} + +static int __init sclp_sdias_init_async(void) +{ + TRACE("Try asynchronous mode\n"); + sclp_sdias_register.receive_mask = EVTYP_SDIAS_MASK; + sclp_sdias_register.receiver_fn = sclp_sdias_receiver_fn; + return sclp_sdias_register_check(); +} + +int __init sclp_sdias_init(void) +{ if (ipl_info.type != IPL_TYPE_FCP_DUMP) return 0; sdias_dbf = debug_register("dump_sdias", 4, 1, 4 * sizeof(long)); debug_register_view(sdias_dbf, &debug_sprintf_view); debug_set_level(sdias_dbf, 6); - rc = sclp_register(&sclp_sdias_register); - if (rc) - return rc; - init_waitqueue_head(&sdias_wq); + if (sclp_sdias_init_sync() == 0) + goto out; + if (sclp_sdias_init_async() == 0) + goto out; + TRACE("init failed\n"); + return -ENODEV; +out: TRACE("init done\n"); return 0; } diff --git a/drivers/s390/char/sclp_sdias.h b/drivers/s390/char/sclp_sdias.h new file mode 100644 index 00000000000..f2431c41415 --- /dev/null +++ b/drivers/s390/char/sclp_sdias.h @@ -0,0 +1,46 @@ +/* + * SCLP "store data in absolute storage" + * + * Copyright IBM Corp. 2003, 2013 + */ + +#ifndef SCLP_SDIAS_H +#define SCLP_SDIAS_H + +#include "sclp.h" + +#define SDIAS_EQ_STORE_DATA 0x0 +#define SDIAS_EQ_SIZE 0x1 +#define SDIAS_DI_FCP_DUMP 0x0 +#define SDIAS_ASA_SIZE_32 0x0 +#define SDIAS_ASA_SIZE_64 0x1 +#define SDIAS_EVSTATE_ALL_STORED 0x0 +#define SDIAS_EVSTATE_NO_DATA 0x3 +#define SDIAS_EVSTATE_PART_STORED 0x10 + +struct sdias_evbuf { + struct evbuf_header hdr; + u8 event_qual; + u8 data_id; + u64 reserved2; + u32 event_id; + u16 reserved3; + u8 asa_size; + u8 event_status; + u32 reserved4; + u32 blk_cnt; + u64 asa; + u32 reserved5; + u32 fbn; + u32 reserved6; + u32 lbn; + u16 reserved7; + u16 dbs; +} __packed; + +struct sdias_sccb { + struct sccb_header hdr; + struct sdias_evbuf evbuf; +} __packed; + +#endif /* SCLP_SDIAS_H */ diff --git a/drivers/s390/char/sclp_tty.c b/drivers/s390/char/sclp_tty.c index 434ba04b130..7ed7a598781 100644 --- a/drivers/s390/char/sclp_tty.c +++ b/drivers/s390/char/sclp_tty.c @@ -1,9 +1,8 @@ /* - * drivers/s390/char/sclp_tty.c * SCLP line mode terminal driver. * * S390 version - * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Copyright IBM Corp. 1999 * Author(s): Martin Peschke <mpeschke@de.ibm.com> * Martin Schwidefsky <schwidefsky@de.ibm.com> */ @@ -13,10 +12,10 @@ #include <linux/tty.h> #include <linux/tty_driver.h> #include <linux/tty_flip.h> -#include <linux/slab.h> #include <linux/err.h> #include <linux/init.h> #include <linux/interrupt.h> +#include <linux/gfp.h> #include <asm/uaccess.h> #include "ctrlchar.h" @@ -48,7 +47,7 @@ static struct sclp_buffer *sclp_ttybuf; /* Timer for delayed output of console messages. 
*/ static struct timer_list sclp_tty_timer; -static struct tty_struct *sclp_tty; +static struct tty_port sclp_port; static unsigned char sclp_tty_chars[SCLP_TTY_BUF_SIZE]; static unsigned short int sclp_tty_chars_count; @@ -64,9 +63,9 @@ static int sclp_tty_columns = 80; static int sclp_tty_open(struct tty_struct *tty, struct file *filp) { - sclp_tty = tty; + tty_port_tty_set(&sclp_port, tty); tty->driver_data = NULL; - tty->low_latency = 0; + sclp_port.low_latency = 0; return 0; } @@ -76,7 +75,7 @@ sclp_tty_close(struct tty_struct *tty, struct file *filp) { if (tty->count > 1) return; - sclp_tty = NULL; + tty_port_tty_set(&sclp_port, NULL); } /* @@ -125,10 +124,8 @@ sclp_ttybuf_callback(struct sclp_buffer *buffer, int rc) struct sclp_buffer, list); spin_unlock_irqrestore(&sclp_tty_lock, flags); } while (buffer && sclp_emit_buffer(buffer, sclp_ttybuf_callback)); - /* check if the tty needs a wake up call */ - if (sclp_tty != NULL) { - tty_wakeup(sclp_tty); - } + + tty_port_tty_wakeup(&sclp_port); } static inline void @@ -326,21 +323,22 @@ sclp_tty_flush_buffer(struct tty_struct *tty) static void sclp_tty_input(unsigned char* buf, unsigned int count) { + struct tty_struct *tty = tty_port_tty_get(&sclp_port); unsigned int cchar; /* * If this tty driver is currently closed * then throw the received input away. */ - if (sclp_tty == NULL) + if (tty == NULL) return; - cchar = ctrlchar_handle(buf, count, sclp_tty); + cchar = ctrlchar_handle(buf, count, tty); switch (cchar & CTRLCHAR_MASK) { case CTRLCHAR_SYSRQ: break; case CTRLCHAR_CTRL: - tty_insert_flip_char(sclp_tty, cchar, TTY_NORMAL); - tty_flip_buffer_push(sclp_tty); + tty_insert_flip_char(&sclp_port, cchar, TTY_NORMAL); + tty_flip_buffer_push(&sclp_port); break; case CTRLCHAR_NONE: /* send (normal) input to line discipline */ @@ -348,13 +346,14 @@ sclp_tty_input(unsigned char* buf, unsigned int count) (strncmp((const char *) buf + count - 2, "^n", 2) && strncmp((const char *) buf + count - 2, "\252n", 2))) { /* add the auto \n */ - tty_insert_flip_string(sclp_tty, buf, count); - tty_insert_flip_char(sclp_tty, '\n', TTY_NORMAL); + tty_insert_flip_string(&sclp_port, buf, count); + tty_insert_flip_char(&sclp_port, '\n', TTY_NORMAL); } else - tty_insert_flip_string(sclp_tty, buf, count - 2); - tty_flip_buffer_push(sclp_tty); + tty_insert_flip_string(&sclp_port, buf, count - 2); + tty_flip_buffer_push(&sclp_port); break; } + tty_kref_put(tty); } /* @@ -408,118 +407,72 @@ static int sclp_switch_cases(unsigned char *buf, int count) return op - buf; } -static void -sclp_get_input(unsigned char *start, unsigned char *end) +static void sclp_get_input(struct gds_subvector *sv) { + unsigned char *str; int count; - count = end - start; + str = (unsigned char *) (sv + 1); + count = sv->length - sizeof(*sv); if (sclp_tty_tolower) - EBC_TOLOWER(start, count); - count = sclp_switch_cases(start, count); + EBC_TOLOWER(str, count); + count = sclp_switch_cases(str, count); /* convert EBCDIC to ASCII (modify original input in SCCB) */ - sclp_ebcasc_str(start, count); + sclp_ebcasc_str(str, count); /* transfer input to high level driver */ - sclp_tty_input(start, count); + sclp_tty_input(str, count); } -static inline struct gds_vector * -find_gds_vector(struct gds_vector *start, struct gds_vector *end, u16 id) +static inline void sclp_eval_selfdeftextmsg(struct gds_subvector *sv) { - struct gds_vector *vec; + void *end; - for (vec = start; vec < end; vec = (void *) vec + vec->length) - if (vec->gds_id == id) - return vec; - return NULL; + end = (void *) sv + 
sv->length; + for (sv = sv + 1; (void *) sv < end; sv = (void *) sv + sv->length) + if (sv->key == 0x30) + sclp_get_input(sv); } -static inline struct gds_subvector * -find_gds_subvector(struct gds_subvector *start, - struct gds_subvector *end, u8 key) +static inline void sclp_eval_textcmd(struct gds_vector *v) { - struct gds_subvector *subvec; + struct gds_subvector *sv; + void *end; - for (subvec = start; subvec < end; - subvec = (void *) subvec + subvec->length) - if (subvec->key == key) - return subvec; - return NULL; -} - -static inline void -sclp_eval_selfdeftextmsg(struct gds_subvector *start, - struct gds_subvector *end) -{ - struct gds_subvector *subvec; + end = (void *) v + v->length; + for (sv = (struct gds_subvector *) (v + 1); + (void *) sv < end; sv = (void *) sv + sv->length) + if (sv->key == GDS_KEY_SELFDEFTEXTMSG) + sclp_eval_selfdeftextmsg(sv); - subvec = start; - while (subvec < end) { - subvec = find_gds_subvector(subvec, end, 0x30); - if (!subvec) - break; - sclp_get_input((unsigned char *)(subvec + 1), - (unsigned char *) subvec + subvec->length); - subvec = (void *) subvec + subvec->length; - } } -static inline void -sclp_eval_textcmd(struct gds_subvector *start, - struct gds_subvector *end) +static inline void sclp_eval_cpmsu(struct gds_vector *v) { - struct gds_subvector *subvec; + void *end; - subvec = start; - while (subvec < end) { - subvec = find_gds_subvector(subvec, end, - GDS_KEY_SELFDEFTEXTMSG); - if (!subvec) - break; - sclp_eval_selfdeftextmsg((struct gds_subvector *)(subvec + 1), - (void *)subvec + subvec->length); - subvec = (void *) subvec + subvec->length; - } + end = (void *) v + v->length; + for (v = v + 1; (void *) v < end; v = (void *) v + v->length) + if (v->gds_id == GDS_ID_TEXTCMD) + sclp_eval_textcmd(v); } -static inline void -sclp_eval_cpmsu(struct gds_vector *start, struct gds_vector *end) -{ - struct gds_vector *vec; - - vec = start; - while (vec < end) { - vec = find_gds_vector(vec, end, GDS_ID_TEXTCMD); - if (!vec) - break; - sclp_eval_textcmd((struct gds_subvector *)(vec + 1), - (void *) vec + vec->length); - vec = (void *) vec + vec->length; - } -} - -static inline void -sclp_eval_mdsmu(struct gds_vector *start, void *end) +static inline void sclp_eval_mdsmu(struct gds_vector *v) { - struct gds_vector *vec; - - vec = find_gds_vector(start, end, GDS_ID_CPMSU); - if (vec) - sclp_eval_cpmsu(vec + 1, (void *) vec + vec->length); + v = sclp_find_gds_vector(v + 1, (void *) v + v->length, GDS_ID_CPMSU); + if (v) + sclp_eval_cpmsu(v); } -static void -sclp_tty_receiver(struct evbuf_header *evbuf) +static void sclp_tty_receiver(struct evbuf_header *evbuf) { - struct gds_vector *start, *end, *vec; + struct gds_vector *v; - start = (struct gds_vector *)(evbuf + 1); - end = (void *) evbuf + evbuf->length; - vec = find_gds_vector(start, end, GDS_ID_MDSMU); - if (vec) - sclp_eval_mdsmu(vec + 1, (void *) vec + vec->length); + v = sclp_find_gds_vector(evbuf + 1, (void *) evbuf + evbuf->length, + GDS_ID_MDSMU); + if (v) + sclp_eval_mdsmu(v); } static void @@ -589,7 +542,6 @@ sclp_tty_init(void) sclp_tty_tolower = 1; } sclp_tty_chars_count = 0; - sclp_tty = NULL; rc = sclp_register(&sclp_input_event); if (rc) { @@ -597,7 +549,8 @@ sclp_tty_init(void) return rc; } - driver->owner = THIS_MODULE; + tty_port_init(&sclp_port); + driver->driver_name = "sclp_line"; driver->name = "sclp_line"; driver->major = TTY_MAJOR; @@ -610,9 +563,11 @@ sclp_tty_init(void) driver->init_termios.c_lflag = ISIG | ECHO; driver->flags = TTY_DRIVER_REAL_RAW; 
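The conversion in this file drops the cached tty_struct pointer in favor of the tty_port initialized above; the port owns the tty reference, and input is delivered through the port-based flip-buffer calls. A minimal sketch of the lookup pattern used by sclp_tty_input; demo_push_input is a hypothetical name:

	#include <linux/tty.h>
	#include <linux/tty_flip.h>

	static void demo_push_input(struct tty_port *port,
				    const unsigned char *buf, size_t count)
	{
		/* Takes a reference on the tty, or returns NULL if closed. */
		struct tty_struct *tty = tty_port_tty_get(port);

		if (!tty)
			return;		/* nobody has the device open */
		tty_insert_flip_string(port, buf, count);
		tty_flip_buffer_push(port);
		tty_kref_put(tty);	/* drop the reference again */
	}

The init sequence continues below by linking the port to the tty driver.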
tty_set_operations(driver, &sclp_ops); + tty_port_link_device(&sclp_port, driver, 0); rc = tty_register_driver(driver); if (rc) { put_tty_driver(driver); + tty_port_destroy(&sclp_port); return rc; } sclp_tty_driver = driver; diff --git a/drivers/s390/char/sclp_tty.h b/drivers/s390/char/sclp_tty.h index 4b965b22fec..c8773421c31 100644 --- a/drivers/s390/char/sclp_tty.h +++ b/drivers/s390/char/sclp_tty.h @@ -1,9 +1,8 @@ /* - * drivers/s390/char/sclp_tty.h * interface to the SCLP-read/write driver * * S390 version - * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Copyright IBM Corp. 1999 * Author(s): Martin Peschke <mpeschke@de.ibm.com> * Martin Schwidefsky <schwidefsky@de.ibm.com> */ diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c index 3796ffdb847..b9a9f721716 100644 --- a/drivers/s390/char/sclp_vt220.c +++ b/drivers/s390/char/sclp_vt220.c @@ -23,6 +23,7 @@ #include <linux/interrupt.h> #include <linux/init.h> #include <linux/reboot.h> +#include <linux/slab.h> #include <asm/uaccess.h> #include "sclp.h" @@ -33,7 +34,6 @@ #define SCLP_VT220_DEVICE_NAME "ttysclp" #define SCLP_VT220_CONSOLE_NAME "ttyS" #define SCLP_VT220_CONSOLE_INDEX 1 /* console=ttyS1 */ -#define SCLP_VT220_BUF_SIZE 80 /* Representation of a single write request */ struct sclp_vt220_request { @@ -55,8 +55,7 @@ struct sclp_vt220_sccb { /* Structures and data needed to register tty driver */ static struct tty_driver *sclp_vt220_driver; -/* The tty_struct that the kernel associated with us */ -static struct tty_struct *sclp_vt220_tty; +static struct tty_port sclp_vt220_port; /* Lock to protect internal data from concurrent access */ static spinlock_t sclp_vt220_lock; @@ -98,13 +97,16 @@ static void sclp_vt220_pm_event_fn(struct sclp_register *reg, static int __sclp_vt220_emit(struct sclp_vt220_request *request); static void sclp_vt220_emit_current(void); -/* Registration structure for our interest in SCLP event buffers */ +/* Registration structure for SCLP output event buffers */ static struct sclp_register sclp_vt220_register = { .send_mask = EVTYP_VT220MSG_MASK, + .pm_event_fn = sclp_vt220_pm_event_fn, +}; + +/* Registration structure for SCLP input event buffers */ +static struct sclp_register sclp_vt220_register_input = { .receive_mask = EVTYP_VT220MSG_MASK, - .state_change_fn = NULL, .receiver_fn = sclp_vt220_receiver_fn, - .pm_event_fn = sclp_vt220_pm_event_fn, }; @@ -139,10 +141,7 @@ sclp_vt220_process_queue(struct sclp_vt220_request *request) } while (__sclp_vt220_emit(request)); if (request == NULL && sclp_vt220_flush_later) sclp_vt220_emit_current(); - /* Check if the tty needs a wake up call */ - if (sclp_vt220_tty != NULL) { - tty_wakeup(sclp_vt220_tty); - } + tty_port_tty_wakeup(&sclp_vt220_port); } #define SCLP_BUFFER_MAX_RETRY 1 @@ -366,6 +365,31 @@ sclp_vt220_timeout(unsigned long data) #define BUFFER_MAX_DELAY HZ/20 +/* + * Drop oldest console buffer if sclp_con_drop is set + */ +static int +sclp_vt220_drop_buffer(void) +{ + struct list_head *list; + struct sclp_vt220_request *request; + void *page; + + if (!sclp_console_drop) + return 0; + list = sclp_vt220_outqueue.next; + if (sclp_vt220_queue_running) + /* The first element is in I/O */ + list = list->next; + if (list == &sclp_vt220_outqueue) + return 0; + list_del(list); + request = list_entry(list, struct sclp_vt220_request, list); + page = request->sclp_req.sccb; + list_add_tail((struct list_head *) page, &sclp_vt220_empty); + return 1; +} + /* * Internal implementation of the write function. 
Write COUNT bytes of data * from memory at BUF @@ -394,12 +418,16 @@ __sclp_vt220_write(const unsigned char *buf, int count, int do_schedule, do { /* Create an sclp output buffer if none exists yet */ if (sclp_vt220_current_request == NULL) { + if (list_empty(&sclp_vt220_empty)) + sclp_console_full++; while (list_empty(&sclp_vt220_empty)) { - spin_unlock_irqrestore(&sclp_vt220_lock, flags); if (may_fail || sclp_vt220_suspended) goto out; - else - sclp_sync_wait(); + if (sclp_vt220_drop_buffer()) + break; + spin_unlock_irqrestore(&sclp_vt220_lock, flags); + + sclp_sync_wait(); spin_lock_irqsave(&sclp_vt220_lock, flags); } page = (void *) sclp_vt220_empty.next; @@ -432,8 +460,8 @@ __sclp_vt220_write(const unsigned char *buf, int count, int do_schedule, sclp_vt220_timer.expires = jiffies + BUFFER_MAX_DELAY; add_timer(&sclp_vt220_timer); } - spin_unlock_irqrestore(&sclp_vt220_lock, flags); out: + spin_unlock_irqrestore(&sclp_vt220_lock, flags); return overall_written; } @@ -462,10 +490,6 @@ sclp_vt220_receiver_fn(struct evbuf_header *evbuf) char *buffer; unsigned int count; - /* Ignore input if device is not open */ - if (sclp_vt220_tty == NULL) - return; - buffer = (char *) ((addr_t) evbuf + sizeof(struct evbuf_header)); count = evbuf->length - sizeof(struct evbuf_header); @@ -477,8 +501,8 @@ sclp_vt220_receiver_fn(struct evbuf_header *evbuf) /* Send input to line discipline */ buffer++; count--; - tty_insert_flip_string(sclp_vt220_tty, buffer, count); - tty_flip_buffer_push(sclp_vt220_tty); + tty_insert_flip_string(&sclp_vt220_port, buffer, count); + tty_flip_buffer_push(&sclp_vt220_port); break; } } @@ -490,11 +514,8 @@ static int sclp_vt220_open(struct tty_struct *tty, struct file *filp) { if (tty->count == 1) { - sclp_vt220_tty = tty; - tty->driver_data = kmalloc(SCLP_VT220_BUF_SIZE, GFP_KERNEL); - if (tty->driver_data == NULL) - return -ENOMEM; - tty->low_latency = 0; + tty_port_tty_set(&sclp_vt220_port, tty); + sclp_vt220_port.low_latency = 0; if (!tty->winsize.ws_row && !tty->winsize.ws_col) { tty->winsize.ws_row = 24; tty->winsize.ws_col = 80; @@ -509,11 +530,8 @@ sclp_vt220_open(struct tty_struct *tty, struct file *filp) static void sclp_vt220_close(struct tty_struct *tty, struct file *filp) { - if (tty->count == 1) { - sclp_vt220_tty = NULL; - kfree(tty->driver_data); - tty->driver_data = NULL; - } + if (tty->count == 1) + tty_port_tty_set(&sclp_vt220_port, NULL); } /* @@ -617,6 +635,7 @@ static void __init __sclp_vt220_cleanup(void) return; sclp_unregister(&sclp_vt220_register); __sclp_vt220_free_pages(); + tty_port_destroy(&sclp_vt220_port); } /* Allocate buffer pages and register with sclp core. 
Controlled by init @@ -634,9 +653,9 @@ static int __init __sclp_vt220_init(int num_pages) INIT_LIST_HEAD(&sclp_vt220_empty); INIT_LIST_HEAD(&sclp_vt220_outqueue); init_timer(&sclp_vt220_timer); + tty_port_init(&sclp_vt220_port); sclp_vt220_current_request = NULL; sclp_vt220_buffered_chars = 0; - sclp_vt220_tty = NULL; sclp_vt220_flush_later = 0; /* Allocate pages for output buffering */ @@ -652,6 +671,7 @@ out: if (rc) { __sclp_vt220_free_pages(); sclp_vt220_init_count--; + tty_port_destroy(&sclp_vt220_port); } return rc; } @@ -684,7 +704,6 @@ static int __init sclp_vt220_tty_init(void) if (rc) goto out_driver; - driver->owner = THIS_MODULE; driver->driver_name = SCLP_VT220_DRIVER_NAME; driver->name = SCLP_VT220_DEVICE_NAME; driver->major = SCLP_VT220_MAJOR; @@ -694,13 +713,19 @@ static int __init sclp_vt220_tty_init(void) driver->init_termios = tty_std_termios; driver->flags = TTY_DRIVER_REAL_RAW; tty_set_operations(driver, &sclp_vt220_ops); + tty_port_link_device(&sclp_vt220_port, driver, 0); rc = tty_register_driver(driver); if (rc) goto out_init; + rc = sclp_register(&sclp_vt220_register_input); + if (rc) + goto out_reg; sclp_vt220_driver = driver; return 0; +out_reg: + tty_unregister_driver(driver); out_init: __sclp_vt220_cleanup(); out_driver: @@ -813,9 +838,7 @@ sclp_vt220_con_init(void) { int rc; - if (!CONSOLE_IS_SCLP) - return 0; - rc = __sclp_vt220_init(MAX_CONSOLE_PAGES); + rc = __sclp_vt220_init(sclp_console_pages); if (rc) return rc; /* Attach linux console */ diff --git a/drivers/s390/char/tape.h b/drivers/s390/char/tape.h index 7a242f07363..ea664dd4f56 100644 --- a/drivers/s390/char/tape.h +++ b/drivers/s390/char/tape.h @@ -1,5 +1,4 @@ /* - * drivers/s390/char/tape.h * tape device driver for 3480/3490E/3590 tapes. * * S390 and zSeries version @@ -16,7 +15,6 @@ #include <asm/ccwdev.h> #include <asm/debug.h> #include <asm/idals.h> -#include <linux/blkdev.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/mtio.h> @@ -154,12 +152,6 @@ struct tape_discipline { struct tape_request *(*read_block)(struct tape_device *, size_t); struct tape_request *(*write_block)(struct tape_device *, size_t); void (*process_eov)(struct tape_device*); -#ifdef CONFIG_S390_TAPE_BLOCK - /* Block device stuff. */ - struct tape_request *(*bread)(struct tape_device *, struct request *); - void (*check_locate)(struct tape_device *, struct tape_request *); - void (*free_bread)(struct tape_request *); -#endif /* ioctl function for additional ioctls. */ int (*ioctl_fn)(struct tape_device *, unsigned int, unsigned long); /* Array of tape commands with TAPE_NR_MTOPS entries */ @@ -182,26 +174,6 @@ struct tape_char_data { int block_size; /* of size block_size. */ }; -#ifdef CONFIG_S390_TAPE_BLOCK -/* Block Frontend Data */ -struct tape_blk_data -{ - struct tape_device * device; - /* Block device request queue. */ - struct request_queue * request_queue; - spinlock_t request_queue_lock; - - /* Task to move entries from block request to CCS request queue. */ - struct work_struct requeue_task; - atomic_t requeue_scheduled; - - /* Current position on the tape. */ - long block_position; - int medium_changed; - struct gendisk * disk; -}; -#endif - /* Tape Info */ struct tape_device { /* entry in tape_device_list */ @@ -248,10 +220,6 @@ struct tape_device { /* Character device frontend data */ struct tape_char_data char_data; -#ifdef CONFIG_S390_TAPE_BLOCK - /* Block dev frontend data */ - struct tape_blk_data blk_data; -#endif /* Function to start or stop the next request later. 
*/ struct delayed_work tape_dnr; @@ -280,6 +248,14 @@ tape_do_io_free(struct tape_device *device, struct tape_request *request) return rc; } +static inline void +tape_do_io_async_free(struct tape_device *device, struct tape_request *request) +{ + request->callback = (void *) tape_free_request; + request->callback_data = NULL; + tape_do_io_async(device, request); +} + extern int tape_oper_handler(int irq, int status); extern void tape_noper_handler(int irq, int status); extern int tape_open(struct tape_device *); @@ -305,19 +281,6 @@ extern void tapechar_exit(void); extern int tapechar_setup_device(struct tape_device *); extern void tapechar_cleanup_device(struct tape_device *); -/* Externals from tape_block.c */ -#ifdef CONFIG_S390_TAPE_BLOCK -extern int tapeblock_init (void); -extern void tapeblock_exit(void); -extern int tapeblock_setup_device(struct tape_device *); -extern void tapeblock_cleanup_device(struct tape_device *); -#else -static inline int tapeblock_init (void) {return 0;} -static inline void tapeblock_exit (void) {;} -static inline int tapeblock_setup_device(struct tape_device *t) {return 0;} -static inline void tapeblock_cleanup_device (struct tape_device *t) {;} -#endif - /* tape initialisation functions */ #ifdef CONFIG_PROC_FS extern void tape_proc_init (void); diff --git a/drivers/s390/char/tape_34xx.c b/drivers/s390/char/tape_34xx.c index cb70fa1cf53..9aa79702b37 100644 --- a/drivers/s390/char/tape_34xx.c +++ b/drivers/s390/char/tape_34xx.c @@ -1,5 +1,4 @@ /* - * drivers/s390/char/tape_34xx.c * tape device discipline for 3480/3490 tapes. * * Copyright IBM Corp. 2001, 2009 @@ -15,6 +14,7 @@ #include <linux/init.h> #include <linux/bio.h> #include <linux/workqueue.h> +#include <linux/slab.h> #define TAPE_DBF_AREA tape_34xx_dbf @@ -52,23 +52,11 @@ static void tape_34xx_delete_sbid_from(struct tape_device *, int); * Medium sense for 34xx tapes. There is no 'real' medium sense call. * So we just do a normal sense. 
*/ -static int -tape_34xx_medium_sense(struct tape_device *device) +static void __tape_34xx_medium_sense(struct tape_request *request) { - struct tape_request *request; - unsigned char *sense; - int rc; - - request = tape_alloc_request(1, 32); - if (IS_ERR(request)) { - DBF_EXCEPTION(6, "MSEN fail\n"); - return PTR_ERR(request); - } - - request->op = TO_MSEN; - tape_ccw_end(request->cpaddr, SENSE, 32, request->cpdata); + struct tape_device *device = request->device; + unsigned char *sense; - rc = tape_do_io_interruptible(device, request); if (request->rc == 0) { sense = request->cpdata; @@ -87,15 +75,47 @@ tape_34xx_medium_sense(struct tape_device *device) device->tape_generic_status |= GMT_WR_PROT(~0); else device->tape_generic_status &= ~GMT_WR_PROT(~0); - } else { + } else DBF_EVENT(4, "tape_34xx: medium sense failed with rc=%d\n", request->rc); - } tape_free_request(request); +} + +static int tape_34xx_medium_sense(struct tape_device *device) +{ + struct tape_request *request; + int rc; + + request = tape_alloc_request(1, 32); + if (IS_ERR(request)) { + DBF_EXCEPTION(6, "MSEN fail\n"); + return PTR_ERR(request); + } + request->op = TO_MSEN; + tape_ccw_end(request->cpaddr, SENSE, 32, request->cpdata); + rc = tape_do_io_interruptible(device, request); + __tape_34xx_medium_sense(request); return rc; } +static void tape_34xx_medium_sense_async(struct tape_device *device) +{ + struct tape_request *request; + + request = tape_alloc_request(1, 32); + if (IS_ERR(request)) { + DBF_EXCEPTION(6, "MSEN fail\n"); + return; + } + + request->op = TO_MSEN; + tape_ccw_end(request->cpaddr, SENSE, 32, request->cpdata); + request->callback = (void *) __tape_34xx_medium_sense; + request->callback_data = NULL; + tape_do_io_async(device, request); +} + struct tape_34xx_work { struct tape_device *device; enum tape_op op; @@ -108,6 +128,9 @@ struct tape_34xx_work { * is inserted but cannot call tape_do_io* from an interrupt context. * Maybe that's useful for other actions we want to start from the * interrupt handler. + * Note: the work handler is called by the system work queue. The tape + * commands started by the handler need to be asynchronous, otherwise + * a deadlock can occur e.g. in case of a deferred cc=1 (see __tape_do_irq). */ static void tape_34xx_work_handler(struct work_struct *work) @@ -118,7 +141,7 @@ tape_34xx_work_handler(struct work_struct *work) switch(p->op) { case TO_MSEN: - tape_34xx_medium_sense(device); + tape_34xx_medium_sense_async(device); break; default: DBF_EVENT(3, "T34XX: internal error: unknown work\n"); @@ -299,20 +322,6 @@ tape_34xx_unit_check(struct tape_device *device, struct tape_request *request, inhibit_cu_recovery = (*device->modeset_byte & 0x80) ? 1 : 0; sense = irb->ecw; -#ifdef CONFIG_S390_TAPE_BLOCK - if (request->op == TO_BLOCK) { - /* - * Recovery for block device requests. Set the block_position - * to something invalid and retry. - */ - device->blk_data.block_position = -1; - if (request->retries-- <= 0) - return tape_34xx_erp_failed(request, -EIO); - else - return tape_34xx_erp_retry(request); - } -#endif - if ( sense[0] & SENSE_COMMAND_REJECT && sense[1] & SENSE_WRITE_PROTECT @@ -1105,123 +1114,6 @@ tape_34xx_mtseek(struct tape_device *device, int mt_count) return tape_do_io_free(device, request); } -#ifdef CONFIG_S390_TAPE_BLOCK -/* - * Tape block read for 34xx. 
- */ -static struct tape_request * -tape_34xx_bread(struct tape_device *device, struct request *req) -{ - struct tape_request *request; - struct ccw1 *ccw; - int count = 0; - unsigned off; - char *dst; - struct bio_vec *bv; - struct req_iterator iter; - struct tape_34xx_block_id * start_block; - - DBF_EVENT(6, "xBREDid:"); - - /* Count the number of blocks for the request. */ - rq_for_each_segment(bv, req, iter) - count += bv->bv_len >> (TAPEBLOCK_HSEC_S2B + 9); - - /* Allocate the ccw request. */ - request = tape_alloc_request(3+count+1, 8); - if (IS_ERR(request)) - return request; - - /* Setup ccws. */ - request->op = TO_BLOCK; - start_block = (struct tape_34xx_block_id *) request->cpdata; - start_block->block = blk_rq_pos(req) >> TAPEBLOCK_HSEC_S2B; - DBF_EVENT(6, "start_block = %i\n", start_block->block); - - ccw = request->cpaddr; - ccw = tape_ccw_cc(ccw, MODE_SET_DB, 1, device->modeset_byte); - - /* - * We always setup a nop after the mode set ccw. This slot is - * used in tape_std_check_locate to insert a locate ccw if the - * current tape position doesn't match the start block to be read. - * The second nop will be filled with a read block id which is in - * turn used by tape_34xx_free_bread to populate the segment bid - * table. - */ - ccw = tape_ccw_cc(ccw, NOP, 0, NULL); - ccw = tape_ccw_cc(ccw, NOP, 0, NULL); - - rq_for_each_segment(bv, req, iter) { - dst = kmap(bv->bv_page) + bv->bv_offset; - for (off = 0; off < bv->bv_len; off += TAPEBLOCK_HSEC_SIZE) { - ccw->flags = CCW_FLAG_CC; - ccw->cmd_code = READ_FORWARD; - ccw->count = TAPEBLOCK_HSEC_SIZE; - set_normalized_cda(ccw, (void*) __pa(dst)); - ccw++; - dst += TAPEBLOCK_HSEC_SIZE; - } - } - - ccw = tape_ccw_end(ccw, NOP, 0, NULL); - DBF_EVENT(6, "xBREDccwg\n"); - return request; -} - -static void -tape_34xx_free_bread (struct tape_request *request) -{ - struct ccw1* ccw; - - ccw = request->cpaddr; - if ((ccw + 2)->cmd_code == READ_BLOCK_ID) { - struct { - struct tape_34xx_block_id cbid; - struct tape_34xx_block_id dbid; - } __attribute__ ((packed)) *rbi_data; - - rbi_data = request->cpdata; - - if (request->device) - tape_34xx_add_sbid(request->device, rbi_data->cbid); - } - - /* Last ccw is a nop and doesn't need clear_normalized_cda */ - for (; ccw->flags & CCW_FLAG_CC; ccw++) - if (ccw->cmd_code == READ_FORWARD) - clear_normalized_cda(ccw); - tape_free_request(request); -} - -/* - * check_locate is called just before the tape request is passed to - * the common io layer for execution. It has to check the current - * tape position and insert a locate ccw if it doesn't match the - * start block for the request. - */ -static void -tape_34xx_check_locate(struct tape_device *device, struct tape_request *request) -{ - struct tape_34xx_block_id * start_block; - - start_block = (struct tape_34xx_block_id *) request->cpdata; - if (start_block->block == device->blk_data.block_position) - return; - - DBF_LH(4, "Block seek(%06d+%06d)\n", start_block->block, device->bof); - start_block->wrap = 0; - start_block->segment = 1; - start_block->format = (*device->modeset_byte & 0x08) ? - TAPE34XX_FMT_3480_XF : - TAPE34XX_FMT_3480; - start_block->block = start_block->block + device->bof; - tape_34xx_merge_sbid(device, start_block); - tape_ccw_cc(request->cpaddr + 1, LOCATE, 4, request->cpdata); - tape_ccw_cc(request->cpaddr + 2, READ_BLOCK_ID, 8, request->cpdata); -} -#endif - /* * List of 3480/3490 magnetic tape commands. 
*/ @@ -1271,11 +1163,6 @@ static struct tape_discipline tape_discipline_34xx = { .irq = tape_34xx_irq, .read_block = tape_std_read_block, .write_block = tape_std_write_block, -#ifdef CONFIG_S390_TAPE_BLOCK - .bread = tape_34xx_bread, - .free_bread = tape_34xx_free_bread, - .check_locate = tape_34xx_check_locate, -#endif .ioctl_fn = tape_34xx_ioctl, .mtop_array = tape_34xx_mtop }; @@ -1296,14 +1183,17 @@ tape_34xx_online(struct ccw_device *cdev) } static struct ccw_driver tape_34xx_driver = { - .name = "tape_34xx", - .owner = THIS_MODULE, + .driver = { + .name = "tape_34xx", + .owner = THIS_MODULE, + }, .ids = tape_34xx_ids, .probe = tape_generic_probe, .remove = tape_generic_remove, .set_online = tape_34xx_online, .set_offline = tape_generic_offline, .freeze = tape_generic_pm_suspend, + .int_class = IRQIO_TAP, }; static int diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c index 9821c588661..327cb19ad0b 100644 --- a/drivers/s390/char/tape_3590.c +++ b/drivers/s390/char/tape_3590.c @@ -1,5 +1,4 @@ /* - * drivers/s390/char/tape_3590.c * tape device discipline for 3590 tapes. * * Copyright IBM Corp. 2001, 2009 @@ -12,6 +11,7 @@ #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt #include <linux/module.h> +#include <linux/slab.h> #include <linux/init.h> #include <linux/bio.h> #include <asm/ebcdic.h> @@ -23,6 +23,8 @@ #include "tape_std.h" #include "tape_3590.h" +static struct workqueue_struct *tape_3590_wq; + /* * Pointer to debug area. */ @@ -30,7 +32,7 @@ debug_info_t *TAPE_DBF_AREA = NULL; EXPORT_SYMBOL(TAPE_DBF_AREA); /******************************************************************* - * Error Recovery fuctions: + * Error Recovery functions: * - Read Opposite: implemented * - Read Device (buffered) log: BRA * - Read Library log: BRA @@ -326,17 +328,17 @@ out: /* * Enable encryption */ -static int tape_3592_enable_crypt(struct tape_device *device) +static struct tape_request *__tape_3592_enable_crypt(struct tape_device *device) { struct tape_request *request; char *data; DBF_EVENT(6, "tape_3592_enable_crypt\n"); if (!crypt_supported(device)) - return -ENOSYS; + return ERR_PTR(-ENOSYS); request = tape_alloc_request(2, 72); if (IS_ERR(request)) - return PTR_ERR(request); + return request; data = request->cpdata; memset(data,0,72); @@ -351,23 +353,42 @@ static int tape_3592_enable_crypt(struct tape_device *device) request->op = TO_CRYPT_ON; tape_ccw_cc(request->cpaddr, MODE_SET_CB, 36, data); tape_ccw_end(request->cpaddr + 1, MODE_SET_CB, 36, data + 36); + return request; +} + +static int tape_3592_enable_crypt(struct tape_device *device) +{ + struct tape_request *request; + + request = __tape_3592_enable_crypt(device); + if (IS_ERR(request)) + return PTR_ERR(request); return tape_do_io_free(device, request); } +static void tape_3592_enable_crypt_async(struct tape_device *device) +{ + struct tape_request *request; + + request = __tape_3592_enable_crypt(device); + if (!IS_ERR(request)) + tape_do_io_async_free(device, request); +} + /* * Disable encryption */ -static int tape_3592_disable_crypt(struct tape_device *device) +static struct tape_request *__tape_3592_disable_crypt(struct tape_device *device) { struct tape_request *request; char *data; DBF_EVENT(6, "tape_3592_disable_crypt\n"); if (!crypt_supported(device)) - return -ENOSYS; + return ERR_PTR(-ENOSYS); request = tape_alloc_request(2, 72); if (IS_ERR(request)) - return PTR_ERR(request); + return request; data = request->cpdata; memset(data,0,72); @@ -380,9 +401,28 @@ static int tape_3592_disable_crypt(struct 
tape_device *device) tape_ccw_cc(request->cpaddr, MODE_SET_CB, 36, data); tape_ccw_end(request->cpaddr + 1, MODE_SET_CB, 36, data + 36); + return request; +} + +static int tape_3592_disable_crypt(struct tape_device *device) +{ + struct tape_request *request; + + request = __tape_3592_disable_crypt(device); + if (IS_ERR(request)) + return PTR_ERR(request); return tape_do_io_free(device, request); } +static void tape_3592_disable_crypt_async(struct tape_device *device) +{ + struct tape_request *request; + + request = __tape_3592_disable_crypt(device); + if (!IS_ERR(request)) + tape_do_io_async_free(device, request); +} + /* * IOCTL: Set encryption status */ @@ -454,8 +494,7 @@ tape_3590_ioctl(struct tape_device *device, unsigned int cmd, unsigned long arg) /* * SENSE Medium: Get Sense data about medium state */ -static int -tape_3590_sense_medium(struct tape_device *device) +static int tape_3590_sense_medium(struct tape_device *device) { struct tape_request *request; @@ -467,6 +506,18 @@ tape_3590_sense_medium(struct tape_device *device) return tape_do_io_free(device, request); } +static void tape_3590_sense_medium_async(struct tape_device *device) +{ + struct tape_request *request; + + request = tape_alloc_request(1, 128); + if (IS_ERR(request)) + return; + request->op = TO_MSEN; + tape_ccw_end(request->cpaddr, MEDIUM_SENSE, 128, request->cpdata); + tape_do_io_async_free(device, request); +} + /* * MTTELL: Tell block. Return the number of block relative to current file. */ @@ -543,15 +594,14 @@ tape_3590_read_opposite(struct tape_device *device, * 2. The attention msg is written to the "read subsystem data" buffer. * In this case we probably should print it to the console. */ -static int -tape_3590_read_attmsg(struct tape_device *device) +static void tape_3590_read_attmsg_async(struct tape_device *device) { struct tape_request *request; char *buf; request = tape_alloc_request(3, 4096); if (IS_ERR(request)) - return PTR_ERR(request); + return; request->op = TO_READ_ATTMSG; buf = request->cpdata; buf[0] = PREP_RD_SS_DATA; @@ -559,12 +609,15 @@ tape_3590_read_attmsg(struct tape_device *device) tape_ccw_cc(request->cpaddr, PERFORM_SS_FUNC, 12, buf); tape_ccw_cc(request->cpaddr + 1, READ_SS_DATA, 4096 - 12, buf + 12); tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL); - return tape_do_io_free(device, request); + tape_do_io_async_free(device, request); } /* * These functions are used to schedule follow-up actions from within an * interrupt context (like unsolicited interrupts). + * Note: the work handler is called by the system work queue. The tape + * commands started by the handler need to be asynchronous, otherwise + * a deadlock can occur e.g. in case of a deferred cc=1 (see __tape_do_irq). 
*/ struct work_handler_data { struct tape_device *device; @@ -580,16 +633,16 @@ tape_3590_work_handler(struct work_struct *work) switch (p->op) { case TO_MSEN: - tape_3590_sense_medium(p->device); + tape_3590_sense_medium_async(p->device); break; case TO_READ_ATTMSG: - tape_3590_read_attmsg(p->device); + tape_3590_read_attmsg_async(p->device); break; case TO_CRYPT_ON: - tape_3592_enable_crypt(p->device); + tape_3592_enable_crypt_async(p->device); break; case TO_CRYPT_OFF: - tape_3592_disable_crypt(p->device); + tape_3592_disable_crypt_async(p->device); break; default: DBF_EVENT(3, "T3590: work handler undefined for " @@ -612,96 +665,10 @@ tape_3590_schedule_work(struct tape_device *device, enum tape_op op) p->device = tape_get_device(device); p->op = op; - schedule_work(&p->work); + queue_work(tape_3590_wq, &p->work); return 0; } -#ifdef CONFIG_S390_TAPE_BLOCK -/* - * Tape Block READ - */ -static struct tape_request * -tape_3590_bread(struct tape_device *device, struct request *req) -{ - struct tape_request *request; - struct ccw1 *ccw; - int count = 0, start_block; - unsigned off; - char *dst; - struct bio_vec *bv; - struct req_iterator iter; - - DBF_EVENT(6, "xBREDid:"); - start_block = blk_rq_pos(req) >> TAPEBLOCK_HSEC_S2B; - DBF_EVENT(6, "start_block = %i\n", start_block); - - rq_for_each_segment(bv, req, iter) - count += bv->bv_len >> (TAPEBLOCK_HSEC_S2B + 9); - - request = tape_alloc_request(2 + count + 1, 4); - if (IS_ERR(request)) - return request; - request->op = TO_BLOCK; - *(__u32 *) request->cpdata = start_block; - ccw = request->cpaddr; - ccw = tape_ccw_cc(ccw, MODE_SET_DB, 1, device->modeset_byte); - - /* - * We always setup a nop after the mode set ccw. This slot is - * used in tape_std_check_locate to insert a locate ccw if the - * current tape position doesn't match the start block to be read. - */ - ccw = tape_ccw_cc(ccw, NOP, 0, NULL); - - rq_for_each_segment(bv, req, iter) { - dst = page_address(bv->bv_page) + bv->bv_offset; - for (off = 0; off < bv->bv_len; off += TAPEBLOCK_HSEC_SIZE) { - ccw->flags = CCW_FLAG_CC; - ccw->cmd_code = READ_FORWARD; - ccw->count = TAPEBLOCK_HSEC_SIZE; - set_normalized_cda(ccw, (void *) __pa(dst)); - ccw++; - dst += TAPEBLOCK_HSEC_SIZE; - } - BUG_ON(off > bv->bv_len); - } - ccw = tape_ccw_end(ccw, NOP, 0, NULL); - DBF_EVENT(6, "xBREDccwg\n"); - return request; -} - -static void -tape_3590_free_bread(struct tape_request *request) -{ - struct ccw1 *ccw; - - /* Last ccw is a nop and doesn't need clear_normalized_cda */ - for (ccw = request->cpaddr; ccw->flags & CCW_FLAG_CC; ccw++) - if (ccw->cmd_code == READ_FORWARD) - clear_normalized_cda(ccw); - tape_free_request(request); -} - -/* - * check_locate is called just before the tape request is passed to - * the common io layer for execution. It has to check the current - * tape position and insert a locate ccw if it doesn't match the - * start block for the request. - */ -static void -tape_3590_check_locate(struct tape_device *device, struct tape_request *request) -{ - __u32 *start_block; - - start_block = (__u32 *) request->cpdata; - if (*start_block != device->blk_data.block_position) { - /* Add the start offset of the file to get the real block. 
*/ - *start_block += device->bof; - tape_ccw_cc(request->cpaddr + 1, LOCATE, 4, request->cpdata); - } -} -#endif - static void tape_3590_med_state_set(struct tape_device *device, struct tape_3590_med_sense *sense) { @@ -742,10 +709,8 @@ static void tape_3590_med_state_set(struct tape_device *device, static int tape_3590_done(struct tape_device *device, struct tape_request *request) { - struct tape_3590_disc_data *disc_data; DBF_EVENT(6, "%s done\n", tape_op_verbose[request->op]); - disc_data = device->discdata; switch (request->op) { case TO_BSB: @@ -797,7 +762,7 @@ tape_3590_done(struct tape_device *device, struct tape_request *request) } /* - * This fuction is called, when error recovery was successfull + * This function is called, when error recovery was successful */ static inline int tape_3590_erp_succeded(struct tape_device *device, struct tape_request *request) @@ -808,7 +773,7 @@ tape_3590_erp_succeded(struct tape_device *device, struct tape_request *request) } /* - * This fuction is called, when error recovery was not successfull + * This function is called, when error recovery was not successful */ static inline int tape_3590_erp_failed(struct tape_device *device, struct tape_request *request, @@ -1340,17 +1305,12 @@ tape_3590_print_era_msg(struct tape_device *device, struct irb *irb) static int tape_3590_crypt_error(struct tape_device *device, struct tape_request *request, struct irb *irb) { - u8 cu_rc, ekm_rc1; + u8 cu_rc; u16 ekm_rc2; - u32 drv_rc; - const char *bus_id; char *sense; sense = ((struct tape_3590_sense *) irb->ecw)->fmt.data; - bus_id = dev_name(&device->cdev->dev); cu_rc = sense[0]; - drv_rc = *((u32*) &sense[5]) & 0xffffff; - ekm_rc1 = sense[9]; ekm_rc2 = *((u16*) &sense[10]); if ((cu_rc == 0) && (ekm_rc2 == 0xee31)) /* key not defined on EKM */ @@ -1375,21 +1335,6 @@ tape_3590_unit_check(struct tape_device *device, struct tape_request *request, struct irb *irb) { struct tape_3590_sense *sense; - int rc; - -#ifdef CONFIG_S390_TAPE_BLOCK - if (request->op == TO_BLOCK) { - /* - * Recovery for block device requests. Set the block_position - * to something invalid and retry. 
- */ - device->blk_data.block_position = -1; - if (request->retries-- <= 0) - return tape_3590_erp_failed(device, request, irb, -EIO); - else - return tape_3590_erp_retry(device, request, irb); - } -#endif sense = (struct tape_3590_sense *) irb->ecw; @@ -1400,7 +1345,6 @@ tape_3590_unit_check(struct tape_device *device, struct tape_request *request, * - "break": basic error recovery is done * - "goto out:": just print error message if available */ - rc = -EIO; switch (sense->rc_rqc) { case 0x1110: @@ -1628,7 +1572,7 @@ fail_kmalloc: static void tape_3590_cleanup_device(struct tape_device *device) { - flush_scheduled_work(); + flush_workqueue(tape_3590_wq); tape_std_unassign(device); kfree(device->discdata); @@ -1684,11 +1628,6 @@ static struct tape_discipline tape_discipline_3590 = { .irq = tape_3590_irq, .read_block = tape_std_read_block, .write_block = tape_std_write_block, -#ifdef CONFIG_S390_TAPE_BLOCK - .bread = tape_3590_bread, - .free_bread = tape_3590_free_bread, - .check_locate = tape_3590_check_locate, -#endif .ioctl_fn = tape_3590_ioctl, .mtop_array = tape_3590_mtop }; @@ -1707,14 +1646,17 @@ tape_3590_online(struct ccw_device *cdev) } static struct ccw_driver tape_3590_driver = { - .name = "tape_3590", - .owner = THIS_MODULE, + .driver = { + .name = "tape_3590", + .owner = THIS_MODULE, + }, .ids = tape_3590_ids, .probe = tape_generic_probe, .remove = tape_generic_remove, .set_offline = tape_generic_offline, .set_online = tape_3590_online, .freeze = tape_generic_pm_suspend, + .int_class = IRQIO_TAP, }; /* @@ -1732,11 +1674,17 @@ tape_3590_init(void) #endif DBF_EVENT(3, "3590 init\n"); + + tape_3590_wq = alloc_workqueue("tape_3590", 0, 0); + if (!tape_3590_wq) + return -ENOMEM; + /* Register driver for 3590 tapes. */ rc = ccw_driver_register(&tape_3590_driver); - if (rc) + if (rc) { + destroy_workqueue(tape_3590_wq); DBF_EVENT(3, "3590 init failed\n"); - else + } else DBF_EVENT(3, "3590 registered\n"); return rc; } @@ -1745,7 +1693,7 @@ static void tape_3590_exit(void) { ccw_driver_unregister(&tape_3590_driver); - + destroy_workqueue(tape_3590_wq); debug_unregister(TAPE_DBF_AREA); } diff --git a/drivers/s390/char/tape_3590.h b/drivers/s390/char/tape_3590.h index 4534055f137..36b759e89d2 100644 --- a/drivers/s390/char/tape_3590.h +++ b/drivers/s390/char/tape_3590.h @@ -1,8 +1,7 @@ /* - * drivers/s390/char/tape_3590.h * tape device discipline for 3590 tapes. * - * Copyright IBM Corp. 2001,2006 + * Copyright IBM Corp. 
2001, 2006 * Author(s): Stefan Bader <shbader@de.ibm.com> * Michael Holzheu <holzheu@de.ibm.com> * Martin Schwidefsky <schwidefsky@de.ibm.com> diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c deleted file mode 100644 index 097da8ce6be..00000000000 --- a/drivers/s390/char/tape_block.c +++ /dev/null @@ -1,439 +0,0 @@ -/* - * drivers/s390/char/tape_block.c - * block device frontend for tape device driver - * - * S390 and zSeries version - * Copyright (C) 2001,2003 IBM Deutschland Entwicklung GmbH, IBM Corporation - * Author(s): Carsten Otte <cotte@de.ibm.com> - * Tuan Ngo-Anh <ngoanh@de.ibm.com> - * Martin Schwidefsky <schwidefsky@de.ibm.com> - * Stefan Bader <shbader@de.ibm.com> - */ - -#define KMSG_COMPONENT "tape" -#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt - -#include <linux/fs.h> -#include <linux/module.h> -#include <linux/blkdev.h> -#include <linux/interrupt.h> -#include <linux/buffer_head.h> -#include <linux/kernel.h> - -#include <asm/debug.h> - -#define TAPE_DBF_AREA tape_core_dbf - -#include "tape.h" - -#define TAPEBLOCK_MAX_SEC 100 -#define TAPEBLOCK_MIN_REQUEUE 3 - -/* - * 2003/11/25 Stefan Bader <shbader@de.ibm.com> - * - * In 2.5/2.6 the block device request function is very likely to be called - * with disabled interrupts (e.g. generic_unplug_device). So the driver can't - * just call any function that tries to allocate CCW requests from that con- - * text since it might sleep. There are two choices to work around this: - * a) do not allocate with kmalloc but use its own memory pool - * b) take requests from the queue outside that context, knowing that - * allocation might sleep - */ - -/* - * file operation structure for tape block frontend - */ -static int tapeblock_open(struct block_device *, fmode_t); -static int tapeblock_release(struct gendisk *, fmode_t); -static int tapeblock_medium_changed(struct gendisk *); -static int tapeblock_revalidate_disk(struct gendisk *); - -static const struct block_device_operations tapeblock_fops = { - .owner = THIS_MODULE, - .open = tapeblock_open, - .release = tapeblock_release, - .media_changed = tapeblock_medium_changed, - .revalidate_disk = tapeblock_revalidate_disk, -}; - -static int tapeblock_major = 0; - -static void -tapeblock_trigger_requeue(struct tape_device *device) -{ - /* Protect against rescheduling. */ - if (atomic_cmpxchg(&device->blk_data.requeue_scheduled, 0, 1) != 0) - return; - schedule_work(&device->blk_data.requeue_task); -} - -/* - * Post finished request. - */ -static void -__tapeblock_end_request(struct tape_request *ccw_req, void *data) -{ - struct tape_device *device; - struct request *req; - - DBF_LH(6, "__tapeblock_end_request()\n"); - - device = ccw_req->device; - req = (struct request *) data; - blk_end_request_all(req, (ccw_req->rc == 0) ? 0 : -EIO); - if (ccw_req->rc == 0) - /* Update position. */ - device->blk_data.block_position = - (blk_rq_pos(req) + blk_rq_sectors(req)) >> TAPEBLOCK_HSEC_S2B; - else - /* We lost the position information due to an error. */ - device->blk_data.block_position = -1; - device->discipline->free_bread(ccw_req); - if (!list_empty(&device->req_queue) || - blk_peek_request(device->blk_data.request_queue)) - tapeblock_trigger_requeue(device); -} - -/* - * Feed the tape device CCW queue with requests supplied in a list. 
- */ -static int -tapeblock_start_request(struct tape_device *device, struct request *req) -{ - struct tape_request * ccw_req; - int rc; - - DBF_LH(6, "tapeblock_start_request(%p, %p)\n", device, req); - - ccw_req = device->discipline->bread(device, req); - if (IS_ERR(ccw_req)) { - DBF_EVENT(1, "TBLOCK: bread failed\n"); - blk_end_request_all(req, -EIO); - return PTR_ERR(ccw_req); - } - ccw_req->callback = __tapeblock_end_request; - ccw_req->callback_data = (void *) req; - ccw_req->retries = TAPEBLOCK_RETRIES; - - rc = tape_do_io_async(device, ccw_req); - if (rc) { - /* - * Start/enqueueing failed. No retries in - * this case. - */ - blk_end_request_all(req, -EIO); - device->discipline->free_bread(ccw_req); - } - - return rc; -} - -/* - * Move requests from the block device request queue to the tape device ccw - * queue. - */ -static void -tapeblock_requeue(struct work_struct *work) { - struct tape_blk_data * blkdat; - struct tape_device * device; - struct request_queue * queue; - int nr_queued; - struct request * req; - struct list_head * l; - int rc; - - blkdat = container_of(work, struct tape_blk_data, requeue_task); - device = blkdat->device; - if (!device) - return; - - spin_lock_irq(get_ccwdev_lock(device->cdev)); - queue = device->blk_data.request_queue; - - /* Count number of requests on ccw queue. */ - nr_queued = 0; - list_for_each(l, &device->req_queue) - nr_queued++; - spin_unlock(get_ccwdev_lock(device->cdev)); - - spin_lock_irq(&device->blk_data.request_queue_lock); - while ( - !blk_queue_plugged(queue) && - blk_peek_request(queue) && - nr_queued < TAPEBLOCK_MIN_REQUEUE - ) { - req = blk_fetch_request(queue); - if (rq_data_dir(req) == WRITE) { - DBF_EVENT(1, "TBLOCK: Rejecting write request\n"); - spin_unlock_irq(&device->blk_data.request_queue_lock); - blk_end_request_all(req, -EIO); - spin_lock_irq(&device->blk_data.request_queue_lock); - continue; - } - nr_queued++; - spin_unlock_irq(&device->blk_data.request_queue_lock); - rc = tapeblock_start_request(device, req); - spin_lock_irq(&device->blk_data.request_queue_lock); - } - spin_unlock_irq(&device->blk_data.request_queue_lock); - atomic_set(&device->blk_data.requeue_scheduled, 0); -} - -/* - * Tape request queue function. 
Called from ll_rw_blk.c - */ -static void -tapeblock_request_fn(struct request_queue *queue) -{ - struct tape_device *device; - - device = (struct tape_device *) queue->queuedata; - DBF_LH(6, "tapeblock_request_fn(device=%p)\n", device); - BUG_ON(device == NULL); - tapeblock_trigger_requeue(device); -} - -/* - * This function is called for every new tapedevice - */ -int -tapeblock_setup_device(struct tape_device * device) -{ - struct tape_blk_data * blkdat; - struct gendisk * disk; - int rc; - - blkdat = &device->blk_data; - blkdat->device = device; - spin_lock_init(&blkdat->request_queue_lock); - atomic_set(&blkdat->requeue_scheduled, 0); - - blkdat->request_queue = blk_init_queue( - tapeblock_request_fn, - &blkdat->request_queue_lock - ); - if (!blkdat->request_queue) - return -ENOMEM; - - elevator_exit(blkdat->request_queue->elevator); - rc = elevator_init(blkdat->request_queue, "noop"); - if (rc) - goto cleanup_queue; - - blk_queue_logical_block_size(blkdat->request_queue, TAPEBLOCK_HSEC_SIZE); - blk_queue_max_hw_sectors(blkdat->request_queue, TAPEBLOCK_MAX_SEC); - blk_queue_max_segments(blkdat->request_queue, -1L); - blk_queue_max_segment_size(blkdat->request_queue, -1L); - blk_queue_segment_boundary(blkdat->request_queue, -1L); - - disk = alloc_disk(1); - if (!disk) { - rc = -ENOMEM; - goto cleanup_queue; - } - - disk->major = tapeblock_major; - disk->first_minor = device->first_minor; - disk->fops = &tapeblock_fops; - disk->private_data = tape_get_device(device); - disk->queue = blkdat->request_queue; - set_capacity(disk, 0); - sprintf(disk->disk_name, "btibm%d", - device->first_minor / TAPE_MINORS_PER_DEV); - - blkdat->disk = disk; - blkdat->medium_changed = 1; - blkdat->request_queue->queuedata = tape_get_device(device); - - add_disk(disk); - - tape_get_device(device); - INIT_WORK(&blkdat->requeue_task, tapeblock_requeue); - - return 0; - -cleanup_queue: - blk_cleanup_queue(blkdat->request_queue); - blkdat->request_queue = NULL; - - return rc; -} - -void -tapeblock_cleanup_device(struct tape_device *device) -{ - flush_scheduled_work(); - tape_put_device(device); - - if (!device->blk_data.disk) { - goto cleanup_queue; - } - - del_gendisk(device->blk_data.disk); - device->blk_data.disk->private_data = NULL; - tape_put_device(device); - put_disk(device->blk_data.disk); - - device->blk_data.disk = NULL; -cleanup_queue: - device->blk_data.request_queue->queuedata = NULL; - tape_put_device(device); - - blk_cleanup_queue(device->blk_data.request_queue); - device->blk_data.request_queue = NULL; -} - -/* - * Detect number of blocks of the tape. - * FIXME: can we extent this to detect the blocks size as well ? - */ -static int -tapeblock_revalidate_disk(struct gendisk *disk) -{ - struct tape_device * device; - unsigned int nr_of_blks; - int rc; - - device = (struct tape_device *) disk->private_data; - BUG_ON(!device); - - if (!device->blk_data.medium_changed) - return 0; - - rc = tape_mtop(device, MTFSFM, 1); - if (rc) - return rc; - - rc = tape_mtop(device, MTTELL, 1); - if (rc < 0) - return rc; - - pr_info("%s: Determining the size of the recorded area...\n", - dev_name(&device->cdev->dev)); - DBF_LH(3, "Image file ends at %d\n", rc); - nr_of_blks = rc; - - /* This will fail for the first file. Catch the error by checking the - * position. 
*/ - tape_mtop(device, MTBSF, 1); - - rc = tape_mtop(device, MTTELL, 1); - if (rc < 0) - return rc; - - if (rc > nr_of_blks) - return -EINVAL; - - DBF_LH(3, "Image file starts at %d\n", rc); - device->bof = rc; - nr_of_blks -= rc; - - pr_info("%s: The size of the recorded area is %i blocks\n", - dev_name(&device->cdev->dev), nr_of_blks); - set_capacity(device->blk_data.disk, - nr_of_blks*(TAPEBLOCK_HSEC_SIZE/512)); - - device->blk_data.block_position = 0; - device->blk_data.medium_changed = 0; - return 0; -} - -static int -tapeblock_medium_changed(struct gendisk *disk) -{ - struct tape_device *device; - - device = (struct tape_device *) disk->private_data; - DBF_LH(6, "tapeblock_medium_changed(%p) = %d\n", - device, device->blk_data.medium_changed); - - return device->blk_data.medium_changed; -} - -/* - * Block frontend tape device open function. - */ -static int -tapeblock_open(struct block_device *bdev, fmode_t mode) -{ - struct gendisk * disk = bdev->bd_disk; - struct tape_device * device; - int rc; - - device = tape_get_device(disk->private_data); - - if (device->required_tapemarks) { - DBF_EVENT(2, "TBLOCK: missing tapemarks\n"); - pr_warning("%s: Opening the tape failed because of missing " - "end-of-file marks\n", dev_name(&device->cdev->dev)); - rc = -EPERM; - goto put_device; - } - - rc = tape_open(device); - if (rc) - goto put_device; - - rc = tapeblock_revalidate_disk(disk); - if (rc) - goto release; - - /* - * Note: The reference to <device> is hold until the release function - * is called. - */ - tape_state_set(device, TS_BLKUSE); - return 0; - -release: - tape_release(device); - put_device: - tape_put_device(device); - return rc; -} - -/* - * Block frontend tape device release function. - * - * Note: One reference to the tape device was made by the open function. So - * we just get the pointer here and release the reference. - */ -static int -tapeblock_release(struct gendisk *disk, fmode_t mode) -{ - struct tape_device *device = disk->private_data; - - tape_state_set(device, TS_IN_USE); - tape_release(device); - tape_put_device(device); - - return 0; -} - -/* - * Initialize block device frontend. - */ -int -tapeblock_init(void) -{ - int rc; - - /* Register the tape major number to the kernel */ - rc = register_blkdev(tapeblock_major, "tBLK"); - if (rc < 0) - return rc; - - if (tapeblock_major == 0) - tapeblock_major = rc; - return 0; -} - -/* - * Deregister major for block device frontend - */ -void -tapeblock_exit(void) -{ - unregister_blkdev(tapeblock_major, "tBLK"); -} diff --git a/drivers/s390/char/tape_char.c b/drivers/s390/char/tape_char.c index 539045acaad..6dc60725de9 100644 --- a/drivers/s390/char/tape_char.c +++ b/drivers/s390/char/tape_char.c @@ -1,9 +1,8 @@ /* - * drivers/s390/char/tape_char.c * character device frontend for tape device driver * * S390 and zSeries version - * Copyright IBM Corp. 2001,2006 + * Copyright IBM Corp. 
2001, 2006 * Author(s): Carsten Otte <cotte@de.ibm.com> * Michael Holzheu <holzheu@de.ibm.com> * Tuan Ngo-Anh <ngoanh@de.ibm.com> @@ -17,7 +16,6 @@ #include <linux/types.h> #include <linux/proc_fs.h> #include <linux/mtio.h> -#include <linux/smp_lock.h> #include <linux/compat.h> #include <asm/uaccess.h> @@ -53,6 +51,7 @@ static const struct file_operations tape_fops = #endif .open = tapechar_open, .release = tapechar_release, + .llseek = no_llseek, }; static int tapechar_major = TAPECHAR_MAJOR; @@ -139,7 +138,7 @@ tapechar_read(struct file *filp, char __user *data, size_t count, loff_t *ppos) /* * If the tape isn't terminated yet, do it now. And since we then * are at the end of the tape there wouldn't be anything to read - * anyways. So we return immediatly. + * anyways. So we return immediately. */ if(device->required_tapemarks) { return tape_std_terminate_write(device); @@ -161,11 +160,6 @@ tapechar_read(struct file *filp, char __user *data, size_t count, loff_t *ppos) if (rc) return rc; -#ifdef CONFIG_S390_TAPE_BLOCK - /* Changes position. */ - device->blk_data.medium_changed = 1; -#endif - DBF_EVENT(6, "TCHAR:nbytes: %lx\n", block_size); /* Let the discipline build the ccw chain. */ request = device->discipline->read_block(device, block_size); @@ -218,11 +212,6 @@ tapechar_write(struct file *filp, const char __user *data, size_t count, loff_t if (rc) return rc; -#ifdef CONFIG_S390_TAPE_BLOCK - /* Changes position. */ - device->blk_data.medium_changed = 1; -#endif - DBF_EVENT(6,"TCHAR:nbytes: %lx\n", block_size); DBF_EVENT(6, "TCHAR:nblocks: %x\n", nblocks); /* Let the discipline build the ccw chain. */ @@ -284,13 +273,13 @@ tapechar_open (struct inode *inode, struct file *filp) int minor, rc; DBF_EVENT(6, "TCHAR:open: %i:%i\n", - imajor(filp->f_path.dentry->d_inode), - iminor(filp->f_path.dentry->d_inode)); + imajor(file_inode(filp)), + iminor(file_inode(filp))); - if (imajor(filp->f_path.dentry->d_inode) != tapechar_major) + if (imajor(file_inode(filp)) != tapechar_major) return -ENODEV; - minor = iminor(filp->f_path.dentry->d_inode); + minor = iminor(file_inode(filp)); device = tape_find_device(minor / TAPE_MINORS_PER_DEV); if (IS_ERR(device)) { DBF_EVENT(3, "TCHAR:open: tape_find_device() failed\n"); @@ -379,9 +368,6 @@ __tapechar_ioctl(struct tape_device *device, case MTBSFM: case MTFSFM: case MTSEEK: -#ifdef CONFIG_S390_TAPE_BLOCK - device->blk_data.medium_changed = 1; -#endif if (device->required_tapemarks) tape_std_terminate_write(device); default: diff --git a/drivers/s390/char/tape_class.c b/drivers/s390/char/tape_class.c index b2864e3edb6..91c3c642c76 100644 --- a/drivers/s390/char/tape_class.c +++ b/drivers/s390/char/tape_class.c @@ -1,6 +1,5 @@ /* - * (C) Copyright IBM Corp. 2004 - * tape_class.c + * Copyright IBM Corp. 2004 * * Tape class device support * @@ -11,11 +10,13 @@ #define KMSG_COMPONENT "tape" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt +#include <linux/slab.h> + #include "tape_class.h" MODULE_AUTHOR("Stefan Bader <shbader@de.ibm.com>"); MODULE_DESCRIPTION( - "(C) Copyright IBM Corp. 2004 All Rights Reserved.\n" + "Copyright IBM Corp. 2004 All Rights Reserved.\n" "tape_class.c" ); MODULE_LICENSE("GPL"); @@ -76,7 +77,7 @@ struct tape_class_device *register_tape_dev( tcd->class_device = device_create(tape_class, device, tcd->char_device->dev, NULL, "%s", tcd->device_name); - rc = IS_ERR(tcd->class_device) ? 
PTR_ERR(tcd->class_device) : 0; + rc = PTR_RET(tcd->class_device); if (rc) goto fail_with_cdev; rc = sysfs_create_link( diff --git a/drivers/s390/char/tape_class.h b/drivers/s390/char/tape_class.h index 707b7f48c23..a332c10d50a 100644 --- a/drivers/s390/char/tape_class.h +++ b/drivers/s390/char/tape_class.h @@ -1,6 +1,5 @@ /* - * (C) Copyright IBM Corp. 2004 All Rights Reserved. - * tape_class.h + * Copyright IBM Corp. 2004 All Rights Reserved. * * Tape class device support * @@ -14,8 +13,6 @@ #include <linux/module.h> #include <linux/fs.h> #include <linux/major.h> -#include <linux/kobject.h> -#include <linux/kobj_map.h> #include <linux/cdev.h> #include <linux/device.h> diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c index 81b094e480e..f3b5123faf0 100644 --- a/drivers/s390/char/tape_core.c +++ b/drivers/s390/char/tape_core.c @@ -1,5 +1,4 @@ /* - * drivers/s390/char/tape_core.c * basic function of the tape device driver * * S390 and zSeries version @@ -20,6 +19,7 @@ #include <linux/spinlock.h> // for locks #include <linux/vmalloc.h> #include <linux/list.h> +#include <linux/slab.h> #include <asm/types.h> // for variable types @@ -208,29 +208,79 @@ tape_state_set(struct tape_device *device, enum tape_state newstate) wake_up(&device->state_change_wq); } +struct tape_med_state_work_data { + struct tape_device *device; + enum tape_medium_state state; + struct work_struct work; +}; + +static void +tape_med_state_work_handler(struct work_struct *work) +{ + static char env_state_loaded[] = "MEDIUM_STATE=LOADED"; + static char env_state_unloaded[] = "MEDIUM_STATE=UNLOADED"; + struct tape_med_state_work_data *p = + container_of(work, struct tape_med_state_work_data, work); + struct tape_device *device = p->device; + char *envp[] = { NULL, NULL }; + + switch (p->state) { + case MS_UNLOADED: + pr_info("%s: The tape cartridge has been successfully " + "unloaded\n", dev_name(&device->cdev->dev)); + envp[0] = env_state_unloaded; + kobject_uevent_env(&device->cdev->dev.kobj, KOBJ_CHANGE, envp); + break; + case MS_LOADED: + pr_info("%s: A tape cartridge has been mounted\n", + dev_name(&device->cdev->dev)); + envp[0] = env_state_loaded; + kobject_uevent_env(&device->cdev->dev.kobj, KOBJ_CHANGE, envp); + break; + default: + break; + } + tape_put_device(device); + kfree(p); +} + +static void +tape_med_state_work(struct tape_device *device, enum tape_medium_state state) +{ + struct tape_med_state_work_data *p; + + p = kzalloc(sizeof(*p), GFP_ATOMIC); + if (p) { + INIT_WORK(&p->work, tape_med_state_work_handler); + p->device = tape_get_device(device); + p->state = state; + schedule_work(&p->work); + } +} + void tape_med_state_set(struct tape_device *device, enum tape_medium_state newstate) { - if (device->medium_state == newstate) + enum tape_medium_state oldstate; + + oldstate = device->medium_state; + if (oldstate == newstate) return; + device->medium_state = newstate; switch(newstate){ case MS_UNLOADED: device->tape_generic_status |= GMT_DR_OPEN(~0); - if (device->medium_state == MS_LOADED) - pr_info("%s: The tape cartridge has been successfully " - "unloaded\n", dev_name(&device->cdev->dev)); + if (oldstate == MS_LOADED) + tape_med_state_work(device, MS_UNLOADED); break; case MS_LOADED: device->tape_generic_status &= ~GMT_DR_OPEN(~0); - if (device->medium_state == MS_UNLOADED) - pr_info("%s: A tape cartridge has been mounted\n", - dev_name(&device->cdev->dev)); + if (oldstate == MS_UNLOADED) + tape_med_state_work(device, MS_LOADED); break; default: - // print nothing break; } - 
device->medium_state = newstate; wake_up(&device->state_change_wq); } @@ -350,9 +400,6 @@ tape_generic_online(struct tape_device *device, rc = tapechar_setup_device(device); if (rc) goto out_minor; - rc = tapeblock_setup_device(device); - if (rc) - goto out_char; tape_state_set(device, TS_UNUSED); @@ -360,8 +407,6 @@ tape_generic_online(struct tape_device *device, return 0; -out_char: - tapechar_cleanup_device(device); out_minor: tape_remove_minor(device); out_discipline: @@ -375,7 +420,6 @@ out: static void tape_cleanup_device(struct tape_device *device) { - tapeblock_cleanup_device(device); tapechar_cleanup_device(device); device->discipline->cleanup_device(device); module_put(device->discipline->owner); @@ -734,10 +778,6 @@ __tape_start_io(struct tape_device *device, struct tape_request *request) { int rc; -#ifdef CONFIG_S390_TAPE_BLOCK - if (request->op == TO_BLOCK) - device->discipline->check_locate(device, request); -#endif rc = ccw_device_start( device->cdev, request->cpaddr, @@ -1076,15 +1116,14 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb) /* FIXME: What to do with the request? */ switch (PTR_ERR(irb)) { case -ETIMEDOUT: - DBF_LH(1, "(%s): Request timed out\n", - dev_name(&cdev->dev)); + DBF_LH(1, "(%08x): Request timed out\n", + device->cdev_id); case -EIO: __tape_end_request(device, request, -EIO); break; default: - DBF_LH(1, "(%s): Unexpected i/o error %li\n", - dev_name(&cdev->dev), - PTR_ERR(irb)); + DBF_LH(1, "(%08x): Unexpected i/o error %li\n", + device->cdev_id, PTR_ERR(irb)); } return; } @@ -1203,7 +1242,7 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb) } /* - * Tape device open function used by tape_char & tape_block frontends. + * Tape device open function used by tape_char frontend. */ int tape_open(struct tape_device *device) @@ -1233,7 +1272,7 @@ tape_open(struct tape_device *device) } /* - * Tape device release function used by tape_char & tape_block frontends. + * Tape device release function used by tape_char frontend. */ int tape_release(struct tape_device *device) @@ -1294,7 +1333,6 @@ tape_init (void) DBF_EVENT(3, "tape init\n"); tape_proc_init(); tapechar_init (); - tapeblock_init (); return 0; } @@ -1308,7 +1346,6 @@ tape_exit(void) /* Get rid of the frontends */ tapechar_exit(); - tapeblock_exit(); tape_proc_cleanup(); debug_unregister (TAPE_DBF_AREA); } diff --git a/drivers/s390/char/tape_proc.c b/drivers/s390/char/tape_proc.c index 0ceb37984f7..8733b232a11 100644 --- a/drivers/s390/char/tape_proc.c +++ b/drivers/s390/char/tape_proc.c @@ -1,9 +1,8 @@ /* - * drivers/s390/char/tape.c * tape device driver for S/390 and zSeries tapes. * * S390 and zSeries version - * Copyright (C) 2001 IBM Corporation + * Copyright IBM Corp. 2001 * Author(s): Carsten Otte <cotte@de.ibm.com> * Michael Holzheu <holzheu@de.ibm.com> * Tuan Ngo-Anh <ngoanh@de.ibm.com> diff --git a/drivers/s390/char/tape_std.c b/drivers/s390/char/tape_std.c index 03f07e5dd6e..3478e19ae19 100644 --- a/drivers/s390/char/tape_std.c +++ b/drivers/s390/char/tape_std.c @@ -1,9 +1,8 @@ /* - * drivers/s390/char/tape_std.c * standard tape device functions for ibm tapes. * * S390 and zSeries version - * Copyright (C) 2001,2002 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Copyright IBM Corp. 
2001, 2002 * Author(s): Carsten Otte <cotte@de.ibm.com> * Michael Holzheu <holzheu@de.ibm.com> * Tuan Ngo-Anh <ngoanh@de.ibm.com> @@ -47,8 +46,8 @@ tape_std_assign_timeout(unsigned long data) device->cdev_id); rc = tape_cancel_io(device, request); if(rc) - DBF_EVENT(3, "(%s): Assign timeout: Cancel failed with rc = %i\n", - dev_name(&device->cdev->dev), rc); + DBF_EVENT(3, "(%08x): Assign timeout: Cancel failed with rc = " + "%i\n", device->cdev_id, rc); } int @@ -79,7 +78,8 @@ tape_std_assign(struct tape_device *device) rc = tape_do_io_interruptible(device, request); - del_timer(&timeout); + del_timer_sync(&timeout); + destroy_timer_on_stack(&timeout); if (rc != 0) { DBF_EVENT(3, "%08x: assign failed - device might be busy\n", @@ -564,7 +564,6 @@ int tape_std_mtreten(struct tape_device *device, int mt_count) { struct tape_request *request; - int rc; request = tape_alloc_request(4, 0); if (IS_ERR(request)) @@ -576,7 +575,7 @@ tape_std_mtreten(struct tape_device *device, int mt_count) tape_ccw_cc(request->cpaddr + 2, NOP, 0, NULL); tape_ccw_end(request->cpaddr + 3, CCW_CMD_TIC, 0, request->cpaddr); /* execute it, MTRETEN rc gets ignored */ - rc = tape_do_io_interruptible(device, request); + tape_do_io_interruptible(device, request); tape_free_request(request); return tape_mtop(device, MTREW, 1); } diff --git a/drivers/s390/char/tape_std.h b/drivers/s390/char/tape_std.h index 1fc95235934..8c760c03683 100644 --- a/drivers/s390/char/tape_std.h +++ b/drivers/s390/char/tape_std.h @@ -1,8 +1,7 @@ /* - * drivers/s390/char/tape_std.h * standard tape device functions for ibm tapes. * - * Copyright (C) IBM Corp. 2001,2006 + * Copyright IBM Corp. 2001, 2006 * Author(s): Carsten Otte <cotte@de.ibm.com> * Tuan Ngo-Anh <ngoanh@de.ibm.com> * Martin Schwidefsky <schwidefsky@de.ibm.com> @@ -101,11 +100,7 @@ struct tape_request *tape_std_read_block(struct tape_device *, size_t); void tape_std_read_backward(struct tape_device *device, struct tape_request *request); struct tape_request *tape_std_write_block(struct tape_device *, size_t); -struct tape_request *tape_std_bread(struct tape_device *, struct request *); -void tape_std_free_bread(struct tape_request *); void tape_std_check_locate(struct tape_device *, struct tape_request *); -struct tape_request *tape_std_bwrite(struct request *, - struct tape_device *, int); /* Some non-mtop commands. */ int tape_std_assign(struct tape_device *); diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c index 911822db614..e91b89dc6d1 100644 --- a/drivers/s390/char/tty3270.c +++ b/drivers/s390/char/tty3270.c @@ -1,11 +1,10 @@ /* - * drivers/s390/char/tty3270.c * IBM/3270 Driver - tty functions. * * Author(s): * Original 3270 Code for 2.4 written by Richard Hitt (UTS Global) * Rewritten for 2.5 by Martin Schwidefsky <schwidefsky@de.ibm.com> - * -- Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation + * -- Copyright IBM Corp. 2003 */ #include <linux/module.h> @@ -16,6 +15,7 @@ #include <linux/init.h> #include <linux/console.h> #include <linux/interrupt.h> +#include <linux/workqueue.h> #include <linux/slab.h> #include <linux/bootmem.h> @@ -61,7 +61,7 @@ struct tty3270_line { */ struct tty3270 { struct raw3270_view view; - struct tty_struct *tty; /* Pointer to tty structure */ + struct tty_port port; void **freemem_pages; /* Array of pages used for freemem. */ struct list_head freemem; /* List of free memory for strings. 
*/ @@ -81,6 +81,8 @@ struct tty3270 { unsigned int highlight; /* Blink/reverse/underscore */ unsigned int f_color; /* Foreground color */ struct tty3270_line *screen; + unsigned int n_model, n_cols, n_rows; /* New model & size */ + struct work_struct resize_work; /* Input stuff. */ struct string *prompt; /* Output string for input area. */ @@ -116,16 +118,14 @@ struct tty3270 { #define TTY_UPDATE_ALL 16 /* Recreate screen. */ static void tty3270_update(struct tty3270 *); +static void tty3270_resize_work(struct work_struct *work); /* * Setup timeout for a device. On timeout trigger an update. */ static void tty3270_set_timer(struct tty3270 *tp, int expires) { - if (expires == 0) - del_timer(&tp->timer); - else - mod_timer(&tp->timer, jiffies + expires); + mod_timer(&tp->timer, jiffies + expires); } /* @@ -324,11 +324,10 @@ tty3270_blank_line(struct tty3270 *tp) static void tty3270_write_callback(struct raw3270_request *rq, void *data) { - struct tty3270 *tp; + struct tty3270 *tp = container_of(rq->view, struct tty3270, view); - tp = (struct tty3270 *) rq->view; if (rq->rc != 0) { - /* Write wasn't successfull. Refresh all. */ + /* Write wasn't successful. Refresh all. */ tp->update_flags = TTY_UPDATE_ALL; tty3270_set_timer(tp, 1); } @@ -450,10 +449,9 @@ tty3270_rcl_add(struct tty3270 *tp, char *input, int len) static void tty3270_rcl_backward(struct kbd_data *kbd) { - struct tty3270 *tp; + struct tty3270 *tp = container_of(kbd->port, struct tty3270, port); struct string *s; - tp = kbd->tty->driver_data; spin_lock_bh(&tp->view.lock); if (tp->inattr == TF_INPUT) { if (tp->rcl_walk && tp->rcl_walk->prev != &tp->rcl_lines) @@ -478,9 +476,8 @@ tty3270_rcl_backward(struct kbd_data *kbd) static void tty3270_exit_tty(struct kbd_data *kbd) { - struct tty3270 *tp; + struct tty3270 *tp = container_of(kbd->port, struct tty3270, port); - tp = kbd->tty->driver_data; raw3270_deactivate_view(&tp->view); } @@ -490,10 +487,9 @@ tty3270_exit_tty(struct kbd_data *kbd) static void tty3270_scroll_forward(struct kbd_data *kbd) { - struct tty3270 *tp; + struct tty3270 *tp = container_of(kbd->port, struct tty3270, port); int nr_up; - tp = kbd->tty->driver_data; spin_lock_bh(&tp->view.lock); nr_up = tp->nr_up - tp->view.rows + 2; if (nr_up < 0) @@ -513,10 +509,9 @@ tty3270_scroll_forward(struct kbd_data *kbd) static void tty3270_scroll_backward(struct kbd_data *kbd) { - struct tty3270 *tp; + struct tty3270 *tp = container_of(kbd->port, struct tty3270, port); int nr_up; - tp = kbd->tty->driver_data; spin_lock_bh(&tp->view.lock); nr_up = tp->nr_up + tp->view.rows - 2; if (nr_up + tp->view.rows - 2 > tp->nr_lines) @@ -537,11 +532,10 @@ static void tty3270_read_tasklet(struct raw3270_request *rrq) { static char kreset_data = TW_KR; - struct tty3270 *tp; + struct tty3270 *tp = container_of(rrq->view, struct tty3270, view); char *input; int len; - tp = (struct tty3270 *) rrq->view; spin_lock_bh(&tp->view.lock); /* * Two AID keys are special: For 0x7d (enter) the input line @@ -577,13 +571,10 @@ tty3270_read_tasklet(struct raw3270_request *rrq) raw3270_request_add_data(tp->kreset, &kreset_data, 1); raw3270_start(&tp->view, tp->kreset); - /* Emit input string. */ - if (tp->tty) { - while (len-- > 0) - kbd_keycode(tp->kbd, *input++); - /* Emit keycode for AID byte. */ - kbd_keycode(tp->kbd, 256 + tp->input->string[0]); - } + while (len-- > 0) + kbd_keycode(tp->kbd, *input++); + /* Emit keycode for AID byte. 
*/ + kbd_keycode(tp->kbd, 256 + tp->input->string[0]); raw3270_request_reset(rrq); xchg(&tp->read, rrq); @@ -596,9 +587,10 @@ tty3270_read_tasklet(struct raw3270_request *rrq) static void tty3270_read_callback(struct raw3270_request *rq, void *data) { + struct tty3270 *tp = container_of(rq->view, struct tty3270, view); raw3270_get_view(rq->view); /* Schedule tasklet to pass input to tty. */ - tasklet_schedule(&((struct tty3270 *) rq->view)->readlet); + tasklet_schedule(&tp->readlet); } /* @@ -635,9 +627,8 @@ tty3270_issue_read(struct tty3270 *tp, int lock) static int tty3270_activate(struct raw3270_view *view) { - struct tty3270 *tp; + struct tty3270 *tp = container_of(view, struct tty3270, view); - tp = (struct tty3270 *) view; tp->update_flags = TTY_UPDATE_ALL; tty3270_set_timer(tp, 1); return 0; @@ -646,9 +637,8 @@ tty3270_activate(struct raw3270_view *view) static void tty3270_deactivate(struct raw3270_view *view) { - struct tty3270 *tp; + struct tty3270 *tp = container_of(view, struct tty3270, view); - tp = (struct tty3270 *) view; del_timer(&tp->timer); } @@ -690,6 +680,11 @@ tty3270_alloc_view(void) if (!tp->freemem_pages) goto out_tp; INIT_LIST_HEAD(&tp->freemem); + INIT_LIST_HEAD(&tp->lines); + INIT_LIST_HEAD(&tp->update); + INIT_LIST_HEAD(&tp->rcl_lines); + tp->rcl_max = 20; + for (pages = 0; pages < TTY3270_STRING_PAGES; pages++) { tp->freemem_pages[pages] = (void *) __get_free_pages(GFP_KERNEL|GFP_DMA, 0); @@ -710,6 +705,15 @@ tty3270_alloc_view(void) tp->kbd = kbd_alloc(); if (!tp->kbd) goto out_reset; + + tty_port_init(&tp->port); + setup_timer(&tp->timer, (void (*)(unsigned long)) tty3270_update, + (unsigned long) tp); + tasklet_init(&tp->readlet, + (void (*)(unsigned long)) tty3270_read_tasklet, + (unsigned long) tp->read); + INIT_WORK(&tp->resize_work, tty3270_resize_work); + return tp; out_reset: @@ -722,6 +726,7 @@ out_pages: while (pages--) free_pages((unsigned long) tp->freemem_pages[pages], 0); kfree(tp->freemem_pages); + tty_port_destroy(&tp->port); out_tp: kfree(tp); out_err: @@ -736,7 +741,6 @@ tty3270_free_view(struct tty3270 *tp) { int pages; - del_timer_sync(&tp->timer); kbd_free(tp->kbd); raw3270_request_free(tp->kreset); raw3270_request_free(tp->read); @@ -744,48 +748,103 @@ tty3270_free_view(struct tty3270 *tp) for (pages = 0; pages < TTY3270_STRING_PAGES; pages++) free_pages((unsigned long) tp->freemem_pages[pages], 0); kfree(tp->freemem_pages); + tty_port_destroy(&tp->port); kfree(tp); } /* * Allocate tty3270 screen. 
  */
-static int
-tty3270_alloc_screen(struct tty3270 *tp)
+static struct tty3270_line *
+tty3270_alloc_screen(unsigned int rows, unsigned int cols)
 {
+	struct tty3270_line *screen;
 	unsigned long size;
 	int lines;
 
-	size = sizeof(struct tty3270_line) * (tp->view.rows - 2);
-	tp->screen = kzalloc(size, GFP_KERNEL);
-	if (!tp->screen)
+	size = sizeof(struct tty3270_line) * (rows - 2);
+	screen = kzalloc(size, GFP_KERNEL);
+	if (!screen)
 		goto out_err;
-	for (lines = 0; lines < tp->view.rows - 2; lines++) {
-		size = sizeof(struct tty3270_cell) * tp->view.cols;
-		tp->screen[lines].cells = kzalloc(size, GFP_KERNEL);
-		if (!tp->screen[lines].cells)
+	for (lines = 0; lines < rows - 2; lines++) {
+		size = sizeof(struct tty3270_cell) * cols;
+		screen[lines].cells = kzalloc(size, GFP_KERNEL);
+		if (!screen[lines].cells)
 			goto out_screen;
 	}
-	return 0;
+	return screen;
 
 out_screen:
 	while (lines--)
-		kfree(tp->screen[lines].cells);
-	kfree(tp->screen);
+		kfree(screen[lines].cells);
+	kfree(screen);
 out_err:
-	return -ENOMEM;
+	return ERR_PTR(-ENOMEM);
 }
 
 /*
  * Free tty3270 screen.
  */
 static void
-tty3270_free_screen(struct tty3270 *tp)
+tty3270_free_screen(struct tty3270_line *screen, unsigned int rows)
 {
 	int lines;
 
-	for (lines = 0; lines < tp->view.rows - 2; lines++)
-		kfree(tp->screen[lines].cells);
-	kfree(tp->screen);
+	for (lines = 0; lines < rows - 2; lines++)
+		kfree(screen[lines].cells);
+	kfree(screen);
+}
+
+/*
+ * Resize tty3270 screen
+ */
+static void tty3270_resize_work(struct work_struct *work)
+{
+	struct tty3270 *tp = container_of(work, struct tty3270, resize_work);
+	struct tty3270_line *screen, *oscreen;
+	struct tty_struct *tty;
+	unsigned int orows;
+	struct winsize ws;
+
+	screen = tty3270_alloc_screen(tp->n_rows, tp->n_cols);
+	if (IS_ERR(screen))
+		return;
+	/* Switch to new output size */
+	spin_lock_bh(&tp->view.lock);
+	oscreen = tp->screen;
+	orows = tp->view.rows;
+	tp->view.model = tp->n_model;
+	tp->view.rows = tp->n_rows;
+	tp->view.cols = tp->n_cols;
+	tp->screen = screen;
+	free_string(&tp->freemem, tp->prompt);
+	free_string(&tp->freemem, tp->status);
+	tty3270_create_prompt(tp);
+	tty3270_create_status(tp);
+	tp->nr_up = 0;
+	while (tp->nr_lines < tp->view.rows - 2)
+		tty3270_blank_line(tp);
+	tp->update_flags = TTY_UPDATE_ALL;
+	spin_unlock_bh(&tp->view.lock);
+	tty3270_free_screen(oscreen, orows);
+	tty3270_set_timer(tp, 1);
+	/* Inform tty layer about new size */
+	tty = tty_port_tty_get(&tp->port);
+	if (!tty)
+		return;
+	ws.ws_row = tp->view.rows - 2;
+	ws.ws_col = tp->view.cols;
+	tty_do_resize(tty, &ws);
+}
+
+static void
+tty3270_resize(struct raw3270_view *view, int model, int rows, int cols)
+{
+	struct tty3270 *tp = container_of(view, struct tty3270, view);
+
+	tp->n_model = model;
+	tp->n_rows = rows;
+	tp->n_cols = cols;
+	schedule_work(&tp->resize_work);
 }
 
 /*
@@ -794,16 +853,15 @@ tty3270_free_screen(struct tty3270 *tp)
 static void
 tty3270_release(struct raw3270_view *view)
 {
-	struct tty3270 *tp;
-	struct tty_struct *tty;
+	struct tty3270 *tp = container_of(view, struct tty3270, view);
+	struct tty_struct *tty = tty_port_tty_get(&tp->port);
 
-	tp = (struct tty3270 *) view;
-	tty = tp->tty;
 	if (tty) {
 		tty->driver_data = NULL;
-		tp->tty = tp->kbd->tty = NULL;
+		tty_port_tty_set(&tp->port, NULL);
 		tty_hangup(tty);
 		raw3270_put_view(&tp->view);
+		tty_kref_put(tty);
 	}
 }
 
 /*
@@ -813,8 +871,11 @@ tty3270_release(struct raw3270_view *view)
 static void
 tty3270_free(struct raw3270_view *view)
 {
-	tty3270_free_screen((struct tty3270 *) view);
-	tty3270_free_view((struct
tty3270 *) view); + struct tty3270 *tp = container_of(view, struct tty3270, view); + + del_timer_sync(&tp->timer); + tty3270_free_screen(tp->screen, tp->view.rows); + tty3270_free_view(tp); } /* @@ -823,14 +884,12 @@ tty3270_free(struct raw3270_view *view) static void tty3270_del_views(void) { - struct tty3270 *tp; int i; - for (i = 0; i < tty3270_max_index; i++) { - tp = (struct tty3270 *) - raw3270_find_view(&tty3270_fn, i + RAW3270_FIRSTMINOR); - if (!IS_ERR(tp)) - raw3270_del_view(&tp->view); + for (i = RAW3270_FIRSTMINOR; i <= tty3270_max_index; i++) { + struct raw3270_view *view = raw3270_find_view(&tty3270_fn, i); + if (!IS_ERR(view)) + raw3270_del_view(view); } } @@ -839,56 +898,40 @@ static struct raw3270_fn tty3270_fn = { .deactivate = tty3270_deactivate, .intv = (void *) tty3270_irq, .release = tty3270_release, - .free = tty3270_free + .free = tty3270_free, + .resize = tty3270_resize }; /* - * This routine is called whenever a 3270 tty is opened. + * This routine is called whenever a 3270 tty is opened first time. */ -static int -tty3270_open(struct tty_struct *tty, struct file * filp) +static int tty3270_install(struct tty_driver *driver, struct tty_struct *tty) { + struct raw3270_view *view; struct tty3270 *tp; int i, rc; - if (tty->count > 1) - return 0; /* Check if the tty3270 is already there. */ - tp = (struct tty3270 *) - raw3270_find_view(&tty3270_fn, - tty->index + RAW3270_FIRSTMINOR); - if (!IS_ERR(tp)) { + view = raw3270_find_view(&tty3270_fn, tty->index + RAW3270_FIRSTMINOR); + if (!IS_ERR(view)) { + tp = container_of(view, struct tty3270, view); tty->driver_data = tp; tty->winsize.ws_row = tp->view.rows - 2; tty->winsize.ws_col = tp->view.cols; - tty->low_latency = 0; - tp->tty = tty; - tp->kbd->tty = tty; + tp->port.low_latency = 0; + /* why to reassign? */ + tty_port_tty_set(&tp->port, tty); tp->inattr = TF_INPUT; - return 0; + return tty_port_install(&tp->port, driver, tty); } if (tty3270_max_index < tty->index + 1) tty3270_max_index = tty->index + 1; - /* Quick exit if there is no device for tty->index. */ - if (PTR_ERR(tp) == -ENODEV) - return -ENODEV; - /* Allocate tty3270 structure on first open. 
*/ tp = tty3270_alloc_view(); if (IS_ERR(tp)) return PTR_ERR(tp); - INIT_LIST_HEAD(&tp->lines); - INIT_LIST_HEAD(&tp->update); - INIT_LIST_HEAD(&tp->rcl_lines); - tp->rcl_max = 20; - setup_timer(&tp->timer, (void (*)(unsigned long)) tty3270_update, - (unsigned long) tp); - tasklet_init(&tp->readlet, - (void (*)(unsigned long)) tty3270_read_tasklet, - (unsigned long) tp->read); - rc = raw3270_add_view(&tp->view, &tty3270_fn, tty->index + RAW3270_FIRSTMINOR); if (rc) { @@ -896,16 +939,17 @@ tty3270_open(struct tty_struct *tty, struct file * filp) return rc; } - rc = tty3270_alloc_screen(tp); - if (rc) { + tp->screen = tty3270_alloc_screen(tp->view.rows, tp->view.cols); + if (IS_ERR(tp->screen)) { + rc = PTR_ERR(tp->screen); raw3270_put_view(&tp->view); raw3270_del_view(&tp->view); + tty3270_free_view(tp); return rc; } - tp->tty = tty; - tty->low_latency = 0; - tty->driver_data = tp; + tty_port_tty_set(&tp->port, tty); + tp->port.low_latency = 0; tty->winsize.ws_row = tp->view.rows - 2; tty->winsize.ws_col = tp->view.cols; @@ -917,7 +961,7 @@ tty3270_open(struct tty_struct *tty, struct file * filp) for (i = 0; i < tp->view.rows - 2; i++) tty3270_blank_line(tp); - tp->kbd->tty = tty; + tp->kbd->port = &tp->port; tp->kbd->fn_handler[KVAL(K_INCRCONSOLE)] = tty3270_exit_tty; tp->kbd->fn_handler[KVAL(K_SCROLLBACK)] = tty3270_scroll_backward; tp->kbd->fn_handler[KVAL(K_SCROLLFORW)] = tty3270_scroll_forward; @@ -925,6 +969,29 @@ tty3270_open(struct tty_struct *tty, struct file * filp) kbd_ascebc(tp->kbd, tp->view.ascebc); raw3270_activate_view(&tp->view); + + rc = tty_port_install(&tp->port, driver, tty); + if (rc) { + raw3270_put_view(&tp->view); + return rc; + } + + tty->driver_data = tp; + + return 0; +} + +/* + * This routine is called whenever a 3270 tty is opened. + */ +static int +tty3270_open(struct tty_struct *tty, struct file *filp) +{ + struct tty3270 *tp = tty->driver_data; + struct tty_port *port = &tp->port; + + port->count++; + tty_port_tty_set(port, tty); return 0; } @@ -935,18 +1002,24 @@ tty3270_open(struct tty_struct *tty, struct file * filp) static void tty3270_close(struct tty_struct *tty, struct file * filp) { - struct tty3270 *tp; + struct tty3270 *tp = tty->driver_data; if (tty->count > 1) return; - tp = (struct tty3270 *) tty->driver_data; if (tp) { tty->driver_data = NULL; - tp->tty = tp->kbd->tty = NULL; - raw3270_put_view(&tp->view); + tty_port_tty_set(&tp->port, NULL); } } +static void tty3270_cleanup(struct tty_struct *tty) +{ + struct tty3270 *tp = tty->driver_data; + + if (tp) + raw3270_put_view(&tp->view); +} + /* * We always have room. */ @@ -1391,7 +1464,7 @@ tty3270_escape_sequence(struct tty3270 *tp, char ch) tty3270_lf(tp); break; case 'Z': /* Respond ID. */ - kbd_puts_queue(tp->tty, "\033[?6c"); + kbd_puts_queue(&tp->port, "\033[?6c"); break; case '7': /* Save cursor position. */ tp->saved_cx = tp->cx; @@ -1437,11 +1510,11 @@ tty3270_escape_sequence(struct tty3270 *tp, char ch) tp->esc_state = ESnormal; if (ch == 'n' && !tp->esc_ques) { if (tp->esc_par[0] == 5) /* Status report. */ - kbd_puts_queue(tp->tty, "\033[0n"); + kbd_puts_queue(&tp->port, "\033[0n"); else if (tp->esc_par[0] == 6) { /* Cursor report. 
*/ char buf[40]; sprintf(buf, "\033[%d;%dR", tp->cy + 1, tp->cx + 1); - kbd_puts_queue(tp->tty, buf); + kbd_puts_queue(&tp->port, buf); } return; } @@ -1513,12 +1586,13 @@ tty3270_escape_sequence(struct tty3270 *tp, char ch) * String write routine for 3270 ttys */ static void -tty3270_do_write(struct tty3270 *tp, const unsigned char *buf, int count) +tty3270_do_write(struct tty3270 *tp, struct tty_struct *tty, + const unsigned char *buf, int count) { int i_msg, i; spin_lock_bh(&tp->view.lock); - for (i_msg = 0; !tp->tty->stopped && i_msg < count; i_msg++) { + for (i_msg = 0; !tty->stopped && i_msg < count; i_msg++) { if (tp->esc_state != 0) { /* Continue escape sequence. */ tty3270_escape_sequence(tp, buf[i_msg]); @@ -1595,10 +1669,10 @@ tty3270_write(struct tty_struct * tty, if (!tp) return 0; if (tp->char_count > 0) { - tty3270_do_write(tp, tp->char_buf, tp->char_count); + tty3270_do_write(tp, tty, tp->char_buf, tp->char_count); tp->char_count = 0; } - tty3270_do_write(tp, buf, count); + tty3270_do_write(tp, tty, buf, count); return count; } @@ -1629,7 +1703,7 @@ tty3270_flush_chars(struct tty_struct *tty) if (!tp) return; if (tp->char_count > 0) { - tty3270_do_write(tp, tp->char_buf, tp->char_count); + tty3270_do_write(tp, tty, tp->char_buf, tp->char_count); tp->char_count = 0; } } @@ -1718,9 +1792,8 @@ tty3270_wait_until_sent(struct tty_struct *tty, int timeout) { } -static int -tty3270_ioctl(struct tty_struct *tty, struct file *file, - unsigned int cmd, unsigned long arg) +static int tty3270_ioctl(struct tty_struct *tty, unsigned int cmd, + unsigned long arg) { struct tty3270 *tp; @@ -1729,13 +1802,12 @@ tty3270_ioctl(struct tty_struct *tty, struct file *file, return -ENODEV; if (tty->flags & (1 << TTY_IO_ERROR)) return -EIO; - return kbd_ioctl(tp->kbd, file, cmd, arg); + return kbd_ioctl(tp->kbd, cmd, arg); } #ifdef CONFIG_COMPAT -static long -tty3270_compat_ioctl(struct tty_struct *tty, struct file *file, - unsigned int cmd, unsigned long arg) +static long tty3270_compat_ioctl(struct tty_struct *tty, + unsigned int cmd, unsigned long arg) { struct tty3270 *tp; @@ -1744,11 +1816,13 @@ tty3270_compat_ioctl(struct tty_struct *tty, struct file *file, return -ENODEV; if (tty->flags & (1 << TTY_IO_ERROR)) return -EIO; - return kbd_ioctl(tp->kbd, file, cmd, (unsigned long)compat_ptr(arg)); + return kbd_ioctl(tp->kbd, cmd, (unsigned long)compat_ptr(arg)); } #endif static const struct tty_operations tty3270_ops = { + .install = tty3270_install, + .cleanup = tty3270_cleanup, .open = tty3270_open, .close = tty3270_close, .write = tty3270_write, @@ -1768,6 +1842,22 @@ static const struct tty_operations tty3270_ops = { .set_termios = tty3270_set_termios }; +static void tty3270_create_cb(int minor) +{ + tty_register_device(tty3270_driver, minor - RAW3270_FIRSTMINOR, NULL); +} + +static void tty3270_destroy_cb(int minor) +{ + tty_unregister_device(tty3270_driver, minor - RAW3270_FIRSTMINOR); +} + +static struct raw3270_notifier tty3270_notifier = +{ + .create = tty3270_create_cb, + .destroy = tty3270_destroy_cb, +}; + /* * 3270 tty registration code called from tty_init(). * Most kernel services (incl. kmalloc) are available at this poimt. 
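The init hunk below converts tty3270 from alloc_tty_driver() to tty_alloc_driver(), which takes the driver flags at allocation time and signals failure with an ERR_PTR() value instead of NULL. A minimal sketch of the resulting allocation and registration pattern, with hypothetical example_* names that are not part of this patch:

#include <linux/err.h>
#include <linux/module.h>
#include <linux/tty.h>

static const struct tty_operations example_ops = {
	/* .open, .close, .write, ... would be filled in here */
};
static struct tty_driver *example_driver;

static int __init example_tty_init(void)
{
	struct tty_driver *driver;
	int rc;

	/* Flags move into the allocation call; the result must be
	 * checked with IS_ERR(), not compared against NULL. */
	driver = tty_alloc_driver(4, TTY_DRIVER_REAL_RAW |
				  TTY_DRIVER_DYNAMIC_DEV);
	if (IS_ERR(driver))
		return PTR_ERR(driver);

	driver->driver_name = "example";
	driver->name = "ttyEXA";
	driver->type = TTY_DRIVER_TYPE_SYSTEM;
	driver->subtype = SYSTEM_TYPE_TTY;
	driver->init_termios = tty_std_termios;
	tty_set_operations(driver, &example_ops);

	rc = tty_register_driver(driver);
	if (rc) {
		put_tty_driver(driver);	/* drop the allocation again */
		return rc;
	}
	example_driver = driver;
	return 0;
}

The matching put_tty_driver() on the unregister path is exactly what the tty3270_exit() hunk below adds.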
@@ -1777,24 +1867,26 @@ static int __init tty3270_init(void) struct tty_driver *driver; int ret; - driver = alloc_tty_driver(RAW3270_MAXDEVS); - if (!driver) - return -ENOMEM; + driver = tty_alloc_driver(RAW3270_MAXDEVS, + TTY_DRIVER_REAL_RAW | + TTY_DRIVER_DYNAMIC_DEV | + TTY_DRIVER_RESET_TERMIOS); + if (IS_ERR(driver)) + return PTR_ERR(driver); /* * Initialize the tty_driver structure * Entries in tty3270_driver that are NOT initialized: * proc_entry, set_termios, flush_buffer, set_ldisc, write_proc */ - driver->owner = THIS_MODULE; - driver->driver_name = "ttyTUB"; - driver->name = "ttyTUB"; + driver->driver_name = "tty3270"; + driver->name = "3270/tty"; driver->major = IBM_TTY3270_MAJOR; driver->minor_start = RAW3270_FIRSTMINOR; + driver->name_base = RAW3270_FIRSTMINOR; driver->type = TTY_DRIVER_TYPE_SYSTEM; driver->subtype = SYSTEM_TYPE_TTY; driver->init_termios = tty_std_termios; - driver->flags = TTY_DRIVER_RESET_TERMIOS | TTY_DRIVER_DYNAMIC_DEV; tty_set_operations(driver, &tty3270_ops); ret = tty_register_driver(driver); if (ret) { @@ -1802,6 +1894,7 @@ static int __init tty3270_init(void) return ret; } tty3270_driver = driver; + raw3270_register_notifier(&tty3270_notifier); return 0; } @@ -1810,9 +1903,11 @@ tty3270_exit(void) { struct tty_driver *driver; + raw3270_unregister_notifier(&tty3270_notifier); driver = tty3270_driver; tty3270_driver = NULL; tty_unregister_driver(driver); + put_tty_driver(driver); tty3270_del_views(); } diff --git a/drivers/s390/char/tty3270.h b/drivers/s390/char/tty3270.h index 799da57f039..11141a8f897 100644 --- a/drivers/s390/char/tty3270.h +++ b/drivers/s390/char/tty3270.h @@ -1,6 +1,4 @@ /* - * drivers/s390/char/tty3270.h - * * Copyright IBM Corp. 2007 * */ diff --git a/drivers/s390/char/vmcp.c b/drivers/s390/char/vmcp.c index 921dcda7767..0fdedadff7b 100644 --- a/drivers/s390/char/vmcp.c +++ b/drivers/s390/char/vmcp.c @@ -1,34 +1,29 @@ /* - * Copyright IBM Corp. 2004,2007 + * Copyright IBM Corp. 2004, 2010 * Interface implementation for communication with the z/VM control program - * Author(s): Christian Borntraeger <borntraeger@de.ibm.com> * + * Author(s): Christian Borntraeger <borntraeger@de.ibm.com> * * z/VMs CP offers the possibility to issue commands via the diagnose code 8 * this driver implements a character device that issues these commands and * returns the answer of CP. 
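vmcp funnels every command through cpcmd() from <asm/cpcmd.h>, the kernel wrapper around the diagnose 8 interface described above. A minimal, hypothetical sketch of issuing one CP command that way; the command text and buffer size are illustrative only:

#include <linux/printk.h>
#include <asm/cpcmd.h>

static void example_cp_query(void)
{
	char response[240];
	int cp_rc;

	/* cpcmd() hands the command to CP via diagnose code 8, copies
	 * CP's answer into the buffer and stores the CP return code in
	 * cp_rc. */
	cpcmd("QUERY USERID", response, sizeof(response), &cp_rc);
	pr_info("CP answered (rc=%d): %s\n", cp_rc, response);
}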
- + * * The idea of this driver is based on cpint from Neale Ferguson and #CP in CMS */ -#define KMSG_COMPONENT "vmcp" -#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt - #include <linux/fs.h> #include <linux/init.h> +#include <linux/compat.h> #include <linux/kernel.h> #include <linux/miscdevice.h> -#include <linux/module.h> +#include <linux/slab.h> +#include <linux/export.h> #include <asm/compat.h> #include <asm/cpcmd.h> #include <asm/debug.h> #include <asm/uaccess.h> #include "vmcp.h" -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Christian Borntraeger <borntraeger@de.ibm.com>"); -MODULE_DESCRIPTION("z/VM CP interface"); - static debug_info_t *vmcp_debug; static int vmcp_open(struct inode *inode, struct file *file) @@ -54,7 +49,7 @@ static int vmcp_release(struct inode *inode, struct file *file) { struct vmcp_session *session; - session = (struct vmcp_session *)file->private_data; + session = file->private_data; file->private_data = NULL; free_pages((unsigned long)session->response, get_order(session->bufsize)); kfree(session); @@ -101,7 +96,7 @@ vmcp_write(struct file *file, const char __user *buff, size_t count, return -EFAULT; } cmd[count] = '\0'; - session = (struct vmcp_session *)file->private_data; + session = file->private_data; if (mutex_lock_interruptible(&session->mutex)) { kfree(cmd); return -ERESTARTSYS; @@ -143,7 +138,7 @@ static long vmcp_ioctl(struct file *file, unsigned int cmd, unsigned long arg) int __user *argp; int temp; - session = (struct vmcp_session *)file->private_data; + session = file->private_data; if (is_compat_task()) argp = compat_ptr(arg); else @@ -184,6 +179,7 @@ static const struct file_operations vmcp_fops = { .write = vmcp_write, .unlocked_ioctl = vmcp_ioctl, .compat_ioctl = vmcp_ioctl, + .llseek = no_llseek, }; static struct miscdevice vmcp_dev = { @@ -196,11 +192,8 @@ static int __init vmcp_init(void) { int ret; - if (!MACHINE_IS_VM) { - pr_warning("The z/VM CP interface device driver cannot be " - "loaded without z/VM\n"); - return -ENODEV; - } + if (!MACHINE_IS_VM) + return 0; vmcp_debug = debug_register("vmcp", 1, 1, 240); if (!vmcp_debug) @@ -213,19 +206,8 @@ static int __init vmcp_init(void) } ret = misc_register(&vmcp_dev); - if (ret) { + if (ret) debug_unregister(vmcp_debug); - return ret; - } - - return 0; -} - -static void __exit vmcp_exit(void) -{ - misc_deregister(&vmcp_dev); - debug_unregister(vmcp_debug); + return ret; } - -module_init(vmcp_init); -module_exit(vmcp_exit); +device_initcall(vmcp_init); diff --git a/drivers/s390/char/vmcp.h b/drivers/s390/char/vmcp.h index 6a993948e18..1e29b041838 100644 --- a/drivers/s390/char/vmcp.h +++ b/drivers/s390/char/vmcp.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2004, 2005 IBM Corporation + * Copyright IBM Corp. 
2004, 2005
 * Interface implementation for communication with the z/VM control program
 * Version 1.0
 * Author(s): Christian Borntraeger <cborntra@de.ibm.com>
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c
index 7dfa5412d5a..a8848db7b09 100644
--- a/drivers/s390/char/vmlogrdr.c
+++ b/drivers/s390/char/vmlogrdr.c
@@ -1,5 +1,4 @@
 /*
- * drivers/s390/char/vmlogrdr.c
  * character device driver for reading z/VM system service records
  *
  *
@@ -16,11 +15,12 @@
 #include <linux/module.h>
 #include <linux/init.h>
+#include <linux/slab.h>
 #include <linux/errno.h>
 #include <linux/types.h>
 #include <linux/interrupt.h>
 #include <linux/spinlock.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
 #include <asm/uaccess.h>
 #include <asm/cpcmd.h>
 #include <asm/debug.h>
@@ -29,7 +29,6 @@
 #include <linux/kmod.h>
 #include <linux/cdev.h>
 #include <linux/device.h>
-#include <linux/smp_lock.h>
 #include <linux/string.h>
 
 MODULE_AUTHOR
@@ -96,6 +95,7 @@ static const struct file_operations vmlogrdr_fops = {
 	.open	 = vmlogrdr_open,
 	.release = vmlogrdr_release,
 	.read	 = vmlogrdr_read,
+	.llseek  = no_llseek,
 };
@@ -213,7 +213,7 @@ static void vmlogrdr_iucv_message_pending(struct iucv_path *path,
 static int vmlogrdr_get_recording_class_AB(void)
 {
-	char cp_command[]="QUERY COMMAND RECORDING ";
+	static const char cp_command[] = "QUERY COMMAND RECORDING ";
 	char cp_response[80];
 	char *tail;
 	int len,i;
@@ -247,27 +247,25 @@ static int vmlogrdr_recording(struct vmlogrdr_priv_t * logptr,
 	char cp_command[80];
 	char cp_response[160];
 	char *onoff, *qid_string;
+	int rc;
 
-	memset(cp_command, 0x00, sizeof(cp_command));
-	memset(cp_response, 0x00, sizeof(cp_response));
-
-	onoff = ((action == 1) ? "ON" : "OFF");
+	onoff = ((action == 1) ? "ON" : "OFF");
 	qid_string = ((recording_class_AB == 1) ? " QID * " : "");
 
-	/* 
+	/*
 	 * The recording commands needs to be called with option QID
 	 * for guests that have previlege classes A or B.
 	 * Purging has to be done as separate step, because recording
 	 * can't be switched on as long as records are on the queue.
	 * Doing both at the same time doesn't work.
 	 */
-
-	if (purge) {
+	if (purge && (action == 1)) {
+		memset(cp_command, 0x00, sizeof(cp_command));
+		memset(cp_response, 0x00, sizeof(cp_response));
 		snprintf(cp_command, sizeof(cp_command),
			 "RECORDING %s PURGE %s",
			 logptr->recording_name,
			 qid_string);
-
 		cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
 	}
 
@@ -277,19 +275,33 @@ static int vmlogrdr_recording(struct vmlogrdr_priv_t * logptr,
 		 logptr->recording_name,
 		 onoff,
 		 qid_string);
-
 	cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
 
 	/* The recording command will usually answer with 'Command complete'
 	 * on success, but when the specific service was never connected
 	 * before then there might be an additional informational message
 	 * 'HCPCRC8072I Recording entry not found' before the
-	 * 'Command complete'. So I use strstr rather then the strncmp.
+	 * 'Command complete'. So I use strstr rather than the strncmp.
 	 */
 	if (strstr(cp_response,"Command complete"))
-		return 0;
+		rc = 0;
 	else
-		return -EIO;
+		rc = -EIO;
+	/*
+	 * If we turn recording off, we have to purge any remaining records
+	 * afterwards, as a large number of queued records may impact z/VM
+	 * performance.
+ */ + if (purge && (action == 0)) { + memset(cp_command, 0x00, sizeof(cp_command)); + memset(cp_response, 0x00, sizeof(cp_response)); + snprintf(cp_command, sizeof(cp_command), + "RECORDING %s PURGE %s", + logptr->recording_name, + qid_string); + cpcmd(cp_command, cp_response, sizeof(cp_response), NULL); + } + return rc; } @@ -301,7 +313,7 @@ static int vmlogrdr_open (struct inode *inode, struct file *filp) int ret; dev_num = iminor(inode); - if (dev_num > MAXMINOR) + if (dev_num >= MAXMINOR) return -ENODEV; logptr = &sys_ser[dev_num]; @@ -309,7 +321,7 @@ static int vmlogrdr_open (struct inode *inode, struct file *filp) * only allow for blocking reads to be open */ if (filp->f_flags & O_NONBLOCK) - return -ENOSYS; + return -EOPNOTSUPP; /* Besure this device hasn't already been opened */ spin_lock_bh(&logptr->priv_lock); @@ -636,17 +648,26 @@ static ssize_t vmlogrdr_recording_status_show(struct device_driver *driver, char *buf) { - char cp_command[] = "QUERY RECORDING "; + static const char cp_command[] = "QUERY RECORDING "; int len; cpcmd(cp_command, buf, 4096, NULL); len = strlen(buf); return len; } - - static DRIVER_ATTR(recording_status, 0444, vmlogrdr_recording_status_show, NULL); +static struct attribute *vmlogrdr_drv_attrs[] = { + &driver_attr_recording_status.attr, + NULL, +}; +static struct attribute_group vmlogrdr_drv_attr_group = { + .attrs = vmlogrdr_drv_attrs, +}; +static const struct attribute_group *vmlogrdr_drv_attr_groups[] = { + &vmlogrdr_drv_attr_group, + NULL, +}; static struct attribute *vmlogrdr_attrs[] = { &dev_attr_autopurge.attr, @@ -655,6 +676,13 @@ static struct attribute *vmlogrdr_attrs[] = { &dev_attr_recording.attr, NULL, }; +static struct attribute_group vmlogrdr_attr_group = { + .attrs = vmlogrdr_attrs, +}; +static const struct attribute_group *vmlogrdr_attr_groups[] = { + &vmlogrdr_attr_group, + NULL, +}; static int vmlogrdr_pm_prepare(struct device *dev) { @@ -679,18 +707,14 @@ static const struct dev_pm_ops vmlogrdr_pm_ops = { .prepare = vmlogrdr_pm_prepare, }; -static struct attribute_group vmlogrdr_attr_group = { - .attrs = vmlogrdr_attrs, -}; - static struct class *vmlogrdr_class; static struct device_driver vmlogrdr_driver = { .name = "vmlogrdr", .bus = &iucv_bus, .pm = &vmlogrdr_pm_ops, + .groups = vmlogrdr_drv_attr_groups, }; - static int vmlogrdr_register_driver(void) { int ret; @@ -704,21 +728,14 @@ static int vmlogrdr_register_driver(void) if (ret) goto out_iucv; - ret = driver_create_file(&vmlogrdr_driver, - &driver_attr_recording_status); - if (ret) - goto out_driver; - vmlogrdr_class = class_create(THIS_MODULE, "vmlogrdr"); if (IS_ERR(vmlogrdr_class)) { ret = PTR_ERR(vmlogrdr_class); vmlogrdr_class = NULL; - goto out_attr; + goto out_driver; } return 0; -out_attr: - driver_remove_file(&vmlogrdr_driver, &driver_attr_recording_status); out_driver: driver_unregister(&vmlogrdr_driver); out_iucv: @@ -732,7 +749,6 @@ static void vmlogrdr_unregister_driver(void) { class_destroy(vmlogrdr_class); vmlogrdr_class = NULL; - driver_remove_file(&vmlogrdr_driver, &driver_attr_recording_status); driver_unregister(&vmlogrdr_driver); iucv_unregister(&vmlogrdr_iucv_handler, 1); } @@ -745,10 +761,11 @@ static int vmlogrdr_register_device(struct vmlogrdr_priv_t *priv) dev = kzalloc(sizeof(struct device), GFP_KERNEL); if (dev) { - dev_set_name(dev, priv->internal_name); + dev_set_name(dev, "%s", priv->internal_name); dev->bus = &iucv_bus; dev->parent = iucv_root; dev->driver = &vmlogrdr_driver; + dev->groups = vmlogrdr_attr_groups; dev_set_drvdata(dev, priv); /* * 
The release function could be called after the @@ -766,11 +783,6 @@ static int vmlogrdr_register_device(struct vmlogrdr_priv_t *priv) return ret; } - ret = sysfs_create_group(&dev->kobj, &vmlogrdr_attr_group); - if (ret) { - device_unregister(dev); - return ret; - } priv->class_device = device_create(vmlogrdr_class, dev, MKDEV(vmlogrdr_major, priv->minor_num), @@ -778,7 +790,6 @@ static int vmlogrdr_register_device(struct vmlogrdr_priv_t *priv) if (IS_ERR(priv->class_device)) { ret = PTR_ERR(priv->class_device); priv->class_device=NULL; - sysfs_remove_group(&dev->kobj, &vmlogrdr_attr_group); device_unregister(dev); return ret; } @@ -791,7 +802,6 @@ static int vmlogrdr_unregister_device(struct vmlogrdr_priv_t *priv) { device_destroy(vmlogrdr_class, MKDEV(vmlogrdr_major, priv->minor_num)); if (priv->device != NULL) { - sysfs_remove_group(&priv->device->kobj, &vmlogrdr_attr_group); device_unregister(priv->device); priv->device=NULL; } diff --git a/drivers/s390/char/vmur.c b/drivers/s390/char/vmur.c index cc56fc708ba..0efb27f6f19 100644 --- a/drivers/s390/char/vmur.c +++ b/drivers/s390/char/vmur.c @@ -12,7 +12,8 @@ #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt #include <linux/cdev.h> -#include <linux/smp_lock.h> +#include <linux/slab.h> +#include <linux/module.h> #include <asm/uaccess.h> #include <asm/cio.h> @@ -63,14 +64,17 @@ static int ur_set_offline(struct ccw_device *cdev); static int ur_pm_suspend(struct ccw_device *cdev); static struct ccw_driver ur_driver = { - .name = "vmur", - .owner = THIS_MODULE, + .driver = { + .name = "vmur", + .owner = THIS_MODULE, + }, .ids = ur_ids, .probe = ur_probe, .remove = ur_remove, .set_online = ur_set_online, .set_offline = ur_set_offline, .freeze = ur_pm_suspend, + .int_class = IRQIO_VMR, }; static DEFINE_MUTEX(vmur_mutex); @@ -85,7 +89,7 @@ static DEFINE_MUTEX(vmur_mutex); * urd references: * - ur_probe gets a urd reference, ur_remove drops the reference * dev_get_drvdata(&cdev->dev) - * - ur_open gets a urd reference, ur_relase drops the reference + * - ur_open gets a urd reference, ur_release drops the reference * (urf->urd) * * cdev references: @@ -699,7 +703,7 @@ static int ur_open(struct inode *inode, struct file *file) * We treat the minor number as the devno of the ur device * to find in the driver tree. */ - devno = MINOR(file->f_dentry->d_inode->i_rdev); + devno = MINOR(file_inode(file)->i_rdev); urd = urdev_get_from_devno(devno); if (!urd) { @@ -899,7 +903,7 @@ static int ur_set_online(struct ccw_device *cdev) goto fail_urdev_put; } - cdev_init(urd->char_device, &ur_fops); + urd->char_device->ops = &ur_fops; urd->char_device->dev = MKDEV(major, minor); urd->char_device->owner = ur_fops.owner; @@ -918,8 +922,8 @@ static int ur_set_online(struct ccw_device *cdev) goto fail_free_cdev; } - urd->device = device_create(vmur_class, NULL, urd->char_device->dev, - NULL, "%s", node_id); + urd->device = device_create(vmur_class, &cdev->dev, + urd->char_device->dev, NULL, "%s", node_id); if (IS_ERR(urd->device)) { rc = PTR_ERR(urd->device); TRACE("ur_set_online: device_create rc=%d\n", rc); diff --git a/drivers/s390/char/vmwatchdog.c b/drivers/s390/char/vmwatchdog.c deleted file mode 100644 index c974058e48d..00000000000 --- a/drivers/s390/char/vmwatchdog.c +++ /dev/null @@ -1,335 +0,0 @@ -/* - * Watchdog implementation based on z/VM Watchdog Timer API - * - * Copyright IBM Corp. 2004,2009 - * - * The user space watchdog daemon can use this driver as - * /dev/vmwatchdog to have z/VM execute the specified CP - * command when the timeout expires. 
The default command is - * "IPL", which which cause an immediate reboot. - */ -#define KMSG_COMPONENT "vmwatchdog" -#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt - -#include <linux/init.h> -#include <linux/fs.h> -#include <linux/kernel.h> -#include <linux/miscdevice.h> -#include <linux/module.h> -#include <linux/moduleparam.h> -#include <linux/suspend.h> -#include <linux/watchdog.h> - -#include <asm/ebcdic.h> -#include <asm/io.h> -#include <asm/uaccess.h> - -#define MAX_CMDLEN 240 -#define MIN_INTERVAL 15 -static char vmwdt_cmd[MAX_CMDLEN] = "IPL"; -static int vmwdt_conceal; - -static int vmwdt_nowayout = WATCHDOG_NOWAYOUT; - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>"); -MODULE_DESCRIPTION("z/VM Watchdog Timer"); -module_param_string(cmd, vmwdt_cmd, MAX_CMDLEN, 0644); -MODULE_PARM_DESC(cmd, "CP command that is run when the watchdog triggers"); -module_param_named(conceal, vmwdt_conceal, bool, 0644); -MODULE_PARM_DESC(conceal, "Enable the CONCEAL CP option while the watchdog " - " is active"); -module_param_named(nowayout, vmwdt_nowayout, bool, 0); -MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started" - " (default=CONFIG_WATCHDOG_NOWAYOUT)"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); - -static unsigned int vmwdt_interval = 60; -static unsigned long vmwdt_is_open; -static int vmwdt_expect_close; - -static DEFINE_MUTEX(vmwdt_mutex); - -#define VMWDT_OPEN 0 /* devnode is open or suspend in progress */ -#define VMWDT_RUNNING 1 /* The watchdog is armed */ - -enum vmwdt_func { - /* function codes */ - wdt_init = 0, - wdt_change = 1, - wdt_cancel = 2, - /* flags */ - wdt_conceal = 0x80000000, -}; - -static int __diag288(enum vmwdt_func func, unsigned int timeout, - char *cmd, size_t len) -{ - register unsigned long __func asm("2") = func; - register unsigned long __timeout asm("3") = timeout; - register unsigned long __cmdp asm("4") = virt_to_phys(cmd); - register unsigned long __cmdl asm("5") = len; - int err; - - err = -EINVAL; - asm volatile( - " diag %1,%3,0x288\n" - "0: la %0,0\n" - "1:\n" - EX_TABLE(0b,1b) - : "+d" (err) : "d"(__func), "d"(__timeout), - "d"(__cmdp), "d"(__cmdl) : "1", "cc"); - return err; -} - -static int vmwdt_keepalive(void) -{ - /* we allocate new memory every time to avoid having - * to track the state. static allocation is not an - * option since that might not be contiguous in real - * storage in case of a modular build */ - static char *ebc_cmd; - size_t len; - int ret; - unsigned int func; - - ebc_cmd = kmalloc(MAX_CMDLEN, GFP_KERNEL); - if (!ebc_cmd) - return -ENOMEM; - - len = strlcpy(ebc_cmd, vmwdt_cmd, MAX_CMDLEN); - ASCEBC(ebc_cmd, MAX_CMDLEN); - EBC_TOUPPER(ebc_cmd, MAX_CMDLEN); - - func = vmwdt_conceal ? 
(wdt_init | wdt_conceal) : wdt_init; - set_bit(VMWDT_RUNNING, &vmwdt_is_open); - ret = __diag288(func, vmwdt_interval, ebc_cmd, len); - WARN_ON(ret != 0); - kfree(ebc_cmd); - return ret; -} - -static int vmwdt_disable(void) -{ - int ret = __diag288(wdt_cancel, 0, "", 0); - WARN_ON(ret != 0); - clear_bit(VMWDT_RUNNING, &vmwdt_is_open); - return ret; -} - -static int __init vmwdt_probe(void) -{ - /* there is no real way to see if the watchdog is supported, - * so we try initializing it with a NOP command ("BEGIN") - * that won't cause any harm even if the following disable - * fails for some reason */ - static char __initdata ebc_begin[] = { - 194, 197, 199, 201, 213 - }; - if (__diag288(wdt_init, 15, ebc_begin, sizeof(ebc_begin)) != 0) - return -EINVAL; - return vmwdt_disable(); -} - -static int vmwdt_open(struct inode *i, struct file *f) -{ - int ret; - if (test_and_set_bit(VMWDT_OPEN, &vmwdt_is_open)) - return -EBUSY; - ret = vmwdt_keepalive(); - if (ret) - clear_bit(VMWDT_OPEN, &vmwdt_is_open); - return ret ? ret : nonseekable_open(i, f); -} - -static int vmwdt_close(struct inode *i, struct file *f) -{ - if (vmwdt_expect_close == 42) - vmwdt_disable(); - vmwdt_expect_close = 0; - clear_bit(VMWDT_OPEN, &vmwdt_is_open); - return 0; -} - -static struct watchdog_info vmwdt_info = { - .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE, - .firmware_version = 0, - .identity = "z/VM Watchdog Timer", -}; - -static int __vmwdt_ioctl(unsigned int cmd, unsigned long arg) -{ - switch (cmd) { - case WDIOC_GETSUPPORT: - if (copy_to_user((void __user *)arg, &vmwdt_info, - sizeof(vmwdt_info))) - return -EFAULT; - return 0; - case WDIOC_GETSTATUS: - case WDIOC_GETBOOTSTATUS: - return put_user(0, (int __user *)arg); - case WDIOC_GETTEMP: - return -EINVAL; - case WDIOC_SETOPTIONS: - { - int options, ret; - if (get_user(options, (int __user *)arg)) - return -EFAULT; - ret = -EINVAL; - if (options & WDIOS_DISABLECARD) { - ret = vmwdt_disable(); - if (ret) - return ret; - } - if (options & WDIOS_ENABLECARD) { - ret = vmwdt_keepalive(); - } - return ret; - } - case WDIOC_GETTIMEOUT: - return put_user(vmwdt_interval, (int __user *)arg); - case WDIOC_SETTIMEOUT: - { - int interval; - if (get_user(interval, (int __user *)arg)) - return -EFAULT; - if (interval < MIN_INTERVAL) - return -EINVAL; - vmwdt_interval = interval; - } - return vmwdt_keepalive(); - case WDIOC_KEEPALIVE: - return vmwdt_keepalive(); - } - return -EINVAL; -} - -static long vmwdt_ioctl(struct file *f, unsigned int cmd, unsigned long arg) -{ - int rc; - - mutex_lock(&vmwdt_mutex); - rc = __vmwdt_ioctl(cmd, arg); - mutex_unlock(&vmwdt_mutex); - return (long) rc; -} - -static ssize_t vmwdt_write(struct file *f, const char __user *buf, - size_t count, loff_t *ppos) -{ - if(count) { - if (!vmwdt_nowayout) { - size_t i; - - /* note: just in case someone wrote the magic character - * five months ago... */ - vmwdt_expect_close = 0; - - for (i = 0; i != count; i++) { - char c; - if (get_user(c, buf+i)) - return -EFAULT; - if (c == 'V') - vmwdt_expect_close = 42; - } - } - /* someone wrote to us, we should restart timer */ - vmwdt_keepalive(); - } - return count; -} - -static int vmwdt_resume(void) -{ - clear_bit(VMWDT_OPEN, &vmwdt_is_open); - return NOTIFY_DONE; -} - -/* - * It makes no sense to go into suspend while the watchdog is running. - * Depending on the memory size, the watchdog might trigger, while we - * are still saving the memory. 
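The ioctl and write handlers above implement the standard Linux watchdog character-device protocol for the (now removed) z/VM watchdog. For reference, a minimal user-space client; this is a sketch only, and the timeout value is arbitrary but must be at least MIN_INTERVAL (15 seconds):

	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/watchdog.h>

	int main(void)
	{
		int timeout = 60;
		int fd = open("/dev/vmwatchdog", O_WRONLY);	/* arms the timer */

		if (fd < 0)
			return 1;
		ioctl(fd, WDIOC_SETTIMEOUT, &timeout);	/* rearms via vmwdt_keepalive() */
		ioctl(fd, WDIOC_KEEPALIVE, 0);		/* restart the timer */
		write(fd, "V", 1);	/* magic close: sets vmwdt_expect_close */
		close(fd);		/* disables the watchdog unless nowayout is set */
		return 0;
	}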
- * We reuse the open flag to ensure that suspend and watchdog open are - * exclusive operations - */ -static int vmwdt_suspend(void) -{ - if (test_and_set_bit(VMWDT_OPEN, &vmwdt_is_open)) { - pr_err("The system cannot be suspended while the watchdog" - " is in use\n"); - return NOTIFY_BAD; - } - if (test_bit(VMWDT_RUNNING, &vmwdt_is_open)) { - clear_bit(VMWDT_OPEN, &vmwdt_is_open); - pr_err("The system cannot be suspended while the watchdog" - " is running\n"); - return NOTIFY_BAD; - } - return NOTIFY_DONE; -} - -/* - * This function is called for suspend and resume. - */ -static int vmwdt_power_event(struct notifier_block *this, unsigned long event, - void *ptr) -{ - switch (event) { - case PM_POST_HIBERNATION: - case PM_POST_SUSPEND: - return vmwdt_resume(); - case PM_HIBERNATION_PREPARE: - case PM_SUSPEND_PREPARE: - return vmwdt_suspend(); - default: - return NOTIFY_DONE; - } -} - -static struct notifier_block vmwdt_power_notifier = { - .notifier_call = vmwdt_power_event, -}; - -static const struct file_operations vmwdt_fops = { - .open = &vmwdt_open, - .release = &vmwdt_close, - .unlocked_ioctl = &vmwdt_ioctl, - .write = &vmwdt_write, - .owner = THIS_MODULE, -}; - -static struct miscdevice vmwdt_dev = { - .minor = WATCHDOG_MINOR, - .name = "watchdog", - .fops = &vmwdt_fops, -}; - -static int __init vmwdt_init(void) -{ - int ret; - - ret = vmwdt_probe(); - if (ret) - return ret; - ret = register_pm_notifier(&vmwdt_power_notifier); - if (ret) - return ret; - /* - * misc_register() has to be the last action in module_init(), because - * file operations will be available right after this. - */ - ret = misc_register(&vmwdt_dev); - if (ret) { - unregister_pm_notifier(&vmwdt_power_notifier); - return ret; - } - return 0; -} -module_init(vmwdt_init); - -static void __exit vmwdt_exit(void) -{ - unregister_pm_notifier(&vmwdt_power_notifier); - misc_deregister(&vmwdt_dev); -} -module_exit(vmwdt_exit); diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c index 3438658b66b..1884653e447 100644 --- a/drivers/s390/char/zcore.c +++ b/drivers/s390/char/zcore.c @@ -5,7 +5,7 @@ * * For more information please refer to Documentation/s390/zfcpdump.txt * - * Copyright IBM Corp. 2003,2008 + * Copyright IBM Corp. 2003, 2008 * Author(s): Michael Holzheu */ @@ -13,13 +13,16 @@ #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt #include <linux/init.h> +#include <linux/slab.h> #include <linux/miscdevice.h> #include <linux/debugfs.h> +#include <linux/module.h> +#include <linux/memblock.h> + #include <asm/asm-offsets.h> #include <asm/ipl.h> #include <asm/sclp.h> #include <asm/setup.h> -#include <asm/sigp.h> #include <asm/uaccess.h> #include <asm/debug.h> #include <asm/processor.h> @@ -29,8 +32,8 @@ #define TRACE(x...) 
debug_sprintf_event(zcore_dbf, 1, x) -#define TO_USER 0 -#define TO_KERNEL 1 +#define TO_USER 1 +#define TO_KERNEL 0 #define CHUNK_INFO_SIZE 34 /* 2 16-byte char, each followed by blank */ enum arch_id { @@ -61,6 +64,7 @@ static struct dentry *zcore_dir; static struct dentry *zcore_file; static struct dentry *zcore_memmap_file; static struct dentry *zcore_reipl_file; +static struct dentry *zcore_hsa_file; static struct ipl_parameter_block *ipl_block; /* @@ -71,11 +75,13 @@ static struct ipl_parameter_block *ipl_block; * @count: Size of buffer, which should be copied * @mode: Either TO_KERNEL or TO_USER */ -static int memcpy_hsa(void *dest, unsigned long src, size_t count, int mode) +int memcpy_hsa(void *dest, unsigned long src, size_t count, int mode) { int offs, blk_num; static char buf[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE))); + if (!hsa_available) + return -ENODATA; if (count == 0) return 0; @@ -123,7 +129,7 @@ static int memcpy_hsa(void *dest, unsigned long src, size_t count, int mode) } if (mode == TO_USER) { if (copy_to_user((__force __user void*) dest + offs, buf, - PAGE_SIZE)) + count - offs)) return -EFAULT; } else memcpy(dest + offs, buf, count - offs); @@ -141,56 +147,13 @@ static int memcpy_hsa_kernel(void *dest, unsigned long src, size_t count) return memcpy_hsa(dest, src, count, TO_KERNEL); } -static int memcpy_real(void *dest, unsigned long src, size_t count) -{ - unsigned long flags; - int rc = -EFAULT; - register unsigned long _dest asm("2") = (unsigned long) dest; - register unsigned long _len1 asm("3") = (unsigned long) count; - register unsigned long _src asm("4") = src; - register unsigned long _len2 asm("5") = (unsigned long) count; - - if (count == 0) - return 0; - flags = __raw_local_irq_stnsm(0xf8UL); /* switch to real mode */ - asm volatile ( - "0: mvcle %1,%2,0x0\n" - "1: jo 0b\n" - " lhi %0,0x0\n" - "2:\n" - EX_TABLE(1b,2b) - : "+d" (rc), "+d" (_dest), "+d" (_src), "+d" (_len1), - "+d" (_len2), "=m" (*((long*)dest)) - : "m" (*((long*)src)) - : "cc", "memory"); - __raw_local_irq_ssm(flags); - - return rc; -} - -static int memcpy_real_user(void __user *dest, unsigned long src, size_t count) -{ - static char buf[4096]; - int offs = 0, size; - - while (offs < count) { - size = min(sizeof(buf), count - offs); - if (memcpy_real(buf, src + offs, size)) - return -EFAULT; - if (copy_to_user(dest + offs, buf, size)) - return -EFAULT; - offs += size; - } - return 0; -} - static int __init init_cpu_info(enum arch_id arch) { struct save_area *sa; /* get info for boot cpu from lowcore, stored in the HSA */ - sa = kmalloc(sizeof(*sa), GFP_KERNEL); + sa = dump_save_area_create(0); if (!sa) return -ENOMEM; if (memcpy_hsa_kernel(sa, sys_info.sa_base, sys_info.sa_size) < 0) { @@ -198,7 +161,6 @@ static int __init init_cpu_info(enum arch_id arch) kfree(sa); return -EIO; } - zfcpdump_save_areas[0] = sa; return 0; } @@ -285,24 +247,25 @@ static int copy_lc(void __user *buf, void *sa, int sa_off, int len) static int zcore_add_lc(char __user *buf, unsigned long start, size_t count) { unsigned long end; - int i = 0; + int i; if (count == 0) return 0; end = start + count; - while (zfcpdump_save_areas[i]) { + for (i = 0; i < dump_save_areas.count; i++) { unsigned long cp_start, cp_end; /* copy range */ unsigned long sa_start, sa_end; /* save area range */ unsigned long prefix; unsigned long sa_off, len, buf_off; + struct save_area *save_area = dump_save_areas.areas[i]; - prefix = zfcpdump_save_areas[i]->pref_reg; + prefix = save_area->pref_reg; sa_start = prefix + 
sys_info.sa_base; sa_end = prefix + sys_info.sa_base + sys_info.sa_size; if ((end < sa_start) || (start > sa_end)) - goto next; + continue; cp_start = max(start, sa_start); cp_end = min(end, sa_end); @@ -311,15 +274,22 @@ static int zcore_add_lc(char __user *buf, unsigned long start, size_t count) len = cp_end - cp_start; TRACE("copy_lc for: %lx\n", start); - if (copy_lc(buf + buf_off, zfcpdump_save_areas[i], sa_off, len)) + if (copy_lc(buf + buf_off, save_area, sa_off, len)) return -EFAULT; -next: - i++; } return 0; } /* + * Release the HSA + */ +static void release_hsa(void) +{ + diag308(DIAG308_REL_HSA, NULL); + hsa_available = 0; +} + +/* * Read routine for zcore character device * First 4K are dump header * Next 32MB are HSA Memory @@ -360,9 +330,9 @@ static ssize_t zcore_read(struct file *file, char __user *buf, size_t count, mem_offs = 0; /* Copy from HSA data */ - if (*ppos < (ZFCPDUMP_HSA_SIZE + HEADER_SIZE)) { - size = min((count - hdr_count), (size_t) (ZFCPDUMP_HSA_SIZE - - mem_start)); + if (*ppos < sclp_get_hsa_size() + HEADER_SIZE) { + size = min((count - hdr_count), + (size_t) (sclp_get_hsa_size() - mem_start)); rc = memcpy_hsa_user(buf + hdr_count, mem_start, size); if (rc) goto fail; @@ -372,8 +342,8 @@ static ssize_t zcore_read(struct file *file, char __user *buf, size_t count, /* Copy from real mem */ size = count - mem_offs - hdr_count; - rc = memcpy_real_user(buf + hdr_count + mem_offs, mem_start + mem_offs, - size); + rc = copy_to_user_real(buf + hdr_count + mem_offs, + (void *) mem_start + mem_offs, size); if (rc) goto fail; @@ -405,8 +375,8 @@ static int zcore_open(struct inode *inode, struct file *filp) static int zcore_release(struct inode *inode, struct file *filep) { - diag308(DIAG308_REL_HSA, NULL); - hsa_available = 0; + if (hsa_available) + release_hsa(); return 0; } @@ -443,35 +413,26 @@ static ssize_t zcore_memmap_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos) { return simple_read_from_buffer(buf, count, ppos, filp->private_data, - MEMORY_CHUNKS * CHUNK_INFO_SIZE); + memblock.memory.cnt * CHUNK_INFO_SIZE); } static int zcore_memmap_open(struct inode *inode, struct file *filp) { - int i; + struct memblock_region *reg; char *buf; - struct mem_chunk *chunk_array; + int i = 0; - chunk_array = kzalloc(MEMORY_CHUNKS * sizeof(struct mem_chunk), - GFP_KERNEL); - if (!chunk_array) - return -ENOMEM; - detect_memory_layout(chunk_array); - buf = kzalloc(MEMORY_CHUNKS * CHUNK_INFO_SIZE, GFP_KERNEL); + buf = kzalloc(memblock.memory.cnt * CHUNK_INFO_SIZE, GFP_KERNEL); if (!buf) { - kfree(chunk_array); return -ENOMEM; } - for (i = 0; i < MEMORY_CHUNKS; i++) { - sprintf(buf + (i * CHUNK_INFO_SIZE), "%016llx %016llx ", - (unsigned long long) chunk_array[i].addr, - (unsigned long long) chunk_array[i].size); - if (chunk_array[i].size == 0) - break; + for_each_memblock(memory, reg) { + sprintf(buf + (i++ * CHUNK_INFO_SIZE), "%016llx %016llx ", + (unsigned long long) reg->base, + (unsigned long long) reg->size); } - kfree(chunk_array); filp->private_data = buf; - return 0; + return nonseekable_open(inode, filp); } static int zcore_memmap_release(struct inode *inode, struct file *filp) @@ -485,6 +446,7 @@ static const struct file_operations zcore_memmap_fops = { .read = zcore_memmap_read, .open = zcore_memmap_open, .release = zcore_memmap_release, + .llseek = no_llseek, }; static ssize_t zcore_reipl_write(struct file *filp, const char __user *buf, @@ -499,7 +461,7 @@ static ssize_t zcore_reipl_write(struct file *filp, const char __user *buf, static int 
zcore_reipl_open(struct inode *inode, struct file *filp) { - return 0; + return nonseekable_open(inode, filp); } static int zcore_reipl_release(struct inode *inode, struct file *filp) @@ -512,6 +474,42 @@ static const struct file_operations zcore_reipl_fops = { .write = zcore_reipl_write, .open = zcore_reipl_open, .release = zcore_reipl_release, + .llseek = no_llseek, +}; + +static ssize_t zcore_hsa_read(struct file *filp, char __user *buf, + size_t count, loff_t *ppos) +{ + static char str[18]; + + if (hsa_available) + snprintf(str, sizeof(str), "%lx\n", sclp_get_hsa_size()); + else + snprintf(str, sizeof(str), "0\n"); + return simple_read_from_buffer(buf, count, ppos, str, strlen(str)); +} + +static ssize_t zcore_hsa_write(struct file *filp, const char __user *buf, + size_t count, loff_t *ppos) +{ + char value; + + if (*ppos != 0) + return -EPIPE; + if (copy_from_user(&value, buf, 1)) + return -EFAULT; + if (value != '0') + return -EINVAL; + release_hsa(); + return count; +} + +static const struct file_operations zcore_hsa_fops = { + .owner = THIS_MODULE, + .write = zcore_hsa_write, + .read = zcore_hsa_read, + .open = nonseekable_open, + .llseek = no_llseek, }; #ifdef CONFIG_32BIT @@ -550,7 +548,7 @@ static void __init set_lc_mask(struct save_area *map) /* * Initialize dump globals for a given architecture */ -static int __init sys_info_init(enum arch_id arch) +static int __init sys_info_init(enum arch_id arch, unsigned long mem_end) { int rc; @@ -572,75 +570,55 @@ static int __init sys_info_init(enum arch_id arch) rc = init_cpu_info(arch); if (rc) return rc; - sys_info.mem_size = real_memory_size; + sys_info.mem_size = mem_end; return 0; } static int __init check_sdias(void) { - int rc, act_hsa_size; - - rc = sclp_sdias_blk_count(); - if (rc < 0) { + if (!sclp_get_hsa_size()) { TRACE("Could not determine HSA size\n"); - return rc; - } - act_hsa_size = (rc - 1) * PAGE_SIZE; - if (act_hsa_size < ZFCPDUMP_HSA_SIZE) { - TRACE("HSA size too small: %i\n", act_hsa_size); - return -EINVAL; + return -ENODEV; } return 0; } -static int __init get_mem_size(unsigned long *mem) +static int __init get_mem_info(unsigned long *mem, unsigned long *end) { - int i; - struct mem_chunk *chunk_array; + struct memblock_region *reg; - chunk_array = kzalloc(MEMORY_CHUNKS * sizeof(struct mem_chunk), - GFP_KERNEL); - if (!chunk_array) - return -ENOMEM; - detect_memory_layout(chunk_array); - for (i = 0; i < MEMORY_CHUNKS; i++) { - if (chunk_array[i].size == 0) - break; - *mem += chunk_array[i].size; + for_each_memblock(memory, reg) { + *mem += reg->size; + *end = max_t(unsigned long, *end, reg->base + reg->size); } - kfree(chunk_array); return 0; } -static int __init zcore_header_init(int arch, struct zcore_header *hdr) +static void __init zcore_header_init(int arch, struct zcore_header *hdr, + unsigned long mem_size) { - int rc, i; - unsigned long memory = 0; u32 prefix; + int i; if (arch == ARCH_S390X) hdr->arch_id = DUMP_ARCH_S390X; else hdr->arch_id = DUMP_ARCH_S390; - rc = get_mem_size(&memory); - if (rc) - return rc; - hdr->mem_size = memory; - hdr->rmem_size = memory; + hdr->mem_size = mem_size; + hdr->rmem_size = mem_size; hdr->mem_end = sys_info.mem_size; - hdr->num_pages = memory / PAGE_SIZE; - hdr->tod = get_clock(); + hdr->num_pages = mem_size / PAGE_SIZE; + hdr->tod = get_tod_clock(); get_cpu_id(&hdr->cpu_id); - for (i = 0; zfcpdump_save_areas[i]; i++) { - prefix = zfcpdump_save_areas[i]->pref_reg; + for (i = 0; i < dump_save_areas.count; i++) { + prefix = dump_save_areas.areas[i]->pref_reg; 
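The zcore_hsa_fops above give user space control over the HSA: a read reports the HSA size (0 once released) and writing the single character '0' calls release_hsa(). A sketch of the intended use by a dump tool, assuming debugfs is mounted at /sys/kernel/debug and the file is created as zcore/hsa (the debugfs_create_file() call follows later in this patch):

	#include <stdio.h>

	int main(void)
	{
		char size[32];
		FILE *f = fopen("/sys/kernel/debug/zcore/hsa", "r");

		if (!f)
			return 1;
		if (fgets(size, sizeof(size), f))
			printf("HSA size: %s", size);	/* hex size, or "0\n" */
		fclose(f);

		f = fopen("/sys/kernel/debug/zcore/hsa", "w");
		if (!f)
			return 1;
		fputs("0", f);	/* only '0' is accepted; frees the HSA */
		return fclose(f) ? 1 : 0;
	}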
hdr->real_cpu_cnt++; if (!prefix) continue; hdr->lc_vec[hdr->cpu_cnt] = prefix; hdr->cpu_cnt++; } - return 0; } /* @@ -660,15 +638,11 @@ static int __init zcore_reipl_init(void) ipl_block = (void *) __get_free_page(GFP_KERNEL); if (!ipl_block) return -ENOMEM; - if (ipib_info.ipib < ZFCPDUMP_HSA_SIZE) + if (ipib_info.ipib < sclp_get_hsa_size()) rc = memcpy_hsa_kernel(ipl_block, ipib_info.ipib, PAGE_SIZE); else - rc = memcpy_real(ipl_block, ipib_info.ipib, PAGE_SIZE); - if (rc) { - free_page((unsigned long) ipl_block); - return rc; - } - if (csum_partial(ipl_block, ipl_block->hdr.len, 0) != + rc = memcpy_real(ipl_block, (void *) ipib_info.ipib, PAGE_SIZE); + if (rc || csum_partial(ipl_block, ipl_block->hdr.len, 0) != ipib_info.checksum) { TRACE("Checksum does not match\n"); free_page((unsigned long) ipl_block); @@ -679,11 +653,15 @@ static int __init zcore_reipl_init(void) static int __init zcore_init(void) { + unsigned long mem_size, mem_end; unsigned char arch; int rc; + mem_size = mem_end = 0; if (ipl_info.type != IPL_TYPE_FCP_DUMP) return -ENODATA; + if (OLDMEM_BASE) + return -ENODATA; zcore_dbf = debug_register("zcore", 4, 1, 4 * sizeof(long)); debug_register_view(zcore_dbf, &debug_sprintf_view); @@ -700,6 +678,7 @@ static int __init zcore_init(void) rc = check_sdias(); if (rc) goto fail; + hsa_available = 1; rc = memcpy_hsa_kernel(&arch, __LC_AR_MODE_ID, 1); if (rc) @@ -721,13 +700,14 @@ static int __init zcore_init(void) } #endif /* CONFIG_64BIT */ - rc = sys_info_init(arch); + rc = get_mem_info(&mem_size, &mem_end); if (rc) goto fail; - rc = zcore_header_init(arch, &zcore_header); + rc = sys_info_init(arch, mem_end); if (rc) goto fail; + zcore_header_init(arch, &zcore_header, mem_size); rc = zcore_reipl_init(); if (rc) @@ -756,9 +736,16 @@ static int __init zcore_init(void) rc = -ENOMEM; goto fail_memmap_file; } - hsa_available = 1; + zcore_hsa_file = debugfs_create_file("hsa", S_IRUSR|S_IWUSR, zcore_dir, + NULL, &zcore_hsa_fops); + if (!zcore_hsa_file) { + rc = -ENOMEM; + goto fail_reipl_file; + } return 0; +fail_reipl_file: + debugfs_remove(zcore_reipl_file); fail_memmap_file: debugfs_remove(zcore_memmap_file); fail_file: @@ -775,6 +762,7 @@ static void __exit zcore_exit(void) debug_unregister(zcore_dbf); sclp_sdias_exit(); free_page((unsigned long) ipl_block); + debugfs_remove(zcore_hsa_file); debugfs_remove(zcore_reipl_file); debugfs_remove(zcore_memmap_file); debugfs_remove(zcore_file); diff --git a/drivers/s390/cio/Makefile b/drivers/s390/cio/Makefile index e1b700a1964..8c4a386e97f 100644 --- a/drivers/s390/cio/Makefile +++ b/drivers/s390/cio/Makefile @@ -8,6 +8,8 @@ ccw_device-objs += device.o device_fsm.o device_ops.o ccw_device-objs += device_id.o device_pgid.o device_status.o obj-y += ccw_device.o cmf.o obj-$(CONFIG_CHSC_SCH) += chsc_sch.o +obj-$(CONFIG_EADM_SCH) += eadm_sch.o +obj-$(CONFIG_SCM_BUS) += scm.o obj-$(CONFIG_CCWGROUP) += ccwgroup.o qdio-objs := qdio_main.o qdio_thinint.o qdio_debug.o qdio_setup.o diff --git a/drivers/s390/cio/airq.c b/drivers/s390/cio/airq.c index 65d2e769dfa..00bfbee0af9 100644 --- a/drivers/s390/cio/airq.c +++ b/drivers/s390/cio/airq.c @@ -1,8 +1,7 @@ /* - * drivers/s390/cio/airq.c * Support for adapter interruptions * - * Copyright IBM Corp. 1999,2007 + * Copyright IBM Corp. 
1999, 2007 * Author(s): Ingo Adlung <adlung@de.ibm.com> * Cornelia Huck <cornelia.huck@de.ibm.com> * Arnd Bergmann <arndb@de.ibm.com> @@ -10,142 +9,267 @@ */ #include <linux/init.h> +#include <linux/irq.h> +#include <linux/kernel_stat.h> #include <linux/module.h> +#include <linux/mutex.h> +#include <linux/rculist.h> #include <linux/slab.h> -#include <linux/rcupdate.h> #include <asm/airq.h> #include <asm/isc.h> #include "cio.h" #include "cio_debug.h" +#include "ioasm.h" -#define NR_AIRQS 32 -#define NR_AIRQS_PER_WORD sizeof(unsigned long) -#define NR_AIRQ_WORDS (NR_AIRQS / NR_AIRQS_PER_WORD) +static DEFINE_SPINLOCK(airq_lists_lock); +static struct hlist_head airq_lists[MAX_ISC+1]; -union indicator_t { - unsigned long word[NR_AIRQ_WORDS]; - unsigned char byte[NR_AIRQS]; -} __attribute__((packed)); +/** + * register_adapter_interrupt() - register adapter interrupt handler + * @airq: pointer to adapter interrupt descriptor + * + * Returns 0 on success, or -EINVAL. + */ +int register_adapter_interrupt(struct airq_struct *airq) +{ + char dbf_txt[32]; -struct airq_t { - adapter_int_handler_t handler; - void *drv_data; -}; + if (!airq->handler || airq->isc > MAX_ISC) + return -EINVAL; + if (!airq->lsi_ptr) { + airq->lsi_ptr = kzalloc(1, GFP_KERNEL); + if (!airq->lsi_ptr) + return -ENOMEM; + airq->flags |= AIRQ_PTR_ALLOCATED; + } + if (!airq->lsi_mask) + airq->lsi_mask = 0xff; + snprintf(dbf_txt, sizeof(dbf_txt), "rairq:%p", airq); + CIO_TRACE_EVENT(4, dbf_txt); + isc_register(airq->isc); + spin_lock(&airq_lists_lock); + hlist_add_head_rcu(&airq->list, &airq_lists[airq->isc]); + spin_unlock(&airq_lists_lock); + return 0; +} +EXPORT_SYMBOL(register_adapter_interrupt); -static union indicator_t indicators[MAX_ISC+1]; -static struct airq_t *airqs[MAX_ISC+1][NR_AIRQS]; +/** + * unregister_adapter_interrupt - unregister adapter interrupt handler + * @airq: pointer to adapter interrupt descriptor + */ +void unregister_adapter_interrupt(struct airq_struct *airq) +{ + char dbf_txt[32]; -static int register_airq(struct airq_t *airq, u8 isc) + if (hlist_unhashed(&airq->list)) + return; + snprintf(dbf_txt, sizeof(dbf_txt), "urairq:%p", airq); + CIO_TRACE_EVENT(4, dbf_txt); + spin_lock(&airq_lists_lock); + hlist_del_rcu(&airq->list); + spin_unlock(&airq_lists_lock); + synchronize_rcu(); + isc_unregister(airq->isc); + if (airq->flags & AIRQ_PTR_ALLOCATED) { + kfree(airq->lsi_ptr); + airq->lsi_ptr = NULL; + airq->flags &= ~AIRQ_PTR_ALLOCATED; + } +} +EXPORT_SYMBOL(unregister_adapter_interrupt); + +static irqreturn_t do_airq_interrupt(int irq, void *dummy) { - int i; + struct tpi_info *tpi_info; + struct airq_struct *airq; + struct hlist_head *head; + + __this_cpu_write(s390_idle.nohz_delay, 1); + tpi_info = (struct tpi_info *) &get_irq_regs()->int_code; + head = &airq_lists[tpi_info->isc]; + rcu_read_lock(); + hlist_for_each_entry_rcu(airq, head, list) + if ((*airq->lsi_ptr & airq->lsi_mask) != 0) + airq->handler(airq); + rcu_read_unlock(); + + return IRQ_HANDLED; +} + +static struct irqaction airq_interrupt = { + .name = "AIO", + .handler = do_airq_interrupt, +}; - for (i = 0; i < NR_AIRQS; i++) - if (!cmpxchg(&airqs[isc][i], NULL, airq)) - return i; - return -ENOMEM; +void __init init_airq_interrupts(void) +{ + irq_set_chip_and_handler(THIN_INTERRUPT, + &dummy_irq_chip, handle_percpu_irq); + setup_irq(THIN_INTERRUPT, &airq_interrupt); } /** - * s390_register_adapter_interrupt() - register adapter interrupt handler - * @handler: adapter handler to be registered - * @drv_data: driver data passed with each call 
to the handler - * @isc: isc for which the handler should be called + * airq_iv_create - create an interrupt vector + * @bits: number of bits in the interrupt vector + * @flags: allocation flags * - * Returns: - * Pointer to the indicator to be used on success - * ERR_PTR() if registration failed + * Returns a pointer to an interrupt vector structure */ -void *s390_register_adapter_interrupt(adapter_int_handler_t handler, - void *drv_data, u8 isc) +struct airq_iv *airq_iv_create(unsigned long bits, unsigned long flags) { - struct airq_t *airq; - char dbf_txt[16]; - int ret; - - if (isc > MAX_ISC) - return ERR_PTR(-EINVAL); - airq = kmalloc(sizeof(struct airq_t), GFP_KERNEL); - if (!airq) { - ret = -ENOMEM; + struct airq_iv *iv; + unsigned long size; + + iv = kzalloc(sizeof(*iv), GFP_KERNEL); + if (!iv) goto out; + iv->bits = bits; + size = BITS_TO_LONGS(bits) * sizeof(unsigned long); + iv->vector = kzalloc(size, GFP_KERNEL); + if (!iv->vector) + goto out_free; + if (flags & AIRQ_IV_ALLOC) { + iv->avail = kmalloc(size, GFP_KERNEL); + if (!iv->avail) + goto out_free; + memset(iv->avail, 0xff, size); + iv->end = 0; + } else + iv->end = bits; + if (flags & AIRQ_IV_BITLOCK) { + iv->bitlock = kzalloc(size, GFP_KERNEL); + if (!iv->bitlock) + goto out_free; + } + if (flags & AIRQ_IV_PTR) { + size = bits * sizeof(unsigned long); + iv->ptr = kzalloc(size, GFP_KERNEL); + if (!iv->ptr) + goto out_free; + } + if (flags & AIRQ_IV_DATA) { + size = bits * sizeof(unsigned int); + iv->data = kzalloc(size, GFP_KERNEL); + if (!iv->data) + goto out_free; } - airq->handler = handler; - airq->drv_data = drv_data; + spin_lock_init(&iv->lock); + return iv; - ret = register_airq(airq, isc); +out_free: + kfree(iv->ptr); + kfree(iv->bitlock); + kfree(iv->avail); + kfree(iv->vector); + kfree(iv); out: - snprintf(dbf_txt, sizeof(dbf_txt), "rairq:%d", ret); - CIO_TRACE_EVENT(4, dbf_txt); - if (ret < 0) { - kfree(airq); - return ERR_PTR(ret); - } else - return &indicators[isc].byte[ret]; + return NULL; } -EXPORT_SYMBOL(s390_register_adapter_interrupt); +EXPORT_SYMBOL(airq_iv_create); /** - * s390_unregister_adapter_interrupt - unregister adapter interrupt handler - * @ind: indicator for which the handler is to be unregistered - * @isc: interruption subclass + * airq_iv_release - release an interrupt vector + * @iv: pointer to interrupt vector structure */ -void s390_unregister_adapter_interrupt(void *ind, u8 isc) +void airq_iv_release(struct airq_iv *iv) { - struct airq_t *airq; - char dbf_txt[16]; - int i; - - i = (int) ((addr_t) ind) - ((addr_t) &indicators[isc].byte[0]); - snprintf(dbf_txt, sizeof(dbf_txt), "urairq:%d", i); - CIO_TRACE_EVENT(4, dbf_txt); - indicators[isc].byte[i] = 0; - airq = xchg(&airqs[isc][i], NULL); - /* - * Allow interrupts to complete. This will ensure that the airq handle - * is no longer referenced by any interrupt handler. 
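register_adapter_interrupt(), shown earlier in this hunk, replaces the indicator-slot scheme being removed here: a driver describes its adapter interrupt in a struct airq_struct, and its handler runs whenever the local-summary indicator matches lsi_mask. A hypothetical driver-side sketch; the ISC value and all names are placeholders:

	#include <linux/init.h>
	#include <asm/airq.h>

	static void my_airq_handler(struct airq_struct *airq)
	{
		/* *airq->lsi_ptr matched airq->lsi_mask; process the interrupt */
	}

	static struct airq_struct my_airq = {
		.handler = my_airq_handler,
		.isc = 3,	/* hypothetical ISC; must be <= MAX_ISC */
	};

	static int __init my_init(void)
	{
		/* lsi_ptr left NULL: register_adapter_interrupt() allocates it */
		return register_adapter_interrupt(&my_airq);
	}

	static void __exit my_exit(void)
	{
		unregister_adapter_interrupt(&my_airq);
	}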
- */ - synchronize_sched(); - kfree(airq); + kfree(iv->data); + kfree(iv->ptr); + kfree(iv->bitlock); + kfree(iv->vector); + kfree(iv->avail); + kfree(iv); } -EXPORT_SYMBOL(s390_unregister_adapter_interrupt); - -#define INDICATOR_MASK (0xffUL << ((NR_AIRQS_PER_WORD - 1) * 8)) +EXPORT_SYMBOL(airq_iv_release); -void do_adapter_IO(u8 isc) +/** + * airq_iv_alloc - allocate irq bits from an interrupt vector + * @iv: pointer to an interrupt vector structure + * @num: number of consecutive irq bits to allocate + * + * Returns the bit number of the first irq in the allocated block of irqs, + * or -1UL if no bit is available or the AIRQ_IV_ALLOC flag has not been + * specified + */ +unsigned long airq_iv_alloc(struct airq_iv *iv, unsigned long num) { - int w; - int i; - unsigned long word; - struct airq_t *airq; - - /* - * Access indicator array in word-sized chunks to minimize storage - * fetch operations. - */ - for (w = 0; w < NR_AIRQ_WORDS; w++) { - word = indicators[isc].word[w]; - i = w * NR_AIRQS_PER_WORD; - /* - * Check bytes within word for active indicators. - */ - while (word) { - if (word & INDICATOR_MASK) { - airq = airqs[isc][i]; - /* Make sure gcc reads from airqs only once. */ - barrier(); - if (likely(airq)) - airq->handler(&indicators[isc].byte[i], - airq->drv_data); - else - /* - * Reset ill-behaved indicator. - */ - indicators[isc].byte[i] = 0; - } - word <<= 8; - i++; + unsigned long bit, i, flags; + + if (!iv->avail || num == 0) + return -1UL; + spin_lock_irqsave(&iv->lock, flags); + bit = find_first_bit_inv(iv->avail, iv->bits); + while (bit + num <= iv->bits) { + for (i = 1; i < num; i++) + if (!test_bit_inv(bit + i, iv->avail)) + break; + if (i >= num) { + /* Found a suitable block of irqs */ + for (i = 0; i < num; i++) + clear_bit_inv(bit + i, iv->avail); + if (bit + num >= iv->end) + iv->end = bit + num + 1; + break; } + bit = find_next_bit_inv(iv->avail, iv->bits, bit + i + 1); + } + if (bit + num > iv->bits) + bit = -1UL; + spin_unlock_irqrestore(&iv->lock, flags); + return bit; +} +EXPORT_SYMBOL(airq_iv_alloc); + +/** + * airq_iv_free - free irq bits of an interrupt vector + * @iv: pointer to interrupt vector structure + * @bit: number of the first irq bit to free + * @num: number of consecutive irq bits to free + */ +void airq_iv_free(struct airq_iv *iv, unsigned long bit, unsigned long num) +{ + unsigned long i, flags; + + if (!iv->avail || num == 0) + return; + spin_lock_irqsave(&iv->lock, flags); + for (i = 0; i < num; i++) { + /* Clear (possibly left over) interrupt bit */ + clear_bit_inv(bit + i, iv->vector); + /* Make the bit positions available again */ + set_bit_inv(bit + i, iv->avail); + } + if (bit + num >= iv->end) { + /* Find new end of bit-field */ + while (iv->end > 0 && !test_bit_inv(iv->end - 1, iv->avail)) + iv->end--; } + spin_unlock_irqrestore(&iv->lock, flags); +} +EXPORT_SYMBOL(airq_iv_free); + +/** + * airq_iv_scan - scan interrupt vector for non-zero bits + * @iv: pointer to interrupt vector structure + * @start: bit number to start the search + * @end: bit number to end the search + * + * Returns the bit number of the next non-zero interrupt bit, or + * -1UL if the scan completed without finding any more any non-zero bits. + */ +unsigned long airq_iv_scan(struct airq_iv *iv, unsigned long start, + unsigned long end) +{ + unsigned long bit; + + /* Find non-zero bit starting from 'ivs->next'. 
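Tying the airq_iv helpers together: a driver creates one vector, hands out bits to sub-devices with airq_iv_alloc(), and walks pending bits from its adapter interrupt handler with airq_iv_scan(), which clears each bit it returns. A sketch under those assumptions, names hypothetical:

	#include <linux/errno.h>
	#include <asm/airq.h>

	#define MY_NR_IRQS 64	/* hypothetical vector size */

	static struct airq_iv *my_iv;

	static void my_airq_handler(struct airq_struct *airq)
	{
		unsigned long bit = 0;

		/* scan from the last hit; the returned bit has been cleared */
		while ((bit = airq_iv_scan(my_iv, bit, MY_NR_IRQS)) != -1UL)
			; /* handle the sub-interrupt assigned to 'bit' */
	}

	static int my_create(void)
	{
		unsigned long bit;

		my_iv = airq_iv_create(MY_NR_IRQS, AIRQ_IV_ALLOC);
		if (!my_iv)
			return -ENOMEM;
		bit = airq_iv_alloc(my_iv, 1);	/* -1UL when the vector is full */
		if (bit == -1UL) {
			airq_iv_release(my_iv);
			return -ENOSPC;
		}
		/* later: airq_iv_free(my_iv, bit, 1); airq_iv_release(my_iv); */
		return 0;
	}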
*/ + bit = find_next_bit_inv(iv->vector, end, start); + if (bit >= end) + return -1UL; + clear_bit_inv(bit, iv->vector); + return bit; } +EXPORT_SYMBOL(airq_iv_scan); diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c index 7eab9ab9f40..b3f791b2c1f 100644 --- a/drivers/s390/cio/blacklist.c +++ b/drivers/s390/cio/blacklist.c @@ -1,9 +1,7 @@ /* - * drivers/s390/cio/blacklist.c * S/390 common I/O routines -- blacklisting of specific devices * - * Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH, - * IBM Corporation + * Copyright IBM Corp. 1999, 2013 * Author(s): Ingo Adlung (adlung@de.ibm.com) * Cornelia Huck (cornelia.huck@de.ibm.com) * Arnd Bergmann (arndb@de.ibm.com) @@ -14,14 +12,14 @@ #include <linux/init.h> #include <linux/vmalloc.h> -#include <linux/slab.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/ctype.h> #include <linux/device.h> -#include <asm/cio.h> #include <asm/uaccess.h> +#include <asm/cio.h> +#include <asm/ipl.h> #include "blacklist.h" #include "cio.h" @@ -80,17 +78,15 @@ static int pure_hex(char **cp, unsigned int *val, int min_digit, int max_digit, int max_val) { int diff; - unsigned int value; diff = 0; *val = 0; - while (isxdigit(**cp) && (diff <= max_digit)) { + while (diff <= max_digit) { + int value = hex_to_bin(**cp); - if (isdigit(**cp)) - value = **cp - '0'; - else - value = tolower(**cp) - 'a' + 10; + if (value < 0) + break; *val = *val * 16 + value; (*cp)++; diff++; @@ -177,6 +173,29 @@ static int blacklist_parse_parameters(char *str, range_action action, to_cssid = __MAX_CSSID; to_ssid = __MAX_SSID; to = __MAX_SUBCHANNEL; + } else if (strcmp(parm, "ipldev") == 0) { + if (ipl_info.type == IPL_TYPE_CCW) { + from_cssid = 0; + from_ssid = ipl_info.data.ccw.dev_id.ssid; + from = ipl_info.data.ccw.dev_id.devno; + } else if (ipl_info.type == IPL_TYPE_FCP || + ipl_info.type == IPL_TYPE_FCP_DUMP) { + from_cssid = 0; + from_ssid = ipl_info.data.fcp.dev_id.ssid; + from = ipl_info.data.fcp.dev_id.devno; + } else { + continue; + } + to_cssid = from_cssid; + to_ssid = from_ssid; + to = from; + } else if (strcmp(parm, "condev") == 0) { + if (console_devno == -1) + continue; + + from_cssid = to_cssid = 0; + from_ssid = to_ssid = 0; + from = to = console_devno; } else { rc = parse_busid(strsep(&parm, "-"), &from_cssid, &from_ssid, &from, msgtrigger); @@ -241,16 +260,16 @@ static int blacklist_parse_proc_parameters(char *buf) parm = strsep(&buf, " "); - if (strcmp("free", parm) == 0) + if (strcmp("free", parm) == 0) { rc = blacklist_parse_parameters(buf, free, 0); - else if (strcmp("add", parm) == 0) + css_schedule_eval_all_unreg(0); + } else if (strcmp("add", parm) == 0) rc = blacklist_parse_parameters(buf, add, 0); else if (strcmp("purge", parm) == 0) return ccw_purge_blacklisted(); else return -EINVAL; - css_schedule_reprobe(); return rc; } @@ -338,10 +357,9 @@ cio_ignore_write(struct file *file, const char __user *user_buf, return -EINVAL; if (user_len > 65536) user_len = 65536; - buf = vmalloc (user_len + 1); /* maybe better use the stack? */ + buf = vzalloc(user_len + 1); /* maybe better use the stack? */ if (buf == NULL) return -ENOMEM; - memset(buf, 0, user_len + 1); if (strncpy_from_user (buf, user_buf, user_len) < 0) { rc = -EFAULT; diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c index 5f97ea2ee6b..e443b0d0b23 100644 --- a/drivers/s390/cio/ccwgroup.c +++ b/drivers/s390/cio/ccwgroup.c @@ -1,7 +1,7 @@ /* * bus driver for ccwgroup * - * Copyright IBM Corp. 2002, 2009 + * Copyright IBM Corp. 
2002, 2012 * * Author(s): Arnd Bergmann (arndb@de.ibm.com) * Cornelia Huck (cornelia.huck@de.ibm.com) @@ -15,10 +15,13 @@ #include <linux/ctype.h> #include <linux/dcache.h> +#include <asm/cio.h> #include <asm/ccwdev.h> #include <asm/ccwgroup.h> -#define CCW_BUS_ID_SIZE 20 +#include "device.h" + +#define CCW_BUS_ID_SIZE 10 /* In Linux 2.4, we had a channel device layer called "chandev" * that did all sorts of obscure stuff for networking devices. @@ -27,68 +30,161 @@ * to devices that use multiple subchannels. */ -/* a device matches a driver if all its slave devices match the same - * entry of the driver */ -static int -ccwgroup_bus_match (struct device * dev, struct device_driver * drv) +static struct bus_type ccwgroup_bus_type; + +static void __ccwgroup_remove_symlinks(struct ccwgroup_device *gdev) { - struct ccwgroup_device *gdev; - struct ccwgroup_driver *gdrv; + int i; + char str[8]; - gdev = to_ccwgroupdev(dev); - gdrv = to_ccwgroupdrv(drv); + for (i = 0; i < gdev->count; i++) { + sprintf(str, "cdev%d", i); + sysfs_remove_link(&gdev->dev.kobj, str); + sysfs_remove_link(&gdev->cdev[i]->dev.kobj, "group_device"); + } +} - if (gdev->creator_id == gdrv->driver_id) - return 1; +/* + * Remove references from ccw devices to ccw group device and from + * ccw group device to ccw devices. + */ +static void __ccwgroup_remove_cdev_refs(struct ccwgroup_device *gdev) +{ + struct ccw_device *cdev; + int i; - return 0; + for (i = 0; i < gdev->count; i++) { + cdev = gdev->cdev[i]; + if (!cdev) + continue; + spin_lock_irq(cdev->ccwlock); + dev_set_drvdata(&cdev->dev, NULL); + spin_unlock_irq(cdev->ccwlock); + gdev->cdev[i] = NULL; + put_device(&cdev->dev); + } } -static int -ccwgroup_uevent (struct device *dev, struct kobj_uevent_env *env) + +/** + * ccwgroup_set_online() - enable a ccwgroup device + * @gdev: target ccwgroup device + * + * This function attempts to put the ccwgroup device into the online state. + * Returns: + * %0 on success and a negative error value on failure. + */ +int ccwgroup_set_online(struct ccwgroup_device *gdev) { - /* TODO */ - return 0; + struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver); + int ret = -EINVAL; + + if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0) + return -EAGAIN; + if (gdev->state == CCWGROUP_ONLINE) + goto out; + if (gdrv->set_online) + ret = gdrv->set_online(gdev); + if (ret) + goto out; + + gdev->state = CCWGROUP_ONLINE; +out: + atomic_set(&gdev->onoff, 0); + return ret; } +EXPORT_SYMBOL(ccwgroup_set_online); -static struct bus_type ccwgroup_bus_type; +/** + * ccwgroup_set_offline() - disable a ccwgroup device + * @gdev: target ccwgroup device + * + * This function attempts to put the ccwgroup device into the offline state. + * Returns: + * %0 on success and a negative error value on failure. 
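Stepping back to the blacklist change above: the new ipldev and condev keywords resolve at parse time to the bus ID of the IPL device and of the CCW console device. Combined with the existing all keyword and the ! negation of the cio_ignore syntax, this allows a generic command line that ignores every device except the ones needed to boot, for example:

	cio_ignore=all,!ipldev,!condev

The condev branch quietly skips the keyword when no console device number is known (console_devno == -1), so the line above stays harmless on systems without a CCW console.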
+ */ +int ccwgroup_set_offline(struct ccwgroup_device *gdev) +{ + struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver); + int ret = -EINVAL; + + if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0) + return -EAGAIN; + if (gdev->state == CCWGROUP_OFFLINE) + goto out; + if (gdrv->set_offline) + ret = gdrv->set_offline(gdev); + if (ret) + goto out; -static void -__ccwgroup_remove_symlinks(struct ccwgroup_device *gdev) + gdev->state = CCWGROUP_OFFLINE; +out: + atomic_set(&gdev->onoff, 0); + return ret; +} +EXPORT_SYMBOL(ccwgroup_set_offline); + +static ssize_t ccwgroup_online_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) { - int i; - char str[8]; + struct ccwgroup_device *gdev = to_ccwgroupdev(dev); + unsigned long value; + int ret; - for (i = 0; i < gdev->count; i++) { - sprintf(str, "cdev%d", i); - sysfs_remove_link(&gdev->dev.kobj, str); - sysfs_remove_link(&gdev->cdev[i]->dev.kobj, "group_device"); + device_lock(dev); + if (!dev->driver) { + ret = -EINVAL; + goto out; } - + + ret = kstrtoul(buf, 0, &value); + if (ret) + goto out; + + if (value == 1) + ret = ccwgroup_set_online(gdev); + else if (value == 0) + ret = ccwgroup_set_offline(gdev); + else + ret = -EINVAL; +out: + device_unlock(dev); + return (ret == 0) ? count : ret; +} + +static ssize_t ccwgroup_online_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct ccwgroup_device *gdev = to_ccwgroupdev(dev); + int online; + + online = (gdev->state == CCWGROUP_ONLINE) ? 1 : 0; + + return scnprintf(buf, PAGE_SIZE, "%d\n", online); } /* * Provide an 'ungroup' attribute so the user can remove group devices no * longer needed or accidentially created. Saves memory :) */ -static void ccwgroup_ungroup_callback(struct device *dev) +static void ccwgroup_ungroup(struct ccwgroup_device *gdev) { - struct ccwgroup_device *gdev = to_ccwgroupdev(dev); - mutex_lock(&gdev->reg_mutex); if (device_is_registered(&gdev->dev)) { __ccwgroup_remove_symlinks(gdev); - device_unregister(dev); + device_unregister(&gdev->dev); + __ccwgroup_remove_cdev_refs(gdev); } mutex_unlock(&gdev->reg_mutex); } -static ssize_t -ccwgroup_ungroup_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) +static ssize_t ccwgroup_ungroup_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) { - struct ccwgroup_device *gdev; - int rc; - - gdev = to_ccwgroupdev(dev); + struct ccwgroup_device *gdev = to_ccwgroupdev(dev); + int rc = 0; /* Prevent concurrent online/offline processing and ungrouping. */ if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0) @@ -97,49 +193,57 @@ ccwgroup_ungroup_store(struct device *dev, struct device_attribute *attr, const rc = -EINVAL; goto out; } - /* Note that we cannot unregister the device from one of its - * attribute methods, so we have to use this roundabout approach. - */ - rc = device_schedule_callback(dev, ccwgroup_ungroup_callback); + + if (device_remove_file_self(dev, attr)) + ccwgroup_ungroup(gdev); + else + rc = -ENODEV; out: if (rc) { - if (rc != -EAGAIN) - /* Release onoff "lock" when ungrouping failed. */ - atomic_set(&gdev->onoff, 0); + /* Release onoff "lock" when ungrouping failed. 
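The online attribute above accepts 1 or 0 and routes to ccwgroup_set_online()/ccwgroup_set_offline() while holding the device lock. From user space this is an ordinary sysfs write; a sketch, with the bus ID hypothetical:

	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/sys/bus/ccwgroup/devices/0.0.0600/online", "w");

		if (!f)
			return 1;
		fputs("1", f);	/* "0" would take the group device offline */
		return fclose(f) ? 1 : 0;
	}

Writing anything to the sibling ungroup attribute tears the group down through device_remove_file_self(), which is what lets the attribute delete its own device safely.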
*/ + atomic_set(&gdev->onoff, 0); return rc; } return count; } - static DEVICE_ATTR(ungroup, 0200, NULL, ccwgroup_ungroup_store); +static DEVICE_ATTR(online, 0644, ccwgroup_online_show, ccwgroup_online_store); + +static struct attribute *ccwgroup_attrs[] = { + &dev_attr_online.attr, + &dev_attr_ungroup.attr, + NULL, +}; +static struct attribute_group ccwgroup_attr_group = { + .attrs = ccwgroup_attrs, +}; +static const struct attribute_group *ccwgroup_attr_groups[] = { + &ccwgroup_attr_group, + NULL, +}; -static void -ccwgroup_release (struct device *dev) +static void ccwgroup_ungroup_workfn(struct work_struct *work) { - struct ccwgroup_device *gdev; - int i; + struct ccwgroup_device *gdev = + container_of(work, struct ccwgroup_device, ungroup_work); - gdev = to_ccwgroupdev(dev); + ccwgroup_ungroup(gdev); + put_device(&gdev->dev); +} - for (i = 0; i < gdev->count; i++) { - if (gdev->cdev[i]) { - if (dev_get_drvdata(&gdev->cdev[i]->dev) == gdev) - dev_set_drvdata(&gdev->cdev[i]->dev, NULL); - put_device(&gdev->cdev[i]->dev); - } - } - kfree(gdev); +static void ccwgroup_release(struct device *dev) +{ + kfree(to_ccwgroupdev(dev)); } -static int -__ccwgroup_create_symlinks(struct ccwgroup_device *gdev) +static int __ccwgroup_create_symlinks(struct ccwgroup_device *gdev) { char str[8]; int i, rc; for (i = 0; i < gdev->count; i++) { - rc = sysfs_create_link(&gdev->cdev[i]->dev.kobj, &gdev->dev.kobj, - "group_device"); + rc = sysfs_create_link(&gdev->cdev[i]->dev.kobj, + &gdev->dev.kobj, "group_device"); if (rc) { for (--i; i >= 0; i--) sysfs_remove_link(&gdev->cdev[i]->dev.kobj, @@ -149,8 +253,8 @@ __ccwgroup_create_symlinks(struct ccwgroup_device *gdev) } for (i = 0; i < gdev->count; i++) { sprintf(str, "cdev%d", i); - rc = sysfs_create_link(&gdev->dev.kobj, &gdev->cdev[i]->dev.kobj, - str); + rc = sysfs_create_link(&gdev->dev.kobj, + &gdev->cdev[i]->dev.kobj, str); if (rc) { for (--i; i >= 0; i--) { sprintf(str, "cdev%d", i); @@ -165,9 +269,10 @@ __ccwgroup_create_symlinks(struct ccwgroup_device *gdev) return 0; } -static int __get_next_bus_id(const char **buf, char *bus_id) +static int __get_next_id(const char **buf, struct ccw_dev_id *id) { - int rc, len; + unsigned int cssid, ssid, devno; + int ret = 0, len; char *start, *end; start = (char *)*buf; @@ -182,49 +287,40 @@ static int __get_next_bus_id(const char **buf, char *bus_id) len = end - start + 1; end++; } - if (len < CCW_BUS_ID_SIZE) { - strlcpy(bus_id, start, len); - rc = 0; + if (len <= CCW_BUS_ID_SIZE) { + if (sscanf(start, "%2x.%1x.%04x", &cssid, &ssid, &devno) != 3) + ret = -EINVAL; } else - rc = -EINVAL; - *buf = end; - return rc; -} - -static int __is_valid_bus_id(char bus_id[CCW_BUS_ID_SIZE]) -{ - int cssid, ssid, devno; + ret = -EINVAL; - /* Must be of form %x.%x.%04x */ - if (sscanf(bus_id, "%x.%1x.%04x", &cssid, &ssid, &devno) != 3) - return 0; - return 1; + if (!ret) { + id->ssid = ssid; + id->devno = devno; + } + *buf = end; + return ret; } /** - * ccwgroup_create_from_string() - create and register a ccw group device - * @root: parent device for the new device - * @creator_id: identifier of creating driver - * @cdrv: ccw driver of slave devices + * ccwgroup_create_dev() - create and register a ccw group device + * @parent: parent device for the new device + * @gdrv: driver for the new group device * @num_devices: number of slave devices * @buf: buffer containing comma separated bus ids of slave devices * - * Create and register a new ccw group device as a child of @root. 
Slave - * devices are obtained from the list of bus ids given in @buf and must all - * belong to @cdrv. + * Create and register a new ccw group device as a child of @parent. Slave + * devices are obtained from the list of bus ids given in @buf. * Returns: * %0 on success and an error code on failure. * Context: * non-atomic */ -int ccwgroup_create_from_string(struct device *root, unsigned int creator_id, - struct ccw_driver *cdrv, int num_devices, - const char *buf) +int ccwgroup_create_dev(struct device *parent, struct ccwgroup_driver *gdrv, + int num_devices, const char *buf) { struct ccwgroup_device *gdev; + struct ccw_dev_id dev_id; int rc, i; - char tmp_bus_id[CCW_BUS_ID_SIZE]; - const char *curr_buf; gdev = kzalloc(sizeof(*gdev) + num_devices * sizeof(gdev->cdev[0]), GFP_KERNEL); @@ -234,77 +330,76 @@ int ccwgroup_create_from_string(struct device *root, unsigned int creator_id, atomic_set(&gdev->onoff, 0); mutex_init(&gdev->reg_mutex); mutex_lock(&gdev->reg_mutex); - gdev->creator_id = creator_id; + INIT_WORK(&gdev->ungroup_work, ccwgroup_ungroup_workfn); gdev->count = num_devices; gdev->dev.bus = &ccwgroup_bus_type; - gdev->dev.parent = root; + gdev->dev.parent = parent; gdev->dev.release = ccwgroup_release; device_initialize(&gdev->dev); - curr_buf = buf; - for (i = 0; i < num_devices && curr_buf; i++) { - rc = __get_next_bus_id(&curr_buf, tmp_bus_id); + for (i = 0; i < num_devices && buf; i++) { + rc = __get_next_id(&buf, &dev_id); if (rc != 0) goto error; - if (!__is_valid_bus_id(tmp_bus_id)) { - rc = -EINVAL; - goto error; - } - gdev->cdev[i] = get_ccwdev_by_busid(cdrv, tmp_bus_id); + gdev->cdev[i] = get_ccwdev_by_dev_id(&dev_id); /* * All devices have to be of the same type in * order to be grouped. */ - if (!gdev->cdev[i] - || gdev->cdev[i]->id.driver_info != + if (!gdev->cdev[i] || !gdev->cdev[i]->drv || + gdev->cdev[i]->drv != gdev->cdev[0]->drv || + gdev->cdev[i]->id.driver_info != gdev->cdev[0]->id.driver_info) { rc = -EINVAL; goto error; } /* Don't allow a device to belong to more than one group. */ + spin_lock_irq(gdev->cdev[i]->ccwlock); if (dev_get_drvdata(&gdev->cdev[i]->dev)) { + spin_unlock_irq(gdev->cdev[i]->ccwlock); rc = -EINVAL; goto error; } dev_set_drvdata(&gdev->cdev[i]->dev, gdev); + spin_unlock_irq(gdev->cdev[i]->ccwlock); } /* Check for sufficient number of bus ids. */ - if (i < num_devices && !curr_buf) { + if (i < num_devices) { rc = -EINVAL; goto error; } /* Check for trailing stuff. */ - if (i == num_devices && strlen(curr_buf) > 0) { + if (i == num_devices && strlen(buf) > 0) { rc = -EINVAL; goto error; } dev_set_name(&gdev->dev, "%s", dev_name(&gdev->cdev[0]->dev)); + gdev->dev.groups = ccwgroup_attr_groups; + if (gdrv) { + gdev->dev.driver = &gdrv->driver; + rc = gdrv->setup ? 
gdrv->setup(gdev) : 0; + if (rc) + goto error; + } rc = device_add(&gdev->dev); if (rc) goto error; - get_device(&gdev->dev); - rc = device_create_file(&gdev->dev, &dev_attr_ungroup); - + rc = __ccwgroup_create_symlinks(gdev); if (rc) { - device_unregister(&gdev->dev); + device_del(&gdev->dev); goto error; } - - rc = __ccwgroup_create_symlinks(gdev); - if (!rc) { - mutex_unlock(&gdev->reg_mutex); - put_device(&gdev->dev); - return 0; - } - device_remove_file(&gdev->dev, &dev_attr_ungroup); - device_unregister(&gdev->dev); + mutex_unlock(&gdev->reg_mutex); + return 0; error: for (i = 0; i < num_devices; i++) if (gdev->cdev[i]) { + spin_lock_irq(gdev->cdev[i]->ccwlock); if (dev_get_drvdata(&gdev->cdev[i]->dev) == gdev) dev_set_drvdata(&gdev->cdev[i]->dev, NULL); + spin_unlock_irq(gdev->cdev[i]->ccwlock); put_device(&gdev->cdev[i]->dev); gdev->cdev[i] = NULL; } @@ -312,10 +407,20 @@ error: put_device(&gdev->dev); return rc; } -EXPORT_SYMBOL(ccwgroup_create_from_string); +EXPORT_SYMBOL(ccwgroup_create_dev); static int ccwgroup_notifier(struct notifier_block *nb, unsigned long action, - void *data); + void *data) +{ + struct ccwgroup_device *gdev = to_ccwgroupdev(data); + + if (action == BUS_NOTIFY_UNBIND_DRIVER) { + get_device(&gdev->dev); + schedule_work(&gdev->ungroup_work); + } + + return NOTIFY_OK; +} static struct notifier_block ccwgroup_nb = { .notifier_call = ccwgroup_notifier @@ -347,138 +452,13 @@ module_exit(cleanup_ccwgroup); /************************** driver stuff ******************************/ -static int -ccwgroup_set_online(struct ccwgroup_device *gdev) -{ - struct ccwgroup_driver *gdrv; - int ret; - - if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0) - return -EAGAIN; - if (gdev->state == CCWGROUP_ONLINE) { - ret = 0; - goto out; - } - if (!gdev->dev.driver) { - ret = -EINVAL; - goto out; - } - gdrv = to_ccwgroupdrv (gdev->dev.driver); - if ((ret = gdrv->set_online ? gdrv->set_online(gdev) : 0)) - goto out; - - gdev->state = CCWGROUP_ONLINE; - out: - atomic_set(&gdev->onoff, 0); - return ret; -} - -static int -ccwgroup_set_offline(struct ccwgroup_device *gdev) -{ - struct ccwgroup_driver *gdrv; - int ret; - - if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0) - return -EAGAIN; - if (gdev->state == CCWGROUP_OFFLINE) { - ret = 0; - goto out; - } - if (!gdev->dev.driver) { - ret = -EINVAL; - goto out; - } - gdrv = to_ccwgroupdrv (gdev->dev.driver); - if ((ret = gdrv->set_offline ? gdrv->set_offline(gdev) : 0)) - goto out; - - gdev->state = CCWGROUP_OFFLINE; - out: - atomic_set(&gdev->onoff, 0); - return ret; -} - -static ssize_t -ccwgroup_online_store (struct device *dev, struct device_attribute *attr, const char *buf, size_t count) -{ - struct ccwgroup_device *gdev; - struct ccwgroup_driver *gdrv; - unsigned long value; - int ret; - - if (!dev->driver) - return -ENODEV; - - gdev = to_ccwgroupdev(dev); - gdrv = to_ccwgroupdrv(dev->driver); - - if (!try_module_get(gdrv->owner)) - return -EINVAL; - - ret = strict_strtoul(buf, 0, &value); - if (ret) - goto out; - - if (value == 1) - ret = ccwgroup_set_online(gdev); - else if (value == 0) - ret = ccwgroup_set_offline(gdev); - else - ret = -EINVAL; -out: - module_put(gdrv->owner); - return (ret == 0) ? count : ret; -} - -static ssize_t -ccwgroup_online_show (struct device *dev, struct device_attribute *attr, char *buf) -{ - int online; - - online = (to_ccwgroupdev(dev)->state == CCWGROUP_ONLINE); - - return sprintf(buf, online ? 
"1\n" : "0\n"); -} - -static DEVICE_ATTR(online, 0644, ccwgroup_online_show, ccwgroup_online_store); - -static int -ccwgroup_probe (struct device *dev) -{ - struct ccwgroup_device *gdev; - struct ccwgroup_driver *gdrv; - - int ret; - - gdev = to_ccwgroupdev(dev); - gdrv = to_ccwgroupdrv(dev->driver); - - if ((ret = device_create_file(dev, &dev_attr_online))) - return ret; - - ret = gdrv->probe ? gdrv->probe(gdev) : -ENODEV; - if (ret) - device_remove_file(dev, &dev_attr_online); - - return ret; -} - -static int -ccwgroup_remove (struct device *dev) +static int ccwgroup_remove(struct device *dev) { - struct ccwgroup_device *gdev; - struct ccwgroup_driver *gdrv; - - device_remove_file(dev, &dev_attr_online); - device_remove_file(dev, &dev_attr_ungroup); + struct ccwgroup_device *gdev = to_ccwgroupdev(dev); + struct ccwgroup_driver *gdrv = to_ccwgroupdrv(dev->driver); if (!dev->driver) return 0; - - gdev = to_ccwgroupdev(dev); - gdrv = to_ccwgroupdrv(dev->driver); - if (gdrv->remove) gdrv->remove(gdev); @@ -487,15 +467,11 @@ ccwgroup_remove (struct device *dev) static void ccwgroup_shutdown(struct device *dev) { - struct ccwgroup_device *gdev; - struct ccwgroup_driver *gdrv; + struct ccwgroup_device *gdev = to_ccwgroupdev(dev); + struct ccwgroup_driver *gdrv = to_ccwgroupdrv(dev->driver); if (!dev->driver) return; - - gdev = to_ccwgroupdev(dev); - gdrv = to_ccwgroupdrv(dev->driver); - if (gdrv->shutdown) gdrv->shutdown(gdev); } @@ -570,27 +546,11 @@ static const struct dev_pm_ops ccwgroup_pm_ops = { static struct bus_type ccwgroup_bus_type = { .name = "ccwgroup", - .match = ccwgroup_bus_match, - .uevent = ccwgroup_uevent, - .probe = ccwgroup_probe, .remove = ccwgroup_remove, .shutdown = ccwgroup_shutdown, .pm = &ccwgroup_pm_ops, }; - -static int ccwgroup_notifier(struct notifier_block *nb, unsigned long action, - void *data) -{ - struct device *dev = data; - - if (action == BUS_NOTIFY_UNBIND_DRIVER) - device_schedule_callback(dev, ccwgroup_ungroup_callback); - - return NOTIFY_OK; -} - - /** * ccwgroup_driver_register() - register a ccw group driver * @cdriver: driver to be registered @@ -601,14 +561,12 @@ int ccwgroup_driver_register(struct ccwgroup_driver *cdriver) { /* register our new driver with the core */ cdriver->driver.bus = &ccwgroup_bus_type; - cdriver->driver.name = cdriver->name; - cdriver->driver.owner = cdriver->owner; return driver_register(&cdriver->driver); } +EXPORT_SYMBOL(ccwgroup_driver_register); -static int -__ccwgroup_match_all(struct device *dev, void *data) +static int __ccwgroup_match_all(struct device *dev, void *data) { return 1; } @@ -624,20 +582,16 @@ void ccwgroup_driver_unregister(struct ccwgroup_driver *cdriver) struct device *dev; /* We don't want ccwgroup devices to live longer than their driver. 
*/ - get_driver(&cdriver->driver); while ((dev = driver_find_device(&cdriver->driver, NULL, NULL, __ccwgroup_match_all))) { struct ccwgroup_device *gdev = to_ccwgroupdev(dev); - mutex_lock(&gdev->reg_mutex); - __ccwgroup_remove_symlinks(gdev); - device_unregister(dev); - mutex_unlock(&gdev->reg_mutex); + ccwgroup_ungroup(gdev); put_device(dev); } - put_driver(&cdriver->driver); driver_unregister(&cdriver->driver); } +EXPORT_SYMBOL(ccwgroup_driver_unregister); /** * ccwgroup_probe_ccwdev() - probe function for slave devices @@ -652,25 +606,7 @@ int ccwgroup_probe_ccwdev(struct ccw_device *cdev) { return 0; } - -static struct ccwgroup_device * -__ccwgroup_get_gdev_by_cdev(struct ccw_device *cdev) -{ - struct ccwgroup_device *gdev; - - gdev = dev_get_drvdata(&cdev->dev); - if (gdev) { - if (get_device(&gdev->dev)) { - mutex_lock(&gdev->reg_mutex); - if (device_is_registered(&gdev->dev)) - return gdev; - mutex_unlock(&gdev->reg_mutex); - put_device(&gdev->dev); - } - return NULL; - } - return NULL; -} +EXPORT_SYMBOL(ccwgroup_probe_ccwdev); /** * ccwgroup_remove_ccwdev() - remove function for slave devices @@ -687,17 +623,19 @@ void ccwgroup_remove_ccwdev(struct ccw_device *cdev) /* Ignore offlining errors, device is gone anyway. */ ccw_device_set_offline(cdev); /* If one of its devices is gone, the whole group is done for. */ - gdev = __ccwgroup_get_gdev_by_cdev(cdev); - if (gdev) { - __ccwgroup_remove_symlinks(gdev); - device_unregister(&gdev->dev); - mutex_unlock(&gdev->reg_mutex); - put_device(&gdev->dev); + spin_lock_irq(cdev->ccwlock); + gdev = dev_get_drvdata(&cdev->dev); + if (!gdev) { + spin_unlock_irq(cdev->ccwlock); + return; } + /* Get ccwgroup device reference for local processing. */ + get_device(&gdev->dev); + spin_unlock_irq(cdev->ccwlock); + /* Unregister group device. */ + ccwgroup_ungroup(gdev); + /* Release ccwgroup device reference for local processing. */ + put_device(&gdev->dev); } - -MODULE_LICENSE("GPL"); -EXPORT_SYMBOL(ccwgroup_driver_register); -EXPORT_SYMBOL(ccwgroup_driver_unregister); -EXPORT_SYMBOL(ccwgroup_probe_ccwdev); EXPORT_SYMBOL(ccwgroup_remove_ccwdev); +MODULE_LICENSE("GPL"); diff --git a/drivers/s390/cio/ccwreq.c b/drivers/s390/cio/ccwreq.c index 37df42af05e..07676c22d51 100644 --- a/drivers/s390/cio/ccwreq.c +++ b/drivers/s390/cio/ccwreq.c @@ -1,10 +1,13 @@ /* * Handling of internal CCW device requests. * - * Copyright IBM Corp. 2009 + * Copyright IBM Corp. 2009, 2011 * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com> */ +#define KMSG_COMPONENT "cio" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + #include <linux/types.h> #include <linux/err.h> #include <asm/ccwdev.h> @@ -38,9 +41,13 @@ static u16 ccwreq_next_path(struct ccw_device *cdev) { struct ccw_request *req = &cdev->private->req; + if (!req->singlepath) { + req->mask = 0; + goto out; + } req->retries = req->maxretries; - req->mask = lpm_adjust(req->mask >>= 1, req->lpm); - + req->mask = lpm_adjust(req->mask >> 1, req->lpm); +out: return req->mask; } @@ -113,8 +120,12 @@ void ccw_request_start(struct ccw_device *cdev) { struct ccw_request *req = &cdev->private->req; - /* Try all paths twice to counter link flapping. */ - req->mask = 0x8080; + if (req->singlepath) { + /* Try all paths twice to counter link flapping. 
*/ + req->mask = 0x8080; + } else + req->mask = req->lpm; + req->retries = req->maxretries; req->mask = lpm_adjust(req->mask, req->lpm); req->drc = 0; @@ -159,6 +170,7 @@ static enum io_status ccwreq_status(struct ccw_device *cdev, struct irb *lcirb) { struct irb *irb = &cdev->private->irb; struct cmd_scsw *scsw = &irb->scsw.cmd; + enum uc_todo todo; /* Perform BASIC SENSE if needed. */ if (ccw_device_accumulate_and_sense(cdev, lcirb)) @@ -178,6 +190,22 @@ static enum io_status ccwreq_status(struct ccw_device *cdev, struct irb *lcirb) /* Check for command reject. */ if (irb->ecw[0] & SNS0_CMD_REJECT) return IO_REJECTED; + /* Ask the driver what to do */ + if (cdev->drv && cdev->drv->uc_handler) { + todo = cdev->drv->uc_handler(cdev, lcirb); + CIO_TRACE_EVENT(2, "uc_response"); + CIO_HEX_EVENT(2, &todo, sizeof(todo)); + switch (todo) { + case UC_TODO_RETRY: + return IO_STATUS_ERROR; + case UC_TODO_RETRY_ON_NEW_PATH: + return IO_PATH_ERROR; + case UC_TODO_STOP: + return IO_REJECTED; + default: + return IO_STATUS_ERROR; + } + } /* Assume that unexpected SENSE data implies an error. */ return IO_STATUS_ERROR; } @@ -224,7 +252,7 @@ static void ccwreq_log_status(struct ccw_device *cdev, enum io_status status) */ void ccw_request_handler(struct ccw_device *cdev) { - struct irb *irb = (struct irb *)&S390_lowcore.irb; + struct irb *irb = &__get_cpu_var(cio_irb); struct ccw_request *req = &cdev->private->req; enum io_status status; int rc = -EOPNOTSUPP; @@ -298,7 +326,21 @@ void ccw_request_timeout(struct ccw_device *cdev) { struct subchannel *sch = to_subchannel(cdev->dev.parent); struct ccw_request *req = &cdev->private->req; - int rc; + int rc = -ENODEV, chp; + + if (cio_update_schib(sch)) + goto err; + + for (chp = 0; chp < 8; chp++) { + if ((0x80 >> chp) & sch->schib.pmcw.lpum) + pr_warning("%s: No interrupt was received within %lus " + "(CS=%02x, DS=%02x, CHPID=%x.%02x)\n", + dev_name(&cdev->dev), req->timeout / HZ, + scsw_cstat(&sch->schib.scsw), + scsw_dstat(&sch->schib.scsw), + sch->schid.cssid, + sch->schib.pmcw.chpid[chp]); + } if (!ccwreq_next_path(cdev)) { /* set the final return code for this request */ @@ -317,7 +359,7 @@ err: * ccw_request_notoper - notoper handler for I/O request procedure * @cdev: ccw device * - * Handle timeout during I/O request procedure. + * Handle notoper during I/O request procedure. */ void ccw_request_notoper(struct ccw_device *cdev) { diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c index c268a2e5b7c..d497aa05a72 100644 --- a/drivers/s390/cio/chp.c +++ b/drivers/s390/cio/chp.c @@ -1,7 +1,5 @@ /* - * drivers/s390/cio/chp.c - * - * Copyright IBM Corp. 1999,2007 + * Copyright IBM Corp. 1999, 2010 * Author(s): Cornelia Huck (cornelia.huck@de.ibm.com) * Arnd Bergmann (arndb@de.ibm.com) * Peter Oberparleiter <peter.oberparleiter@de.ibm.com> @@ -10,11 +8,14 @@ #include <linux/bug.h> #include <linux/workqueue.h> #include <linux/spinlock.h> +#include <linux/export.h> +#include <linux/sched.h> #include <linux/init.h> #include <linux/jiffies.h> #include <linux/wait.h> #include <linux/mutex.h> #include <linux/errno.h> +#include <linux/slab.h> #include <asm/chpid.h> #include <asm/sclp.h> #include <asm/crw.h> @@ -53,12 +54,6 @@ static struct work_struct cfg_work; /* Wait queue for configure completion events. */ static wait_queue_head_t cfg_wait_queue; -/* Return channel_path struct for given chpid. 
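The new uc_handler hook in ccwreq_status() above lets the owning CCW driver decide how the internal request machinery reacts to a unit check: retry on the same path (UC_TODO_RETRY), retry on another path (UC_TODO_RETRY_ON_NEW_PATH), or abort (UC_TODO_STOP). A hypothetical handler; the sense-bit test is illustrative only:

	#include <asm/ccwdev.h>

	static enum uc_todo my_uc_handler(struct ccw_device *cdev, struct irb *irb)
	{
		if (irb->ecw[0] & SNS0_CMD_REJECT)
			return UC_TODO_STOP;		/* becomes IO_REJECTED */
		return UC_TODO_RETRY_ON_NEW_PATH;	/* becomes IO_PATH_ERROR */
	}

	static struct ccw_driver my_driver = {
		.driver = {
			.name = "my_ccw",	/* hypothetical */
		},
		.uc_handler = my_uc_handler,
		/* ... probe/remove/handler ... */
	};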
*/ -static inline struct channel_path *chpid_to_chp(struct chp_id chpid) -{ - return channel_subsystems[chpid.cssid]->chps[chpid.id]; -} - /* Set vary state for given chpid. */ static void set_chp_logically_online(struct chp_id chpid, int onoff) { @@ -134,7 +129,8 @@ static int s390_vary_chpid(struct chp_id chpid, int on) /* * Channel measurement related functions */ -static ssize_t chp_measurement_chars_read(struct kobject *kobj, +static ssize_t chp_measurement_chars_read(struct file *filp, + struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { @@ -181,7 +177,7 @@ static void chp_measurement_copy_block(struct cmg_entry *buf, } while (reference_buf.values[0] != buf->values[0]); } -static ssize_t chp_measurement_read(struct kobject *kobj, +static ssize_t chp_measurement_read(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { @@ -239,11 +235,13 @@ static ssize_t chp_status_show(struct device *dev, struct device_attribute *attr, char *buf) { struct channel_path *chp = to_channelpath(dev); + int status; - if (!chp) - return 0; - return (chp_get_status(chp->chpid) ? sprintf(buf, "online\n") : - sprintf(buf, "offline\n")); + mutex_lock(&chp->lock); + status = chp->state; + mutex_unlock(&chp->lock); + + return status ? sprintf(buf, "online\n") : sprintf(buf, "offline\n"); } static ssize_t chp_status_write(struct device *dev, @@ -259,15 +257,18 @@ static ssize_t chp_status_write(struct device *dev, if (!num_args) return count; - if (!strnicmp(cmd, "on", 2) || !strcmp(cmd, "1")) + if (!strnicmp(cmd, "on", 2) || !strcmp(cmd, "1")) { + mutex_lock(&cp->lock); error = s390_vary_chpid(cp->chpid, 1); - else if (!strnicmp(cmd, "off", 3) || !strcmp(cmd, "0")) + mutex_unlock(&cp->lock); + } else if (!strnicmp(cmd, "off", 3) || !strcmp(cmd, "0")) { + mutex_lock(&cp->lock); error = s390_vary_chpid(cp->chpid, 0); - else + mutex_unlock(&cp->lock); + } else error = -EINVAL; return error < 0 ? error : count; - } static DEVICE_ATTR(status, 0644, chp_status_show, chp_status_write); @@ -313,10 +314,12 @@ static ssize_t chp_type_show(struct device *dev, struct device_attribute *attr, char *buf) { struct channel_path *chp = to_channelpath(dev); + u8 type; - if (!chp) - return 0; - return sprintf(buf, "%x\n", chp->desc.desc); + mutex_lock(&chp->lock); + type = chp->desc.desc; + mutex_unlock(&chp->lock); + return sprintf(buf, "%x\n", type); } static DEVICE_ATTR(type, 0444, chp_type_show, NULL); @@ -349,18 +352,57 @@ static ssize_t chp_shared_show(struct device *dev, static DEVICE_ATTR(shared, 0444, chp_shared_show, NULL); +static ssize_t chp_chid_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct channel_path *chp = to_channelpath(dev); + ssize_t rc; + + mutex_lock(&chp->lock); + if (chp->desc_fmt1.flags & 0x10) + rc = sprintf(buf, "%04x\n", chp->desc_fmt1.chid); + else + rc = 0; + mutex_unlock(&chp->lock); + + return rc; +} +static DEVICE_ATTR(chid, 0444, chp_chid_show, NULL); + +static ssize_t chp_chid_external_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct channel_path *chp = to_channelpath(dev); + ssize_t rc; + + mutex_lock(&chp->lock); + if (chp->desc_fmt1.flags & 0x10) + rc = sprintf(buf, "%x\n", chp->desc_fmt1.flags & 0x8 ? 
1 : 0); + else + rc = 0; + mutex_unlock(&chp->lock); + + return rc; +} +static DEVICE_ATTR(chid_external, 0444, chp_chid_external_show, NULL); + static struct attribute *chp_attrs[] = { &dev_attr_status.attr, &dev_attr_configure.attr, &dev_attr_type.attr, &dev_attr_cmg.attr, &dev_attr_shared.attr, + &dev_attr_chid.attr, + &dev_attr_chid_external.attr, NULL, }; - static struct attribute_group chp_attr_group = { .attrs = chp_attrs, }; +static const struct attribute_group *chp_attr_groups[] = { + &chp_attr_group, + NULL, +}; static void chp_release(struct device *dev) { @@ -371,6 +413,26 @@ static void chp_release(struct device *dev) } /** + * chp_update_desc - update channel-path description + * @chp - channel-path + * + * Update the channel-path description of the specified channel-path. + * Return zero on success, non-zero otherwise. + */ +int chp_update_desc(struct channel_path *chp) +{ + int rc; + + rc = chsc_determine_base_channel_path_desc(chp->chpid, &chp->desc); + if (rc) + return rc; + + rc = chsc_determine_fmt1_channel_path_desc(chp->chpid, &chp->desc_fmt1); + + return rc; +} + +/** * chp_new - register a new channel-path * @chpid - channel-path ID * @@ -392,10 +454,12 @@ int chp_new(struct chp_id chpid) chp->chpid = chpid; chp->state = 1; chp->dev.parent = &channel_subsystems[chpid.cssid]->device; + chp->dev.groups = chp_attr_groups; chp->dev.release = chp_release; + mutex_init(&chp->lock); /* Obtain channel path description and fill it in. */ - ret = chsc_determine_base_channel_path_desc(chpid, &chp->desc); + ret = chp_update_desc(chp); if (ret) goto out_free; if ((chp->desc.flags & 0x80) == 0) { @@ -420,16 +484,10 @@ int chp_new(struct chp_id chpid) put_device(&chp->dev); goto out; } - ret = sysfs_create_group(&chp->dev.kobj, &chp_attr_group); - if (ret) { - device_unregister(&chp->dev); - goto out; - } mutex_lock(&channel_subsystems[chpid.cssid]->mutex); if (channel_subsystems[chpid.cssid]->cm_enabled) { ret = chp_add_cmg_attr(chp); if (ret) { - sysfs_remove_group(&chp->dev.kobj, &chp_attr_group); device_unregister(&chp->dev); mutex_unlock(&channel_subsystems[chpid.cssid]->mutex); goto out; @@ -451,7 +509,7 @@ out: * On success return a newly allocated copy of the channel-path description * data associated with the given channel-path ID. Return %NULL on error. */ -void *chp_get_chp_desc(struct chp_id chpid) +struct channel_path_desc *chp_get_chp_desc(struct chp_id chpid) { struct channel_path *chp; struct channel_path_desc *desc; @@ -462,7 +520,10 @@ void *chp_get_chp_desc(struct chp_id chpid) desc = kmalloc(sizeof(struct channel_path_desc), GFP_KERNEL); if (!desc) return NULL; + + mutex_lock(&chp->lock); memcpy(desc, &chp->desc, sizeof(struct channel_path_desc)); + mutex_unlock(&chp->lock); return desc; } diff --git a/drivers/s390/cio/chp.h b/drivers/s390/cio/chp.h index 26c3d224617..4efd5b867cc 100644 --- a/drivers/s390/cio/chp.h +++ b/drivers/s390/cio/chp.h @@ -1,7 +1,5 @@ /* - * drivers/s390/cio/chp.h - * - * Copyright IBM Corp. 2007 + * Copyright IBM Corp. 2007, 2010 * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com> */ @@ -10,6 +8,7 @@ #include <linux/types.h> #include <linux/device.h> +#include <linux/mutex.h> #include <asm/chpid.h> #include "chsc.h" #include "css.h" @@ -40,22 +39,31 @@ static inline int chp_test_bit(u8 *bitmap, int num) struct channel_path { + struct device dev; struct chp_id chpid; + struct mutex lock; /* Serialize access to below members. 
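The two new sysfs attributes decode bits of the format-1 channel-path description block: bit 0x10 in the flags field marks the CHID as valid (otherwise the attribute reads empty), and bit 0x8 marks the CHID as external. A small decoder over the same flag layout; the struct below is a demo-only stand-in for the two desc_fmt1 fields the attributes use, with made-up values:

#include <stdio.h>
#include <stdint.h>

struct desc_fmt1_demo {		/* demo stand-in, not the kernel struct */
	uint8_t flags;
	uint16_t chid;
};

static void show_chid(const struct desc_fmt1_demo *d)
{
	if (!(d->flags & 0x10)) {
		printf("chid: (not valid, attribute reads empty)\n");
		return;
	}
	printf("chid: %04x\n", d->chid);
	printf("chid_external: %x\n", (d->flags & 0x8) ? 1 : 0);
}

int main(void)
{
	struct desc_fmt1_demo internal = { .flags = 0x10, .chid = 0x01aa };
	struct desc_fmt1_demo external = { .flags = 0x18, .chid = 0x02bb };

	show_chid(&internal);
	show_chid(&external);
	return 0;
}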
*/ int state; struct channel_path_desc desc; + struct channel_path_desc_fmt1 desc_fmt1; /* Channel-measurement related stuff: */ int cmg; int shared; void *cmg_chars; - struct device dev; }; +/* Return channel_path struct for given chpid. */ +static inline struct channel_path *chpid_to_chp(struct chp_id chpid) +{ + return channel_subsystems[chpid.cssid]->chps[chpid.id]; +} + int chp_get_status(struct chp_id chpid); u8 chp_get_sch_opm(struct subchannel *sch); int chp_is_registered(struct chp_id chpid); -void *chp_get_chp_desc(struct chp_id chpid); +struct channel_path_desc *chp_get_chp_desc(struct chp_id chpid); void chp_remove_cmg_attr(struct channel_path *chp); int chp_add_cmg_attr(struct channel_path *chp); +int chp_update_desc(struct channel_path *chp); int chp_new(struct chp_id chpid); void chp_cfg_schedule(struct chp_id chpid, int configure); void chp_cfg_cancel_deconfigure(struct chp_id chpid); diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c index 4038f5b4f14..e3bf885f4a6 100644 --- a/drivers/s390/cio/chsc.c +++ b/drivers/s390/cio/chsc.c @@ -1,8 +1,7 @@ /* - * drivers/s390/cio/chsc.c * S/390 common I/O routines -- channel subsystem call * - * Copyright IBM Corp. 1999,2008 + * Copyright IBM Corp. 1999,2012 * Author(s): Ingo Adlung (adlung@de.ibm.com) * Cornelia Huck (cornelia.huck@de.ibm.com) * Arnd Bergmann (arndb@de.ibm.com) @@ -15,11 +14,13 @@ #include <linux/slab.h> #include <linux/init.h> #include <linux/device.h> +#include <linux/pci.h> #include <asm/cio.h> #include <asm/chpid.h> #include <asm/chsc.h> #include <asm/crw.h> +#include <asm/isc.h> #include "css.h" #include "cio.h" @@ -29,6 +30,8 @@ #include "chsc.h" static void *sei_page; +static void *chsc_page; +static DEFINE_SPINLOCK(chsc_page_lock); /** * chsc_error_from_response() - convert a chsc response to an error @@ -47,9 +50,16 @@ int chsc_error_from_response(int response) case 0x0007: case 0x0008: case 0x000a: + case 0x0104: return -EINVAL; case 0x0004: return -EOPNOTSUPP; + case 0x000b: + case 0x0107: /* "Channel busy" for the op 0x003d */ + return -EBUSY; + case 0x0100: + case 0x0102: + return -ENOMEM; default: return -EIO; } @@ -82,17 +92,15 @@ struct chsc_ssd_area { int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd) { - unsigned long page; struct chsc_ssd_area *ssd_area; int ccode; int ret; int i; int mask; - page = get_zeroed_page(GFP_KERNEL | GFP_DMA); - if (!page) - return -ENOMEM; - ssd_area = (struct chsc_ssd_area *) page; + spin_lock_irq(&chsc_page_lock); + memset(chsc_page, 0, PAGE_SIZE); + ssd_area = chsc_page; ssd_area->request.length = 0x0010; ssd_area->request.code = 0x0004; ssd_area->ssid = schid.ssid; @@ -103,25 +111,25 @@ int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd) /* Check response. */ if (ccode > 0) { ret = (ccode == 3) ? 
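The response-code translation extended above is a plain table and is easy to exercise in isolation. The sketch below copies the mapping visible in the hunk; the cases not shown in the hunk (0x0001 for success, 0x0002/0x0003/0x0006 for invalid requests) are filled in from the unchanged part of the function, and everything unknown falls through to -EIO:

#include <stdio.h>
#include <errno.h>
#include <string.h>

/* Userspace copy of chsc_error_from_response() for experimentation. */
static int chsc_error_from_response(int response)
{
	switch (response) {
	case 0x0001:
		return 0;
	case 0x0002:
	case 0x0003:
	case 0x0006:
	case 0x0007:
	case 0x0008:
	case 0x000a:
	case 0x0104:
		return -EINVAL;
	case 0x0004:
		return -EOPNOTSUPP;
	case 0x000b:
	case 0x0107:	/* "channel busy" for operation code 0x003d */
		return -EBUSY;
	case 0x0100:
	case 0x0102:
		return -ENOMEM;
	default:
		return -EIO;
	}
}

int main(void)
{
	printf("0x0001 -> %d\n", chsc_error_from_response(0x0001));
	printf("0x0107 -> %s\n", strerror(-chsc_error_from_response(0x0107)));
	printf("0xdead -> %s\n", strerror(-chsc_error_from_response(0xdead)));
	return 0;
}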
-ENODEV : -EBUSY; - goto out_free; + goto out; } ret = chsc_error_from_response(ssd_area->response.code); if (ret != 0) { CIO_MSG_EVENT(2, "chsc: ssd failed for 0.%x.%04x (rc=%04x)\n", schid.ssid, schid.sch_no, ssd_area->response.code); - goto out_free; + goto out; } if (!ssd_area->sch_valid) { ret = -ENODEV; - goto out_free; + goto out; } /* Copy data */ ret = 0; memset(ssd, 0, sizeof(struct chsc_ssd_info)); if ((ssd_area->st != SUBCHANNEL_TYPE_IO) && (ssd_area->st != SUBCHANNEL_TYPE_MSG)) - goto out_free; + goto out; ssd->path_mask = ssd_area->path_mask; ssd->fla_valid_mask = ssd_area->fla_valid_mask; for (i = 0; i < 8; i++) { @@ -133,11 +141,70 @@ int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd) if (ssd_area->fla_valid_mask & mask) ssd->fla[i] = ssd_area->fla[i]; } -out_free: - free_page(page); +out: + spin_unlock_irq(&chsc_page_lock); return ret; } +/** + * chsc_ssqd() - store subchannel QDIO data (SSQD) + * @schid: id of the subchannel on which SSQD is performed + * @ssqd: request and response block for SSQD + * + * Returns 0 on success. + */ +int chsc_ssqd(struct subchannel_id schid, struct chsc_ssqd_area *ssqd) +{ + memset(ssqd, 0, sizeof(*ssqd)); + ssqd->request.length = 0x0010; + ssqd->request.code = 0x0024; + ssqd->first_sch = schid.sch_no; + ssqd->last_sch = schid.sch_no; + ssqd->ssid = schid.ssid; + + if (chsc(ssqd)) + return -EIO; + + return chsc_error_from_response(ssqd->response.code); +} +EXPORT_SYMBOL_GPL(chsc_ssqd); + +/** + * chsc_sadc() - set adapter device controls (SADC) + * @schid: id of the subchannel on which SADC is performed + * @scssc: request and response block for SADC + * @summary_indicator_addr: summary indicator address + * @subchannel_indicator_addr: subchannel indicator address + * + * Returns 0 on success. + */ +int chsc_sadc(struct subchannel_id schid, struct chsc_scssc_area *scssc, + u64 summary_indicator_addr, u64 subchannel_indicator_addr) +{ + memset(scssc, 0, sizeof(*scssc)); + scssc->request.length = 0x0fe0; + scssc->request.code = 0x0021; + scssc->operation_code = 0; + + scssc->summary_indicator_addr = summary_indicator_addr; + scssc->subchannel_indicator_addr = subchannel_indicator_addr; + + scssc->ks = PAGE_DEFAULT_KEY >> 4; + scssc->kc = PAGE_DEFAULT_KEY >> 4; + scssc->isc = QDIO_AIRQ_ISC; + scssc->schid = schid; + + /* enable the time delay disablement facility */ + if (css_general_characteristics.aif_tdd) + scssc->word_with_d_bit = 0x10000000; + + if (chsc(scssc)) + return -EIO; + + return chsc_error_from_response(scssc->response.code); +} +EXPORT_SYMBOL_GPL(chsc_sadc); + static int s390_subchannel_remove_chpid(struct subchannel *sch, void *data) { spin_lock_irq(sch->lock); @@ -171,26 +238,6 @@ void chsc_chp_offline(struct chp_id chpid) for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &link); } -static int s390_process_res_acc_new_sch(struct subchannel_id schid, void *data) -{ - struct schib schib; - /* - * We don't know the device yet, but since a path - * may be available now to the device we'll have - * to do recognition again. - * Since we don't have any idea about which chpid - * that beast may be on we'll have to do a stsch - * on all devices, grr... - */ - if (stsch_err(schid, &schib)) - /* We're through */ - return -ENXIO; - - /* Put it on the slow path. 
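chsc_ssqd() and chsc_sadc() follow the calling convention every helper in this file shares: clear the block, fill a chsc_header with the command length and code, add command-specific operands, issue chsc(), then translate the response code. A hedged userspace mock of that convention; chsc() is stubbed out because the instruction only exists on s390, and the ssqd_demo layout is simplified relative to the real block:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

struct chsc_header {
	uint16_t length;	/* length of the request in bytes */
	uint16_t code;		/* CHSC command code */
};

struct ssqd_demo {		/* simplified demo layout */
	struct chsc_header request;
	uint16_t first_sch;
	uint16_t last_sch;
	/* ... response area follows in the real block ... */
};

/* Stub standing in for the privileged CHSC instruction. */
static int chsc(void *block)
{
	struct chsc_header *hdr = block;

	printf("chsc: len=0x%04x code=0x%04x\n", hdr->length, hdr->code);
	return 0;	/* condition code 0: command accepted */
}

int main(void)
{
	struct ssqd_demo ssqd;

	memset(&ssqd, 0, sizeof(ssqd));	/* blocks are always zeroed first */
	ssqd.request.length = 0x0010;	/* SSQD request is 16 bytes */
	ssqd.request.code = 0x0024;	/* store subchannel QDIO data */
	ssqd.first_sch = ssqd.last_sch = 0x0001;
	return chsc(&ssqd);
}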
*/ - css_schedule_eval(schid); - return 0; -} - static int __s390_process_res_acc(struct subchannel *sch, void *data) { spin_lock_irq(sch->lock); @@ -221,8 +268,8 @@ static void s390_process_res_acc(struct chp_link *link) * The more information we have (info), the less scanning * will we have to do. */ - for_each_subchannel_staged(__s390_process_res_acc, - s390_process_res_acc_new_sch, link); + for_each_subchannel_staged(__s390_process_res_acc, NULL, link); + css_schedule_reprobe(); } static int @@ -255,26 +302,46 @@ __get_chpid_from_lir(void *data) return (u16) (lir->indesc[0]&0x000000ff); } -struct chsc_sei_area { - struct chsc_header request; +struct chsc_sei_nt0_area { + u8 flags; + u8 vf; /* validity flags */ + u8 rs; /* reporting source */ + u8 cc; /* content code */ + u16 fla; /* full link address */ + u16 rsid; /* reporting source id */ u32 reserved1; u32 reserved2; - u32 reserved3; - struct chsc_header response; - u32 reserved4; - u8 flags; - u8 vf; /* validity flags */ - u8 rs; /* reporting source */ - u8 cc; /* content code */ - u16 fla; /* full link address */ - u16 rsid; /* reporting source id */ - u32 reserved5; - u32 reserved6; - u8 ccdf[4096 - 16 - 24]; /* content-code dependent field */ /* ccdf has to be big enough for a link-incident record */ -} __attribute__ ((packed)); - -static void chsc_process_sei_link_incident(struct chsc_sei_area *sei_area) + u8 ccdf[PAGE_SIZE - 24 - 16]; /* content-code dependent field */ +} __packed; + +struct chsc_sei_nt2_area { + u8 flags; /* p and v bit */ + u8 reserved1; + u8 reserved2; + u8 cc; /* content code */ + u32 reserved3[13]; + u8 ccdf[PAGE_SIZE - 24 - 56]; /* content-code dependent field */ +} __packed; + +#define CHSC_SEI_NT0 (1ULL << 63) +#define CHSC_SEI_NT2 (1ULL << 61) + +struct chsc_sei { + struct chsc_header request; + u32 reserved1; + u64 ntsm; /* notification type mask */ + struct chsc_header response; + u32 :24; + u8 nt; + union { + struct chsc_sei_nt0_area nt0_area; + struct chsc_sei_nt2_area nt2_area; + u8 nt_area[PAGE_SIZE - 24]; + } u; +} __packed; + +static void chsc_process_sei_link_incident(struct chsc_sei_nt0_area *sei_area) { struct chp_id chpid; int id; @@ -293,7 +360,7 @@ static void chsc_process_sei_link_incident(struct chsc_sei_area *sei_area) } } -static void chsc_process_sei_res_acc(struct chsc_sei_area *sei_area) +static void chsc_process_sei_res_acc(struct chsc_sei_nt0_area *sei_area) { struct chp_link link; struct chp_id chpid; @@ -325,13 +392,43 @@ static void chsc_process_sei_res_acc(struct chsc_sei_area *sei_area) s390_process_res_acc(&link); } +static void chsc_process_sei_chp_avail(struct chsc_sei_nt0_area *sei_area) +{ + struct channel_path *chp; + struct chp_id chpid; + u8 *data; + int num; + + CIO_CRW_EVENT(4, "chsc: channel path availability information\n"); + if (sei_area->rs != 0) + return; + data = sei_area->ccdf; + chp_id_init(&chpid); + for (num = 0; num <= __MAX_CHPID; num++) { + if (!chp_test_bit(data, num)) + continue; + chpid.id = num; + + CIO_CRW_EVENT(4, "Update information for channel path " + "%x.%02x\n", chpid.cssid, chpid.id); + chp = chpid_to_chp(chpid); + if (!chp) { + chp_new(chpid); + continue; + } + mutex_lock(&chp->lock); + chp_update_desc(chp); + mutex_unlock(&chp->lock); + } +} + struct chp_config_data { u8 map[32]; u8 op; u8 pc; }; -static void chsc_process_sei_chp_config(struct chsc_sei_area *sei_area) +static void chsc_process_sei_chp_config(struct chsc_sei_nt0_area *sei_area) { struct chp_config_data *data; struct chp_id chpid; @@ -363,34 +460,139 @@ static void 
chsc_process_sei_chp_config(struct chsc_sei_area *sei_area) } } -static void chsc_process_sei(struct chsc_sei_area *sei_area) +static void chsc_process_sei_scm_change(struct chsc_sei_nt0_area *sei_area) { - /* Check if we might have lost some information. */ - if (sei_area->flags & 0x40) { - CIO_CRW_EVENT(2, "chsc: event overflow\n"); - css_schedule_eval_all(); + int ret; + + CIO_CRW_EVENT(4, "chsc: scm change notification\n"); + if (sei_area->rs != 7) + return; + + ret = scm_update_information(); + if (ret) + CIO_CRW_EVENT(0, "chsc: updating change notification" + " failed (rc=%d).\n", ret); +} + +static void chsc_process_sei_scm_avail(struct chsc_sei_nt0_area *sei_area) +{ + int ret; + + CIO_CRW_EVENT(4, "chsc: scm available information\n"); + if (sei_area->rs != 7) + return; + + ret = scm_process_availability_information(); + if (ret) + CIO_CRW_EVENT(0, "chsc: process availability information" + " failed (rc=%d).\n", ret); +} + +static void chsc_process_sei_nt2(struct chsc_sei_nt2_area *sei_area) +{ + switch (sei_area->cc) { + case 1: + zpci_event_error(sei_area->ccdf); + break; + case 2: + zpci_event_availability(sei_area->ccdf); + break; + default: + CIO_CRW_EVENT(2, "chsc: sei nt2 unhandled cc=%d\n", + sei_area->cc); + break; } +} + +static void chsc_process_sei_nt0(struct chsc_sei_nt0_area *sei_area) +{ /* which kind of information was stored? */ switch (sei_area->cc) { case 1: /* link incident*/ chsc_process_sei_link_incident(sei_area); break; - case 2: /* i/o resource accessibiliy */ + case 2: /* i/o resource accessibility */ chsc_process_sei_res_acc(sei_area); break; + case 7: /* channel-path-availability information */ + chsc_process_sei_chp_avail(sei_area); + break; case 8: /* channel-path-configuration notification */ chsc_process_sei_chp_config(sei_area); break; + case 12: /* scm change notification */ + chsc_process_sei_scm_change(sei_area); + break; + case 14: /* scm available notification */ + chsc_process_sei_scm_avail(sei_area); + break; default: /* other stuff */ - CIO_CRW_EVENT(4, "chsc: unhandled sei content code %d\n", + CIO_CRW_EVENT(2, "chsc: sei nt0 unhandled cc=%d\n", sei_area->cc); break; } + + /* Check if we might have lost some information. */ + if (sei_area->flags & 0x40) { + CIO_CRW_EVENT(2, "chsc: event overflow\n"); + css_schedule_eval_all(); + } } +static void chsc_process_event_information(struct chsc_sei *sei, u64 ntsm) +{ + static int ntsm_unsupported; + + while (true) { + memset(sei, 0, sizeof(*sei)); + sei->request.length = 0x0010; + sei->request.code = 0x000e; + if (!ntsm_unsupported) + sei->ntsm = ntsm; + + if (chsc(sei)) + break; + + if (sei->response.code != 0x0001) { + CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x, ntsm=%llx)\n", + sei->response.code, sei->ntsm); + + if (sei->response.code == 3 && sei->ntsm) { + /* Fallback for old firmware. */ + ntsm_unsupported = 1; + continue; + } + break; + } + + CIO_CRW_EVENT(2, "chsc: sei successful (nt=%d)\n", sei->nt); + switch (sei->nt) { + case 0: + chsc_process_sei_nt0(&sei->u.nt0_area); + break; + case 2: + chsc_process_sei_nt2(&sei->u.nt2_area); + break; + default: + CIO_CRW_EVENT(2, "chsc: unhandled nt: %d\n", sei->nt); + break; + } + + if (!(sei->u.nt0_area.flags & 0x80)) + break; + } +} + +/* + * Handle channel subsystem related CRWs. + * Use store event information to find out what's going on. + * + * Note: Access to sei_page is serialized through machine check handler + * thread, so no need for locking. 
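chsc_process_event_information() drains the event queue in a loop: each successful store-event-information call returns one event, flag bit 0x80 says more are pending, bit 0x40 warns that events were lost to overflow, and a response code of 3 together with a non-zero ntsm triggers a one-time fallback for firmware that predates notification-type masks. The control flow, reduced to a runnable skeleton with a fake event source (the two-event queue and all names here are invented for the demo):

#include <stdio.h>
#include <stdint.h>

#define SEI_FLAG_MORE_PENDING	0x80
#define SEI_FLAG_OVERFLOW	0x40

/* Fake event source: two queued events, the first flagged "more pending". */
static int fake_sei(uint8_t *flags, int *nt)
{
	static int remaining = 2;

	if (!remaining--)
		return 1;	/* nothing stored */
	*flags = remaining ? SEI_FLAG_MORE_PENDING : 0;
	*nt = remaining ? 0 : 2;
	return 0;
}

int main(void)
{
	uint8_t flags;
	int nt;

	while (fake_sei(&flags, &nt) == 0) {
		switch (nt) {
		case 0:
			printf("NT0 event (channel subsystem)\n");
			break;
		case 2:
			printf("NT2 event (PCI function)\n");
			break;
		default:
			printf("unhandled notification type %d\n", nt);
			break;
		}
		if (flags & SEI_FLAG_OVERFLOW)
			printf("event overflow: re-evaluate everything\n");
		if (!(flags & SEI_FLAG_MORE_PENDING))
			break;
	}
	return 0;
}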
+ */ static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow) { - struct chsc_sei_area *sei_area; + struct chsc_sei *sei = sei_page; if (overflow) { css_schedule_eval_all(); @@ -400,29 +602,9 @@ static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow) "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n", crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc, crw0->erc, crw0->rsid); - if (!sei_page) - return; - /* Access to sei_page is serialized through machine check handler - * thread, so no need for locking. */ - sei_area = sei_page; CIO_TRACE_EVENT(2, "prcss"); - do { - memset(sei_area, 0, sizeof(*sei_area)); - sei_area->request.length = 0x0010; - sei_area->request.code = 0x000e; - if (chsc(sei_area)) - break; - - if (sei_area->response.code == 0x0001) { - CIO_CRW_EVENT(4, "chsc: sei successful\n"); - chsc_process_sei(sei_area); - } else { - CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n", - sei_area->response.code); - break; - } - } while (sei_area->flags & 0x80); + chsc_process_event_information(sei, CHSC_SEI_NT0 | CHSC_SEI_NT2); } void chsc_chp_online(struct chp_id chpid) @@ -440,6 +622,7 @@ void chsc_chp_online(struct chp_id chpid) css_wait_for_slow_path(); for_each_subchannel_staged(__s390_process_res_acc, NULL, &link); + css_schedule_reprobe(); } } @@ -474,19 +657,6 @@ static int s390_subchannel_vary_chpid_on(struct subchannel *sch, void *data) return 0; } -static int -__s390_vary_chpid_on(struct subchannel_id schid, void *data) -{ - struct schib schib; - - if (stsch_err(schid, &schib)) - /* We're through */ - return -ENXIO; - /* Put it on the slow path. */ - css_schedule_eval(schid); - return 0; -} - /** * chsc_chp_vary - propagate channel-path vary operation to subchannels * @chpid: channl-path ID @@ -494,22 +664,22 @@ __s390_vary_chpid_on(struct subchannel_id schid, void *data) */ int chsc_chp_vary(struct chp_id chpid, int on) { - struct chp_link link; + struct channel_path *chp = chpid_to_chp(chpid); - memset(&link, 0, sizeof(struct chp_link)); - link.chpid = chpid; /* Wait until previous actions have settled. */ css_wait_for_slow_path(); /* * Redo PathVerification on the devices the chpid connects to */ - - if (on) + if (on) { + /* Try to update the channel path description. */ + chp_update_desc(chp); for_each_subchannel_staged(s390_subchannel_vary_chpid_on, - __s390_vary_chpid_on, &link); - else + NULL, &chpid); + css_schedule_reprobe(); + } else for_each_subchannel_staged(s390_subchannel_vary_chpid_off, - NULL, &link); + NULL, &chpid); return 0; } @@ -549,7 +719,7 @@ cleanup: return ret; } -int __chsc_do_secm(struct channel_subsystem *css, int enable, void *page) +int __chsc_do_secm(struct channel_subsystem *css, int enable) { struct { struct chsc_header request; @@ -570,7 +740,9 @@ int __chsc_do_secm(struct channel_subsystem *css, int enable, void *page) } __attribute__ ((packed)) *secm_area; int ret, ccode; - secm_area = page; + spin_lock_irq(&chsc_page_lock); + memset(chsc_page, 0, PAGE_SIZE); + secm_area = chsc_page; secm_area->request.length = 0x0050; secm_area->request.code = 0x0016; @@ -581,8 +753,10 @@ int __chsc_do_secm(struct channel_subsystem *css, int enable, void *page) secm_area->operation_code = enable ? 0 : 1; ccode = chsc(secm_area); - if (ccode > 0) - return (ccode == 3) ? -ENODEV : -EBUSY; + if (ccode > 0) { + ret = (ccode == 3) ? 
-ENODEV : -EBUSY; + goto out; + } switch (secm_area->response.code) { case 0x0102: @@ -595,37 +769,32 @@ int __chsc_do_secm(struct channel_subsystem *css, int enable, void *page) if (ret != 0) CIO_CRW_EVENT(2, "chsc: secm failed (rc=%04x)\n", secm_area->response.code); +out: + spin_unlock_irq(&chsc_page_lock); return ret; } int chsc_secm(struct channel_subsystem *css, int enable) { - void *secm_area; int ret; - secm_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); - if (!secm_area) - return -ENOMEM; - if (enable && !css->cm_enabled) { css->cub_addr1 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); css->cub_addr2 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); if (!css->cub_addr1 || !css->cub_addr2) { free_page((unsigned long)css->cub_addr1); free_page((unsigned long)css->cub_addr2); - free_page((unsigned long)secm_area); return -ENOMEM; } } - ret = __chsc_do_secm(css, enable, secm_area); + ret = __chsc_do_secm(css, enable); if (!ret) { css->cm_enabled = enable; if (css->cm_enabled) { ret = chsc_add_cmg_attr(css); if (ret) { - memset(secm_area, 0, PAGE_SIZE); - __chsc_do_secm(css, 0, secm_area); + __chsc_do_secm(css, 0); css->cm_enabled = 0; } } else @@ -635,44 +804,24 @@ chsc_secm(struct channel_subsystem *css, int enable) free_page((unsigned long)css->cub_addr1); free_page((unsigned long)css->cub_addr2); } - free_page((unsigned long)secm_area); return ret; } int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt, - int c, int m, - struct chsc_response_struct *resp) + int c, int m, void *page) { + struct chsc_scpd *scpd_area; int ccode, ret; - struct { - struct chsc_header request; - u32 : 2; - u32 m : 1; - u32 c : 1; - u32 fmt : 4; - u32 cssid : 8; - u32 : 4; - u32 rfmt : 4; - u32 first_chpid : 8; - u32 : 24; - u32 last_chpid : 8; - u32 zeroes1; - struct chsc_header response; - u8 data[PAGE_SIZE - 20]; - } __attribute__ ((packed)) *scpd_area; - if ((rfmt == 1) && !css_general_characteristics.fcs) return -EINVAL; if ((rfmt == 2) && !css_general_characteristics.cib) return -EINVAL; - scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); - if (!scpd_area) - return -ENOMEM; + memset(page, 0, PAGE_SIZE); + scpd_area = page; scpd_area->request.length = 0x0010; scpd_area->request.code = 0x0002; - scpd_area->cssid = chpid.cssid; scpd_area->first_chpid = chpid.id; scpd_area->last_chpid = chpid.id; @@ -682,20 +831,13 @@ int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt, scpd_area->rfmt = rfmt; ccode = chsc(scpd_area); - if (ccode > 0) { - ret = (ccode == 3) ? -ENODEV : -EBUSY; - goto out; - } + if (ccode > 0) + return (ccode == 3) ? -ENODEV : -EBUSY; ret = chsc_error_from_response(scpd_area->response.code); - if (ret == 0) - /* Success. 
*/ - memcpy(resp, &scpd_area->response, scpd_area->response.length); - else + if (ret) CIO_CRW_EVENT(2, "chsc: scpd failed (rc=%04x)\n", scpd_area->response.code); -out: - free_page((unsigned long)scpd_area); return ret; } EXPORT_SYMBOL_GPL(chsc_determine_channel_path_desc); @@ -704,17 +846,39 @@ int chsc_determine_base_channel_path_desc(struct chp_id chpid, struct channel_path_desc *desc) { struct chsc_response_struct *chsc_resp; + struct chsc_scpd *scpd_area; + unsigned long flags; int ret; - chsc_resp = kzalloc(sizeof(*chsc_resp), GFP_KERNEL); - if (!chsc_resp) - return -ENOMEM; - ret = chsc_determine_channel_path_desc(chpid, 0, 0, 0, 0, chsc_resp); + spin_lock_irqsave(&chsc_page_lock, flags); + scpd_area = chsc_page; + ret = chsc_determine_channel_path_desc(chpid, 0, 0, 0, 0, scpd_area); + if (ret) + goto out; + chsc_resp = (void *)&scpd_area->response; + memcpy(desc, &chsc_resp->data, sizeof(*desc)); +out: + spin_unlock_irqrestore(&chsc_page_lock, flags); + return ret; +} + +int chsc_determine_fmt1_channel_path_desc(struct chp_id chpid, + struct channel_path_desc_fmt1 *desc) +{ + struct chsc_response_struct *chsc_resp; + struct chsc_scpd *scpd_area; + unsigned long flags; + int ret; + + spin_lock_irqsave(&chsc_page_lock, flags); + scpd_area = chsc_page; + ret = chsc_determine_channel_path_desc(chpid, 0, 0, 1, 0, scpd_area); if (ret) - goto out_free; - memcpy(desc, &chsc_resp->data, chsc_resp->length); -out_free: - kfree(chsc_resp); + goto out; + chsc_resp = (void *)&scpd_area->response; + memcpy(desc, &chsc_resp->data, sizeof(*desc)); +out: + spin_unlock_irqrestore(&chsc_page_lock, flags); return ret; } @@ -722,33 +886,22 @@ static void chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv, struct cmg_chars *chars) { - switch (chp->cmg) { - case 2: - case 3: - chp->cmg_chars = kmalloc(sizeof(struct cmg_chars), - GFP_KERNEL); - if (chp->cmg_chars) { - int i, mask; - struct cmg_chars *cmg_chars; - - cmg_chars = chp->cmg_chars; - for (i = 0; i < NR_MEASUREMENT_CHARS; i++) { - mask = 0x80 >> (i + 3); - if (cmcv & mask) - cmg_chars->values[i] = chars->values[i]; - else - cmg_chars->values[i] = 0; - } - } - break; - default: - /* No cmg-dependent data. */ - break; + struct cmg_chars *cmg_chars; + int i, mask; + + cmg_chars = chp->cmg_chars; + for (i = 0; i < NR_MEASUREMENT_CHARS; i++) { + mask = 0x80 >> (i + 3); + if (cmcv & mask) + cmg_chars->values[i] = chars->values[i]; + else + cmg_chars->values[i] = 0; } } int chsc_get_channel_measurement_chars(struct channel_path *chp) { + struct cmg_chars *cmg_chars; int ccode, ret; struct { @@ -772,13 +925,16 @@ int chsc_get_channel_measurement_chars(struct channel_path *chp) u32 data[NR_MEASUREMENT_CHARS]; } __attribute__ ((packed)) *scmc_area; - scmc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); - if (!scmc_area) + chp->cmg_chars = NULL; + cmg_chars = kmalloc(sizeof(*cmg_chars), GFP_KERNEL); + if (!cmg_chars) return -ENOMEM; + spin_lock_irq(&chsc_page_lock); + memset(chsc_page, 0, PAGE_SIZE); + scmc_area = chsc_page; scmc_area->request.length = 0x0010; scmc_area->request.code = 0x0022; - scmc_area->first_chpid = chp->chpid.id; scmc_area->last_chpid = chp->chpid.id; @@ -789,52 +945,63 @@ int chsc_get_channel_measurement_chars(struct channel_path *chp) } ret = chsc_error_from_response(scmc_area->response.code); - if (ret == 0) { - /* Success. 
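The characteristics-validity byte (cmcv) gates which of the five measurement words are meaningful: value i is valid when bit 0x80 >> (i + 3) is set, i.e. the five bits of cmcv below the top three. A quick userspace check of that indexing, with made-up measurement values:

#include <stdio.h>

#define NR_MEASUREMENT_CHARS 5

int main(void)
{
	unsigned int raw[NR_MEASUREMENT_CHARS] = { 11, 22, 33, 44, 55 };
	unsigned int vals[NR_MEASUREMENT_CHARS];
	unsigned char cmcv = 0x0a;	/* bits for i = 1 and i = 3 set */
	int i;

	for (i = 0; i < NR_MEASUREMENT_CHARS; i++) {
		unsigned char mask = 0x80 >> (i + 3);

		vals[i] = (cmcv & mask) ? raw[i] : 0;	/* invalid -> 0 */
		printf("char %d: mask %02x -> %u\n", i, mask, vals[i]);
	}
	return 0;
}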
*/ - if (!scmc_area->not_valid) { - chp->cmg = scmc_area->cmg; - chp->shared = scmc_area->shared; - chsc_initialize_cmg_chars(chp, scmc_area->cmcv, - (struct cmg_chars *) - &scmc_area->data); - } else { - chp->cmg = -1; - chp->shared = -1; - } - } else { + if (ret) { CIO_CRW_EVENT(2, "chsc: scmc failed (rc=%04x)\n", scmc_area->response.code); + goto out; + } + if (scmc_area->not_valid) { + chp->cmg = -1; + chp->shared = -1; + goto out; } + chp->cmg = scmc_area->cmg; + chp->shared = scmc_area->shared; + if (chp->cmg != 2 && chp->cmg != 3) { + /* No cmg-dependent data. */ + goto out; + } + chp->cmg_chars = cmg_chars; + chsc_initialize_cmg_chars(chp, scmc_area->cmcv, + (struct cmg_chars *) &scmc_area->data); out: - free_page((unsigned long)scmc_area); + spin_unlock_irq(&chsc_page_lock); + if (!chp->cmg_chars) + kfree(cmg_chars); + return ret; } -int __init chsc_alloc_sei_area(void) +int __init chsc_init(void) { int ret; sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); - if (!sei_page) { - CIO_MSG_EVENT(0, "Can't allocate page for processing of " - "chsc machine checks!\n"); - return -ENOMEM; + chsc_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); + if (!sei_page || !chsc_page) { + ret = -ENOMEM; + goto out_err; } ret = crw_register_handler(CRW_RSC_CSS, chsc_process_crw); if (ret) - kfree(sei_page); + goto out_err; + return ret; +out_err: + free_page((unsigned long)chsc_page); + free_page((unsigned long)sei_page); return ret; } -void __init chsc_free_sei_area(void) +void __init chsc_init_cleanup(void) { crw_unregister_handler(CRW_RSC_CSS); - kfree(sei_page); + free_page((unsigned long)chsc_page); + free_page((unsigned long)sei_page); } -int __init -chsc_enable_facility(int operation_code) +int chsc_enable_facility(int operation_code) { + unsigned long flags; int ret; struct { struct chsc_header request; @@ -851,9 +1018,9 @@ chsc_enable_facility(int operation_code) u32 reserved6:24; } __attribute__ ((packed)) *sda_area; - sda_area = (void *)get_zeroed_page(GFP_KERNEL|GFP_DMA); - if (!sda_area) - return -ENOMEM; + spin_lock_irqsave(&chsc_page_lock, flags); + memset(chsc_page, 0, PAGE_SIZE); + sda_area = chsc_page; sda_area->request.length = 0x0400; sda_area->request.code = 0x0031; sda_area->operation_code = operation_code; @@ -874,8 +1041,8 @@ chsc_enable_facility(int operation_code) if (ret != 0) CIO_CRW_EVENT(2, "chsc: sda (oc=%x) failed (rc=%04x)\n", operation_code, sda_area->response.code); - out: - free_page((unsigned long)sda_area); +out: + spin_unlock_irqrestore(&chsc_page_lock, flags); return ret; } @@ -894,13 +1061,12 @@ chsc_determine_css_characteristics(void) struct chsc_header response; u32 reserved4; u32 general_char[510]; - u32 chsc_char[518]; + u32 chsc_char[508]; } __attribute__ ((packed)) *scsc_area; - scsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); - if (!scsc_area) - return -ENOMEM; - + spin_lock_irq(&chsc_page_lock); + memset(chsc_page, 0, PAGE_SIZE); + scsc_area = chsc_page; scsc_area->request.length = 0x0010; scsc_area->request.code = 0x0010; @@ -920,7 +1086,7 @@ chsc_determine_css_characteristics(void) CIO_CRW_EVENT(2, "chsc: scsc failed (rc=%04x)\n", scsc_area->response.code); exit: - free_page ((unsigned long) scsc_area); + spin_unlock_irq(&chsc_page_lock); return result; } @@ -975,3 +1141,110 @@ int chsc_sstpi(void *page, void *result, size_t size) return (rr->response.code == 0x0001) ? 
0 : -EIO; } +int chsc_siosl(struct subchannel_id schid) +{ + struct { + struct chsc_header request; + u32 word1; + struct subchannel_id sid; + u32 word3; + struct chsc_header response; + u32 word[11]; + } __attribute__ ((packed)) *siosl_area; + unsigned long flags; + int ccode; + int rc; + + spin_lock_irqsave(&chsc_page_lock, flags); + memset(chsc_page, 0, PAGE_SIZE); + siosl_area = chsc_page; + siosl_area->request.length = 0x0010; + siosl_area->request.code = 0x0046; + siosl_area->word1 = 0x80000000; + siosl_area->sid = schid; + + ccode = chsc(siosl_area); + if (ccode > 0) { + if (ccode == 3) + rc = -ENODEV; + else + rc = -EBUSY; + CIO_MSG_EVENT(2, "chsc: chsc failed for 0.%x.%04x (ccode=%d)\n", + schid.ssid, schid.sch_no, ccode); + goto out; + } + rc = chsc_error_from_response(siosl_area->response.code); + if (rc) + CIO_MSG_EVENT(2, "chsc: siosl failed for 0.%x.%04x (rc=%04x)\n", + schid.ssid, schid.sch_no, + siosl_area->response.code); + else + CIO_MSG_EVENT(4, "chsc: siosl succeeded for 0.%x.%04x\n", + schid.ssid, schid.sch_no); +out: + spin_unlock_irqrestore(&chsc_page_lock, flags); + return rc; +} +EXPORT_SYMBOL_GPL(chsc_siosl); + +/** + * chsc_scm_info() - store SCM information (SSI) + * @scm_area: request and response block for SSI + * @token: continuation token + * + * Returns 0 on success. + */ +int chsc_scm_info(struct chsc_scm_info *scm_area, u64 token) +{ + int ccode, ret; + + memset(scm_area, 0, sizeof(*scm_area)); + scm_area->request.length = 0x0020; + scm_area->request.code = 0x004C; + scm_area->reqtok = token; + + ccode = chsc(scm_area); + if (ccode > 0) { + ret = (ccode == 3) ? -ENODEV : -EBUSY; + goto out; + } + ret = chsc_error_from_response(scm_area->response.code); + if (ret != 0) + CIO_MSG_EVENT(2, "chsc: scm info failed (rc=%04x)\n", + scm_area->response.code); +out: + return ret; +} +EXPORT_SYMBOL_GPL(chsc_scm_info); + +/** + * chsc_pnso_brinfo() - Perform Network-Subchannel Operation, Bridge Info. + * @schid: id of the subchannel on which PNSO is performed + * @brinfo_area: request and response block for the operation + * @resume_token: resume token for multiblock response + * @cnc: Boolean change-notification control + * + * brinfo_area must be allocated by the caller with get_zeroed_page(GFP_KERNEL) + * + * Returns 0 on success. 
+ */ +int chsc_pnso_brinfo(struct subchannel_id schid, + struct chsc_pnso_area *brinfo_area, + struct chsc_brinfo_resume_token resume_token, + int cnc) +{ + memset(brinfo_area, 0, sizeof(*brinfo_area)); + brinfo_area->request.length = 0x0030; + brinfo_area->request.code = 0x003d; /* network-subchannel operation */ + brinfo_area->m = schid.m; + brinfo_area->ssid = schid.ssid; + brinfo_area->sch = schid.sch_no; + brinfo_area->cssid = schid.cssid; + brinfo_area->oc = 0; /* Store-network-bridging-information list */ + brinfo_area->resume_token = resume_token; + brinfo_area->n = (cnc != 0); + if (chsc(brinfo_area)) + return -EIO; + return chsc_error_from_response(brinfo_area->response.code); +} +EXPORT_SYMBOL_GPL(chsc_pnso_brinfo); diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h index 37aa611d4ac..76c9b50700b 100644 --- a/drivers/s390/cio/chsc.h +++ b/drivers/s390/cio/chsc.h @@ -3,17 +3,14 @@ #include <linux/types.h> #include <linux/device.h> +#include <asm/css_chars.h> #include <asm/chpid.h> #include <asm/chsc.h> #include <asm/schid.h> +#include <asm/qdio.h> #define CHSC_SDA_OC_MSS 0x2 -struct chsc_header { - u16 length; - u16 code; -} __attribute__ ((packed)); - #define NR_MEASUREMENT_CHARS 5 struct cmg_chars { u32 values[NR_MEASUREMENT_CHARS]; @@ -24,15 +21,22 @@ struct cmg_entry { u32 values[NR_MEASUREMENT_ENTRIES]; } __attribute__ ((packed)); -struct channel_path_desc { +struct channel_path_desc_fmt1 { u8 flags; u8 lsn; u8 desc; u8 chpid; - u8 swla; - u8 zeroes; - u8 chla; + u32:24; u8 chpp; + u32 unused[2]; + u16 chid; + u32:16; + u16 mdc; + u16:13; + u8 r:1; + u8 s:1; + u8 f:1; + u32 zeros[2]; } __attribute__ ((packed)); struct channel_path; @@ -46,7 +50,9 @@ struct css_chsc_char { u32 : 20; u32 scssc : 1; /* bit 107 */ u32 scsscf : 1; /* bit 108 */ - u32 : 19; + u32:7; + u32 pnso:1; /* bit 116 */ + u32:11; }__attribute__((packed)); extern struct css_chsc_char css_chsc_characteristics; @@ -57,27 +63,176 @@ struct chsc_ssd_info { struct chp_id chpid[8]; u16 fla[8]; }; + +struct chsc_ssqd_area { + struct chsc_header request; + u16:10; + u8 ssid:2; + u8 fmt:4; + u16 first_sch; + u16:16; + u16 last_sch; + u32:32; + struct chsc_header response; + u32:32; + struct qdio_ssqd_desc qdio_ssqd; +} __packed; + +struct chsc_scssc_area { + struct chsc_header request; + u16 operation_code; + u16:16; + u32:32; + u32:32; + u64 summary_indicator_addr; + u64 subchannel_indicator_addr; + u32 ks:4; + u32 kc:4; + u32:21; + u32 isc:3; + u32 word_with_d_bit; + u32:32; + struct subchannel_id schid; + u32 reserved[1004]; + struct chsc_header response; + u32:32; +} __packed; + +struct chsc_scpd { + struct chsc_header request; + u32:2; + u32 m:1; + u32 c:1; + u32 fmt:4; + u32 cssid:8; + u32:4; + u32 rfmt:4; + u32 first_chpid:8; + u32:24; + u32 last_chpid:8; + u32 zeroes1; + struct chsc_header response; + u8 data[PAGE_SIZE - 20]; +} __attribute__ ((packed)); + + extern int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd); extern int chsc_determine_css_characteristics(void); -extern int chsc_alloc_sei_area(void); -extern void chsc_free_sei_area(void); +extern int chsc_init(void); +extern void chsc_init_cleanup(void); extern int chsc_enable_facility(int); struct channel_subsystem; extern int chsc_secm(struct channel_subsystem *, int); -int __chsc_do_secm(struct channel_subsystem *css, int enable, void *page); +int __chsc_do_secm(struct channel_subsystem *css, int enable); int chsc_chp_vary(struct chp_id chpid, int on); int chsc_determine_channel_path_desc(struct chp_id chpid, int 
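The request blocks declared in chsc.h talk directly to firmware, so their size and field offsets are part of the interface; a common safeguard in such code is a compile-time layout check. A hedged example of the idea against a cut-down analogue of the chsc_scpd layout (the kernel checks these layouts by inspection, not with this exact assert):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define PAGE_SIZE 4096

struct chsc_header_demo {
	uint16_t length;
	uint16_t code;
} __attribute__((packed));

/* Cut-down analogue of chsc_scpd: 4-byte request header, 12 bytes of
 * operand words, response header, data filling the rest of the page. */
struct scpd_demo {
	struct chsc_header_demo request;
	uint32_t operands[3];
	struct chsc_header_demo response;
	uint8_t data[PAGE_SIZE - 20];
} __attribute__((packed));

static_assert(sizeof(struct scpd_demo) == PAGE_SIZE,
	      "block must fill exactly the one page handed to chsc()");
static_assert(offsetof(struct scpd_demo, response) == 16,
	      "response header expected at byte 16");

int main(void)
{
	return 0;	/* nothing to do at run time; the asserts are the point */
}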
fmt, int rfmt, - int c, int m, - struct chsc_response_struct *resp); + int c, int m, void *page); int chsc_determine_base_channel_path_desc(struct chp_id chpid, struct channel_path_desc *desc); +int chsc_determine_fmt1_channel_path_desc(struct chp_id chpid, + struct channel_path_desc_fmt1 *desc); void chsc_chp_online(struct chp_id chpid); void chsc_chp_offline(struct chp_id chpid); int chsc_get_channel_measurement_chars(struct channel_path *chp); - +int chsc_ssqd(struct subchannel_id schid, struct chsc_ssqd_area *ssqd); +int chsc_sadc(struct subchannel_id schid, struct chsc_scssc_area *scssc, + u64 summary_indicator_addr, u64 subchannel_indicator_addr); int chsc_error_from_response(int response); +int chsc_siosl(struct subchannel_id schid); + +/* Functions and definitions to query storage-class memory. */ +struct sale { + u64 sa; + u32 p:4; + u32 op_state:4; + u32 data_state:4; + u32 rank:4; + u32 r:1; + u32:7; + u32 rid:8; + u32:32; +} __packed; + +struct chsc_scm_info { + struct chsc_header request; + u32:32; + u64 reqtok; + u32 reserved1[4]; + struct chsc_header response; + u64:56; + u8 rq; + u32 mbc; + u64 msa; + u16 is; + u16 mmc; + u32 mci; + u64 nr_scm_ini; + u64 nr_scm_unini; + u32 reserved2[10]; + u64 restok; + struct sale scmal[248]; +} __packed; + +int chsc_scm_info(struct chsc_scm_info *scm_area, u64 token); + +struct chsc_brinfo_resume_token { + u64 t1; + u64 t2; +} __packed; + +struct chsc_brinfo_naihdr { + struct chsc_brinfo_resume_token resume_token; + u32:32; + u32 instance; + u32:24; + u8 naids; + u32 reserved[3]; +} __packed; + +struct chsc_pnso_area { + struct chsc_header request; + u8:2; + u8 m:1; + u8:5; + u8:2; + u8 ssid:2; + u8 fmt:4; + u16 sch; + u8:8; + u8 cssid; + u16:16; + u8 oc; + u32:24; + struct chsc_brinfo_resume_token resume_token; + u32 n:1; + u32:31; + u32 reserved[3]; + struct chsc_header response; + u32:32; + struct chsc_brinfo_naihdr naihdr; + union { + struct qdio_brinfo_entry_l3_ipv6 l3_ipv6[0]; + struct qdio_brinfo_entry_l3_ipv4 l3_ipv4[0]; + struct qdio_brinfo_entry_l2 l2[0]; + } entries; +} __packed; + +int chsc_pnso_brinfo(struct subchannel_id schid, + struct chsc_pnso_area *brinfo_area, + struct chsc_brinfo_resume_token resume_token, + int cnc); + +#ifdef CONFIG_SCM_BUS +int scm_update_information(void); +int scm_process_availability_information(void); +#else /* CONFIG_SCM_BUS */ +static inline int scm_update_information(void) { return 0; } +static inline int scm_process_availability_information(void) { return 0; } +#endif /* CONFIG_SCM_BUS */ + + #endif diff --git a/drivers/s390/cio/chsc_sch.c b/drivers/s390/cio/chsc_sch.c index 852612f5dba..3d22d2a4ce1 100644 --- a/drivers/s390/cio/chsc_sch.c +++ b/drivers/s390/cio/chsc_sch.c @@ -1,16 +1,19 @@ /* * Driver for s390 chsc subchannels * - * Copyright IBM Corp. 2008, 2009 + * Copyright IBM Corp. 2008, 2011 * * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com> * */ +#include <linux/slab.h> +#include <linux/compat.h> #include <linux/device.h> #include <linux/module.h> #include <linux/uaccess.h> #include <linux/miscdevice.h> +#include <linux/kernel_stat.h> #include <asm/compat.h> #include <asm/cio.h> @@ -26,6 +29,10 @@ static debug_info_t *chsc_debug_msg_id; static debug_info_t *chsc_debug_log_id; +static struct chsc_request *on_close_request; +static struct chsc_async_area *on_close_chsc_area; +static DEFINE_MUTEX(on_close_mutex); + #define CHSC_MSG(imp, args...) 
do { \ debug_sprintf_event(chsc_debug_msg_id, imp , ##args); \ } while (0) @@ -49,12 +56,14 @@ MODULE_LICENSE("GPL"); static void chsc_subchannel_irq(struct subchannel *sch) { - struct chsc_private *private = sch->private; + struct chsc_private *private = dev_get_drvdata(&sch->dev); struct chsc_request *request = private->request; - struct irb *irb = (struct irb *)&S390_lowcore.irb; + struct irb *irb = &__get_cpu_var(cio_irb); CHSC_LOG(4, "irb"); CHSC_LOG_HEX(4, irb, sizeof(*irb)); + inc_irq_stat(IRQIO_CSC); + /* Copy irb to provided request and set done. */ if (!request) { CHSC_MSG(0, "Interrupt on sch 0.%x.%04x with no request\n", @@ -79,13 +88,14 @@ static int chsc_subchannel_probe(struct subchannel *sch) private = kzalloc(sizeof(*private), GFP_KERNEL); if (!private) return -ENOMEM; + dev_set_drvdata(&sch->dev, private); ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch); if (ret) { CHSC_MSG(0, "Failed to enable 0.%x.%04x: %d\n", sch->schid.ssid, sch->schid.sch_no, ret); + dev_set_drvdata(&sch->dev, NULL); kfree(private); } else { - sch->private = private; if (dev_get_uevent_suppress(&sch->dev)) { dev_set_uevent_suppress(&sch->dev, 0); kobject_uevent(&sch->dev.kobj, KOBJ_ADD); @@ -99,8 +109,8 @@ static int chsc_subchannel_remove(struct subchannel *sch) struct chsc_private *private; cio_disable_subchannel(sch); - private = sch->private; - sch->private = NULL; + private = dev_get_drvdata(&sch->dev); + dev_set_drvdata(&sch->dev, NULL); if (private->request) { complete(&private->request->completion); put_device(&sch->dev); @@ -123,7 +133,7 @@ static int chsc_subchannel_prepare(struct subchannel *sch) * since we don't have a way to clear the subchannel and * cannot disable it with a request running. */ - cc = stsch(sch->schid, &schib); + cc = stsch_err(sch->schid, &schib); if (!cc && scsw_stctl(&schib.scsw)) return -EAGAIN; return 0; @@ -146,7 +156,10 @@ static struct css_device_id chsc_subchannel_ids[] = { MODULE_DEVICE_TABLE(css, chsc_subchannel_ids); static struct css_driver chsc_subchannel_driver = { - .owner = THIS_MODULE, + .drv = { + .owner = THIS_MODULE, + .name = "chsc_subchannel", + }, .subchannel_type = chsc_subchannel_ids, .irq = chsc_subchannel_irq, .probe = chsc_subchannel_probe, @@ -156,13 +169,11 @@ static struct css_driver chsc_subchannel_driver = { .freeze = chsc_subchannel_freeze, .thaw = chsc_subchannel_restore, .restore = chsc_subchannel_restore, - .name = "chsc_subchannel", }; static int __init chsc_init_dbfs(void) { - chsc_debug_msg_id = debug_register("chsc_msg", 16, 1, - 16 * sizeof(long)); + chsc_debug_msg_id = debug_register("chsc_msg", 8, 1, 4 * sizeof(long)); if (!chsc_debug_msg_id) goto out; debug_register_view(chsc_debug_msg_id, &debug_sprintf_view); @@ -240,7 +251,7 @@ static int chsc_async(struct chsc_async_area *chsc_area, chsc_area->header.key = PAGE_DEFAULT_KEY >> 4; while ((sch = chsc_get_next_subchannel(sch))) { spin_lock(sch->lock); - private = sch->private; + private = dev_get_drvdata(&sch->dev); if (private->request) { spin_unlock(sch->lock); ret = -EBUSY; @@ -250,7 +261,7 @@ static int chsc_async(struct chsc_async_area *chsc_area, CHSC_LOG(2, "schid"); CHSC_LOG_HEX(2, &sch->schid, sizeof(sch->schid)); cc = chsc(chsc_area); - sprintf(dbf, "cc:%d", cc); + snprintf(dbf, sizeof(dbf), "cc:%d", cc); CHSC_LOG(2, dbf); switch (cc) { case 0: @@ -279,11 +290,11 @@ static int chsc_async(struct chsc_async_area *chsc_area, return ret; } -static void chsc_log_command(struct chsc_async_area *chsc_area) +static void chsc_log_command(void *chsc_area) { char 
dbf[10]; - sprintf(dbf, "CHSC:%x", chsc_area->header.code); + snprintf(dbf, sizeof(dbf), "CHSC:%x", ((uint16_t *)chsc_area)[1]); CHSC_LOG(0, dbf); CHSC_LOG_HEX(0, chsc_area, 32); } @@ -347,13 +358,106 @@ static int chsc_ioctl_start(void __user *user_area) if (copy_to_user(user_area, chsc_area, PAGE_SIZE)) ret = -EFAULT; out_free: - sprintf(dbf, "ret:%d", ret); + snprintf(dbf, sizeof(dbf), "ret:%d", ret); CHSC_LOG(0, dbf); kfree(request); free_page((unsigned long)chsc_area); return ret; } +static int chsc_ioctl_on_close_set(void __user *user_area) +{ + char dbf[13]; + int ret; + + mutex_lock(&on_close_mutex); + if (on_close_chsc_area) { + ret = -EBUSY; + goto out_unlock; + } + on_close_request = kzalloc(sizeof(*on_close_request), GFP_KERNEL); + if (!on_close_request) { + ret = -ENOMEM; + goto out_unlock; + } + on_close_chsc_area = (void *)get_zeroed_page(GFP_DMA | GFP_KERNEL); + if (!on_close_chsc_area) { + ret = -ENOMEM; + goto out_free_request; + } + if (copy_from_user(on_close_chsc_area, user_area, PAGE_SIZE)) { + ret = -EFAULT; + goto out_free_chsc; + } + ret = 0; + goto out_unlock; + +out_free_chsc: + free_page((unsigned long)on_close_chsc_area); + on_close_chsc_area = NULL; +out_free_request: + kfree(on_close_request); + on_close_request = NULL; +out_unlock: + mutex_unlock(&on_close_mutex); + snprintf(dbf, sizeof(dbf), "ocsret:%d", ret); + CHSC_LOG(0, dbf); + return ret; +} + +static int chsc_ioctl_on_close_remove(void) +{ + char dbf[13]; + int ret; + + mutex_lock(&on_close_mutex); + if (!on_close_chsc_area) { + ret = -ENOENT; + goto out_unlock; + } + free_page((unsigned long)on_close_chsc_area); + on_close_chsc_area = NULL; + kfree(on_close_request); + on_close_request = NULL; + ret = 0; +out_unlock: + mutex_unlock(&on_close_mutex); + snprintf(dbf, sizeof(dbf), "ocrret:%d", ret); + CHSC_LOG(0, dbf); + return ret; +} + +static int chsc_ioctl_start_sync(void __user *user_area) +{ + struct chsc_sync_area *chsc_area; + int ret, ccode; + + chsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); + if (!chsc_area) + return -ENOMEM; + if (copy_from_user(chsc_area, user_area, PAGE_SIZE)) { + ret = -EFAULT; + goto out_free; + } + if (chsc_area->header.code & 0x4000) { + ret = -EINVAL; + goto out_free; + } + chsc_log_command(chsc_area); + ccode = chsc(chsc_area); + if (ccode != 0) { + ret = -EIO; + goto out_free; + } + if (copy_to_user(user_area, chsc_area, PAGE_SIZE)) + ret = -EFAULT; + else + ret = 0; +out_free: + free_page((unsigned long)chsc_area); + return ret; +} + static int chsc_ioctl_info_channel_path(void __user *user_cd) { struct chsc_chp_cd *cd; @@ -687,25 +791,31 @@ out_free: static int chsc_ioctl_chpd(void __user *user_chpd) { + struct chsc_scpd *scpd_area; struct chsc_cpd_info *chpd; int ret; chpd = kzalloc(sizeof(*chpd), GFP_KERNEL); - if (!chpd) - return -ENOMEM; + scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); + if (!scpd_area || !chpd) { + ret = -ENOMEM; + goto out_free; + } if (copy_from_user(chpd, user_chpd, sizeof(*chpd))) { ret = -EFAULT; goto out_free; } ret = chsc_determine_channel_path_desc(chpd->chpid, chpd->fmt, chpd->rfmt, chpd->c, chpd->m, - &chpd->chpdb); + scpd_area); if (ret) goto out_free; + memcpy(&chpd->chpdb, &scpd_area->response, scpd_area->response.length); if (copy_to_user(user_chpd, chpd, sizeof(*chpd))) ret = -EFAULT; out_free: kfree(chpd); + free_page((unsigned long)scpd_area); return ret; } @@ -781,6 +891,8 @@ static long chsc_ioctl(struct file *filp, unsigned int cmd, switch (cmd) { case CHSC_START: return chsc_ioctl_start(argp); + 
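From userspace the new synchronous path looks much like the asynchronous one: fill a page-sized block, hand it to the ioctl, read the response back from the same page (commands with bit 0x4000 set in the code are rejected as asynchronous). A hedged usage sketch: the CHSC_START_SYNC number and struct chsc_sync_area are assumed to come from the kernel's <asm/chsc.h> uapi header, /dev/chsc is the misc device this driver registers, and error handling is minimal:

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <asm/chsc.h>	/* struct chsc_sync_area, CHSC_START_SYNC (assumed) */

int main(void)
{
	struct chsc_sync_area *area;
	int fd, rc;

	/* The driver copies a full page in and out, so allocate one. */
	area = aligned_alloc(4096, 4096);
	if (!area)
		return 1;
	memset(area, 0, 4096);
	area->header.length = 0x0010;	/* command-specific request length */
	area->header.code = 0x0010;	/* e.g. store-channel-subsystem-char. */

	fd = open("/dev/chsc", O_RDWR);
	if (fd < 0) {
		perror("open /dev/chsc");
		free(area);
		return 1;
	}
	rc = ioctl(fd, CHSC_START_SYNC, area);
	if (rc)
		perror("CHSC_START_SYNC");
	close(fd);
	free(area);
	return rc ? 1 : 0;
}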
case CHSC_START_SYNC: + return chsc_ioctl_start_sync(argp); case CHSC_INFO_CHANNEL_PATH: return chsc_ioctl_info_channel_path(argp); case CHSC_INFO_CU: @@ -795,15 +907,63 @@ static long chsc_ioctl(struct file *filp, unsigned int cmd, return chsc_ioctl_chpd(argp); case CHSC_INFO_DCAL: return chsc_ioctl_dcal(argp); + case CHSC_ON_CLOSE_SET: + return chsc_ioctl_on_close_set(argp); + case CHSC_ON_CLOSE_REMOVE: + return chsc_ioctl_on_close_remove(); default: /* unknown ioctl number */ return -ENOIOCTLCMD; } } +static atomic_t chsc_ready_for_use = ATOMIC_INIT(1); + +static int chsc_open(struct inode *inode, struct file *file) +{ + if (!atomic_dec_and_test(&chsc_ready_for_use)) { + atomic_inc(&chsc_ready_for_use); + return -EBUSY; + } + return nonseekable_open(inode, file); +} + +static int chsc_release(struct inode *inode, struct file *filp) +{ + char dbf[13]; + int ret; + + mutex_lock(&on_close_mutex); + if (!on_close_chsc_area) + goto out_unlock; + init_completion(&on_close_request->completion); + CHSC_LOG(0, "on_close"); + chsc_log_command(on_close_chsc_area); + spin_lock_irq(&chsc_lock); + ret = chsc_async(on_close_chsc_area, on_close_request); + spin_unlock_irq(&chsc_lock); + if (ret == -EINPROGRESS) { + wait_for_completion(&on_close_request->completion); + ret = chsc_examine_irb(on_close_request); + } + snprintf(dbf, sizeof(dbf), "relret:%d", ret); + CHSC_LOG(0, dbf); + free_page((unsigned long)on_close_chsc_area); + on_close_chsc_area = NULL; + kfree(on_close_request); + on_close_request = NULL; +out_unlock: + mutex_unlock(&on_close_mutex); + atomic_inc(&chsc_ready_for_use); + return 0; +} + static const struct file_operations chsc_fops = { .owner = THIS_MODULE, + .open = chsc_open, + .release = chsc_release, .unlocked_ioctl = chsc_ioctl, .compat_ioctl = chsc_ioctl, + .llseek = no_llseek, }; static struct miscdevice chsc_misc_device = { diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c index f736cdcf08a..2905d8b0ec9 100644 --- a/drivers/s390/cio/cio.c +++ b/drivers/s390/cio/cio.c @@ -1,8 +1,7 @@ /* - * drivers/s390/cio/cio.c * S/390 common I/O routines -- low level i/o calls * - * Copyright IBM Corp. 1999,2008 + * Copyright IBM Corp. 
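The open/release pair turns the device node into a single-open interface: an atomic counter starts at 1, open succeeds only for the caller that decrements it to 0, and release (which also fires any armed on-close request) increments it back. The same gate expressed in portable C11 atomics, outside the kernel:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int ready_for_use = 1;

/* Returns 0 if we won the gate, -1 if somebody else holds the device. */
static int gate_open(void)
{
	if (atomic_fetch_sub(&ready_for_use, 1) != 1) {
		atomic_fetch_add(&ready_for_use, 1);	/* undo, like the driver */
		return -1;
	}
	return 0;
}

static void gate_release(void)
{
	atomic_fetch_add(&ready_for_use, 1);
}

int main(void)
{
	printf("first open:    %d\n", gate_open());	/* 0: success */
	printf("second open:   %d\n", gate_open());	/* -1: -EBUSY in the driver */
	gate_release();
	printf("after release: %d\n", gate_open());	/* 0 again */
	return 0;
}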
1999, 2008 * Author(s): Ingo Adlung (adlung@de.ibm.com) * Cornelia Huck (cornelia.huck@de.ibm.com) * Arnd Bergmann (arndb@de.ibm.com) @@ -19,6 +18,7 @@ #include <linux/device.h> #include <linux/kernel_stat.h> #include <linux/interrupt.h> +#include <linux/irq.h> #include <asm/cio.h> #include <asm/delay.h> #include <asm/irq.h> @@ -29,7 +29,7 @@ #include <asm/chpid.h> #include <asm/airq.h> #include <asm/isc.h> -#include <asm/cputime.h> +#include <linux/cputime.h> #include <asm/fcx.h> #include <asm/nmi.h> #include <asm/crw.h> @@ -46,6 +46,9 @@ debug_info_t *cio_debug_msg_id; debug_info_t *cio_debug_trace_id; debug_info_t *cio_debug_crw_id; +DEFINE_PER_CPU_ALIGNED(struct irb, cio_irb); +EXPORT_PER_CPU_SYMBOL(cio_irb); + /* * Function: cio_debug_init * Initializes three debug logs for common I/O: @@ -55,7 +58,7 @@ debug_info_t *cio_debug_crw_id; */ static int __init cio_debug_init(void) { - cio_debug_msg_id = debug_register("cio_msg", 16, 1, 16 * sizeof(long)); + cio_debug_msg_id = debug_register("cio_msg", 16, 1, 11 * sizeof(long)); if (!cio_debug_msg_id) goto out_unregister; debug_register_view(cio_debug_msg_id, &debug_sprintf_view); @@ -65,7 +68,7 @@ static int __init cio_debug_init(void) goto out_unregister; debug_register_view(cio_debug_trace_id, &debug_hex_ascii_view); debug_set_level(cio_debug_trace_id, 2); - cio_debug_crw_id = debug_register("cio_crw", 16, 1, 16 * sizeof(long)); + cio_debug_crw_id = debug_register("cio_crw", 8, 1, 8 * sizeof(long)); if (!cio_debug_crw_id) goto out_unregister; debug_register_view(cio_debug_crw_id, &debug_sprintf_view); @@ -84,29 +87,14 @@ out_unregister: arch_initcall (cio_debug_init); -int -cio_set_options (struct subchannel *sch, int flags) +int cio_set_options(struct subchannel *sch, int flags) { - sch->options.suspend = (flags & DOIO_ALLOW_SUSPEND) != 0; - sch->options.prefetch = (flags & DOIO_DENY_PREFETCH) != 0; - sch->options.inter = (flags & DOIO_SUPPRESS_INTER) != 0; - return 0; -} + struct io_subchannel_private *priv = to_io_private(sch); -/* FIXME: who wants to use this? */ -int -cio_get_options (struct subchannel *sch) -{ - int flags; - - flags = 0; - if (sch->options.suspend) - flags |= DOIO_ALLOW_SUSPEND; - if (sch->options.prefetch) - flags |= DOIO_DENY_PREFETCH; - if (sch->options.inter) - flags |= DOIO_SUPPRESS_INTER; - return flags; + priv->options.suspend = (flags & DOIO_ALLOW_SUSPEND) != 0; + priv->options.prefetch = (flags & DOIO_DENY_PREFETCH) != 0; + priv->options.inter = (flags & DOIO_SUPPRESS_INTER) != 0; + return 0; } static int @@ -139,21 +127,21 @@ cio_start_key (struct subchannel *sch, /* subchannel structure */ __u8 lpm, /* logical path mask */ __u8 key) /* storage key */ { + struct io_subchannel_private *priv = to_io_private(sch); + union orb *orb = &priv->orb; int ccode; - union orb *orb; CIO_TRACE_EVENT(5, "stIO"); CIO_TRACE_EVENT(5, dev_name(&sch->dev)); - orb = &to_io_private(sch)->orb; memset(orb, 0, sizeof(union orb)); /* sch is always under 2G. */ orb->cmd.intparm = (u32)(addr_t)sch; orb->cmd.fmt = 1; - orb->cmd.pfch = sch->options.prefetch == 0; - orb->cmd.spnd = sch->options.suspend; - orb->cmd.ssic = sch->options.suspend && sch->options.inter; + orb->cmd.pfch = priv->options.prefetch == 0; + orb->cmd.spnd = priv->options.suspend; + orb->cmd.ssic = priv->options.suspend && priv->options.inter; orb->cmd.lpm = (lpm != 0) ? 
lpm : sch->lpm; #ifdef CONFIG_64BIT /* @@ -358,10 +346,11 @@ static int cio_check_config(struct subchannel *sch, struct schib *schib) */ int cio_commit_config(struct subchannel *sch) { - struct schib schib; int ccode, retry, ret = 0; + struct schib schib; + struct irb irb; - if (stsch(sch->schid, &schib) || !css_sch_is_valid(&schib)) + if (stsch_err(sch->schid, &schib) || !css_sch_is_valid(&schib)) return -ENODEV; for (retry = 0; retry < 5; retry++) { @@ -372,7 +361,7 @@ int cio_commit_config(struct subchannel *sch) return ccode; switch (ccode) { case 0: /* successful */ - if (stsch(sch->schid, &schib) || + if (stsch_err(sch->schid, &schib) || !css_sch_is_valid(&schib)) return -ENODEV; if (cio_check_config(sch, &schib)) { @@ -383,7 +372,10 @@ int cio_commit_config(struct subchannel *sch) ret = -EAGAIN; break; case 1: /* status pending */ - return -EBUSY; + ret = -EBUSY; + if (tsch(sch->schid, &irb)) + return ret; + break; case 2: /* busy */ udelay(100); /* allow for recovery */ ret = -EBUSY; @@ -404,7 +396,7 @@ int cio_update_schib(struct subchannel *sch) { struct schib schib; - if (stsch(sch->schid, &schib) || !css_sch_is_valid(&schib)) + if (stsch_err(sch->schid, &schib) || !css_sch_is_valid(&schib)) return -ENODEV; memcpy(&sch->schib, &schib, sizeof(schib)); @@ -419,7 +411,6 @@ EXPORT_SYMBOL_GPL(cio_update_schib); */ int cio_enable_subchannel(struct subchannel *sch, u32 intparm) { - int retry; int ret; CIO_TRACE_EVENT(2, "ensch"); @@ -434,20 +425,14 @@ int cio_enable_subchannel(struct subchannel *sch, u32 intparm) sch->config.isc = sch->isc; sch->config.intparm = intparm; - for (retry = 0; retry < 3; retry++) { + ret = cio_commit_config(sch); + if (ret == -EIO) { + /* + * Got a program check in msch. Try without + * the concurrent sense bit the next time. + */ + sch->config.csense = 0; ret = cio_commit_config(sch); - if (ret == -EIO) { - /* - * Got a program check in msch. Try without - * the concurrent sense bit the next time. - */ - sch->config.csense = 0; - } else if (ret == -EBUSY) { - struct irb irb; - if (tsch(sch->schid, &irb) != 0) - break; - } else - break; } CIO_HEX_EVENT(2, &ret, sizeof(ret)); return ret; @@ -460,7 +445,6 @@ EXPORT_SYMBOL_GPL(cio_enable_subchannel); */ int cio_disable_subchannel(struct subchannel *sch) { - int retry; int ret; CIO_TRACE_EVENT(2, "dissch"); @@ -472,30 +456,13 @@ int cio_disable_subchannel(struct subchannel *sch) return -ENODEV; sch->config.ena = 0; + ret = cio_commit_config(sch); - for (retry = 0; retry < 3; retry++) { - ret = cio_commit_config(sch); - if (ret == -EBUSY) { - struct irb irb; - if (tsch(sch->schid, &irb) != 0) - break; - } else - break; - } CIO_HEX_EVENT(2, &ret, sizeof(ret)); return ret; } EXPORT_SYMBOL_GPL(cio_disable_subchannel); -int cio_create_sch_lock(struct subchannel *sch) -{ - sch->lock = kmalloc(sizeof(spinlock_t), GFP_KERNEL); - if (!sch->lock) - return -ENOMEM; - spin_lock_init(sch->lock); - return 0; -} - static int cio_check_devno_blacklisted(struct subchannel *sch) { if (is_blacklisted(sch->schid.ssid, sch->schib.pmcw.dev)) { @@ -552,32 +519,19 @@ int cio_validate_subchannel(struct subchannel *sch, struct subchannel_id schid) sprintf(dbf_txt, "valsch%x", schid.sch_no); CIO_TRACE_EVENT(4, dbf_txt); - /* Nuke all fields. 
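cio_set_options() now records its three flags in io_subchannel_private, and cio_start_key() folds them into the ORB: pfch is the inverse of the deny-prefetch option, spnd mirrors allow-suspend, and ssic is only set when suspend and suppress-intermediate are both requested. That mapping as a small runnable demo; the DOIO_* values below are stand-ins, not necessarily the kernel's constants:

#include <stdio.h>

#define DOIO_ALLOW_SUSPEND	0x0001	/* stand-in values */
#define DOIO_DENY_PREFETCH	0x0002
#define DOIO_SUPPRESS_INTER	0x0004

struct orb_demo {
	unsigned int pfch:1;	/* prefetch control */
	unsigned int spnd:1;	/* suspend control */
	unsigned int ssic:1;	/* suppress intermediate interrupts */
};

static struct orb_demo orb_from_flags(int flags)
{
	int suspend = (flags & DOIO_ALLOW_SUSPEND) != 0;
	int prefetch = (flags & DOIO_DENY_PREFETCH) != 0;
	int inter = (flags & DOIO_SUPPRESS_INTER) != 0;
	struct orb_demo orb = {
		.pfch = prefetch == 0,	/* prefetch unless denied */
		.spnd = suspend,
		.ssic = suspend && inter,
	};

	return orb;
}

int main(void)
{
	struct orb_demo orb = orb_from_flags(DOIO_ALLOW_SUSPEND |
					     DOIO_SUPPRESS_INTER);

	printf("pfch=%u spnd=%u ssic=%u\n", orb.pfch, orb.spnd, orb.ssic);
	return 0;	/* pfch=1 spnd=1 ssic=1 */
}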
*/ - memset(sch, 0, sizeof(struct subchannel)); - - sch->schid = schid; - if (cio_is_console(schid)) { - sch->lock = cio_get_console_lock(); - } else { - err = cio_create_sch_lock(sch); - if (err) - goto out; - } - mutex_init(&sch->reg_mutex); - /* * The first subchannel that is not-operational (ccode==3) - * indicates that there aren't any more devices available. + * indicates that there aren't any more devices available. * If stsch gets an exception, it means the current subchannel set - * is not valid. + * is not valid. */ - ccode = stsch_err (schid, &sch->schib); + ccode = stsch_err(schid, &sch->schib); if (ccode) { err = (ccode == 3) ? -ENXIO : ccode; goto out; } - /* Copy subchannel type from path management control word. */ sch->st = sch->schib.pmcw.st; + sch->schid = schid; switch (sch->st) { case SUBCHANNEL_TYPE_IO: @@ -594,267 +548,179 @@ int cio_validate_subchannel(struct subchannel *sch, struct subchannel_id schid) CIO_MSG_EVENT(4, "Subchannel 0.%x.%04x reports subchannel type %04X\n", sch->schid.ssid, sch->schid.sch_no, sch->st); - return 0; out: - if (!cio_is_console(schid)) - kfree(sch->lock); - sch->lock = NULL; return err; } /* - * do_IRQ() handles all normal I/O device IRQ's (the special - * SMP cross-CPU interrupts have their own specific - * handlers). - * + * do_cio_interrupt() handles all normal I/O device IRQ's */ -void __irq_entry do_IRQ(struct pt_regs *regs) +static irqreturn_t do_cio_interrupt(int irq, void *dummy) { struct tpi_info *tpi_info; struct subchannel *sch; struct irb *irb; - struct pt_regs *old_regs; - - old_regs = set_irq_regs(regs); - s390_idle_check(); - irq_enter(); - __get_cpu_var(s390_idle).nohz_delay = 1; - if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator) - /* Serve timer interrupts first. */ - clock_comparator_work(); - /* - * Get interrupt information from lowcore - */ - tpi_info = (struct tpi_info *)&S390_lowcore.subchannel_id; - irb = (struct irb *)&S390_lowcore.irb; - do { - kstat_cpu(smp_processor_id()).irqs[IO_INTERRUPT]++; - /* - * Non I/O-subchannel thin interrupts are processed differently - */ - if (tpi_info->adapter_IO == 1 && - tpi_info->int_type == IO_INTERRUPT_TYPE) { - do_adapter_IO(tpi_info->isc); - continue; - } - sch = (struct subchannel *)(unsigned long)tpi_info->intparm; - if (!sch) { - /* Clear pending interrupt condition. */ - tsch(tpi_info->schid, irb); - continue; - } - spin_lock(sch->lock); - /* Store interrupt response block to lowcore. */ - if (tsch(tpi_info->schid, irb) == 0) { - /* Keep subchannel information word up to date. */ - memcpy (&sch->schib.scsw, &irb->scsw, - sizeof (irb->scsw)); - /* Call interrupt handler if there is one. */ - if (sch->driver && sch->driver->irq) - sch->driver->irq(sch); - } - spin_unlock(sch->lock); - /* - * Are more interrupts pending? - * If so, the tpi instruction will update the lowcore - * to hold the info for the next interrupt. - * We don't do this for VM because a tpi drops the cpu - * out of the sie which costs more cycles than it saves. - */ - } while (MACHINE_IS_LPAR && tpi(NULL) != 0); - irq_exit(); - set_irq_regs(old_regs); + + __this_cpu_write(s390_idle.nohz_delay, 1); + tpi_info = (struct tpi_info *) &get_irq_regs()->int_code; + irb = &__get_cpu_var(cio_irb); + sch = (struct subchannel *)(unsigned long) tpi_info->intparm; + if (!sch) { + /* Clear pending interrupt condition. */ + inc_irq_stat(IRQIO_CIO); + tsch(tpi_info->schid, irb); + return IRQ_HANDLED; + } + spin_lock(sch->lock); + /* Store interrupt response block to lowcore. 
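+	 * (with this rework the block lands in the per-CPU cio_irb buffer rather than the lowcore irb area).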
*/ + if (tsch(tpi_info->schid, irb) == 0) { + /* Keep subchannel information word up to date. */ + memcpy (&sch->schib.scsw, &irb->scsw, sizeof (irb->scsw)); + /* Call interrupt handler if there is one. */ + if (sch->driver && sch->driver->irq) + sch->driver->irq(sch); + else + inc_irq_stat(IRQIO_CIO); + } else + inc_irq_stat(IRQIO_CIO); + spin_unlock(sch->lock); + + return IRQ_HANDLED; +} + +static struct irqaction io_interrupt = { + .name = "IO", + .handler = do_cio_interrupt, +}; + +void __init init_cio_interrupts(void) +{ + irq_set_chip_and_handler(IO_INTERRUPT, + &dummy_irq_chip, handle_percpu_irq); + setup_irq(IO_INTERRUPT, &io_interrupt); } #ifdef CONFIG_CCW_CONSOLE -static struct subchannel console_subchannel; -static struct io_subchannel_private console_priv; -static int console_subchannel_in_use; +static struct subchannel *console_sch; +static struct lock_class_key console_sch_key; /* - * Use tpi to get a pending interrupt, call the interrupt handler and - * return a pointer to the subchannel structure. + * Use cio_tsch to update the subchannel status and call the interrupt handler + * if status had been pending. Called with the subchannel's lock held. */ -static int cio_tpi(void) +void cio_tsch(struct subchannel *sch) { - struct tpi_info *tpi_info; - struct subchannel *sch; struct irb *irb; int irq_context; - tpi_info = (struct tpi_info *)&S390_lowcore.subchannel_id; - if (tpi(NULL) != 1) - return 0; - irb = (struct irb *)&S390_lowcore.irb; + irb = &__get_cpu_var(cio_irb); /* Store interrupt response block to lowcore. */ - if (tsch(tpi_info->schid, irb) != 0) + if (tsch(sch->schid, irb) != 0) /* Not status pending or not operational. */ - return 1; - sch = (struct subchannel *)(unsigned long)tpi_info->intparm; - if (!sch) - return 1; + return; + memcpy(&sch->schib.scsw, &irb->scsw, sizeof(union scsw)); + /* Call interrupt handler with updated status. */ irq_context = in_interrupt(); - if (!irq_context) + if (!irq_context) { local_bh_disable(); - irq_enter(); - spin_lock(sch->lock); - memcpy(&sch->schib.scsw, &irb->scsw, sizeof(union scsw)); + irq_enter(); + } + kstat_incr_irq_this_cpu(IO_INTERRUPT); if (sch->driver && sch->driver->irq) sch->driver->irq(sch); - spin_unlock(sch->lock); - irq_exit(); - if (!irq_context) + else + inc_irq_stat(IRQIO_CIO); + if (!irq_context) { + irq_exit(); _local_bh_enable(); - return 1; -} - -void *cio_get_console_priv(void) -{ - return &console_priv; + } } -/* - * busy wait for the next interrupt on the console - */ -void wait_cons_dev(void) - __releases(console_subchannel.lock) - __acquires(console_subchannel.lock) +static int cio_test_for_console(struct subchannel_id schid, void *data) { - unsigned long cr6 __attribute__ ((aligned (8))); - unsigned long save_cr6 __attribute__ ((aligned (8))); - - /* - * before entering the spinlock we may already have - * processed the interrupt on a different CPU... 
- */ - if (!console_subchannel_in_use) - return; - - /* disable all but the console isc */ - __ctl_store (save_cr6, 6, 6); - cr6 = 1UL << (31 - CONSOLE_ISC); - __ctl_load (cr6, 6, 6); - - do { - spin_unlock(console_subchannel.lock); - if (!cio_tpi()) - cpu_relax(); - spin_lock(console_subchannel.lock); - } while (console_subchannel.schib.scsw.cmd.actl != 0); - /* - * restore previous isc value - */ - __ctl_load (save_cr6, 6, 6); -} + struct schib schib; -static int -cio_test_for_console(struct subchannel_id schid, void *data) -{ - if (stsch_err(schid, &console_subchannel.schib) != 0) + if (stsch_err(schid, &schib) != 0) return -ENXIO; - if ((console_subchannel.schib.pmcw.st == SUBCHANNEL_TYPE_IO) && - console_subchannel.schib.pmcw.dnv && - (console_subchannel.schib.pmcw.dev == console_devno)) { + if ((schib.pmcw.st == SUBCHANNEL_TYPE_IO) && schib.pmcw.dnv && + (schib.pmcw.dev == console_devno)) { console_irq = schid.sch_no; return 1; /* found */ } return 0; } - -static int -cio_get_console_sch_no(void) +static int cio_get_console_sch_no(void) { struct subchannel_id schid; - + struct schib schib; + init_subchannel_id(&schid); if (console_irq != -1) { /* VM provided us with the irq number of the console. */ schid.sch_no = console_irq; - if (stsch(schid, &console_subchannel.schib) != 0 || - (console_subchannel.schib.pmcw.st != SUBCHANNEL_TYPE_IO) || - !console_subchannel.schib.pmcw.dnv) + if (stsch_err(schid, &schib) != 0 || + (schib.pmcw.st != SUBCHANNEL_TYPE_IO) || !schib.pmcw.dnv) return -1; - console_devno = console_subchannel.schib.pmcw.dev; + console_devno = schib.pmcw.dev; } else if (console_devno != -1) { /* At least the console device number is known. */ for_each_subchannel(cio_test_for_console, NULL); - if (console_irq == -1) - return -1; - } else { - /* unlike in 2.4, we cannot autoprobe here, since - * the channel subsystem is not fully initialized. - * With some luck, the HWC console can take over */ - return -1; } return console_irq; } -struct subchannel * -cio_probe_console(void) +struct subchannel *cio_probe_console(void) { - int sch_no, ret; struct subchannel_id schid; + struct subchannel *sch; + int sch_no, ret; - if (xchg(&console_subchannel_in_use, 1) != 0) - return ERR_PTR(-EBUSY); sch_no = cio_get_console_sch_no(); if (sch_no == -1) { - console_subchannel_in_use = 0; pr_warning("No CCW console was found\n"); return ERR_PTR(-ENODEV); } - memset(&console_subchannel, 0, sizeof(struct subchannel)); init_subchannel_id(&schid); schid.sch_no = sch_no; - ret = cio_validate_subchannel(&console_subchannel, schid); - if (ret) { - console_subchannel_in_use = 0; - return ERR_PTR(-ENODEV); - } + sch = css_alloc_subchannel(schid); + if (IS_ERR(sch)) + return sch; - /* - * enable console I/O-interrupt subclass - */ + lockdep_set_class(sch->lock, &console_sch_key); isc_register(CONSOLE_ISC); - console_subchannel.config.isc = CONSOLE_ISC; - console_subchannel.config.intparm = (u32)(addr_t)&console_subchannel; - ret = cio_commit_config(&console_subchannel); + sch->config.isc = CONSOLE_ISC; + sch->config.intparm = (u32)(addr_t)sch; + ret = cio_commit_config(sch); if (ret) { isc_unregister(CONSOLE_ISC); - console_subchannel_in_use = 0; + put_device(&sch->dev); return ERR_PTR(ret); } - return &console_subchannel; + console_sch = sch; + return sch; } -void -cio_release_console(void) -{ - console_subchannel.config.intparm = 0; - cio_commit_config(&console_subchannel); - isc_unregister(CONSOLE_ISC); - console_subchannel_in_use = 0; -} - -/* Bah... hack to catch console special sausages. 
*/ -int -cio_is_console(struct subchannel_id schid) +int cio_is_console(struct subchannel_id schid) { - if (!console_subchannel_in_use) + if (!console_sch) return 0; - return schid_equal(&schid, &console_subchannel.schid); + return schid_equal(&schid, &console_sch->schid); } -struct subchannel * -cio_get_console_subchannel(void) +void cio_register_early_subchannels(void) { - if (!console_subchannel_in_use) - return NULL; - return &console_subchannel; + int ret; + + if (!console_sch) + return; + + ret = css_register_subchannel(console_sch); + if (ret) + put_device(&console_sch->dev); } +#endif /* CONFIG_CCW_CONSOLE */ -#endif static int __disable_subchannel_easy(struct subchannel_id schid, struct schib *schib) { @@ -863,10 +729,10 @@ __disable_subchannel_easy(struct subchannel_id schid, struct schib *schib) cc = 0; for (retry=0;retry<3;retry++) { schib->pmcw.ena = 0; - cc = msch(schid, schib); + cc = msch_err(schid, schib); if (cc) return (cc==3?-ENODEV:-EBUSY); - if (stsch(schid, schib) || !css_sch_is_valid(schib)) + if (stsch_err(schid, schib) || !css_sch_is_valid(schib)) return -ENODEV; if (!schib->pmcw.ena) return 0; @@ -885,7 +751,7 @@ __clear_io_subchannel_easy(struct subchannel_id schid) struct tpi_info ti; if (tpi(&ti)) { - tsch(ti.schid, (struct irb *)&S390_lowcore.irb); + tsch(ti.schid, &__get_cpu_var(cio_irb)); if (schid_equal(&ti.schid, &schid)) return 0; } @@ -913,7 +779,7 @@ static int stsch_reset(struct subchannel_id schid, struct schib *addr) pgm_check_occured = 0; s390_base_pgm_handler_fn = cio_reset_pgm_check_handler; - rc = stsch(schid, addr); + rc = stsch_err(schid, addr); s390_base_pgm_handler_fn = NULL; /* The program check handler could have changed pgm_check_occured. */ @@ -950,7 +816,7 @@ static int __shutdown_subchannel_easy(struct subchannel_id schid, void *data) /* No default clear strategy */ break; } - stsch(schid, &schib); + stsch_err(schid, &schib); __disable_subchannel_easy(schid, &schib); } out: @@ -1003,9 +869,9 @@ static void css_reset(void) atomic_inc(&chpid_reset_count); } /* Wait for machine check for all channel paths. */ - timeout = get_clock() + (RCHP_TIMEOUT << 12); + timeout = get_tod_clock_fast() + (RCHP_TIMEOUT << 12); while (atomic_read(&chpid_reset_count) != 0) { - if (get_clock() > timeout) + if (get_tod_clock_fast() > timeout) break; cpu_relax(); } @@ -1070,9 +936,9 @@ extern void do_reipl_asm(__u32 schid); /* Make sure all subchannels are quiet before we re-ipl an lpar. */ void reipl_ccw_dev(struct ccw_dev_id *devid) { - struct subchannel_id schid; + struct subchannel_id uninitialized_var(schid); - s390_reset_system(); + s390_reset_system(NULL, NULL); if (reipl_find_schid(devid, &schid) != 0) panic("IPL Device not found\n"); do_reipl_asm(*((__u32*)&schid)); @@ -1086,7 +952,7 @@ int __init cio_get_iplinfo(struct cio_iplinfo *iplinfo) schid = *(struct subchannel_id *)&S390_lowcore.subchannel_id; if (!schid.one) return -ENODEV; - if (stsch(schid, &schib)) + if (stsch_err(schid, &schib)) return -ENODEV; if (schib.pmcw.st != SUBCHANNEL_TYPE_IO) return -ENODEV; diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h index bf7f80f5a33..a01376ae174 100644 --- a/drivers/s390/cio/cio.h +++ b/drivers/s390/cio/cio.h @@ -68,8 +68,13 @@ struct schib { __u8 mda[4]; /* model dependent area */ } __attribute__ ((packed,aligned(4))); +/* + * When rescheduled, todo's with higher values will overwrite those + * with lower values. 
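+ * For example, a pending SCH_TODO_EVAL is superseded by a later SCH_TODO_UNREG, but never the other way around.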
+ */ enum sch_todo { SCH_TODO_NOTHING, + SCH_TODO_EVAL, SCH_TODO_UNREG, }; @@ -84,13 +89,6 @@ struct subchannel { SUBCHANNEL_TYPE_MSG = 2, SUBCHANNEL_TYPE_ADM = 3, } st; /* subchannel type */ - - struct { - unsigned int suspend:1; /* allow suspend */ - unsigned int prefetch:1;/* deny prefetch */ - unsigned int inter:1; /* suppress intermediate interrupts */ - } __attribute__ ((packed)) options; - __u8 vpm; /* verified path mask */ __u8 lpm; /* logical path mask */ __u8 opm; /* operational path mask */ @@ -99,13 +97,12 @@ struct subchannel { struct chsc_ssd_info ssd_info; /* subchannel description */ struct device dev; /* entry in device tree */ struct css_driver *driver; - void *private; /* private per subchannel type data */ enum sch_todo todo; struct work_struct todo_work; struct schib_config config; } __attribute__ ((aligned(8))); -#define IO_INTERRUPT_TYPE 0 /* I/O interrupt type */ +DECLARE_PER_CPU(struct irb, cio_irb); #define to_subchannel(n) container_of(n, struct subchannel, dev) @@ -120,30 +117,21 @@ extern int cio_start (struct subchannel *, struct ccw1 *, __u8); extern int cio_start_key (struct subchannel *, struct ccw1 *, __u8, __u8); extern int cio_cancel (struct subchannel *); extern int cio_set_options (struct subchannel *, int); -extern int cio_get_options (struct subchannel *); extern int cio_update_schib(struct subchannel *sch); extern int cio_commit_config(struct subchannel *sch); int cio_tm_start_key(struct subchannel *sch, struct tcw *tcw, u8 lpm, u8 key); int cio_tm_intrg(struct subchannel *sch); -int cio_create_sch_lock(struct subchannel *); -void do_adapter_IO(u8 isc); -void do_IRQ(struct pt_regs *); - /* Use with care. */ #ifdef CONFIG_CCW_CONSOLE extern struct subchannel *cio_probe_console(void); -extern void cio_release_console(void); extern int cio_is_console(struct subchannel_id); -extern struct subchannel *cio_get_console_subchannel(void); -extern spinlock_t * cio_get_console_lock(void); -extern void *cio_get_console_priv(void); +extern void cio_register_early_subchannels(void); +extern void cio_tsch(struct subchannel *sch); #else #define cio_is_console(schid) 0 -#define cio_get_console_subchannel() NULL -#define cio_get_console_lock() NULL -#define cio_get_console_priv() NULL +static inline void cio_register_early_subchannels(void) {} #endif #endif diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c index 2985eb43948..23054f8fa9f 100644 --- a/drivers/s390/cio/cmf.c +++ b/drivers/s390/cio/cmf.c @@ -1,9 +1,7 @@ /* - * linux/drivers/s390/cio/cmf.c - * * Linux on zSeries Channel Measurement Facility support * - * Copyright 2000,2006 IBM Corporation + * Copyright IBM Corp. 2000, 2006 * * Authors: Arnd Bergmann <arndb@de.ibm.com> * Cornelia Huck <cornelia.huck@de.ibm.com> @@ -35,7 +33,7 @@ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/slab.h> -#include <linux/timex.h> /* get_clock() */ +#include <linux/timex.h> /* get_tod_clock() */ #include <asm/ccwdev.h> #include <asm/cio.h> @@ -98,7 +96,7 @@ enum cmb_format { * enum cmb_format. 
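 * The "bint" parameter type accepts boolean as well as integer input.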
*/ static int format = CMF_AUTODETECT; -module_param(format, bool, 0444); +module_param(format, bint, 0444); /** * struct cmb_operations - functions to use depending on cmb_format @@ -328,7 +326,7 @@ static int cmf_copy_block(struct ccw_device *cdev) memcpy(cmb_data->last_block, hw_block, cmb_data->size); memcpy(reference_buf, hw_block, cmb_data->size); } while (memcmp(cmb_data->last_block, reference_buf, cmb_data->size)); - cmb_data->last_update = get_clock(); + cmb_data->last_update = get_tod_clock(); kfree(reference_buf); return 0; } @@ -430,7 +428,7 @@ static void cmf_generic_reset(struct ccw_device *cdev) memset(cmbops->align(cmb_data->hw_block), 0, cmb_data->size); cmb_data->last_update = 0; } - cdev->private->cmb_start_time = get_clock(); + cdev->private->cmb_start_time = get_tod_clock(); spin_unlock_irq(cdev->ccwlock); } @@ -1184,7 +1182,7 @@ static ssize_t cmb_enable_store(struct device *dev, int ret; unsigned long val; - ret = strict_strtoul(buf, 16, &val); + ret = kstrtoul(buf, 16, &val); if (ret) return ret; @@ -1341,7 +1339,7 @@ module_init(init_cmf); MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("channel measurement facility base driver\n" - "Copyright 2003 IBM Corporation\n"); + "Copyright IBM Corp. 2003\n"); EXPORT_SYMBOL_GPL(enable_cmf); EXPORT_SYMBOL_GPL(disable_cmf); diff --git a/drivers/s390/cio/crw.c b/drivers/s390/cio/crw.c index 425f741a280..0f8a25f98b1 100644 --- a/drivers/s390/cio/crw.c +++ b/drivers/s390/cio/crw.c @@ -1,7 +1,7 @@ /* * Channel report handling code * - * Copyright IBM Corp. 2000,2009 + * Copyright IBM Corp. 2000, 2009 * Author(s): Ingo Adlung <adlung@de.ibm.com>, * Martin Schwidefsky <schwidefsky@de.ibm.com>, * Cornelia Huck <cornelia.huck@de.ibm.com>, @@ -13,6 +13,7 @@ #include <linux/init.h> #include <linux/wait.h> #include <asm/crw.h> +#include <asm/ctl_reg.h> static DEFINE_MUTEX(crw_handler_mutex); static crw_handler_t crw_handlers[NR_RSCS]; diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c index 2769da54f2b..0268e5fd59b 100644 --- a/drivers/s390/cio/css.c +++ b/drivers/s390/cio/css.c @@ -1,7 +1,7 @@ /* * driver for channel subsystem * - * Copyright IBM Corp. 2002, 2009 + * Copyright IBM Corp. 2002, 2010 * * Author(s): Arnd Bergmann (arndb@de.ibm.com) * Cornelia Huck (cornelia.huck@de.ibm.com) @@ -35,6 +35,7 @@ int css_init_done = 0; int max_ssid; struct channel_subsystem *channel_subsystems[__MAX_CSSID + 1]; +static struct bus_type css_bus_type; int for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data) @@ -68,7 +69,8 @@ static int call_fn_known_sch(struct device *dev, void *data) struct cb_data *cb = data; int rc = 0; - idset_sch_del(cb->set, sch->schid); + if (cb->set) + idset_sch_del(cb->set, sch->schid); if (cb->fn_known_sch) rc = cb->fn_known_sch(sch, cb->data); return rc; @@ -114,6 +116,13 @@ int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *), cb.fn_known_sch = fn_known; cb.fn_unknown_sch = fn_unknown; + if (fn_known && !fn_unknown) { + /* Skip idset allocation in case of known-only loop. 
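+		 * call_fn_known_sch() skips the idset deletion when cb->set is NULL.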
*/ + cb.set = NULL; + return bus_for_each_dev(&css_bus_type, NULL, &cb, + call_fn_known_sch); + } + cb.set = idset_sch_new(); if (!cb.set) /* fall back to brute force scanning in case of oom */ @@ -136,37 +145,53 @@ out: static void css_sch_todo(struct work_struct *work); -static struct subchannel * -css_alloc_subchannel(struct subchannel_id schid) +static int css_sch_create_locks(struct subchannel *sch) +{ + sch->lock = kmalloc(sizeof(*sch->lock), GFP_KERNEL); + if (!sch->lock) + return -ENOMEM; + + spin_lock_init(sch->lock); + mutex_init(&sch->reg_mutex); + + return 0; +} + +static void css_subchannel_release(struct device *dev) +{ + struct subchannel *sch = to_subchannel(dev); + + sch->config.intparm = 0; + cio_commit_config(sch); + kfree(sch->lock); + kfree(sch); +} + +struct subchannel *css_alloc_subchannel(struct subchannel_id schid) { struct subchannel *sch; int ret; - sch = kmalloc (sizeof (*sch), GFP_KERNEL | GFP_DMA); - if (sch == NULL) + sch = kzalloc(sizeof(*sch), GFP_KERNEL | GFP_DMA); + if (!sch) return ERR_PTR(-ENOMEM); - ret = cio_validate_subchannel (sch, schid); - if (ret < 0) { - kfree(sch); - return ERR_PTR(ret); - } + + ret = cio_validate_subchannel(sch, schid); + if (ret < 0) + goto err; + + ret = css_sch_create_locks(sch); + if (ret) + goto err; + INIT_WORK(&sch->todo_work, css_sch_todo); + sch->dev.release = &css_subchannel_release; + device_initialize(&sch->dev); return sch; -} -static void -css_subchannel_release(struct device *dev) -{ - struct subchannel *sch; - - sch = to_subchannel(dev); - if (!cio_is_console(sch->schid)) { - /* Reset intparm to zeroes. */ - sch->config.intparm = 0; - cio_commit_config(sch); - kfree(sch->lock); - kfree(sch); - } +err: + kfree(sch); + return ERR_PTR(ret); } static int css_sch_device_register(struct subchannel *sch) @@ -176,7 +201,7 @@ static int css_sch_device_register(struct subchannel *sch) mutex_lock(&sch->reg_mutex); dev_set_name(&sch->dev, "0.%x.%04x", sch->schid.ssid, sch->schid.sch_no); - ret = device_register(&sch->dev); + ret = device_add(&sch->dev); mutex_unlock(&sch->reg_mutex); return ret; } @@ -194,51 +219,6 @@ void css_sch_device_unregister(struct subchannel *sch) } EXPORT_SYMBOL_GPL(css_sch_device_unregister); -static void css_sch_todo(struct work_struct *work) -{ - struct subchannel *sch; - enum sch_todo todo; - - sch = container_of(work, struct subchannel, todo_work); - /* Find out todo. */ - spin_lock_irq(sch->lock); - todo = sch->todo; - CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid, - sch->schid.sch_no, todo); - sch->todo = SCH_TODO_NOTHING; - spin_unlock_irq(sch->lock); - /* Perform todo. */ - if (todo == SCH_TODO_UNREG) - css_sch_device_unregister(sch); - /* Release workqueue ref. */ - put_device(&sch->dev); -} - -/** - * css_sched_sch_todo - schedule a subchannel operation - * @sch: subchannel - * @todo: todo - * - * Schedule the operation identified by @todo to be performed on the slow path - * workqueue. Do nothing if another operation with higher priority is already - * scheduled. Needs to be called with subchannel lock held. - */ -void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo) -{ - CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n", - sch->schid.ssid, sch->schid.sch_no, todo); - if (sch->todo >= todo) - return; - /* Get workqueue ref. */ - if (!get_device(&sch->dev)) - return; - sch->todo = todo; - if (!queue_work(cio_work_q, &sch->todo_work)) { - /* Already queued, release workqueue ref. 
*/ - put_device(&sch->dev); - } -} - static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw) { int i; @@ -272,16 +252,11 @@ void css_update_ssd_info(struct subchannel *sch) { int ret; - if (cio_is_console(sch->schid)) { - /* Console is initialized too early for functions requiring - * memory allocation. */ + ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info); + if (ret) ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw); - } else { - ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info); - if (ret) - ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw); - ssd_register_chpids(&sch->ssd_info); - } + + ssd_register_chpids(&sch->ssd_info); } static ssize_t type_show(struct device *dev, struct device_attribute *attr, @@ -319,14 +294,13 @@ static const struct attribute_group *default_subch_attr_groups[] = { NULL, }; -static int css_register_subchannel(struct subchannel *sch) +int css_register_subchannel(struct subchannel *sch) { int ret; /* Initialize the subchannel structure */ sch->dev.parent = &channel_subsystems[0]->device; sch->dev.bus = &css_bus_type; - sch->dev.release = &css_subchannel_release; sch->dev.groups = default_subch_attr_groups; /* * We don't want to generate uevents for I/O subchannels that don't @@ -358,23 +332,19 @@ static int css_register_subchannel(struct subchannel *sch) return ret; } -int css_probe_device(struct subchannel_id schid) +static int css_probe_device(struct subchannel_id schid) { - int ret; struct subchannel *sch; + int ret; + + sch = css_alloc_subchannel(schid); + if (IS_ERR(sch)) + return PTR_ERR(sch); - if (cio_is_console(schid)) - sch = cio_get_console_subchannel(); - else { - sch = css_alloc_subchannel(schid); - if (IS_ERR(sch)) - return PTR_ERR(sch); - } ret = css_register_subchannel(sch); - if (ret) { - if (!cio_is_console(schid)) - put_device(&sch->dev); - } + if (ret) + put_device(&sch->dev); + return ret; } @@ -421,7 +391,11 @@ static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow) /* Will be done on the slow path. */ return -EAGAIN; } - if (stsch_err(schid, &schib) || !css_sch_is_valid(&schib)) { + if (stsch_err(schid, &schib)) { + /* Subchannel is not provided. */ + return -ENXIO; + } + if (!css_sch_is_valid(&schib)) { /* Unusable - ignore. */ return 0; } @@ -465,6 +439,66 @@ static void css_evaluate_subchannel(struct subchannel_id schid, int slow) css_schedule_eval(schid); } +/** + * css_sched_sch_todo - schedule a subchannel operation + * @sch: subchannel + * @todo: todo + * + * Schedule the operation identified by @todo to be performed on the slow path + * workqueue. Do nothing if another operation with higher priority is already + * scheduled. Needs to be called with subchannel lock held. + */ +void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo) +{ + CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n", + sch->schid.ssid, sch->schid.sch_no, todo); + if (sch->todo >= todo) + return; + /* Get workqueue ref. */ + if (!get_device(&sch->dev)) + return; + sch->todo = todo; + if (!queue_work(cio_work_q, &sch->todo_work)) { + /* Already queued, release workqueue ref. */ + put_device(&sch->dev); + } +} +EXPORT_SYMBOL_GPL(css_sched_sch_todo); + +static void css_sch_todo(struct work_struct *work) +{ + struct subchannel *sch; + enum sch_todo todo; + int ret; + + sch = container_of(work, struct subchannel, todo_work); + /* Find out todo. 
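+	 * (read and clear sch->todo while holding the subchannel lock).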
*/ + spin_lock_irq(sch->lock); + todo = sch->todo; + CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid, + sch->schid.sch_no, todo); + sch->todo = SCH_TODO_NOTHING; + spin_unlock_irq(sch->lock); + /* Perform todo. */ + switch (todo) { + case SCH_TODO_NOTHING: + break; + case SCH_TODO_EVAL: + ret = css_evaluate_known_subchannel(sch, 1); + if (ret == -EAGAIN) { + spin_lock_irq(sch->lock); + css_sched_sch_todo(sch, todo); + spin_unlock_irq(sch->lock); + } + break; + case SCH_TODO_UNREG: + css_sch_device_unregister(sch); + break; + } + /* Release workqueue ref. */ + put_device(&sch->dev); +} + static struct idset *slow_subchannel_set; static spinlock_t slow_subchannel_lock; static wait_queue_head_t css_eval_wq; @@ -520,10 +554,16 @@ static int slow_eval_unknown_fn(struct subchannel_id schid, void *data) case -ENOMEM: case -EIO: /* These should abort looping */ + spin_lock_irq(&slow_subchannel_lock); + idset_sch_del_subseq(slow_subchannel_set, schid); + spin_unlock_irq(&slow_subchannel_lock); break; default: rc = 0; } + /* Allow scheduling here since the containing loop might + * take a while. */ + cond_resched(); } return rc; } @@ -543,7 +583,7 @@ static void css_slow_path_func(struct work_struct *unused) spin_unlock_irqrestore(&slow_subchannel_lock, flags); } -static DECLARE_WORK(slow_path_work, css_slow_path_func); +static DECLARE_DELAYED_WORK(slow_path_work, css_slow_path_func); struct workqueue_struct *cio_work_q; void css_schedule_eval(struct subchannel_id schid) @@ -553,7 +593,7 @@ void css_schedule_eval(struct subchannel_id schid) spin_lock_irqsave(&slow_subchannel_lock, flags); idset_sch_add(slow_subchannel_set, schid); atomic_set(&css_eval_scheduled, 1); - queue_work(cio_work_q, &slow_path_work); + queue_delayed_work(cio_work_q, &slow_path_work, 0); spin_unlock_irqrestore(&slow_subchannel_lock, flags); } @@ -564,7 +604,7 @@ void css_schedule_eval_all(void) spin_lock_irqsave(&slow_subchannel_lock, flags); idset_fill(slow_subchannel_set); atomic_set(&css_eval_scheduled, 1); - queue_work(cio_work_q, &slow_path_work); + queue_delayed_work(cio_work_q, &slow_path_work, 0); spin_unlock_irqrestore(&slow_subchannel_lock, flags); } @@ -577,7 +617,7 @@ static int __unset_registered(struct device *dev, void *data) return 0; } -void css_schedule_eval_all_unreg(void) +void css_schedule_eval_all_unreg(unsigned long delay) { unsigned long flags; struct idset *unreg_set; @@ -595,7 +635,7 @@ void css_schedule_eval_all_unreg(void) spin_lock_irqsave(&slow_subchannel_lock, flags); idset_add_set(slow_subchannel_set, unreg_set); atomic_set(&css_eval_scheduled, 1); - queue_work(cio_work_q, &slow_path_work); + queue_delayed_work(cio_work_q, &slow_path_work, delay); spin_unlock_irqrestore(&slow_subchannel_lock, flags); idset_free(unreg_set); } @@ -608,7 +648,8 @@ void css_wait_for_slow_path(void) /* Schedule reprobing of all unregistered subchannels. */ void css_schedule_reprobe(void) { - css_schedule_eval_all_unreg(); + /* Schedule with a delay to allow merging of subsequent calls. 
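+	 * Requests that arrive while the delayed work is still pending are folded into a single evaluation pass.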
*/ + css_schedule_eval_all_unreg(1 * HZ); } EXPORT_SYMBOL_GPL(css_schedule_reprobe); @@ -618,6 +659,7 @@ EXPORT_SYMBOL_GPL(css_schedule_reprobe); static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow) { struct subchannel_id mchk_schid; + struct subchannel *sch; if (overflow) { css_schedule_eval_all(); @@ -635,8 +677,15 @@ static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow) init_subchannel_id(&mchk_schid); mchk_schid.sch_no = crw0->rsid; if (crw1) - mchk_schid.ssid = (crw1->rsid >> 8) & 3; + mchk_schid.ssid = (crw1->rsid >> 4) & 3; + if (crw0->erc == CRW_ERC_PMOD) { + sch = get_subchannel_by_schid(mchk_schid); + if (sch) { + css_update_ssd_info(sch); + put_device(&sch->dev); + } + } /* * Since we are always presented with IPI in the CRW, we have to * use stsch() to find out if the subchannel in question has come @@ -648,6 +697,8 @@ static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow) static void __init css_generate_pgid(struct channel_subsystem *css, u32 tod_high) { + struct cpuid cpu_id; + if (css_general_characteristics.mcss) { css->global_pgid.pgid_high.ext_cssid.version = 0x80; css->global_pgid.pgid_high.ext_cssid.cssid = css->cssid; @@ -658,8 +709,9 @@ css_generate_pgid(struct channel_subsystem *css, u32 tod_high) css->global_pgid.pgid_high.cpu_addr = 0; #endif } - css->global_pgid.cpu_id = S390_lowcore.cpu_id.ident; - css->global_pgid.cpu_model = S390_lowcore.cpu_id.machine; + get_cpu_id(&cpu_id); + css->global_pgid.cpu_id = cpu_id.ident; + css->global_pgid.cpu_model = cpu_id.machine; css->global_pgid.tod_high = tod_high; } @@ -702,7 +754,7 @@ css_cm_enable_store(struct device *dev, struct device_attribute *attr, int ret; unsigned long val; - ret = strict_strtoul(buf, 16, &val); + ret = kstrtoul(buf, 16, &val); if (ret) return ret; mutex_lock(&css->mutex); @@ -738,7 +790,7 @@ static int __init setup_css(int nr) css->pseudo_subchannel->dev.release = css_subchannel_release; dev_set_name(&css->pseudo_subchannel->dev, "defunct"); mutex_init(&css->pseudo_subchannel->reg_mutex); - ret = cio_create_sch_lock(css->pseudo_subchannel); + ret = css_sch_create_locks(css->pseudo_subchannel); if (ret) { kfree(css->pseudo_subchannel); return ret; @@ -748,7 +800,7 @@ static int __init setup_css(int nr) css->cssid = nr; dev_set_name(&css->device, "css%x", nr); css->device.release = channel_subsystem_release; - tod_high = (u32) (get_clock() >> 32); + tod_high = (u32) (get_tod_clock() >> 32); css_generate_pgid(css, tod_high); return 0; } @@ -787,7 +839,6 @@ static struct notifier_block css_reboot_notifier = { static int css_power_event(struct notifier_block *this, unsigned long event, void *ptr) { - void *secm_area; int ret, i; switch (event) { @@ -803,15 +854,8 @@ static int css_power_event(struct notifier_block *this, unsigned long event, mutex_unlock(&css->mutex); continue; } - secm_area = (void *)get_zeroed_page(GFP_KERNEL | - GFP_DMA); - if (secm_area) { - if (__chsc_do_secm(css, 0, secm_area)) - ret = NOTIFY_BAD; - free_page((unsigned long)secm_area); - } else - ret = NOTIFY_BAD; - + ret = __chsc_do_secm(css, 0); + ret = notifier_from_errno(ret); mutex_unlock(&css->mutex); } break; @@ -827,15 +871,8 @@ static int css_power_event(struct notifier_block *this, unsigned long event, mutex_unlock(&css->mutex); continue; } - secm_area = (void *)get_zeroed_page(GFP_KERNEL | - GFP_DMA); - if (secm_area) { - if (__chsc_do_secm(css, 1, secm_area)) - ret = NOTIFY_BAD; - free_page((unsigned long)secm_area); - } else - ret = NOTIFY_BAD; - 
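+			/* __chsc_do_secm() now manages its own chsc area, so no page is allocated here. */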
+ ret = __chsc_do_secm(css, 1); + ret = notifier_from_errno(ret); mutex_unlock(&css->mutex); } /* search for subchannels, which appeared during hibernation */ @@ -853,32 +890,23 @@ static struct notifier_block css_power_notifier = { /* * Now that the driver core is running, we can setup our channel subsystem. - * The struct subchannel's are created during probing (except for the - * static console subchannel). + * The struct subchannel's are created during probing. */ static int __init css_bus_init(void) { int ret, i; - ret = chsc_determine_css_characteristics(); - if (ret == -ENOMEM) - goto out; - - ret = chsc_alloc_sei_area(); + ret = chsc_init(); if (ret) - goto out; + return ret; + chsc_determine_css_characteristics(); /* Try to enable MSS. */ ret = chsc_enable_facility(CHSC_SDA_OC_MSS); - switch (ret) { - case 0: /* Success. */ - max_ssid = __MAX_SSID; - break; - case -ENOMEM: - goto out; - default: + if (ret) max_ssid = 0; - } + else /* Success. */ + max_ssid = __MAX_SSID; ret = slow_subchannel_init(); if (ret) @@ -958,9 +986,9 @@ out_unregister: } bus_unregister(&css_bus_type); out: - crw_unregister_handler(CRW_RSC_CSS); - chsc_free_sei_area(); + crw_unregister_handler(CRW_RSC_SCH); idset_free(slow_subchannel_set); + chsc_init_cleanup(); pr_alert("The CSS device driver initialization failed with " "errno=%d\n", ret); return ret; @@ -980,9 +1008,9 @@ static void __init css_bus_cleanup(void) device_unregister(&css->device); } bus_unregister(&css_bus_type); - crw_unregister_handler(CRW_RSC_CSS); - chsc_free_sei_area(); + crw_unregister_handler(CRW_RSC_SCH); idset_free(slow_subchannel_set); + chsc_init_cleanup(); isc_unregister(IO_SCH_ISC); } @@ -1041,6 +1069,8 @@ int css_complete_work(void) */ static int __init channel_subsystem_init_sync(void) { + /* Register subchannels which are already in use. */ + cio_register_early_subchannels(); /* Start initial subchannel evaluation. 
*/ css_schedule_eval_all(); css_complete_work(); @@ -1048,6 +1078,19 @@ static int __init channel_subsystem_init_sync(void) } subsys_initcall_sync(channel_subsystem_init_sync); +void channel_subsystem_reinit(void) +{ + struct channel_path *chp; + struct chp_id chpid; + + chsc_enable_facility(CHSC_SDA_OC_MSS); + chp_id_for_each(&chpid) { + chp = chpid_to_chp(chpid); + if (chp) + chp_update_desc(chp); + } +} + #ifdef CONFIG_PROC_FS static ssize_t cio_settle_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) @@ -1062,7 +1105,9 @@ static ssize_t cio_settle_write(struct file *file, const char __user *buf, } static const struct file_operations cio_settle_proc_fops = { + .open = nonseekable_open, .write = cio_settle_write, + .llseek = no_llseek, }; static int __init cio_settle_init(void) @@ -1195,6 +1240,7 @@ static int css_pm_restore(struct device *dev) struct subchannel *sch = to_subchannel(dev); struct css_driver *drv; + css_update_ssd_info(sch); if (!sch->dev.driver) return 0; drv = to_cssdriver(sch->dev.driver); @@ -1209,7 +1255,7 @@ static const struct dev_pm_ops css_pm_ops = { .restore = css_pm_restore, }; -struct bus_type css_bus_type = { +static struct bus_type css_bus_type = { .name = "css", .match = css_bus_match, .probe = css_probe, @@ -1228,9 +1274,7 @@ struct bus_type css_bus_type = { */ int css_driver_register(struct css_driver *cdrv) { - cdrv->drv.name = cdrv->name; cdrv->drv.bus = &css_bus_type; - cdrv->drv.owner = cdrv->owner; return driver_register(&cdrv->drv); } EXPORT_SYMBOL_GPL(css_driver_register); @@ -1248,4 +1292,3 @@ void css_driver_unregister(struct css_driver *cdrv) EXPORT_SYMBOL_GPL(css_driver_unregister); MODULE_LICENSE("GPL"); -EXPORT_SYMBOL(css_bus_type); diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h index 7e37886de23..2c9107e2025 100644 --- a/drivers/s390/cio/css.h +++ b/drivers/s390/cio/css.h @@ -63,7 +63,6 @@ struct subchannel; struct chp_link; /** * struct css_driver - device driver for subchannels - * @owner: owning module * @subchannel_type: subchannel type supported by this driver * @drv: embedded device driver structure * @irq: called on interrupts @@ -78,10 +77,8 @@ struct chp_link; * @thaw: undo work done in @freeze * @restore: callback for restoring after hibernation * @settle: wait for asynchronous work to finish - * @name: name of the device driver */ struct css_driver { - struct module *owner; struct css_device_id *subchannel_type; struct device_driver drv; void (*irq)(struct subchannel *); @@ -96,21 +93,16 @@ struct css_driver { int (*thaw) (struct subchannel *); int (*restore)(struct subchannel *); int (*settle)(void); - const char *name; }; #define to_cssdriver(n) container_of(n, struct css_driver, drv) -/* - * all css_drivers have the css_bus_type - */ -extern struct bus_type css_bus_type; - extern int css_driver_register(struct css_driver *); extern void css_driver_unregister(struct css_driver *); extern void css_sch_device_unregister(struct subchannel *); -extern int css_probe_device(struct subchannel_id); +extern int css_register_subchannel(struct subchannel *); +extern struct subchannel *css_alloc_subchannel(struct subchannel_id); extern struct subchannel *get_subchannel_by_schid(struct subchannel_id); extern int css_init_done; extern int max_ssid; @@ -118,12 +110,8 @@ int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *), int (*fn_unknown)(struct subchannel_id, void *), void *data); extern int for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *); -extern void 
css_reiterate_subchannels(void); void css_update_ssd_info(struct subchannel *sch); -#define __MAX_SUBCHANNEL 65535 -#define __MAX_SSID 3 - struct channel_subsystem { u8 cssid; int valid; @@ -140,12 +128,12 @@ struct channel_subsystem { }; #define to_css(dev) container_of(dev, struct channel_subsystem, device) -extern struct bus_type css_bus_type; extern struct channel_subsystem *channel_subsystems[]; /* Helper functions to build lists for the slow path. */ void css_schedule_eval(struct subchannel_id schid); void css_schedule_eval_all(void); +void css_schedule_eval_all_unreg(unsigned long delay); int css_complete_work(void); int sch_is_pseudo_sch(struct subchannel *); diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c index 6d229f3523a..dfef5e63cb7 100644 --- a/drivers/s390/cio/device.c +++ b/drivers/s390/cio/device.c @@ -1,8 +1,7 @@ /* - * drivers/s390/cio/device.c * bus driver for ccw devices * - * Copyright IBM Corp. 2002,2008 + * Copyright IBM Corp. 2002, 2008 * Author(s): Arnd Bergmann (arndb@de.ibm.com) * Cornelia Huck (cornelia.huck@de.ibm.com) * Martin Schwidefsky (schwidefsky@de.ibm.com) @@ -20,7 +19,9 @@ #include <linux/list.h> #include <linux/device.h> #include <linux/workqueue.h> +#include <linux/delay.h> #include <linux/timer.h> +#include <linux/kernel_stat.h> #include <asm/ccwdev.h> #include <asm/cio.h> @@ -36,12 +37,17 @@ #include "ioasm.h" #include "io_sch.h" #include "blacklist.h" +#include "chsc.h" static struct timer_list recovery_timer; static DEFINE_SPINLOCK(recovery_lock); static int recovery_phase; static const unsigned long recovery_delay[] = { 3, 30, 300 }; +static atomic_t ccw_device_init_count = ATOMIC_INIT(0); +static DECLARE_WAIT_QUEUE_HEAD(ccw_device_init_wq); +static struct bus_type ccw_bus_type; + /******************* bus type handling ***********************/ /* The Linux driver model distinguishes between a bus type and @@ -126,8 +132,6 @@ static int ccw_uevent(struct device *dev, struct kobj_uevent_env *env) return ret; } -struct bus_type ccw_bus_type; - static void io_subchannel_irq(struct subchannel *); static int io_subchannel_probe(struct subchannel *); static int io_subchannel_remove(struct subchannel *); @@ -136,8 +140,6 @@ static int io_subchannel_sch_event(struct subchannel *, int); static int io_subchannel_chp_event(struct subchannel *, struct chp_link *, int); static void recovery_func(unsigned long data); -wait_queue_head_t ccw_device_init_wq; -atomic_t ccw_device_init_count; static struct css_device_id io_subchannel_ids[] = { { .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, }, @@ -171,9 +173,11 @@ static int io_subchannel_settle(void) } static struct css_driver io_subchannel_driver = { - .owner = THIS_MODULE, + .drv = { + .owner = THIS_MODULE, + .name = "io_subchannel", + }, .subchannel_type = io_subchannel_ids, - .name = "io_subchannel", .irq = io_subchannel_irq, .sch_event = io_subchannel_sch_event, .chp_event = io_subchannel_chp_event, @@ -188,10 +192,7 @@ int __init io_subchannel_init(void) { int ret; - init_waitqueue_head(&ccw_device_init_wq); - atomic_set(&ccw_device_init_count, 0); setup_timer(&recovery_timer, recovery_func, 0); - ret = bus_register(&ccw_bus_type); if (ret) return ret; @@ -332,9 +333,9 @@ int ccw_device_set_offline(struct ccw_device *cdev) if (ret != 0) return ret; } - cdev->online = 0; spin_lock_irq(cdev->ccwlock); sch = to_subchannel(cdev->dev.parent); + cdev->online = 0; /* Wait until a final state or DISCONNECTED is reached */ while (!dev_fsm_final_state(cdev) && cdev->private->state != 
DEV_STATE_DISCONNECTED) { @@ -445,7 +446,10 @@ int ccw_device_set_online(struct ccw_device *cdev) ret = cdev->drv->set_online(cdev); if (ret) goto rollback; + + spin_lock_irq(cdev->ccwlock); cdev->online = 1; + spin_unlock_irq(cdev->ccwlock); return 0; rollback: @@ -486,9 +490,11 @@ static int online_store_handle_offline(struct ccw_device *cdev) spin_lock_irq(cdev->ccwlock); ccw_device_sched_todo(cdev, CDEV_TODO_UNREG_EVAL); spin_unlock_irq(cdev->ccwlock); - } else if (cdev->online && cdev->drv && cdev->drv->set_offline) + return 0; + } + if (cdev->drv && cdev->drv->set_offline) return ccw_device_set_offline(cdev); - return 0; + return -EINVAL; } static int online_store_recog_and_online(struct ccw_device *cdev) @@ -505,8 +511,8 @@ static int online_store_recog_and_online(struct ccw_device *cdev) return -EAGAIN; } if (cdev->drv && cdev->drv->set_online) - ccw_device_set_online(cdev); - return 0; + return ccw_device_set_online(cdev); + return -EINVAL; } static int online_store_handle_online(struct ccw_device *cdev, int force) @@ -536,15 +542,19 @@ static ssize_t online_store (struct device *dev, struct device_attribute *attr, int force, ret; unsigned long i; - if (!dev_fsm_final_state(cdev) && - cdev->private->state != DEV_STATE_DISCONNECTED) - return -EAGAIN; + /* Prevent conflict between multiple on-/offline processing requests. */ if (atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0) return -EAGAIN; - - if (cdev->drv && !try_module_get(cdev->drv->owner)) { - atomic_set(&cdev->private->onoff, 0); - return -EINVAL; + /* Prevent conflict between internal I/Os and on-/offline processing. */ + if (!dev_fsm_final_state(cdev) && + cdev->private->state != DEV_STATE_DISCONNECTED) { + ret = -EAGAIN; + goto out; + } + /* Prevent conflict between pending work and on-/offline processing.*/ + if (work_pending(&cdev->private->todo_work)) { + ret = -EAGAIN; + goto out; } if (!strncmp(buf, "force\n", count)) { force = 1; @@ -552,10 +562,12 @@ static ssize_t online_store (struct device *dev, struct device_attribute *attr, ret = 0; } else { force = 0; - ret = strict_strtoul(buf, 16, &i); + ret = kstrtoul(buf, 16, &i); } if (ret) goto out; + + device_lock(dev); switch (i) { case 0: ret = online_store_handle_offline(cdev); @@ -566,9 +578,9 @@ static ssize_t online_store (struct device *dev, struct device_attribute *attr, default: ret = -EINVAL; } + device_unlock(dev); + out: - if (cdev->drv) - module_put(cdev->drv->owner); atomic_set(&cdev->private->onoff, 0); return (ret < 0) ? 
ret : count; } @@ -598,6 +610,33 @@ available_show (struct device *dev, struct device_attribute *attr, char *buf) } } +static ssize_t +initiate_logging(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct subchannel *sch = to_subchannel(dev); + int rc; + + rc = chsc_siosl(sch->schid); + if (rc < 0) { + pr_warning("Logging for subchannel 0.%x.%04x failed with " + "errno=%d\n", + sch->schid.ssid, sch->schid.sch_no, rc); + return rc; + } + pr_notice("Logging for subchannel 0.%x.%04x was triggered\n", + sch->schid.ssid, sch->schid.sch_no); + return count; +} + +static ssize_t vpm_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct subchannel *sch = to_subchannel(dev); + + return sprintf(buf, "%02x\n", sch->vpm); +} + static DEVICE_ATTR(chpids, 0444, chpids_show, NULL); static DEVICE_ATTR(pimpampom, 0444, pimpampom_show, NULL); static DEVICE_ATTR(devtype, 0444, devtype_show, NULL); @@ -605,10 +644,14 @@ static DEVICE_ATTR(cutype, 0444, cutype_show, NULL); static DEVICE_ATTR(modalias, 0444, modalias_show, NULL); static DEVICE_ATTR(online, 0644, online_show, online_store); static DEVICE_ATTR(availability, 0444, available_show, NULL); +static DEVICE_ATTR(logging, 0200, NULL, initiate_logging); +static DEVICE_ATTR(vpm, 0444, vpm_show, NULL); static struct attribute *io_subchannel_attrs[] = { &dev_attr_chpids.attr, &dev_attr_pimpampom.attr, + &dev_attr_logging.attr, + &dev_attr_vpm.attr, NULL, }; @@ -635,18 +678,11 @@ static const struct attribute_group *ccwdev_attr_groups[] = { NULL, }; -/* this is a simple abstraction for device_register that sets the - * correct bus type and adds the bus specific files */ -static int ccw_device_register(struct ccw_device *cdev) +static int ccw_device_add(struct ccw_device *cdev) { struct device *dev = &cdev->dev; - int ret; dev->bus = &ccw_bus_type; - ret = dev_set_name(&cdev->dev, "0.%x.%04x", cdev->private->dev_id.ssid, - cdev->private->dev_id.devno); - if (ret) - return ret; return device_add(dev); } @@ -658,7 +694,17 @@ static int match_dev_id(struct device *dev, void *data) return ccw_dev_id_is_equal(&cdev->private->dev_id, dev_id); } -static struct ccw_device *get_ccwdev_by_dev_id(struct ccw_dev_id *dev_id) +/** + * get_ccwdev_by_dev_id() - obtain device from a ccw device id + * @dev_id: id of the device to be searched + * + * This function searches all devices attached to the ccw bus for a device + * matching @dev_id. + * Returns: + * If a device is found its reference count is increased and returned; + * else %NULL is returned. + */ +struct ccw_device *get_ccwdev_by_dev_id(struct ccw_dev_id *dev_id) { struct device *dev; @@ -666,6 +712,7 @@ static struct ccw_device *get_ccwdev_by_dev_id(struct ccw_dev_id *dev_id) return dev ? 
to_ccwdev(dev) : NULL; } +EXPORT_SYMBOL_GPL(get_ccwdev_by_dev_id); static void ccw_device_do_unbind_bind(struct ccw_device *cdev) { @@ -710,21 +757,46 @@ static void ccw_device_todo(struct work_struct *work); static int io_subchannel_initialize_dev(struct subchannel *sch, struct ccw_device *cdev) { - cdev->private->cdev = cdev; - atomic_set(&cdev->private->onoff, 0); + struct ccw_device_private *priv = cdev->private; + int ret; + + priv->cdev = cdev; + priv->int_class = IRQIO_CIO; + priv->state = DEV_STATE_NOT_OPER; + priv->dev_id.devno = sch->schib.pmcw.dev; + priv->dev_id.ssid = sch->schid.ssid; + priv->schid = sch->schid; + + INIT_WORK(&priv->todo_work, ccw_device_todo); + INIT_LIST_HEAD(&priv->cmb_list); + init_waitqueue_head(&priv->wait_q); + init_timer(&priv->timer); + + atomic_set(&priv->onoff, 0); + cdev->ccwlock = sch->lock; cdev->dev.parent = &sch->dev; cdev->dev.release = ccw_device_release; - INIT_WORK(&cdev->private->todo_work, ccw_device_todo); cdev->dev.groups = ccwdev_attr_groups; /* Do first half of device_register. */ device_initialize(&cdev->dev); + ret = dev_set_name(&cdev->dev, "0.%x.%04x", cdev->private->dev_id.ssid, + cdev->private->dev_id.devno); + if (ret) + goto out_put; if (!get_device(&sch->dev)) { - /* Release reference from device_initialize(). */ - put_device(&cdev->dev); - return -ENODEV; + ret = -ENODEV; + goto out_put; } - cdev->private->flags.initialized = 1; + priv->flags.initialized = 1; + spin_lock_irq(sch->lock); + sch_set_cdev(sch, cdev); + spin_unlock_irq(sch->lock); return 0; + +out_put: + /* Release reference from device_initialize(). */ + put_device(&cdev->dev); + return ret; } static struct ccw_device * io_subchannel_create_ccwdev(struct subchannel *sch) @@ -803,7 +875,7 @@ static void io_subchannel_register(struct ccw_device *cdev) dev_set_uevent_suppress(&sch->dev, 0); kobject_uevent(&sch->dev.kobj, KOBJ_ADD); /* make it known to the system */ - ret = ccw_device_register(cdev); + ret = ccw_device_add(cdev); if (ret) { CIO_MSG_EVENT(0, "Could not register ccw dev 0.%x.%04x: %d\n", cdev->private->dev_id.ssid, @@ -868,26 +940,11 @@ io_subchannel_recog_done(struct ccw_device *cdev) static void io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch) { - struct ccw_device_private *priv; - - cdev->ccwlock = sch->lock; - - /* Init private data. */ - priv = cdev->private; - priv->dev_id.devno = sch->schib.pmcw.dev; - priv->dev_id.ssid = sch->schid.ssid; - priv->schid = sch->schid; - priv->state = DEV_STATE_NOT_OPER; - INIT_LIST_HEAD(&priv->cmb_list); - init_waitqueue_head(&priv->wait_q); - init_timer(&priv->timer); - /* Increase counter of devices currently in recognition. */ atomic_inc(&ccw_device_init_count); /* Start async. device sensing. 
*/ spin_lock_irq(sch->lock); - sch_set_cdev(sch, cdev); ccw_device_recognition(cdev); spin_unlock_irq(sch->lock); } @@ -974,6 +1031,8 @@ static void io_subchannel_irq(struct subchannel *sch) CIO_TRACE_EVENT(6, dev_name(&sch->dev)); if (cdev) dev_fsm_event(cdev, DEV_EVENT_INTERRUPT); + else + inc_irq_stat(IRQIO_CIO); } void io_subchannel_init_config(struct subchannel *sch) @@ -1006,6 +1065,7 @@ static void io_subchannel_init_fields(struct subchannel *sch) */ static int io_subchannel_probe(struct subchannel *sch) { + struct io_subchannel_private *io_priv; struct ccw_device *cdev; int rc; @@ -1025,19 +1085,14 @@ static int io_subchannel_probe(struct subchannel *sch) dev_set_uevent_suppress(&sch->dev, 0); kobject_uevent(&sch->dev.kobj, KOBJ_ADD); cdev = sch_get_cdev(sch); - cdev->dev.groups = ccwdev_attr_groups; - device_initialize(&cdev->dev); - cdev->private->flags.initialized = 1; - ccw_device_register(cdev); - /* - * Check if the device is already online. If it is - * the reference count needs to be corrected since we - * didn't obtain a reference in ccw_device_set_online. - */ - if (cdev->private->state != DEV_STATE_NOT_OPER && - cdev->private->state != DEV_STATE_OFFLINE && - cdev->private->state != DEV_STATE_BOXED) - get_device(&cdev->dev); + rc = ccw_device_add(cdev); + if (rc) { + /* Release online reference. */ + put_device(&cdev->dev); + goto out_schedule; + } + if (atomic_dec_and_test(&ccw_device_init_count)) + wake_up(&ccw_device_init_wq); return 0; } io_subchannel_init_fields(sch); @@ -1049,10 +1104,11 @@ static int io_subchannel_probe(struct subchannel *sch) if (rc) goto out_schedule; /* Allocate I/O subchannel private data. */ - sch->private = kzalloc(sizeof(struct io_subchannel_private), - GFP_KERNEL | GFP_DMA); - if (!sch->private) + io_priv = kzalloc(sizeof(*io_priv), GFP_KERNEL | GFP_DMA); + if (!io_priv) goto out_schedule; + + set_io_private(sch, io_priv); css_schedule_eval(sch->schid); return 0; @@ -1066,6 +1122,7 @@ out_schedule: static int io_subchannel_remove (struct subchannel *sch) { + struct io_subchannel_private *io_priv = to_io_private(sch); struct ccw_device *cdev; cdev = sch_get_cdev(sch); @@ -1075,11 +1132,12 @@ io_subchannel_remove (struct subchannel *sch) /* Set ccw device to not operational and drop reference. 
*/ spin_lock_irq(cdev->ccwlock); sch_set_cdev(sch, NULL); + set_io_private(sch, NULL); cdev->private->state = DEV_STATE_NOT_OPER; spin_unlock_irq(cdev->ccwlock); ccw_device_unregister(cdev); out_free: - kfree(sch->private); + kfree(io_priv); sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group); return 0; } @@ -1123,6 +1181,7 @@ err: static int io_subchannel_chp_event(struct subchannel *sch, struct chp_link *link, int event) { + struct ccw_device *cdev = sch_get_cdev(sch); int mask; mask = chp_ssd_get_mask(&sch->ssd_info, link); @@ -1132,22 +1191,30 @@ static int io_subchannel_chp_event(struct subchannel *sch, case CHP_VARY_OFF: sch->opm &= ~mask; sch->lpm &= ~mask; + if (cdev) + cdev->private->path_gone_mask |= mask; io_subchannel_terminate_path(sch, mask); break; case CHP_VARY_ON: sch->opm |= mask; sch->lpm |= mask; + if (cdev) + cdev->private->path_new_mask |= mask; io_subchannel_verify(sch); break; case CHP_OFFLINE: if (cio_update_schib(sch)) return -ENODEV; + if (cdev) + cdev->private->path_gone_mask |= mask; io_subchannel_terminate_path(sch, mask); break; case CHP_ONLINE: if (cio_update_schib(sch)) return -ENODEV; sch->lpm |= mask & sch->opm; + if (cdev) + cdev->private->path_new_mask |= mask; io_subchannel_verify(sch); break; } @@ -1172,6 +1239,7 @@ static void io_subchannel_quiesce(struct subchannel *sch) cdev->handler(cdev, cdev->private->intparm, ERR_PTR(-EIO)); while (ret == -EBUSY) { cdev->private->state = DEV_STATE_QUIESCE; + cdev->private->iretry = 255; ret = ccw_device_cancel_halt_clear(cdev); if (ret == -EBUSY) { ccw_device_set_timeout(cdev, HZ/10); @@ -1271,10 +1339,12 @@ static int purge_fn(struct device *dev, void *data) spin_lock_irq(cdev->ccwlock); if (is_blacklisted(id->ssid, id->devno) && - (cdev->private->state == DEV_STATE_OFFLINE)) { + (cdev->private->state == DEV_STATE_OFFLINE) && + (atomic_cmpxchg(&cdev->private->onoff, 0, 1) == 0)) { CIO_MSG_EVENT(3, "ccw: purging 0.%x.%04x\n", id->ssid, id->devno); ccw_device_sched_todo(cdev, CDEV_TODO_UNREG); + atomic_set(&cdev->private->onoff, 0); } spin_unlock_irq(cdev->ccwlock); /* Abort loop in case of pending signal. */ @@ -1358,8 +1428,10 @@ static enum io_sch_action sch_get_action(struct subchannel *sch) } if (device_is_disconnected(cdev)) return IO_SCH_REPROBE; - if (cdev->online) + if (cdev->online && !cdev->private->flags.resuming) return IO_SCH_VERIFY; + if (cdev->private->state == DEV_STATE_NOT_OPER) + return IO_SCH_UNREG_ATTACH; return IO_SCH_NOP; } @@ -1401,12 +1473,6 @@ static int io_subchannel_sch_event(struct subchannel *sch, int process) rc = 0; goto out_unlock; case IO_SCH_VERIFY: - if (cdev->private->flags.resuming == 1) { - if (cio_enable_subchannel(sch, (u32)(addr_t)sch)) { - ccw_device_set_notoper(cdev); - break; - } - } /* Trigger path verification. */ io_subchannel_verify(sch); rc = 0; @@ -1421,7 +1487,16 @@ static int io_subchannel_sch_event(struct subchannel *sch, int process) break; case IO_SCH_UNREG_ATTACH: case IO_SCH_UNREG: - if (cdev) + if (!cdev) + break; + if (cdev->private->state == DEV_STATE_SENSE_ID) { + /* + * Note: delayed work triggered by this event + * and repeated calls to sch_event are synchronized + * by the above check for work_pending(cdev). 
+ */ + dev_fsm_event(cdev, DEV_EVENT_NOTOPER); + } else ccw_device_set_notoper(cdev); break; case IO_SCH_NOP: @@ -1444,9 +1519,16 @@ static int io_subchannel_sch_event(struct subchannel *sch, int process) goto out; break; case IO_SCH_UNREG_ATTACH: + spin_lock_irqsave(sch->lock, flags); + if (cdev->private->flags.resuming) { + /* Device will be handled later. */ + rc = 0; + goto out_unlock; + } + sch_set_cdev(sch, NULL); + spin_unlock_irqrestore(sch->lock, flags); /* Unregister ccw device. */ - if (!cdev->private->flags.resuming) - ccw_device_unregister(cdev); + ccw_device_unregister(cdev); break; default: break; @@ -1491,87 +1573,121 @@ out: return rc; } -#ifdef CONFIG_CCW_CONSOLE -static struct ccw_device console_cdev; -static struct ccw_device_private console_private; -static int console_cdev_in_use; - -static DEFINE_SPINLOCK(ccw_console_lock); - -spinlock_t * cio_get_console_lock(void) +static void ccw_device_set_int_class(struct ccw_device *cdev) { - return &ccw_console_lock; + struct ccw_driver *cdrv = cdev->drv; + + /* Note: we interpret class 0 in this context as an uninitialized + * field since it translates to a non-I/O interrupt class. */ + if (cdrv->int_class != 0) + cdev->private->int_class = cdrv->int_class; + else + cdev->private->int_class = IRQIO_CIO; } -static int ccw_device_console_enable(struct ccw_device *cdev, - struct subchannel *sch) +#ifdef CONFIG_CCW_CONSOLE +int __init ccw_device_enable_console(struct ccw_device *cdev) { + struct subchannel *sch = to_subchannel(cdev->dev.parent); int rc; - /* Attach subchannel private data. */ - sch->private = cio_get_console_priv(); - memset(sch->private, 0, sizeof(struct io_subchannel_private)); + if (!cdev->drv || !cdev->handler) + return -EINVAL; + io_subchannel_init_fields(sch); rc = cio_commit_config(sch); if (rc) return rc; sch->driver = &io_subchannel_driver; - /* Initialize the ccw_device structure. */ - cdev->dev.parent= &sch->dev; - sch_set_cdev(sch, cdev); io_subchannel_recog(cdev, sch); /* Now wait for the async. recognition to come to an end. */ spin_lock_irq(cdev->ccwlock); while (!dev_fsm_final_state(cdev)) - wait_cons_dev(); - rc = -EIO; - if (cdev->private->state != DEV_STATE_OFFLINE) + ccw_device_wait_idle(cdev); + + /* Hold on to an extra reference while device is online. */ + get_device(&cdev->dev); + rc = ccw_device_online(cdev); + if (rc) goto out_unlock; - ccw_device_online(cdev); + while (!dev_fsm_final_state(cdev)) - wait_cons_dev(); - if (cdev->private->state != DEV_STATE_ONLINE) - goto out_unlock; - rc = 0; + ccw_device_wait_idle(cdev); + + if (cdev->private->state == DEV_STATE_ONLINE) + cdev->online = 1; + else + rc = -EIO; out_unlock: spin_unlock_irq(cdev->ccwlock); + if (rc) /* Give up online reference since onlining failed. 
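+		 * (the extra reference was taken via get_device() before calling ccw_device_online above).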
*/ + put_device(&cdev->dev); return rc; } -struct ccw_device * -ccw_device_probe_console(void) +struct ccw_device * __init ccw_device_create_console(struct ccw_driver *drv) { + struct io_subchannel_private *io_priv; + struct ccw_device *cdev; struct subchannel *sch; - int ret; - if (xchg(&console_cdev_in_use, 1) != 0) - return ERR_PTR(-EBUSY); sch = cio_probe_console(); - if (IS_ERR(sch)) { - console_cdev_in_use = 0; - return (void *) sch; + if (IS_ERR(sch)) + return ERR_CAST(sch); + + io_priv = kzalloc(sizeof(*io_priv), GFP_KERNEL | GFP_DMA); + if (!io_priv) { + put_device(&sch->dev); + return ERR_PTR(-ENOMEM); } - memset(&console_cdev, 0, sizeof(struct ccw_device)); - memset(&console_private, 0, sizeof(struct ccw_device_private)); - console_cdev.private = &console_private; - console_private.cdev = &console_cdev; - ret = ccw_device_console_enable(&console_cdev, sch); - if (ret) { - cio_release_console(); - console_cdev_in_use = 0; - return ERR_PTR(ret); + set_io_private(sch, io_priv); + cdev = io_subchannel_create_ccwdev(sch); + if (IS_ERR(cdev)) { + put_device(&sch->dev); + kfree(io_priv); + return cdev; + } + cdev->drv = drv; + ccw_device_set_int_class(cdev); + return cdev; +} + +void __init ccw_device_destroy_console(struct ccw_device *cdev) +{ + struct subchannel *sch = to_subchannel(cdev->dev.parent); + struct io_subchannel_private *io_priv = to_io_private(sch); + + set_io_private(sch, NULL); + put_device(&sch->dev); + put_device(&cdev->dev); + kfree(io_priv); +} + +/** + * ccw_device_wait_idle() - busy wait for device to become idle + * @cdev: ccw device + * + * Poll until activity control is zero, that is, no function or data + * transfer is pending/active. + * Called with device lock being held. + */ +void ccw_device_wait_idle(struct ccw_device *cdev) +{ + struct subchannel *sch = to_subchannel(cdev->dev.parent); + + while (1) { + cio_tsch(sch); + if (sch->schib.scsw.cmd.actl == 0) + break; + udelay_simple(100); } - console_cdev.online = 1; - return &console_cdev; } static int ccw_device_pm_restore(struct device *dev); -int ccw_device_force_console(void) +int ccw_device_force_console(struct ccw_device *cdev) { - if (!console_cdev_in_use) - return -ENODEV; - return ccw_device_pm_restore(&console_cdev.dev); + return ccw_device_pm_restore(&cdev->dev); } EXPORT_SYMBOL_GPL(ccw_device_force_console); #endif @@ -1605,15 +1721,9 @@ struct ccw_device *get_ccwdev_by_busid(struct ccw_driver *cdrv, const char *bus_id) { struct device *dev; - struct device_driver *drv; - - drv = get_driver(&cdrv->driver); - if (!drv) - return NULL; - dev = driver_find_device(drv, NULL, (void *)bus_id, + dev = driver_find_device(&cdrv->driver, NULL, (void *)bus_id, __ccwdev_check_busid); - put_driver(drv); return dev ? to_ccwdev(dev) : NULL; } @@ -1636,19 +1746,18 @@ ccw_device_probe (struct device *dev) int ret; cdev->drv = cdrv; /* to let the driver call _set_online */ - + ccw_device_set_int_class(cdev); ret = cdrv->probe ? 
cdrv->probe(cdev) : -ENODEV; - if (ret) { cdev->drv = NULL; + cdev->private->int_class = IRQIO_CIO; return ret; } return 0; } -static int -ccw_device_remove (struct device *dev) +static int ccw_device_remove(struct device *dev) { struct ccw_device *cdev = to_ccwdev(dev); struct ccw_driver *cdrv = cdev->drv; @@ -1656,9 +1765,10 @@ ccw_device_remove (struct device *dev) if (cdrv->remove) cdrv->remove(cdev); + + spin_lock_irq(cdev->ccwlock); if (cdev->online) { cdev->online = 0; - spin_lock_irq(cdev->ccwlock); ret = ccw_device_offline(cdev); spin_unlock_irq(cdev->ccwlock); if (ret == 0) @@ -1671,9 +1781,12 @@ ccw_device_remove (struct device *dev) cdev->private->dev_id.devno); /* Give up reference obtained in ccw_device_set_online(). */ put_device(&cdev->dev); + spin_lock_irq(cdev->ccwlock); } ccw_device_set_timeout(cdev, 0); cdev->drv = NULL; + cdev->private->int_class = IRQIO_CIO; + spin_unlock_irq(cdev->ccwlock); return 0; } @@ -1788,9 +1901,10 @@ static void __ccw_device_pm_restore(struct ccw_device *cdev) * available again. Kick re-detection. */ cdev->private->flags.resuming = 1; - css_schedule_eval(sch->schid); + cdev->private->path_new_mask = LPM_ANYPATH; + css_sched_sch_todo(sch, SCH_TODO_EVAL); spin_unlock_irq(sch->lock); - css_complete_work(); + css_wait_for_slow_path(); /* cdev may have been moved to a different subchannel. */ sch = to_subchannel(cdev->dev.parent); @@ -1915,7 +2029,7 @@ static const struct dev_pm_ops ccw_pm_ops = { .restore = ccw_device_pm_restore, }; -struct bus_type ccw_bus_type = { +static struct bus_type ccw_bus_type = { .name = "ccw", .match = ccw_bus_match, .uevent = ccw_uevent, @@ -1938,8 +2052,6 @@ int ccw_driver_register(struct ccw_driver *cdriver) struct device_driver *drv = &cdriver->driver; drv->bus = &ccw_bus_type; - drv->name = cdriver->name; - drv->owner = cdriver->owner; return driver_register(drv); } @@ -1955,16 +2067,6 @@ void ccw_driver_unregister(struct ccw_driver *cdriver) driver_unregister(&cdriver->driver); } -/* Helper func for qdio. */ -struct subchannel_id -ccw_device_get_subchannel_id(struct ccw_device *cdev) -{ - struct subchannel *sch; - - sch = to_subchannel(cdev->dev.parent); - return sch->schid; -} - static void ccw_device_todo(struct work_struct *work) { struct ccw_device_private *priv; @@ -2036,11 +2138,24 @@ void ccw_device_sched_todo(struct ccw_device *cdev, enum cdev_todo todo) } } +/** + * ccw_device_siosl() - initiate logging + * @cdev: ccw device + * + * This function is used to invoke model-dependent logging within the channel + * subsystem. 
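+ * + * A minimal caller sketch (hypothetical, for illustration only): + * + *	if (ccw_device_siosl(cdev)) + *		dev_warn(&cdev->dev, "could not trigger logging\n"); + * + * Returns %0 on success, a negative error code otherwise.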
+ */ +int ccw_device_siosl(struct ccw_device *cdev) +{ + struct subchannel *sch = to_subchannel(cdev->dev.parent); + + return chsc_siosl(sch->schid); +} +EXPORT_SYMBOL_GPL(ccw_device_siosl); + MODULE_LICENSE("GPL"); EXPORT_SYMBOL(ccw_device_set_online); EXPORT_SYMBOL(ccw_device_set_offline); EXPORT_SYMBOL(ccw_driver_register); EXPORT_SYMBOL(ccw_driver_unregister); EXPORT_SYMBOL(get_ccwdev_by_busid); -EXPORT_SYMBOL(ccw_bus_type); -EXPORT_SYMBOL_GPL(ccw_device_get_subchannel_id); diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h index 379de2d1ec4..8d1d2987317 100644 --- a/drivers/s390/cio/device.h +++ b/drivers/s390/cio/device.h @@ -2,9 +2,10 @@ #define S390_DEVICE_H #include <asm/ccwdev.h> -#include <asm/atomic.h> +#include <linux/atomic.h> #include <linux/wait.h> #include <linux/notifier.h> +#include <linux/kernel_stat.h> #include "io_sch.h" /* @@ -56,7 +57,16 @@ extern fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS]; static inline void dev_fsm_event(struct ccw_device *cdev, enum dev_event dev_event) { - dev_jumptable[cdev->private->state][dev_event](cdev, dev_event); + int state = cdev->private->state; + + if (dev_event == DEV_EVENT_INTERRUPT) { + if (state == DEV_STATE_ONLINE) + inc_irq_stat(cdev->private->int_class); + else if (state != DEV_STATE_CMFCHANGE && + state != DEV_STATE_CMFUPDATE) + inc_irq_stat(IRQIO_CIO); + } + dev_jumptable[state][dev_event](cdev, dev_event); } /* @@ -71,8 +81,6 @@ dev_fsm_final_state(struct ccw_device *cdev) cdev->private->state == DEV_STATE_BOXED); } -extern wait_queue_head_t ccw_device_init_wq; -extern atomic_t ccw_device_init_count; int __init io_subchannel_init(void); void io_subchannel_recog_done(struct ccw_device *cdev); @@ -90,6 +98,7 @@ int ccw_device_test_sense_data(struct ccw_device *); void ccw_device_schedule_sch_unregister(struct ccw_device *); int ccw_purge_blacklisted(void); void ccw_device_sched_todo(struct ccw_device *cdev, enum cdev_todo todo); +struct ccw_device *get_ccwdev_by_dev_id(struct ccw_dev_id *dev_id); /* Function prototypes for device status and basic sense stuff. */ void ccw_device_accumulate_irb(struct ccw_device *, struct irb *); @@ -130,10 +139,7 @@ int ccw_device_notify(struct ccw_device *, int); void ccw_device_set_disconnected(struct ccw_device *cdev); void ccw_device_set_notoper(struct ccw_device *cdev); -/* qdio needs this. */ void ccw_device_set_timeout(struct ccw_device *, int); -extern struct subchannel_id ccw_device_get_subchannel_id(struct ccw_device *); -extern struct bus_type ccw_bus_type; /* Channel measurement facility related */ void retry_set_schib(struct ccw_device *cdev); diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c index c56ab94612f..0bc902b3cd8 100644 --- a/drivers/s390/cio/device_fsm.c +++ b/drivers/s390/cio/device_fsm.c @@ -1,8 +1,7 @@ /* - * drivers/s390/cio/device_fsm.c * finite state machine for device handling * - * Copyright IBM Corp. 2002,2008 + * Copyright IBM Corp. 
2002, 2008 * Author(s): Cornelia Huck (cornelia.huck@de.ibm.com) * Martin Schwidefsky (schwidefsky@de.ibm.com) */ @@ -45,10 +44,10 @@ static void ccw_timeout_log(struct ccw_device *cdev) sch = to_subchannel(cdev->dev.parent); private = to_io_private(sch); orb = &private->orb; - cc = stsch(sch->schid, &schib); + cc = stsch_err(sch->schid, &schib); printk(KERN_WARNING "cio: ccw device timeout occurred at %llx, " - "device information:\n", get_clock()); + "device information:\n", get_tod_clock()); printk(KERN_WARNING "cio: orb:\n"); print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1, orb, sizeof(*orb), 0); @@ -174,7 +173,10 @@ ccw_device_cancel_halt_clear(struct ccw_device *cdev) ret = cio_clear (sch); return (ret == 0) ? -EBUSY : ret; } - panic("Can't stop i/o on subchannel.\n"); + /* Function was unsuccessful */ + CIO_MSG_EVENT(0, "0.%x.%04x: could not stop I/O\n", + cdev->private->dev_id.ssid, cdev->private->dev_id.devno); + return -EIO; } void ccw_device_update_sense_data(struct ccw_device *cdev) @@ -315,7 +317,7 @@ ccw_device_sense_id_done(struct ccw_device *cdev, int err) /** * ccw_device_notify() - inform the device's driver about an event - * @cdev: device for which an event occured + * @cdev: device for which an event occurred * @event: event that occurred * * Returns: @@ -349,9 +351,13 @@ out: static void ccw_device_oper_notify(struct ccw_device *cdev) { + struct subchannel *sch = to_subchannel(cdev->dev.parent); + if (ccw_device_notify(cdev, CIO_OPER) == NOTIFY_OK) { /* Reenable channel measurements, if needed. */ ccw_device_sched_todo(cdev, CDEV_TODO_ENABLE_CMF); + /* Save indication for new paths. */ + cdev->private->path_new_mask = sch->vpm; return; } /* Driver doesn't want device back. */ @@ -401,9 +407,10 @@ ccw_device_done(struct ccw_device *cdev, int state) CIO_MSG_EVENT(0, "Disconnected device %04x on subchannel " "%04x\n", cdev->private->dev_id.devno, sch->schid.sch_no); - if (ccw_device_notify(cdev, CIO_NO_PATH) != NOTIFY_OK) + if (ccw_device_notify(cdev, CIO_NO_PATH) != NOTIFY_OK) { + cdev->private->state = DEV_STATE_NOT_OPER; ccw_device_sched_todo(cdev, CDEV_TODO_UNREG); - else + } else ccw_device_set_disconnected(cdev); cdev->private->flags.donotify = 0; break; @@ -462,8 +469,52 @@ static void ccw_device_request_event(struct ccw_device *cdev, enum dev_event e) } } -void -ccw_device_verify_done(struct ccw_device *cdev, int err) +static void ccw_device_report_path_events(struct ccw_device *cdev) +{ + struct subchannel *sch = to_subchannel(cdev->dev.parent); + int path_event[8]; + int chp, mask; + + for (chp = 0, mask = 0x80; chp < 8; chp++, mask >>= 1) { + path_event[chp] = PE_NONE; + if (mask & cdev->private->path_gone_mask & ~(sch->vpm)) + path_event[chp] |= PE_PATH_GONE; + if (mask & cdev->private->path_new_mask & sch->vpm) + path_event[chp] |= PE_PATH_AVAILABLE; + if (mask & cdev->private->pgid_reset_mask & sch->vpm) + path_event[chp] |= PE_PATHGROUP_ESTABLISHED; + } + if (cdev->online && cdev->drv->path_event) + cdev->drv->path_event(cdev, path_event); +} + +static void ccw_device_reset_path_events(struct ccw_device *cdev) +{ + cdev->private->path_gone_mask = 0; + cdev->private->path_new_mask = 0; + cdev->private->pgid_reset_mask = 0; +} + +static void create_fake_irb(struct irb *irb, int type) +{ + memset(irb, 0, sizeof(*irb)); + if (type == FAKE_CMD_IRB) { + struct cmd_scsw *scsw = &irb->scsw.cmd; + scsw->cc = 1; + scsw->fctl = SCSW_FCTL_START_FUNC; + scsw->actl = SCSW_ACTL_START_PEND; + scsw->stctl = SCSW_STCTL_STATUS_PEND; + } else if (type == 
FAKE_TM_IRB) { + struct tm_scsw *scsw = &irb->scsw.tm; + scsw->x = 1; + scsw->cc = 1; + scsw->fctl = SCSW_FCTL_START_FUNC; + scsw->actl = SCSW_ACTL_START_PEND; + scsw->stctl = SCSW_STCTL_STATUS_PEND; + } +} + +void ccw_device_verify_done(struct ccw_device *cdev, int err) { struct subchannel *sch; @@ -486,18 +537,15 @@ callback: ccw_device_done(cdev, DEV_STATE_ONLINE); /* Deliver fake irb to device driver, if needed. */ if (cdev->private->flags.fake_irb) { - memset(&cdev->private->irb, 0, sizeof(struct irb)); - cdev->private->irb.scsw.cmd.cc = 1; - cdev->private->irb.scsw.cmd.fctl = SCSW_FCTL_START_FUNC; - cdev->private->irb.scsw.cmd.actl = SCSW_ACTL_START_PEND; - cdev->private->irb.scsw.cmd.stctl = - SCSW_STCTL_STATUS_PEND; + create_fake_irb(&cdev->private->irb, + cdev->private->flags.fake_irb); cdev->private->flags.fake_irb = 0; if (cdev->handler) cdev->handler(cdev, cdev->private->intparm, &cdev->private->irb); memset(&cdev->private->irb, 0, sizeof(struct irb)); } + ccw_device_report_path_events(cdev); break; case -ETIME: case -EUSERS: @@ -516,6 +564,7 @@ callback: ccw_device_done(cdev, DEV_STATE_NOT_OPER); break; } + ccw_device_reset_path_events(cdev); } /* @@ -653,7 +702,7 @@ ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event) (scsw_stctl(&cdev->private->irb.scsw) & SCSW_STCTL_STATUS_PEND)) { /* * No final status yet or final status not yet delivered - * to the device driver. Can't do path verfication now, + * to the device driver. Can't do path verification now, * delay until final status was delivered. */ cdev->private->flags.doverify = 1; @@ -690,7 +739,7 @@ ccw_device_irq(struct ccw_device *cdev, enum dev_event dev_event) struct irb *irb; int is_cmd; - irb = (struct irb *)&S390_lowcore.irb; + irb = &__get_cpu_var(cio_irb); is_cmd = !scsw_is_tm(&irb->scsw); /* Check for unsolicited interrupt. */ if (!scsw_is_solicited(&irb->scsw)) { @@ -734,13 +783,14 @@ ccw_device_online_timeout(struct ccw_device *cdev, enum dev_event dev_event) int ret; ccw_device_set_timeout(cdev, 0); + cdev->private->iretry = 255; ret = ccw_device_cancel_halt_clear(cdev); if (ret == -EBUSY) { ccw_device_set_timeout(cdev, 3*HZ); cdev->private->state = DEV_STATE_TIMEOUT_KILL; return; } - if (ret == -ENODEV) + if (ret) dev_fsm_event(cdev, DEV_EVENT_NOTOPER); else if (cdev->handler) cdev->handler(cdev, cdev->private->intparm, @@ -755,7 +805,7 @@ ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event) { struct irb *irb; - irb = (struct irb *)&S390_lowcore.irb; + irb = &__get_cpu_var(cio_irb); /* Check for unsolicited interrupt. */ if (scsw_stctl(&irb->scsw) == (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) { @@ -804,9 +854,6 @@ call_handler: static void ccw_device_killing_irq(struct ccw_device *cdev, enum dev_event dev_event) { - struct subchannel *sch; - - sch = to_subchannel(cdev->dev.parent); ccw_device_set_timeout(cdev, 0); /* Start delayed path verification. */ ccw_device_online_verify(cdev, 0); @@ -837,6 +884,7 @@ void ccw_device_kill_io(struct ccw_device *cdev) { int ret; + cdev->private->iretry = 255; ret = ccw_device_cancel_halt_clear(cdev); if (ret == -EBUSY) { ccw_device_set_timeout(cdev, 3*HZ); diff --git a/drivers/s390/cio/device_id.c b/drivers/s390/cio/device_id.c index 78a0b43862c..d4fa30541a3 100644 --- a/drivers/s390/cio/device_id.c +++ b/drivers/s390/cio/device_id.c @@ -1,7 +1,7 @@ /* * CCW device SENSE ID I/O handling. * - * Copyright IBM Corp. 2002,2009 + * Copyright IBM Corp. 
2002, 2009 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com> * Martin Schwidefsky <schwidefsky@de.ibm.com> * Peter Oberparleiter <peter.oberparleiter@de.ibm.com> diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c index 6da84543dfe..f3c417943da 100644 --- a/drivers/s390/cio/device_ops.c +++ b/drivers/s390/cio/device_ops.c @@ -198,7 +198,7 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa, if (cdev->private->state == DEV_STATE_VERIFY) { /* Remember to fake irb when finished. */ if (!cdev->private->flags.fake_irb) { - cdev->private->flags.fake_irb = 1; + cdev->private->flags.fake_irb = FAKE_CMD_IRB; cdev->private->intparm = intparm; return 0; } else @@ -213,9 +213,9 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa, ret = cio_set_options (sch, flags); if (ret) return ret; - /* Adjust requested path mask to excluded varied off paths. */ + /* Adjust requested path mask to exclude unusable paths. */ if (lpm) { - lpm &= sch->opm; + lpm &= sch->lpm; if (lpm == 0) return -EACCES; } @@ -418,12 +418,9 @@ int ccw_device_resume(struct ccw_device *cdev) int ccw_device_call_handler(struct ccw_device *cdev) { - struct subchannel *sch; unsigned int stctl; int ending_status; - sch = to_subchannel(cdev->dev.parent); - /* * we allow for the device action handler if . * - we received ending status @@ -566,14 +563,23 @@ out_unlock: return rc; } -void *ccw_device_get_chp_desc(struct ccw_device *cdev, int chp_no) +/** + * ccw_device_get_chp_desc - return newly allocated channel-path descriptor + * @cdev: device to obtain the descriptor for + * @chp_idx: index of the channel path + * + * On success return a newly allocated copy of the channel-path description + * data associated with the given channel path. Return %NULL on error. + */ +struct channel_path_desc *ccw_device_get_chp_desc(struct ccw_device *cdev, + int chp_idx) { struct subchannel *sch; struct chp_id chpid; sch = to_subchannel(cdev->dev.parent); chp_id_init(&chpid); - chpid.id = sch->schib.pmcw.chpid[chp_no]; + chpid.id = sch->schib.pmcw.chpid[chp_idx]; return chp_get_chp_desc(chpid); } @@ -608,11 +614,21 @@ int ccw_device_tm_start_key(struct ccw_device *cdev, struct tcw *tcw, sch = to_subchannel(cdev->dev.parent); if (!sch->schib.pmcw.ena) return -EINVAL; + if (cdev->private->state == DEV_STATE_VERIFY) { + /* Remember to fake irb when finished. */ + if (!cdev->private->flags.fake_irb) { + cdev->private->flags.fake_irb = FAKE_TM_IRB; + cdev->private->intparm = intparm; + return 0; + } else + /* There's already a fake I/O around. */ + return -EBUSY; + } if (cdev->private->state != DEV_STATE_ONLINE) return -EIO; - /* Adjust requested path mask to excluded varied off paths. */ + /* Adjust requested path mask to exclude unusable paths. */ if (lpm) { - lpm &= sch->opm; + lpm &= sch->lpm; if (lpm == 0) return -EACCES; } @@ -687,6 +703,52 @@ int ccw_device_tm_start_timeout(struct ccw_device *cdev, struct tcw *tcw, EXPORT_SYMBOL(ccw_device_tm_start_timeout); /** + * ccw_device_get_mdc - accumulate max data count + * @cdev: ccw device for which the max data count is accumulated + * @mask: mask of paths to use + * + * Return the number of 64K-byte blocks all paths at least support + * for a transport command. Return values <= 0 indicate failures. + */ +int ccw_device_get_mdc(struct ccw_device *cdev, u8 mask) +{ + struct subchannel *sch = to_subchannel(cdev->dev.parent); + struct channel_path *chp; + struct chp_id chpid; + int mdc = 0, i; + + /* Adjust requested path mask to exclude unusable paths.
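+	 * If the caller passes no mask, all currently usable paths (sch->lpm) are evaluated.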
*/ + if (mask) + mask &= sch->lpm; + else + mask = sch->lpm; + + chp_id_init(&chpid); + for (i = 0; i < 8; i++) { + if (!(mask & (0x80 >> i))) + continue; + chpid.id = sch->schib.pmcw.chpid[i]; + chp = chpid_to_chp(chpid); + if (!chp) + continue; + + mutex_lock(&chp->lock); + if (!chp->desc_fmt1.f) { + mutex_unlock(&chp->lock); + return 0; + } + if (!chp->desc_fmt1.r) + mdc = 1; + mdc = mdc ? min_t(int, mdc, chp->desc_fmt1.mdc) : + chp->desc_fmt1.mdc; + mutex_unlock(&chp->lock); + } + + return mdc; +} +EXPORT_SYMBOL(ccw_device_get_mdc); + +/** * ccw_device_tm_intrg - perform interrogate function * @cdev: ccw device on which to perform the interrogate function * @@ -708,14 +770,18 @@ int ccw_device_tm_intrg(struct ccw_device *cdev) } EXPORT_SYMBOL(ccw_device_tm_intrg); -// FIXME: these have to go: - -int -_ccw_device_get_subchannel_number(struct ccw_device *cdev) +/** + * ccw_device_get_schid - obtain a subchannel id + * @cdev: device to obtain the id for + * @schid: where to fill in the values + */ +void ccw_device_get_schid(struct ccw_device *cdev, struct subchannel_id *schid) { - return cdev->private->schid.sch_no; -} + struct subchannel *sch = to_subchannel(cdev->dev.parent); + *schid = sch->schid; +} +EXPORT_SYMBOL_GPL(ccw_device_get_schid); MODULE_LICENSE("GPL"); EXPORT_SYMBOL(ccw_device_set_options_mask); @@ -730,5 +796,4 @@ EXPORT_SYMBOL(ccw_device_start_timeout_key); EXPORT_SYMBOL(ccw_device_start_key); EXPORT_SYMBOL(ccw_device_get_ciw); EXPORT_SYMBOL(ccw_device_get_path_mask); -EXPORT_SYMBOL(_ccw_device_get_subchannel_number); EXPORT_SYMBOL_GPL(ccw_device_get_chp_desc); diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c index 6facb5499a6..37ada05e82a 100644 --- a/drivers/s390/cio/device_pgid.c +++ b/drivers/s390/cio/device_pgid.c @@ -1,7 +1,7 @@ /* * CCW device PGID and path verification I/O handling. * - * Copyright IBM Corp. 2002,2009 + * Copyright IBM Corp. 2002, 2009 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com> * Martin Schwidefsky <schwidefsky@de.ibm.com> * Peter Oberparleiter <peter.oberparleiter@de.ibm.com> @@ -23,6 +23,8 @@ #define PGID_RETRIES 256 #define PGID_TIMEOUT (10 * HZ) +static void verify_start(struct ccw_device *cdev); + /* * Process path verification data and report result. */ @@ -70,8 +72,8 @@ static void nop_do(struct ccw_device *cdev) struct subchannel *sch = to_subchannel(cdev->dev.parent); struct ccw_request *req = &cdev->private->req; - /* Adjust lpm. */ - req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam & sch->opm); + req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam & sch->opm & + ~cdev->private->path_noirq_mask); if (!req->lpm) goto out_nopath; nop_build_cp(cdev); @@ -102,10 +104,20 @@ static void nop_callback(struct ccw_device *cdev, void *data, int rc) struct subchannel *sch = to_subchannel(cdev->dev.parent); struct ccw_request *req = &cdev->private->req; - if (rc == 0) + switch (rc) { + case 0: sch->vpm |= req->lpm; - else if (rc != -EACCES) + break; + case -ETIME: + cdev->private->path_noirq_mask |= req->lpm; + break; + case -EACCES: + cdev->private->path_notoper_mask |= req->lpm; + break; + default: goto err; + } + /* Continue on the next path. */ req->lpm >>= 1; nop_do(cdev); return; @@ -132,6 +144,48 @@ static void spid_build_cp(struct ccw_device *cdev, u8 fn) req->cp = cp; } +static void pgid_wipeout_callback(struct ccw_device *cdev, void *data, int rc) +{ + if (rc) { + /* We don't know the path groups' state. Abort. */ + verify_done(cdev, rc); + return; + } + /* + * Path groups have been reset. 
Restart path verification but + * leave paths in path_noirq_mask out. + */ + cdev->private->flags.pgid_unknown = 0; + verify_start(cdev); +} + +/* + * Reset pathgroups and restart path verification, leave unusable paths out. + */ +static void pgid_wipeout_start(struct ccw_device *cdev) +{ + struct subchannel *sch = to_subchannel(cdev->dev.parent); + struct ccw_dev_id *id = &cdev->private->dev_id; + struct ccw_request *req = &cdev->private->req; + u8 fn; + + CIO_MSG_EVENT(2, "wipe: device 0.%x.%04x: pvm=%02x nim=%02x\n", + id->ssid, id->devno, cdev->private->pgid_valid_mask, + cdev->private->path_noirq_mask); + + /* Initialize request data. */ + memset(req, 0, sizeof(*req)); + req->timeout = PGID_TIMEOUT; + req->maxretries = PGID_RETRIES; + req->lpm = sch->schib.pmcw.pam; + req->callback = pgid_wipeout_callback; + fn = SPID_FUNC_DISBAND; + if (cdev->private->flags.mpath) + fn |= SPID_FUNC_MULTI_PATH; + spid_build_cp(cdev, fn); + ccw_request_start(cdev); +} + /* * Perform establish/resign SET PGID on a single path. */ @@ -157,11 +211,14 @@ static void spid_do(struct ccw_device *cdev) return; out_nopath: + if (cdev->private->flags.pgid_unknown) { + /* At least one SPID could be partially done. */ + pgid_wipeout_start(cdev); + return; + } verify_done(cdev, sch->vpm ? 0 : -EACCES); } -static void verify_start(struct ccw_device *cdev); - /* * Process SET PGID request result for a single path. */ @@ -174,7 +231,12 @@ static void spid_callback(struct ccw_device *cdev, void *data, int rc) case 0: sch->vpm |= req->lpm & sch->opm; break; + case -ETIME: + cdev->private->flags.pgid_unknown = 1; + cdev->private->path_noirq_mask |= req->lpm; + break; case -EACCES: + cdev->private->path_notoper_mask |= req->lpm; break; case -EOPNOTSUPP: if (cdev->private->flags.mpath) { @@ -208,10 +270,22 @@ static void spid_start(struct ccw_device *cdev) req->timeout = PGID_TIMEOUT; req->maxretries = PGID_RETRIES; req->lpm = 0x80; + req->singlepath = 1; req->callback = spid_callback; spid_do(cdev); } +static int pgid_is_reset(struct pgid *p) +{ + char *c; + + for (c = (char *)p + 1; c < (char *)(p + 1); c++) { + if (*c != 0) + return 0; + } + return 1; +} + static int pgid_cmp(struct pgid *p1, struct pgid *p2) { return memcmp((char *) p1 + 1, (char *) p2 + 1, @@ -222,7 +296,7 @@ static int pgid_cmp(struct pgid *p1, struct pgid *p2) * Determine pathgroup state from PGID data. */ static void pgid_analyze(struct ccw_device *cdev, struct pgid **p, - int *mismatch, int *reserved, int *reset) + int *mismatch, u8 *reserved, u8 *reset) { struct pgid *pgid = &cdev->private->pgid[0]; struct pgid *first = NULL; @@ -236,10 +310,9 @@ static void pgid_analyze(struct ccw_device *cdev, struct pgid **p, if ((cdev->private->pgid_valid_mask & lpm) == 0) continue; if (pgid->inf.ps.state2 == SNID_STATE2_RESVD_ELSE) - *reserved = 1; - if (pgid->inf.ps.state1 == SNID_STATE1_RESET) { - /* A PGID was reset. 
*/ - *reset = 1; + *reserved |= lpm; + if (pgid_is_reset(pgid)) { + *reset |= lpm; continue; } if (!first) { @@ -305,30 +378,36 @@ static void snid_done(struct ccw_device *cdev, int rc) struct subchannel *sch = to_subchannel(cdev->dev.parent); struct pgid *pgid; int mismatch = 0; - int reserved = 0; - int reset = 0; + u8 reserved = 0; + u8 reset = 0; u8 donepm; if (rc) goto out; pgid_analyze(cdev, &pgid, &mismatch, &reserved, &reset); - if (reserved) + if (reserved == cdev->private->pgid_valid_mask) rc = -EUSERS; else if (mismatch) rc = -EOPNOTSUPP; else { donepm = pgid_to_donepm(cdev); sch->vpm = donepm & sch->opm; - cdev->private->pgid_todo_mask &= ~donepm; + cdev->private->pgid_reset_mask |= reset; + cdev->private->pgid_todo_mask &= + ~(donepm | cdev->private->path_noirq_mask); pgid_fill(cdev, pgid); } out: CIO_MSG_EVENT(2, "snid: device 0.%x.%04x: rc=%d pvm=%02x vpm=%02x " - "todo=%02x mism=%d rsvd=%d reset=%d\n", id->ssid, + "todo=%02x mism=%d rsvd=%02x reset=%02x\n", id->ssid, id->devno, rc, cdev->private->pgid_valid_mask, sch->vpm, cdev->private->pgid_todo_mask, mismatch, reserved, reset); switch (rc) { case 0: + if (cdev->private->flags.pgid_unknown) { + pgid_wipeout_start(cdev); + return; + } /* Anything left to do? */ if (cdev->private->pgid_todo_mask == 0) { verify_done(cdev, sch->vpm == 0 ? -EACCES : 0); @@ -372,9 +451,10 @@ static void snid_do(struct ccw_device *cdev) { struct subchannel *sch = to_subchannel(cdev->dev.parent); struct ccw_request *req = &cdev->private->req; + int ret; - /* Adjust lpm if paths are not set in pam. */ - req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam); + req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam & + ~cdev->private->path_noirq_mask); if (!req->lpm) goto out_nopath; snid_build_cp(cdev); @@ -382,7 +462,13 @@ static void snid_do(struct ccw_device *cdev) return; out_nopath: - snid_done(cdev, cdev->private->pgid_valid_mask ? 0 : -EACCES); + if (cdev->private->pgid_valid_mask) + ret = 0; + else if (cdev->private->path_noirq_mask) + ret = -ETIME; + else + ret = -EACCES; + snid_done(cdev, ret); } /* @@ -392,10 +478,21 @@ static void snid_callback(struct ccw_device *cdev, void *data, int rc) { struct ccw_request *req = &cdev->private->req; - if (rc == 0) + switch (rc) { + case 0: cdev->private->pgid_valid_mask |= req->lpm; - else if (rc != -EACCES) + break; + case -ETIME: + cdev->private->flags.pgid_unknown = 1; + cdev->private->path_noirq_mask |= req->lpm; + break; + case -EACCES: + cdev->private->path_notoper_mask |= req->lpm; + break; + default: goto err; + } + /* Continue on the next path. */ req->lpm >>= 1; snid_do(cdev); return; @@ -415,11 +512,19 @@ static void verify_start(struct ccw_device *cdev) sch->vpm = 0; sch->lpm = sch->schib.pmcw.pam; + + /* Initialize PGID data. */ + memset(cdev->private->pgid, 0, sizeof(cdev->private->pgid)); + cdev->private->pgid_valid_mask = 0; + cdev->private->pgid_todo_mask = sch->schib.pmcw.pam; + cdev->private->path_notoper_mask = 0; + /* Initialize request data. 
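+	 * The request is processed on one path at a time (singlepath), starting with the first path mask, 0x80.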
*/ memset(req, 0, sizeof(*req)); req->timeout = PGID_TIMEOUT; req->maxretries = PGID_RETRIES; req->lpm = 0x80; + req->singlepath = 1; if (cdev->private->flags.pgroup) { CIO_TRACE_EVENT(4, "snid"); CIO_HEX_EVENT(4, devid, sizeof(*devid)); @@ -446,14 +551,8 @@ static void verify_start(struct ccw_device *cdev) */ void ccw_device_verify_start(struct ccw_device *cdev) { - struct subchannel *sch = to_subchannel(cdev->dev.parent); - CIO_TRACE_EVENT(4, "vrfy"); CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id)); - /* Initialize PGID data. */ - memset(cdev->private->pgid, 0, sizeof(cdev->private->pgid)); - cdev->private->pgid_valid_mask = 0; - cdev->private->pgid_todo_mask = sch->schib.pmcw.pam; /* * Initialize pathgroup and multipath state with target values. * They may change in the course of path verification. @@ -461,6 +560,7 @@ void ccw_device_verify_start(struct ccw_device *cdev) cdev->private->flags.pgroup = cdev->private->options.pgroup; cdev->private->flags.mpath = cdev->private->options.mpath; cdev->private->flags.doverify = 0; + cdev->private->path_noirq_mask = 0; verify_start(cdev); } @@ -507,6 +607,7 @@ void ccw_device_disband_start(struct ccw_device *cdev) req->timeout = PGID_TIMEOUT; req->maxretries = PGID_RETRIES; req->lpm = sch->schib.pmcw.pam & sch->opm; + req->singlepath = 1; req->callback = disband_callback; fn = SPID_FUNC_DISBAND; if (cdev->private->flags.mpath) diff --git a/drivers/s390/cio/device_status.c b/drivers/s390/cio/device_status.c index 66d8066ef22..15b56a15db1 100644 --- a/drivers/s390/cio/device_status.c +++ b/drivers/s390/cio/device_status.c @@ -1,8 +1,5 @@ /* - * drivers/s390/cio/device_status.c - * - * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, - * IBM Corporation + * Copyright IBM Corp. 2002 * Author(s): Cornelia Huck (cornelia.huck@de.ibm.com) * Martin Schwidefsky (schwidefsky@de.ibm.com) * diff --git a/drivers/s390/cio/eadm_sch.c b/drivers/s390/cio/eadm_sch.c new file mode 100644 index 00000000000..c4f7bf3e24c --- /dev/null +++ b/drivers/s390/cio/eadm_sch.c @@ -0,0 +1,418 @@ +/* + * Driver for s390 eadm subchannels + * + * Copyright IBM Corp. 
2012 + * Author(s): Sebastian Ott <sebott@linux.vnet.ibm.com> + */ + +#include <linux/kernel_stat.h> +#include <linux/completion.h> +#include <linux/workqueue.h> +#include <linux/spinlock.h> +#include <linux/device.h> +#include <linux/module.h> +#include <linux/timer.h> +#include <linux/slab.h> +#include <linux/list.h> + +#include <asm/css_chars.h> +#include <asm/debug.h> +#include <asm/isc.h> +#include <asm/cio.h> +#include <asm/scsw.h> +#include <asm/eadm.h> + +#include "eadm_sch.h" +#include "ioasm.h" +#include "cio.h" +#include "css.h" +#include "orb.h" + +MODULE_DESCRIPTION("driver for s390 eadm subchannels"); +MODULE_LICENSE("GPL"); + +#define EADM_TIMEOUT (5 * HZ) +static DEFINE_SPINLOCK(list_lock); +static LIST_HEAD(eadm_list); + +static debug_info_t *eadm_debug; + +#define EADM_LOG(imp, txt) do { \ + debug_text_event(eadm_debug, imp, txt); \ + } while (0) + +static void EADM_LOG_HEX(int level, void *data, int length) +{ + if (!debug_level_enabled(eadm_debug, level)) + return; + while (length > 0) { + debug_event(eadm_debug, level, data, length); + length -= eadm_debug->buf_size; + data += eadm_debug->buf_size; + } +} + +static void orb_init(union orb *orb) +{ + memset(orb, 0, sizeof(union orb)); + orb->eadm.compat1 = 1; + orb->eadm.compat2 = 1; + orb->eadm.fmt = 1; + orb->eadm.x = 1; +} + +static int eadm_subchannel_start(struct subchannel *sch, struct aob *aob) +{ + union orb *orb = &get_eadm_private(sch)->orb; + int cc; + + orb_init(orb); + orb->eadm.aob = (u32)__pa(aob); + orb->eadm.intparm = (u32)(addr_t)sch; + orb->eadm.key = PAGE_DEFAULT_KEY >> 4; + + EADM_LOG(6, "start"); + EADM_LOG_HEX(6, &sch->schid, sizeof(sch->schid)); + + cc = ssch(sch->schid, orb); + switch (cc) { + case 0: + sch->schib.scsw.eadm.actl |= SCSW_ACTL_START_PEND; + break; + case 1: /* status pending */ + case 2: /* busy */ + return -EBUSY; + case 3: /* not operational */ + return -ENODEV; + } + return 0; +} + +static int eadm_subchannel_clear(struct subchannel *sch) +{ + int cc; + + cc = csch(sch->schid); + if (cc) + return -ENODEV; + + sch->schib.scsw.eadm.actl |= SCSW_ACTL_CLEAR_PEND; + return 0; +} + +static void eadm_subchannel_timeout(unsigned long data) +{ + struct subchannel *sch = (struct subchannel *) data; + + spin_lock_irq(sch->lock); + EADM_LOG(1, "timeout"); + EADM_LOG_HEX(1, &sch->schid, sizeof(sch->schid)); + if (eadm_subchannel_clear(sch)) + EADM_LOG(0, "clear failed"); + spin_unlock_irq(sch->lock); +} + +static void eadm_subchannel_set_timeout(struct subchannel *sch, int expires) +{ + struct eadm_private *private = get_eadm_private(sch); + + if (expires == 0) { + del_timer(&private->timer); + return; + } + if (timer_pending(&private->timer)) { + if (mod_timer(&private->timer, jiffies + expires)) + return; + } + private->timer.function = eadm_subchannel_timeout; + private->timer.data = (unsigned long) sch; + private->timer.expires = jiffies + expires; + add_timer(&private->timer); +} + +static void eadm_subchannel_irq(struct subchannel *sch) +{ + struct eadm_private *private = get_eadm_private(sch); + struct eadm_scsw *scsw = &sch->schib.scsw.eadm; + struct irb *irb = &__get_cpu_var(cio_irb); + int error = 0; + + EADM_LOG(6, "irq"); + EADM_LOG_HEX(6, irb, sizeof(*irb)); + + inc_irq_stat(IRQIO_ADM); + + if ((scsw->stctl & (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)) + && scsw->eswf == 1 && irb->esw.eadm.erw.r) + error = -EIO; + + if (scsw->fctl & SCSW_FCTL_CLEAR_FUNC) + error = -ETIMEDOUT; + + eadm_subchannel_set_timeout(sch, 0); + + if (private->state != EADM_BUSY) { + EADM_LOG(1, 
"irq unsol"); + EADM_LOG_HEX(1, irb, sizeof(*irb)); + private->state = EADM_NOT_OPER; + css_sched_sch_todo(sch, SCH_TODO_EVAL); + return; + } + scm_irq_handler((struct aob *)(unsigned long)scsw->aob, error); + private->state = EADM_IDLE; + + if (private->completion) + complete(private->completion); +} + +static struct subchannel *eadm_get_idle_sch(void) +{ + struct eadm_private *private; + struct subchannel *sch; + unsigned long flags; + + spin_lock_irqsave(&list_lock, flags); + list_for_each_entry(private, &eadm_list, head) { + sch = private->sch; + spin_lock(sch->lock); + if (private->state == EADM_IDLE) { + private->state = EADM_BUSY; + list_move_tail(&private->head, &eadm_list); + spin_unlock(sch->lock); + spin_unlock_irqrestore(&list_lock, flags); + + return sch; + } + spin_unlock(sch->lock); + } + spin_unlock_irqrestore(&list_lock, flags); + + return NULL; +} + +int eadm_start_aob(struct aob *aob) +{ + struct eadm_private *private; + struct subchannel *sch; + unsigned long flags; + int ret; + + sch = eadm_get_idle_sch(); + if (!sch) + return -EBUSY; + + spin_lock_irqsave(sch->lock, flags); + eadm_subchannel_set_timeout(sch, EADM_TIMEOUT); + ret = eadm_subchannel_start(sch, aob); + if (!ret) + goto out_unlock; + + /* Handle start subchannel failure. */ + eadm_subchannel_set_timeout(sch, 0); + private = get_eadm_private(sch); + private->state = EADM_NOT_OPER; + css_sched_sch_todo(sch, SCH_TODO_EVAL); + +out_unlock: + spin_unlock_irqrestore(sch->lock, flags); + + return ret; +} +EXPORT_SYMBOL_GPL(eadm_start_aob); + +static int eadm_subchannel_probe(struct subchannel *sch) +{ + struct eadm_private *private; + int ret; + + private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA); + if (!private) + return -ENOMEM; + + INIT_LIST_HEAD(&private->head); + init_timer(&private->timer); + + spin_lock_irq(sch->lock); + set_eadm_private(sch, private); + private->state = EADM_IDLE; + private->sch = sch; + sch->isc = EADM_SCH_ISC; + ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch); + if (ret) { + set_eadm_private(sch, NULL); + spin_unlock_irq(sch->lock); + kfree(private); + goto out; + } + spin_unlock_irq(sch->lock); + + spin_lock_irq(&list_lock); + list_add(&private->head, &eadm_list); + spin_unlock_irq(&list_lock); + + if (dev_get_uevent_suppress(&sch->dev)) { + dev_set_uevent_suppress(&sch->dev, 0); + kobject_uevent(&sch->dev.kobj, KOBJ_ADD); + } +out: + return ret; +} + +static void eadm_quiesce(struct subchannel *sch) +{ + struct eadm_private *private = get_eadm_private(sch); + DECLARE_COMPLETION_ONSTACK(completion); + int ret; + + spin_lock_irq(sch->lock); + if (private->state != EADM_BUSY) + goto disable; + + if (eadm_subchannel_clear(sch)) + goto disable; + + private->completion = &completion; + spin_unlock_irq(sch->lock); + + wait_for_completion_io(&completion); + + spin_lock_irq(sch->lock); + private->completion = NULL; + +disable: + eadm_subchannel_set_timeout(sch, 0); + do { + ret = cio_disable_subchannel(sch); + } while (ret == -EBUSY); + + spin_unlock_irq(sch->lock); +} + +static int eadm_subchannel_remove(struct subchannel *sch) +{ + struct eadm_private *private = get_eadm_private(sch); + + spin_lock_irq(&list_lock); + list_del(&private->head); + spin_unlock_irq(&list_lock); + + eadm_quiesce(sch); + + spin_lock_irq(sch->lock); + set_eadm_private(sch, NULL); + spin_unlock_irq(sch->lock); + + kfree(private); + + return 0; +} + +static void eadm_subchannel_shutdown(struct subchannel *sch) +{ + eadm_quiesce(sch); +} + +static int eadm_subchannel_freeze(struct subchannel *sch) +{ 
+ return cio_disable_subchannel(sch); +} + +static int eadm_subchannel_restore(struct subchannel *sch) +{ + return cio_enable_subchannel(sch, (u32)(unsigned long)sch); +} + +/** + * eadm_subchannel_sch_event - process subchannel event + * @sch: subchannel + * @process: non-zero if function is called in process context + * + * An unspecified event occurred for this subchannel. Adjust data according + * to the current operational state of the subchannel. Return zero when the + * event has been handled sufficiently or -EAGAIN when this function should + * be called again in process context. + */ +static int eadm_subchannel_sch_event(struct subchannel *sch, int process) +{ + struct eadm_private *private; + unsigned long flags; + int ret = 0; + + spin_lock_irqsave(sch->lock, flags); + if (!device_is_registered(&sch->dev)) + goto out_unlock; + + if (work_pending(&sch->todo_work)) + goto out_unlock; + + if (cio_update_schib(sch)) { + css_sched_sch_todo(sch, SCH_TODO_UNREG); + goto out_unlock; + } + private = get_eadm_private(sch); + if (private->state == EADM_NOT_OPER) + private->state = EADM_IDLE; + +out_unlock: + spin_unlock_irqrestore(sch->lock, flags); + + return ret; +} + +static struct css_device_id eadm_subchannel_ids[] = { + { .match_flags = 0x1, .type = SUBCHANNEL_TYPE_ADM, }, + { /* end of list */ }, +}; +MODULE_DEVICE_TABLE(css, eadm_subchannel_ids); + +static struct css_driver eadm_subchannel_driver = { + .drv = { + .name = "eadm_subchannel", + .owner = THIS_MODULE, + }, + .subchannel_type = eadm_subchannel_ids, + .irq = eadm_subchannel_irq, + .probe = eadm_subchannel_probe, + .remove = eadm_subchannel_remove, + .shutdown = eadm_subchannel_shutdown, + .sch_event = eadm_subchannel_sch_event, + .freeze = eadm_subchannel_freeze, + .thaw = eadm_subchannel_restore, + .restore = eadm_subchannel_restore, +}; + +static int __init eadm_sch_init(void) +{ + int ret; + + if (!css_general_characteristics.eadm) + return -ENXIO; + + eadm_debug = debug_register("eadm_log", 16, 1, 16); + if (!eadm_debug) + return -ENOMEM; + + debug_register_view(eadm_debug, &debug_hex_ascii_view); + debug_set_level(eadm_debug, 2); + + isc_register(EADM_SCH_ISC); + ret = css_driver_register(&eadm_subchannel_driver); + if (ret) + goto cleanup; + + return ret; + +cleanup: + isc_unregister(EADM_SCH_ISC); + debug_unregister(eadm_debug); + return ret; +} + +static void __exit eadm_sch_exit(void) +{ + css_driver_unregister(&eadm_subchannel_driver); + isc_unregister(EADM_SCH_ISC); + debug_unregister(eadm_debug); +} +module_init(eadm_sch_init); +module_exit(eadm_sch_exit); diff --git a/drivers/s390/cio/eadm_sch.h b/drivers/s390/cio/eadm_sch.h new file mode 100644 index 00000000000..9664e4653f9 --- /dev/null +++ b/drivers/s390/cio/eadm_sch.h @@ -0,0 +1,22 @@ +#ifndef EADM_SCH_H +#define EADM_SCH_H + +#include <linux/completion.h> +#include <linux/device.h> +#include <linux/timer.h> +#include <linux/list.h> +#include "orb.h" + +struct eadm_private { + union orb orb; + enum {EADM_IDLE, EADM_BUSY, EADM_NOT_OPER} state; + struct completion *completion; + struct subchannel *sch; + struct timer_list timer; + struct list_head head; +} __aligned(8); + +#define get_eadm_private(n) ((struct eadm_private *)dev_get_drvdata(&n->dev)) +#define set_eadm_private(n, p) (dev_set_drvdata(&n->dev, p)) + +#endif diff --git a/drivers/s390/cio/idset.c b/drivers/s390/cio/idset.c index 4d10981c7cc..5a999084a22 100644 --- a/drivers/s390/cio/idset.c +++ b/drivers/s390/cio/idset.c @@ -1,11 +1,10 @@ /* - * drivers/s390/cio/idset.c - * - * Copyright IBM 
Corp. 2007 + * Copyright IBM Corp. 2007, 2012 * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com> */ #include <linux/vmalloc.h> +#include <linux/bitmap.h> #include <linux/bitops.h> #include "idset.h" #include "css.h" @@ -18,7 +17,7 @@ struct idset { static inline unsigned long bitmap_size(int num_ssid, int num_id) { - return __BITOPS_WORDS(num_ssid * num_id) * sizeof(unsigned long); + return BITS_TO_LONGS(num_ssid * num_id) * sizeof(unsigned long); } static struct idset *idset_new(int num_ssid, int num_id) @@ -91,6 +90,14 @@ void idset_sch_del(struct idset *set, struct subchannel_id schid) idset_del(set, schid.ssid, schid.sch_no); } +/* Clear ids starting from @schid up to end of subchannel set. */ +void idset_sch_del_subseq(struct idset *set, struct subchannel_id schid) +{ + int pos = schid.ssid * set->num_id + schid.sch_no; + + bitmap_clear(set->bitmap, pos, set->num_id - schid.sch_no); +} + int idset_sch_contains(struct idset *set, struct subchannel_id schid) { return idset_contains(set, schid.ssid, schid.sch_no); @@ -113,20 +120,12 @@ int idset_sch_get_first(struct idset *set, struct subchannel_id *schid) int idset_is_empty(struct idset *set) { - int bitnum; - - bitnum = find_first_bit(set->bitmap, set->num_ssid * set->num_id); - if (bitnum >= set->num_ssid * set->num_id) - return 1; - return 0; + return bitmap_empty(set->bitmap, set->num_ssid * set->num_id); } void idset_add_set(struct idset *to, struct idset *from) { - unsigned long i, len; + int len = min(to->num_ssid * to->num_id, from->num_ssid * from->num_id); - len = min(__BITOPS_WORDS(to->num_ssid * to->num_id), - __BITOPS_WORDS(from->num_ssid * from->num_id)); - for (i = 0; i < len ; i++) - to->bitmap[i] |= from->bitmap[i]; + bitmap_or(to->bitmap, to->bitmap, from->bitmap, len); } diff --git a/drivers/s390/cio/idset.h b/drivers/s390/cio/idset.h index 7543da4529f..06d3bc01bb0 100644 --- a/drivers/s390/cio/idset.h +++ b/drivers/s390/cio/idset.h @@ -1,7 +1,5 @@ /* - * drivers/s390/cio/idset.h - * - * Copyright IBM Corp. 2007 + * Copyright IBM Corp. 2007, 2012 * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com> */ @@ -19,6 +17,7 @@ void idset_fill(struct idset *set); struct idset *idset_sch_new(void); void idset_sch_add(struct idset *set, struct subchannel_id id); void idset_sch_del(struct idset *set, struct subchannel_id id); +void idset_sch_del_subseq(struct idset *set, struct subchannel_id schid); int idset_sch_contains(struct idset *set, struct subchannel_id id); int idset_sch_get_first(struct idset *set, struct subchannel_id *id); int idset_is_empty(struct idset *set); diff --git a/drivers/s390/cio/io_sch.h b/drivers/s390/cio/io_sch.h index b9ce712a7f2..b108f4a5c7d 100644 --- a/drivers/s390/cio/io_sch.h +++ b/drivers/s390/cio/io_sch.h @@ -4,69 +4,38 @@ #include <linux/types.h> #include <asm/schid.h> #include <asm/ccwdev.h> +#include <asm/irq.h> #include "css.h" - -/* - * command-mode operation request block - */ -struct cmd_orb { - u32 intparm; /* interruption parameter */ - u32 key : 4; /* flags, like key, suspend control, etc. */ - u32 spnd : 1; /* suspend control */ - u32 res1 : 1; /* reserved */ - u32 mod : 1; /* modification control */ - u32 sync : 1; /* synchronize control */ - u32 fmt : 1; /* format control */ - u32 pfch : 1; /* prefetch control */ - u32 isic : 1; /* initial-status-interruption control */ - u32 alcc : 1; /* address-limit-checking control */ - u32 ssic : 1; /* suppress-suspended-interr. 
control */ - u32 res2 : 1; /* reserved */ - u32 c64 : 1; /* IDAW/QDIO 64 bit control */ - u32 i2k : 1; /* IDAW 2/4kB block size control */ - u32 lpm : 8; /* logical path mask */ - u32 ils : 1; /* incorrect length */ - u32 zero : 6; /* reserved zeros */ - u32 orbx : 1; /* ORB extension control */ - u32 cpa; /* channel program address */ -} __attribute__ ((packed, aligned(4))); - -/* - * transport-mode operation request block - */ -struct tm_orb { - u32 intparm; - u32 key:4; - u32 :9; - u32 b:1; - u32 :2; - u32 lpm:8; - u32 :7; - u32 x:1; - u32 tcw; - u32 prio:8; - u32 :8; - u32 rsvpgm:8; - u32 :8; - u32 :32; - u32 :32; - u32 :32; - u32 :32; -} __attribute__ ((packed, aligned(4))); - -union orb { - struct cmd_orb cmd; - struct tm_orb tm; -} __attribute__ ((packed, aligned(4))); +#include "orb.h" struct io_subchannel_private { union orb orb; /* operation request block */ struct ccw1 sense_ccw; /* static ccw for sense command */ -} __attribute__ ((aligned(8))); + struct ccw_device *cdev;/* pointer to the child ccw device */ + struct { + unsigned int suspend:1; /* allow suspend */ + unsigned int prefetch:1;/* deny prefetch */ + unsigned int inter:1; /* suppress intermediate interrupts */ + } __packed options; +} __aligned(8); -#define to_io_private(n) ((struct io_subchannel_private *)n->private) -#define sch_get_cdev(n) (dev_get_drvdata(&n->dev)) -#define sch_set_cdev(n, c) (dev_set_drvdata(&n->dev, c)) +#define to_io_private(n) ((struct io_subchannel_private *) \ + dev_get_drvdata(&(n)->dev)) +#define set_io_private(n, p) (dev_set_drvdata(&(n)->dev, p)) + +static inline struct ccw_device *sch_get_cdev(struct subchannel *sch) +{ + struct io_subchannel_private *priv = to_io_private(sch); + return priv ? priv->cdev : NULL; +} + +static inline void sch_set_cdev(struct subchannel *sch, + struct ccw_device *cdev) +{ + struct io_subchannel_private *priv = to_io_private(sch); + if (priv) + priv->cdev = cdev; +} #define MAX_CIWS 8 @@ -92,11 +61,12 @@ enum io_status { * @filter: optional callback to adjust request status based on IRB data * @callback: final callback * @data: user-defined pointer passed to all callbacks + * @singlepath: if set, use only one path from @lpm per start I/O + * @cancel: non-zero if request was cancelled + * @done: non-zero if request was finished * @mask: current path mask * @retries: current number of retries * @drc: delayed return code - * @cancel: non-zero if request was cancelled - * @done: non-zero if request was finished */ struct ccw_request { struct ccw1 *cp; @@ -108,12 +78,13 @@ struct ccw_request { enum io_status); void (*callback)(struct ccw_device *, void *, int); void *data; + unsigned int singlepath:1; /* These fields are used internally. 
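+	 * They belong to the common I/O request machinery (ccwreq.c) and must not be touched by users of struct ccw_request.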
*/ + unsigned int cancel:1; + unsigned int done:1; u16 mask; u16 retries; int drc; - int cancel:1; - int done:1; } __attribute__((packed)); /* @@ -140,6 +111,9 @@ enum cdev_todo { CDEV_TODO_UNREG_EVAL, }; +#define FAKE_CMD_IRB 1 +#define FAKE_TM_IRB 2 + struct ccw_device_private { struct ccw_device *cdev; struct subchannel *sch; @@ -149,8 +123,15 @@ struct ccw_device_private { struct subchannel_id schid; /* subchannel number */ struct ccw_request req; /* internal I/O request */ int iretry; - u8 pgid_valid_mask; /* mask of valid PGIDs */ - u8 pgid_todo_mask; /* mask of PGIDs to be adjusted */ + u8 pgid_valid_mask; /* mask of valid PGIDs */ + u8 pgid_todo_mask; /* mask of PGIDs to be adjusted */ + u8 pgid_reset_mask; /* mask of PGIDs which were reset */ + u8 path_noirq_mask; /* mask of paths for which no irq was + received */ + u8 path_notoper_mask; /* mask of paths which were found + not operable */ + u8 path_gone_mask; /* mask of paths, that became unavailable */ + u8 path_new_mask; /* mask of paths, that became available */ struct { unsigned int fast:1; /* post with "channel end" */ unsigned int repall:1; /* report every interrupt status */ @@ -164,10 +145,11 @@ struct ccw_device_private { unsigned int doverify:1; /* delayed path verification */ unsigned int donotify:1; /* call notify function */ unsigned int recog_done:1; /* dev. recog. complete */ - unsigned int fake_irb:1; /* deliver faked irb */ + unsigned int fake_irb:2; /* deliver faked irb */ unsigned int resuming:1; /* recognition while resume */ unsigned int pgroup:1; /* pathgroup is set up */ unsigned int mpath:1; /* multipathing is set up */ + unsigned int pgid_unknown:1;/* unknown pgid state */ unsigned int initialized:1; /* set if initial reference held */ } __attribute__((packed)) flags; unsigned long intparm; /* user interruption parameter */ @@ -184,25 +166,9 @@ struct ccw_device_private { struct list_head cmb_list; /* list of measured devices */ u64 cmb_start_time; /* clock value of cmb reset */ void *cmb_wait; /* deferred cmb enable/disable */ + enum interruption_class int_class; }; -static inline int ssch(struct subchannel_id schid, union orb *addr) -{ - register struct subchannel_id reg1 asm("1") = schid; - int ccode = -EIO; - - asm volatile( - " ssch 0(%2)\n" - "0: ipm %0\n" - " srl %0,28\n" - "1:\n" - EX_TABLE(0b, 1b) - : "+d" (ccode) - : "d" (reg1), "a" (addr), "m" (*addr) - : "cc", "memory"); - return ccode; -} - static inline int rsch(struct subchannel_id schid) { register struct subchannel_id reg1 asm("1") = schid; @@ -218,21 +184,6 @@ static inline int rsch(struct subchannel_id schid) return ccode; } -static inline int csch(struct subchannel_id schid) -{ - register struct subchannel_id reg1 asm("1") = schid; - int ccode; - - asm volatile( - " csch\n" - " ipm %0\n" - " srl %0,28" - : "=d" (ccode) - : "d" (reg1) - : "cc"); - return ccode; -} - static inline int hsch(struct subchannel_id schid) { register struct subchannel_id reg1 asm("1") = schid; diff --git a/drivers/s390/cio/ioasm.h b/drivers/s390/cio/ioasm.h index 75926279263..4d80fc67a06 100644 --- a/drivers/s390/cio/ioasm.h +++ b/drivers/s390/cio/ioasm.h @@ -3,6 +3,8 @@ #include <asm/chpid.h> #include <asm/schid.h> +#include "orb.h" +#include "cio.h" /* * TPI info structure @@ -23,21 +25,6 @@ struct tpi_info { * Some S390 specific IO instructions as inline */ -static inline int stsch(struct subchannel_id schid, struct schib *addr) -{ - register struct subchannel_id reg1 asm ("1") = schid; - int ccode; - - asm volatile( - " stsch 0(%3)\n" - " ipm %0\n" - " srl 
%0,28" - : "=d" (ccode), "=m" (*addr) - : "d" (reg1), "a" (addr) - : "cc"); - return ccode; -} - static inline int stsch_err(struct subchannel_id schid, struct schib *addr) { register struct subchannel_id reg1 asm ("1") = schid; @@ -102,6 +89,38 @@ static inline int tsch(struct subchannel_id schid, struct irb *addr) return ccode; } +static inline int ssch(struct subchannel_id schid, union orb *addr) +{ + register struct subchannel_id reg1 asm("1") = schid; + int ccode = -EIO; + + asm volatile( + " ssch 0(%2)\n" + "0: ipm %0\n" + " srl %0,28\n" + "1:\n" + EX_TABLE(0b, 1b) + : "+d" (ccode) + : "d" (reg1), "a" (addr), "m" (*addr) + : "cc", "memory"); + return ccode; +} + +static inline int csch(struct subchannel_id schid) +{ + register struct subchannel_id reg1 asm("1") = schid; + int ccode; + + asm volatile( + " csch\n" + " ipm %0\n" + " srl %0,28" + : "=d" (ccode) + : "d" (reg1) + : "cc"); + return ccode; +} + static inline int tpi(struct tpi_info *addr) { int ccode; diff --git a/drivers/s390/cio/itcw.c b/drivers/s390/cio/itcw.c index 17da9ab932e..358ee16d10a 100644 --- a/drivers/s390/cio/itcw.c +++ b/drivers/s390/cio/itcw.c @@ -42,7 +42,7 @@ * size_t size; * * size = itcw_calc_size(1, 2, 0); - * buffer = kmalloc(size, GFP_DMA); + * buffer = kmalloc(size, GFP_KERNEL | GFP_DMA); * if (!buffer) * return -ENOMEM; * itcw = itcw_init(buffer, size, ITCW_OP_READ, 1, 2, 0); @@ -93,6 +93,7 @@ EXPORT_SYMBOL(itcw_get_tcw); size_t itcw_calc_size(int intrg, int max_tidaws, int intrg_max_tidaws) { size_t len; + int cross_count; /* Main data. */ len = sizeof(struct itcw); @@ -105,12 +106,27 @@ size_t itcw_calc_size(int intrg, int max_tidaws, int intrg_max_tidaws) /* TSB */ sizeof(struct tsb) + /* TIDAL */ intrg_max_tidaws * sizeof(struct tidaw); } + /* Maximum required alignment padding. */ len += /* Initial TCW */ 63 + /* Interrogate TCCB */ 7; - /* Maximum padding for structures that may not cross 4k boundary. */ - if ((max_tidaws > 0) || (intrg_max_tidaws > 0)) - len += max(max_tidaws, intrg_max_tidaws) * - sizeof(struct tidaw) - 1; + + /* TIDAW lists may not cross a 4k boundary. To cross a + * boundary we need to add a TTIC TIDAW. We need to reserve + * one additional TIDAW for a TTIC that we may need to add due + * to the placement of the data chunk in memory, and a further + * TIDAW for each page boundary that the TIDAW list may cross + * due to it's own size. + */ + if (max_tidaws) { + cross_count = 1 + ((max_tidaws * sizeof(struct tidaw) - 1) + >> PAGE_SHIFT); + len += cross_count * sizeof(struct tidaw); + } + if (intrg_max_tidaws) { + cross_count = 1 + ((intrg_max_tidaws * sizeof(struct tidaw) - 1) + >> PAGE_SHIFT); + len += cross_count * sizeof(struct tidaw); + } return len; } EXPORT_SYMBOL(itcw_calc_size); @@ -165,6 +181,7 @@ struct itcw *itcw_init(void *buffer, size_t size, int op, int intrg, void *chunk; addr_t start; addr_t end; + int cross_count; /* Check for 2G limit. 
*/ start = (addr_t) buffer; @@ -177,8 +194,17 @@ struct itcw *itcw_init(void *buffer, size_t size, int op, int intrg, if (IS_ERR(chunk)) return chunk; itcw = chunk; - itcw->max_tidaws = max_tidaws; - itcw->intrg_max_tidaws = intrg_max_tidaws; + /* allow for TTIC tidaws that may be needed to cross a page boundary */ + cross_count = 0; + if (max_tidaws) + cross_count = 1 + ((max_tidaws * sizeof(struct tidaw) - 1) + >> PAGE_SHIFT); + itcw->max_tidaws = max_tidaws + cross_count; + cross_count = 0; + if (intrg_max_tidaws) + cross_count = 1 + ((intrg_max_tidaws * sizeof(struct tidaw) - 1) + >> PAGE_SHIFT); + itcw->intrg_max_tidaws = intrg_max_tidaws + cross_count; /* Main TCW. */ chunk = fit_chunk(&start, end, sizeof(struct tcw), 64, 0); if (IS_ERR(chunk)) @@ -198,7 +224,7 @@ struct itcw *itcw_init(void *buffer, size_t size, int op, int intrg, /* Data TIDAL. */ if (max_tidaws > 0) { chunk = fit_chunk(&start, end, sizeof(struct tidaw) * - max_tidaws, 16, 1); + itcw->max_tidaws, 16, 0); if (IS_ERR(chunk)) return chunk; tcw_set_data(itcw->tcw, chunk, 1); @@ -206,7 +232,7 @@ struct itcw *itcw_init(void *buffer, size_t size, int op, int intrg, /* Interrogate data TIDAL. */ if (intrg && (intrg_max_tidaws > 0)) { chunk = fit_chunk(&start, end, sizeof(struct tidaw) * - intrg_max_tidaws, 16, 1); + itcw->intrg_max_tidaws, 16, 0); if (IS_ERR(chunk)) return chunk; tcw_set_data(itcw->intrg_tcw, chunk, 1); @@ -283,13 +309,29 @@ EXPORT_SYMBOL(itcw_add_dcw); * the new tidaw on success or -%ENOSPC if the new tidaw would exceed the * available space. * - * Note: the tidaw-list is assumed to be contiguous with no ttics. The - * last-tidaw flag for the last tidaw in the list will be set by itcw_finalize. + * Note: TTIC tidaws are automatically added when needed, so explicitly calling + * this interface with the TTIC flag is not supported. The last-tidaw flag + * for the last tidaw in the list will be set by itcw_finalize. */ struct tidaw *itcw_add_tidaw(struct itcw *itcw, u8 flags, void *addr, u32 count) { + struct tidaw *following; + if (itcw->num_tidaws >= itcw->max_tidaws) return ERR_PTR(-ENOSPC); + /* + * Is the tidaw, which follows the one we are about to fill, on the next + * page? Then we have to insert a TTIC tidaw first, that points to the + * tidaw on the new page. + */ + following = ((struct tidaw *) tcw_get_data(itcw->tcw)) + + itcw->num_tidaws + 1; + if (itcw->num_tidaws && !((unsigned long) following & ~PAGE_MASK)) { + tcw_add_tidaw(itcw->tcw, itcw->num_tidaws++, + TIDAW_FLAGS_TTIC, following, 0); + if (itcw->num_tidaws >= itcw->max_tidaws) + return ERR_PTR(-ENOSPC); + } return tcw_add_tidaw(itcw->tcw, itcw->num_tidaws++, flags, addr, count); } EXPORT_SYMBOL(itcw_add_tidaw); diff --git a/drivers/s390/cio/orb.h b/drivers/s390/cio/orb.h new file mode 100644 index 00000000000..7a640530e7f --- /dev/null +++ b/drivers/s390/cio/orb.h @@ -0,0 +1,91 @@ +/* + * Orb related data structures. + * + * Copyright IBM Corp. 2007, 2011 + * + * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com> + * Peter Oberparleiter <peter.oberparleiter@de.ibm.com> + * Sebastian Ott <sebott@linux.vnet.ibm.com> + */ + +#ifndef S390_ORB_H +#define S390_ORB_H + +/* + * Command-mode operation request block + */ +struct cmd_orb { + u32 intparm; /* interruption parameter */ + u32 key:4; /* flags, like key, suspend control, etc. 
*/ + u32 spnd:1; /* suspend control */ + u32 res1:1; /* reserved */ + u32 mod:1; /* modification control */ + u32 sync:1; /* synchronize control */ + u32 fmt:1; /* format control */ + u32 pfch:1; /* prefetch control */ + u32 isic:1; /* initial-status-interruption control */ + u32 alcc:1; /* address-limit-checking control */ + u32 ssic:1; /* suppress-suspended-interr. control */ + u32 res2:1; /* reserved */ + u32 c64:1; /* IDAW/QDIO 64 bit control */ + u32 i2k:1; /* IDAW 2/4kB block size control */ + u32 lpm:8; /* logical path mask */ + u32 ils:1; /* incorrect length */ + u32 zero:6; /* reserved zeros */ + u32 orbx:1; /* ORB extension control */ + u32 cpa; /* channel program address */ +} __packed __aligned(4); + +/* + * Transport-mode operation request block + */ +struct tm_orb { + u32 intparm; + u32 key:4; + u32:9; + u32 b:1; + u32:2; + u32 lpm:8; + u32:7; + u32 x:1; + u32 tcw; + u32 prio:8; + u32:8; + u32 rsvpgm:8; + u32:8; + u32:32; + u32:32; + u32:32; + u32:32; +} __packed __aligned(4); + +/* + * eadm operation request block + */ +struct eadm_orb { + u32 intparm; + u32 key:4; + u32:4; + u32 compat1:1; + u32 compat2:1; + u32:21; + u32 x:1; + u32 aob; + u32 css_prio:8; + u32:8; + u32 scm_prio:8; + u32:8; + u32:29; + u32 fmt:3; + u32:32; + u32:32; + u32:32; +} __packed __aligned(4); + +union orb { + struct cmd_orb cmd; + struct tm_orb tm; + struct eadm_orb eadm; +} __packed __aligned(4); + +#endif /* S390_ORB_H */ diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h index 48aa0647432..a563e4c0059 100644 --- a/drivers/s390/cio/qdio.h +++ b/drivers/s390/cio/qdio.h @@ -1,7 +1,5 @@ /* - * linux/drivers/s390/cio/qdio.h - * - * Copyright 2000,2009 IBM Corp. + * Copyright IBM Corp. 2000, 2009 * Author(s): Utz Bacher <utz.bacher@de.ibm.com> * Jan Glauber <jang@linux.vnet.ibm.com> */ @@ -13,16 +11,10 @@ #include <asm/debug.h> #include "chsc.h" -#define QDIO_BUSY_BIT_PATIENCE 100 /* 100 microseconds */ -#define QDIO_INPUT_THRESHOLD 500 /* 500 microseconds */ - -/* - * if an asynchronous HiperSockets queue runs full, the 10 seconds timer wait - * till next initiative to give transmitted skbs back to the stack is too long. - * Therefore polling is started in case of multicast queue is filled more - * than 50 percent. 
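For orientation, here is a hedged sketch of how a command-mode ORB from the new union might be filled before being handed to the ssch() wrapper introduced above. The flag choices (format-1 CCWs, 64-bit IDAWs, all paths enabled) and the helper name are illustrative assumptions, not mandated by this header:

    #include <linux/types.h>
    #include <linux/string.h>
    #include "orb.h"    /* union orb, as added above */

    /* Illustrative only: prepare a command-mode ORB for ssch(). */
    static void prepare_cmd_orb(union orb *orb, u32 intparm, u32 cpa)
    {
        memset(orb, 0, sizeof(*orb));
        orb->cmd.intparm = intparm; /* echoed back in the IRB */
        orb->cmd.fmt = 1;           /* format-1 CCWs */
        orb->cmd.c64 = 1;           /* 64-bit IDAWs */
        orb->cmd.lpm = 0xff;        /* try all logical paths */
        orb->cmd.cpa = cpa;         /* channel program address */
    }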
- */ -#define QDIO_IQDIO_POLL_LVL 65 /* HS multicast queue */ +#define QDIO_BUSY_BIT_PATIENCE (100 << 12) /* 100 microseconds */ +#define QDIO_BUSY_BIT_RETRY_DELAY 10 /* 10 milliseconds */ +#define QDIO_BUSY_BIT_RETRIES 1000 /* = 10s retry time */ +#define QDIO_INPUT_THRESHOLD (500 << 12) /* 500 microseconds */ enum qdio_irq_states { QDIO_IRQ_STATE_INACTIVE, @@ -42,6 +34,7 @@ enum qdio_irq_states { #define SLSB_STATE_NOT_INIT 0x0 #define SLSB_STATE_EMPTY 0x1 #define SLSB_STATE_PRIMED 0x2 +#define SLSB_STATE_PENDING 0x3 #define SLSB_STATE_HALTED 0xe #define SLSB_STATE_ERROR 0xf #define SLSB_TYPE_INPUT 0x0 @@ -65,6 +58,8 @@ enum qdio_irq_states { (SLSB_OWNER_PROG | SLSB_TYPE_OUTPUT | SLSB_STATE_NOT_INIT) /* 0xa0 */ #define SLSB_P_OUTPUT_EMPTY \ (SLSB_OWNER_PROG | SLSB_TYPE_OUTPUT | SLSB_STATE_EMPTY) /* 0xa1 */ +#define SLSB_P_OUTPUT_PENDING \ + (SLSB_OWNER_PROG | SLSB_TYPE_OUTPUT | SLSB_STATE_PENDING) /* 0xa3 */ #define SLSB_CU_OUTPUT_PRIMED \ (SLSB_OWNER_CU | SLSB_TYPE_OUTPUT | SLSB_STATE_PRIMED) /* 0x62 */ #define SLSB_P_OUTPUT_HALTED \ @@ -82,14 +77,12 @@ enum qdio_irq_states { #define CHSC_FLAG_QDIO_CAPABILITY 0x80 #define CHSC_FLAG_VALIDITY 0x40 -/* qdio adapter-characteristics-1 flag */ -#define AC1_SIGA_INPUT_NEEDED 0x40 /* process input queues */ -#define AC1_SIGA_OUTPUT_NEEDED 0x20 /* process output queues */ -#define AC1_SIGA_SYNC_NEEDED 0x10 /* ask hypervisor to sync */ -#define AC1_AUTOMATIC_SYNC_ON_THININT 0x08 /* set by hypervisor */ -#define AC1_AUTOMATIC_SYNC_ON_OUT_PCI 0x04 /* set by hypervisor */ -#define AC1_SC_QEBSM_AVAILABLE 0x02 /* available for subchannel */ -#define AC1_SC_QEBSM_ENABLED 0x01 /* enabled for subchannel */ +/* SIGA flags */ +#define QDIO_SIGA_WRITE 0x00 +#define QDIO_SIGA_READ 0x01 +#define QDIO_SIGA_SYNC 0x02 +#define QDIO_SIGA_WRITEQ 0x04 +#define QDIO_SIGA_QEBSM_FLAG 0x80 #ifdef CONFIG_64BIT static inline int do_sqbs(u64 token, unsigned char state, int queue, @@ -142,44 +135,9 @@ struct siga_flag { u8 input:1; u8 output:1; u8 sync:1; - u8 no_sync_ti:1; - u8 no_sync_out_ti:1; - u8 no_sync_out_pci:1; - u8:2; -} __attribute__ ((packed)); - -struct chsc_ssqd_area { - struct chsc_header request; - u16:10; - u8 ssid:2; - u8 fmt:4; - u16 first_sch; - u16:16; - u16 last_sch; - u32:32; - struct chsc_header response; - u32:32; - struct qdio_ssqd_desc qdio_ssqd; -} __attribute__ ((packed)); - -struct scssc_area { - struct chsc_header request; - u16 operation_code; - u16:16; - u32:32; - u32:32; - u64 summary_indicator_addr; - u64 subchannel_indicator_addr; - u32 ks:4; - u32 kc:4; - u32:21; - u32 isc:3; - u32 word_with_d_bit; - u32:32; - struct subchannel_id schid; - u32 reserved[1004]; - struct chsc_header response; - u32:32; + u8 sync_after_ai:1; + u8 sync_out_after_pci:1; + u8:3; } __attribute__ ((packed)); struct qdio_dev_perf_stat { @@ -202,12 +160,14 @@ struct qdio_dev_perf_stat { unsigned int inbound_queue_full; unsigned int outbound_call; unsigned int outbound_handler; + unsigned int outbound_queue_full; unsigned int fast_requeue; unsigned int target_full; unsigned int eqbs; unsigned int eqbs_partial; unsigned int sqbs; unsigned int sqbs_partial; + unsigned int int_discarded; } ____cacheline_aligned; struct qdio_queue_perf_stat { @@ -222,6 +182,10 @@ struct qdio_queue_perf_stat { unsigned int nr_sbal_total; }; +enum qdio_queue_irq_states { + QDIO_QUEUE_IRQS_DISABLED, +}; + struct qdio_input_q { /* input buffer acknowledgement flag */ int polling; @@ -231,15 +195,25 @@ struct qdio_input_q { int ack_count; /* last time of noticing incoming data */ u64 
timestamp; + /* upper-layer polling flag */ + unsigned long queue_irq_state; + /* callback to start upper-layer polling */ + void (*queue_start_poll) (struct ccw_device *, int, unsigned long); }; struct qdio_output_q { /* PCIs are enabled for the queue */ int pci_out_enabled; - /* IQDIO: output multiple buffers (enhanced SIGA) */ - int use_enh_siga; + /* cq: use asynchronous output buffers */ + int use_cq; + /* cq: aobs used for particular SBAL */ + struct qaob **aobs; + /* cq: sbal state related to asynchronous operation */ + struct qdio_outbuf_state *sbal_state; /* timer to check for more outbound work */ struct timer_list timer; + /* used SBALs before tasklet schedule */ + int scan_threshold; }; /* @@ -272,6 +246,9 @@ struct qdio_q { /* error condition during a data transfer */ unsigned int qdio_error; + /* last scan of the queue */ + u64 timestamp; + struct tasklet_struct tasklet; struct qdio_queue_perf_stat q_stats; @@ -296,10 +273,8 @@ struct qdio_q { struct qdio_irq *irq_ptr; struct sl *sl; /* - * Warning: Leave this member at the end so it won't be cleared in - * qdio_fill_qs. A page is allocated under this pointer and used for - * slib and sl. slib is 2048 bytes big and sl points to offset - * PAGE_SIZE / 2. + * A page is allocated under this pointer and used for slib and sl. + * slib is 2048 bytes big and sl points to offset PAGE_SIZE / 2. */ struct slib *slib; } __attribute__ ((aligned(256))); @@ -372,30 +347,24 @@ static inline int multicast_outbound(struct qdio_q *q) (q->nr == q->irq_ptr->nr_output_qs - 1); } -static inline unsigned long long get_usecs(void) -{ - return monotonic_clock() >> 12; -} - #define pci_out_supported(q) \ (q->irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED) #define is_qebsm(q) (q->irq_ptr->sch_token != 0) -#define need_siga_sync_thinint(q) (!q->irq_ptr->siga_flag.no_sync_ti) -#define need_siga_sync_out_thinint(q) (!q->irq_ptr->siga_flag.no_sync_out_ti) #define need_siga_in(q) (q->irq_ptr->siga_flag.input) #define need_siga_out(q) (q->irq_ptr->siga_flag.output) -#define need_siga_sync(q) (q->irq_ptr->siga_flag.sync) -#define siga_syncs_out_pci(q) (q->irq_ptr->siga_flag.no_sync_out_pci) - -#define for_each_input_queue(irq_ptr, q, i) \ - for (i = 0, q = irq_ptr->input_qs[0]; \ - i < irq_ptr->nr_input_qs; \ - q = irq_ptr->input_qs[++i]) -#define for_each_output_queue(irq_ptr, q, i) \ - for (i = 0, q = irq_ptr->output_qs[0]; \ - i < irq_ptr->nr_output_qs; \ - q = irq_ptr->output_qs[++i]) +#define need_siga_sync(q) (unlikely(q->irq_ptr->siga_flag.sync)) +#define need_siga_sync_after_ai(q) \ + (unlikely(q->irq_ptr->siga_flag.sync_after_ai)) +#define need_siga_sync_out_after_pci(q) \ + (unlikely(q->irq_ptr->siga_flag.sync_out_after_pci)) + +#define for_each_input_queue(irq_ptr, q, i) \ + for (i = 0; i < irq_ptr->nr_input_qs && \ + ({ q = irq_ptr->input_qs[i]; 1; }); i++) +#define for_each_output_queue(irq_ptr, q, i) \ + for (i = 0; i < irq_ptr->nr_output_qs && \ + ({ q = irq_ptr->output_qs[i]; 1; }); i++) #define prev_buf(bufnr) \ ((bufnr + QDIO_MAX_BUFFERS_MASK) & QDIO_MAX_BUFFERS_MASK) @@ -406,6 +375,13 @@ static inline unsigned long long get_usecs(void) #define sub_buf(bufnr, dec) \ ((bufnr - dec) & QDIO_MAX_BUFFERS_MASK) +#define queue_irqs_enabled(q) \ + (test_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state) == 0) +#define queue_irqs_disabled(q) \ + (test_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state) != 0) + +extern u64 last_ai_time; + /* prototypes for thin interrupt */ void qdio_setup_thinint(struct qdio_irq *irq_ptr); int
qdio_establish_thinint(struct qdio_irq *irq_ptr); @@ -417,6 +393,8 @@ int tiqdio_allocate_memory(void); void tiqdio_free_memory(void); int tiqdio_register_thinints(void); void tiqdio_unregister_thinints(void); +void clear_nonshared_ind(struct qdio_irq *); +int test_nonshared_ind(struct qdio_irq *); /* prototypes for setup */ void qdio_inbound_processing(unsigned long data); @@ -438,6 +416,9 @@ int qdio_setup_create_sysfs(struct ccw_device *cdev); void qdio_setup_destroy_sysfs(struct ccw_device *cdev); int qdio_setup_init(void); void qdio_setup_exit(void); +int qdio_enable_async_operation(struct qdio_output_q *q); +void qdio_disable_async_operation(struct qdio_output_q *q); +struct qaob *qdio_allocate_aob(void); int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr, unsigned char *state); diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c index 6ce83f56d53..f1f3baa8e6e 100644 --- a/drivers/s390/cio/qdio_debug.c +++ b/drivers/s390/cio/qdio_debug.c @@ -1,12 +1,13 @@ /* - * drivers/s390/cio/qdio_debug.c - * - * Copyright IBM Corp. 2008,2009 + * Copyright IBM Corp. 2008, 2009 * * Author: Jan Glauber (jang@linux.vnet.ibm.com) */ #include <linux/seq_file.h> #include <linux/debugfs.h> +#include <linux/uaccess.h> +#include <linux/export.h> +#include <linux/slab.h> #include <asm/debug.h> #include "qdio_debug.h" #include "qdio.h" @@ -16,11 +17,51 @@ debug_info_t *qdio_dbf_error; static struct dentry *debugfs_root; #define QDIO_DEBUGFS_NAME_LEN 10 +#define QDIO_DBF_NAME_LEN 20 + +struct qdio_dbf_entry { + char dbf_name[QDIO_DBF_NAME_LEN]; + debug_info_t *dbf_info; + struct list_head dbf_list; +}; -void qdio_allocate_dbf(struct qdio_initialize *init_data, +static LIST_HEAD(qdio_dbf_list); +static DEFINE_MUTEX(qdio_dbf_list_mutex); + +static debug_info_t *qdio_get_dbf_entry(char *name) +{ + struct qdio_dbf_entry *entry; + debug_info_t *rc = NULL; + + mutex_lock(&qdio_dbf_list_mutex); + list_for_each_entry(entry, &qdio_dbf_list, dbf_list) { + if (strcmp(entry->dbf_name, name) == 0) { + rc = entry->dbf_info; + break; + } + } + mutex_unlock(&qdio_dbf_list_mutex); + return rc; +} + +static void qdio_clear_dbf_list(void) +{ + struct qdio_dbf_entry *entry, *tmp; + + mutex_lock(&qdio_dbf_list_mutex); + list_for_each_entry_safe(entry, tmp, &qdio_dbf_list, dbf_list) { + list_del(&entry->dbf_list); + debug_unregister(entry->dbf_info); + kfree(entry); + } + mutex_unlock(&qdio_dbf_list_mutex); +} + +int qdio_allocate_dbf(struct qdio_initialize *init_data, struct qdio_irq *irq_ptr) { - char text[20]; + char text[QDIO_DBF_NAME_LEN]; + struct qdio_dbf_entry *new_entry; DBF_EVENT("qfmt:%1d", init_data->q_format); DBF_HEX(init_data->adapter_name, 8); @@ -38,11 +79,34 @@ void qdio_allocate_dbf(struct qdio_initialize *init_data, DBF_EVENT("irq:%8lx", (unsigned long)irq_ptr); /* allocate trace view for the interface */ - snprintf(text, 20, "qdio_%s", dev_name(&init_data->cdev->dev)); - irq_ptr->debug_area = debug_register(text, 2, 1, 16); - debug_register_view(irq_ptr->debug_area, &debug_hex_ascii_view); - debug_set_level(irq_ptr->debug_area, DBF_WARN); - DBF_DEV_EVENT(DBF_ERR, irq_ptr, "dbf created"); + snprintf(text, QDIO_DBF_NAME_LEN, "qdio_%s", + dev_name(&init_data->cdev->dev)); + irq_ptr->debug_area = qdio_get_dbf_entry(text); + if (irq_ptr->debug_area) + DBF_DEV_EVENT(DBF_ERR, irq_ptr, "dbf reused"); + else { + irq_ptr->debug_area = debug_register(text, 2, 1, 16); + if (!irq_ptr->debug_area) + return -ENOMEM; + if (debug_register_view(irq_ptr->debug_area, + &debug_hex_ascii_view)) { + 
debug_unregister(irq_ptr->debug_area); + return -ENOMEM; + } + debug_set_level(irq_ptr->debug_area, DBF_WARN); + DBF_DEV_EVENT(DBF_ERR, irq_ptr, "dbf created"); + new_entry = kzalloc(sizeof(struct qdio_dbf_entry), GFP_KERNEL); + if (!new_entry) { + debug_unregister(irq_ptr->debug_area); + return -ENOMEM; + } + strlcpy(new_entry->dbf_name, text, QDIO_DBF_NAME_LEN); + new_entry->dbf_info = irq_ptr->debug_area; + mutex_lock(&qdio_dbf_list_mutex); + list_add(&new_entry->dbf_list, &qdio_dbf_list); + mutex_unlock(&qdio_dbf_list_mutex); + } + return 0; } static int qstat_show(struct seq_file *m, void *v) @@ -54,11 +118,20 @@ static int qstat_show(struct seq_file *m, void *v) if (!q) return 0; - seq_printf(m, "DSCI: %d nr_used: %d\n", - *(u32 *)q->irq_ptr->dsci, atomic_read(&q->nr_buf_used)); - seq_printf(m, "ftc: %d last_move: %d\n", q->first_to_check, q->last_move); - seq_printf(m, "polling: %d ack start: %d ack count: %d\n", - q->u.in.polling, q->u.in.ack_start, q->u.in.ack_count); + seq_printf(m, "Timestamp: %Lx Last AI: %Lx\n", + q->timestamp, last_ai_time); + seq_printf(m, "nr_used: %d ftc: %d last_move: %d\n", + atomic_read(&q->nr_buf_used), + q->first_to_check, q->last_move); + if (q->is_input_q) { + seq_printf(m, "polling: %d ack start: %d ack count: %d\n", + q->u.in.polling, q->u.in.ack_start, + q->u.in.ack_count); + seq_printf(m, "DSCI: %d IRQs disabled: %u\n", + *(u32 *)q->irq_ptr->dsci, + test_bit(QDIO_QUEUE_IRQS_DISABLED, + &q->u.in.queue_irq_state)); + } seq_printf(m, "SBAL states:\n"); seq_printf(m, "|0 |8 |16 |24 |32 |40 |48 |56 63|\n"); @@ -69,6 +142,9 @@ static int qstat_show(struct seq_file *m, void *v) case SLSB_P_OUTPUT_NOT_INIT: seq_printf(m, "N"); break; + case SLSB_P_OUTPUT_PENDING: + seq_printf(m, "P"); + break; case SLSB_P_INPUT_PRIMED: case SLSB_CU_OUTPUT_PRIMED: seq_printf(m, "+"); @@ -113,33 +189,16 @@ static int qstat_show(struct seq_file *m, void *v) return 0; } -static ssize_t qstat_seq_write(struct file *file, const char __user *buf, - size_t count, loff_t *off) -{ - struct seq_file *seq = file->private_data; - struct qdio_q *q = seq->private; - - if (!q) - return 0; - if (q->is_input_q) - xchg(q->irq_ptr->dsci, 1); - local_bh_disable(); - tasklet_schedule(&q->tasklet); - local_bh_enable(); - return count; -} - static int qstat_seq_open(struct inode *inode, struct file *filp) { return single_open(filp, qstat_show, - filp->f_path.dentry->d_inode->i_private); + file_inode(filp)->i_private); } static const struct file_operations debugfs_fops = { .owner = THIS_MODULE, .open = qstat_seq_open, .read = seq_read, - .write = qstat_seq_write, .llseek = seq_lseek, .release = single_release, }; @@ -161,12 +220,14 @@ static char *qperf_names[] = { "Inbound queue full", "Outbound calls", "Outbound handler", + "Outbound queue full", "Outbound fast_requeue", "Outbound target_full", "QEBSM eqbs", "QEBSM eqbs partial", "QEBSM sqbs", - "QEBSM sqbs partial" + "QEBSM sqbs partial", + "Discarded interrupts" }; static int qperf_show(struct seq_file *m, void *v) @@ -196,19 +257,13 @@ static ssize_t qperf_seq_write(struct file *file, const char __user *ubuf, struct qdio_irq *irq_ptr = seq->private; struct qdio_q *q; unsigned long val; - char buf[8]; int ret, i; if (!irq_ptr) return 0; - if (count >= sizeof(buf)) - return -EINVAL; - if (copy_from_user(&buf, ubuf, count)) - return -EFAULT; - buf[count] = 0; - - ret = strict_strtoul(buf, 10, &val); - if (ret < 0) + + ret = kstrtoul_from_user(ubuf, count, 10, &val); + if (ret) return ret; switch (val) { @@ -230,10 +285,10 @@ static ssize_t 
qperf_seq_write(struct file *file, const char __user *ubuf, static int qperf_seq_open(struct inode *inode, struct file *filp) { return single_open(filp, qperf_show, - filp->f_path.dentry->d_inode->i_private); + file_inode(filp)->i_private); } -static struct file_operations debugfs_perf_fops = { +static const struct file_operations debugfs_perf_fops = { .owner = THIS_MODULE, .open = qperf_seq_open, .read = seq_read, @@ -241,7 +296,8 @@ static struct file_operations debugfs_perf_fops = { .llseek = seq_lseek, .release = single_release, }; -static void setup_debugfs_entry(struct qdio_q *q, struct ccw_device *cdev) + +static void setup_debugfs_entry(struct qdio_q *q) { char name[QDIO_DEBUGFS_NAME_LEN]; @@ -272,12 +328,12 @@ void qdio_setup_debug_entries(struct qdio_irq *irq_ptr, struct ccw_device *cdev) irq_ptr->debugfs_perf = NULL; for_each_input_queue(irq_ptr, q, i) - setup_debugfs_entry(q, cdev); + setup_debugfs_entry(q); for_each_output_queue(irq_ptr, q, i) - setup_debugfs_entry(q, cdev); + setup_debugfs_entry(q); } -void qdio_shutdown_debug_entries(struct qdio_irq *irq_ptr, struct ccw_device *cdev) +void qdio_shutdown_debug_entries(struct qdio_irq *irq_ptr) { struct qdio_q *q; int i; @@ -308,6 +364,7 @@ int __init qdio_debug_init(void) void qdio_debug_exit(void) { + qdio_clear_dbf_list(); debugfs_remove(debugfs_root); if (qdio_dbf_setup) debug_unregister(qdio_dbf_setup); diff --git a/drivers/s390/cio/qdio_debug.h b/drivers/s390/cio/qdio_debug.h index 5d70bd162ae..f33ce857761 100644 --- a/drivers/s390/cio/qdio_debug.h +++ b/drivers/s390/cio/qdio_debug.h @@ -1,6 +1,4 @@ /* - * drivers/s390/cio/qdio_debug.h - * * Copyright IBM Corp. 2008 * * Author: Jan Glauber (jang@linux.vnet.ibm.com) @@ -18,12 +16,6 @@ extern debug_info_t *qdio_dbf_setup; extern debug_info_t *qdio_dbf_error; -/* sort out low debug levels early to avoid wasted sprints */ -static inline int qdio_dbf_passes(debug_info_t *dbf_grp, int level) -{ - return (level <= dbf_grp->level); -} - #define DBF_ERR 3 /* error conditions */ #define DBF_WARN 4 /* warning conditions */ #define DBF_INFO 6 /* informational */ @@ -39,10 +31,14 @@ static inline int qdio_dbf_passes(debug_info_t *dbf_grp, int level) debug_text_event(qdio_dbf_setup, DBF_ERR, debug_buffer); \ } while (0) -#define DBF_HEX(addr, len) \ - do { \ - debug_event(qdio_dbf_setup, DBF_ERR, (void*)(addr), len); \ - } while (0) +static inline void DBF_HEX(void *addr, int len) +{ + while (len > 0) { + debug_event(qdio_dbf_setup, DBF_ERR, addr, len); + len -= qdio_dbf_setup->buf_size; + addr += qdio_dbf_setup->buf_size; + } +} #define DBF_ERROR(text...) \ do { \ @@ -51,32 +47,39 @@ static inline int qdio_dbf_passes(debug_info_t *dbf_grp, int level) debug_text_event(qdio_dbf_error, DBF_ERR, debug_buffer); \ } while (0) -#define DBF_ERROR_HEX(addr, len) \ - do { \ - debug_event(qdio_dbf_error, DBF_ERR, (void*)(addr), len); \ - } while (0) - +static inline void DBF_ERROR_HEX(void *addr, int len) +{ + while (len > 0) { + debug_event(qdio_dbf_error, DBF_ERR, addr, len); + len -= qdio_dbf_error->buf_size; + addr += qdio_dbf_error->buf_size; + } +} #define DBF_DEV_EVENT(level, device, text...) 
\ do { \ char debug_buffer[QDIO_DBF_LEN]; \ - if (qdio_dbf_passes(device->debug_area, level)) { \ + if (debug_level_enabled(device->debug_area, level)) { \ snprintf(debug_buffer, QDIO_DBF_LEN, text); \ debug_text_event(device->debug_area, level, debug_buffer); \ } \ } while (0) -#define DBF_DEV_HEX(level, device, addr, len) \ - do { \ - debug_event(device->debug_area, level, (void*)(addr), len); \ - } while (0) +static inline void DBF_DEV_HEX(struct qdio_irq *dev, void *addr, + int len, int level) +{ + while (len > 0) { + debug_event(dev->debug_area, level, addr, len); + len -= dev->debug_area->buf_size; + addr += dev->debug_area->buf_size; + } +} -void qdio_allocate_dbf(struct qdio_initialize *init_data, +int qdio_allocate_dbf(struct qdio_initialize *init_data, struct qdio_irq *irq_ptr); void qdio_setup_debug_entries(struct qdio_irq *irq_ptr, struct ccw_device *cdev); -void qdio_shutdown_debug_entries(struct qdio_irq *irq_ptr, - struct ccw_device *cdev); +void qdio_shutdown_debug_entries(struct qdio_irq *irq_ptr); int qdio_debug_init(void); void qdio_debug_exit(void); diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c index 4f8f7431177..848e3b64ea6 100644 --- a/drivers/s390/cio/qdio_main.c +++ b/drivers/s390/cio/qdio_main.c @@ -1,9 +1,7 @@ /* - * linux/drivers/s390/cio/qdio_main.c - * * Linux for s390 qdio support, buffer handling, qdio API and module support. * - * Copyright 2000,2008 IBM Corp. + * Copyright IBM Corp. 2000, 2008 * Author(s): Utz Bacher <utz.bacher@de.ibm.com> * Jan Glauber <jang@linux.vnet.ibm.com> * 2.6 cio integration by Cornelia Huck <cornelia.huck@de.ibm.com> @@ -13,9 +11,12 @@ #include <linux/kernel.h> #include <linux/timer.h> #include <linux/delay.h> -#include <asm/atomic.h> +#include <linux/gfp.h> +#include <linux/io.h> +#include <linux/atomic.h> #include <asm/debug.h> #include <asm/qdio.h> +#include <asm/ipl.h> #include "cio.h" #include "css.h" @@ -28,11 +29,12 @@ MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>,"\ MODULE_DESCRIPTION("QDIO base support"); MODULE_LICENSE("GPL"); -static inline int do_siga_sync(struct subchannel_id schid, - unsigned int out_mask, unsigned int in_mask) +static inline int do_siga_sync(unsigned long schid, + unsigned int out_mask, unsigned int in_mask, + unsigned int fc) { - register unsigned long __fc asm ("0") = 2; - register struct subchannel_id __schid asm ("1") = schid; + register unsigned long __fc asm ("0") = fc; + register unsigned long __schid asm ("1") = schid; register unsigned long out asm ("2") = out_mask; register unsigned long in asm ("3") = in_mask; int cc; @@ -46,10 +48,11 @@ static inline int do_siga_sync(struct subchannel_id schid, return cc; } -static inline int do_siga_input(struct subchannel_id schid, unsigned int mask) +static inline int do_siga_input(unsigned long schid, unsigned int mask, + unsigned int fc) { - register unsigned long __fc asm ("0") = 1; - register struct subchannel_id __schid asm ("1") = schid; + register unsigned long __fc asm ("0") = fc; + register unsigned long __schid asm ("1") = schid; register unsigned long __mask asm ("2") = mask; int cc; @@ -58,7 +61,7 @@ static inline int do_siga_input(struct subchannel_id schid, unsigned int mask) " ipm %0\n" " srl %0,28\n" : "=d" (cc) - : "d" (__fc), "d" (__schid), "d" (__mask) : "cc", "memory"); + : "d" (__fc), "d" (__schid), "d" (__mask) : "cc"); return cc; } @@ -69,26 +72,27 @@ static inline int do_siga_input(struct subchannel_id schid, unsigned int mask) * @bb: busy bit indicator, set only if SIGA-w/wt could not access a 
buffer * @fc: function code to perform * - * Returns cc or QDIO_ERROR_SIGA_ACCESS_EXCEPTION. + * Returns condition code. * Note: For IQDC unicast queues only the highest priority queue is processed. */ static inline int do_siga_output(unsigned long schid, unsigned long mask, - unsigned int *bb, unsigned int fc) + unsigned int *bb, unsigned int fc, + unsigned long aob) { register unsigned long __fc asm("0") = fc; register unsigned long __schid asm("1") = schid; register unsigned long __mask asm("2") = mask; - int cc = QDIO_ERROR_SIGA_ACCESS_EXCEPTION; + register unsigned long __aob asm("3") = aob; + int cc; asm volatile( " siga 0\n" - "0: ipm %0\n" + " ipm %0\n" " srl %0,28\n" - "1:\n" - EX_TABLE(0b, 1b) - : "+d" (cc), "+d" (__fc), "+d" (__schid), "+d" (__mask) - : : "cc", "memory"); - *bb = ((unsigned int) __fc) >> 31; + : "=d" (cc), "+d" (__fc), "+d" (__aob) + : "d" (__schid), "d" (__mask) + : "cc"); + *bb = __fc >> 31; return cc; } @@ -97,9 +101,12 @@ static inline int qdio_check_ccq(struct qdio_q *q, unsigned int ccq) /* all done or next buffer state different */ if (ccq == 0 || ccq == 32) return 0; - /* not all buffers processed */ - if (ccq == 96 || ccq == 97) + /* no buffer processed */ + if (ccq == 97) return 1; + /* not all buffers processed */ + if (ccq == 96) + return 2; /* notify devices immediately */ DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq); return -EIO; @@ -119,12 +126,9 @@ static inline int qdio_check_ccq(struct qdio_q *q, unsigned int ccq) static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state, int start, int count, int auto_ack) { + int rc, tmp_count = count, tmp_start = start, nr = q->nr, retried = 0; unsigned int ccq = 0; - int tmp_count = count, tmp_start = start; - int nr = q->nr; - int rc; - BUG_ON(!q->irq_ptr->sch_token); qperf_inc(q, eqbs); if (!q->is_input_q) @@ -133,29 +137,33 @@ again: ccq = do_eqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count, auto_ack); rc = qdio_check_ccq(q, ccq); - - /* At least one buffer was processed, return and extract the remaining - * buffers later. - */ - if ((ccq == 96) && (count != tmp_count)) { - qperf_inc(q, eqbs_partial); - return (count - tmp_count); - } + if (!rc) + return count - tmp_count; if (rc == 1) { DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS again:%2d", ccq); goto again; } - if (rc < 0) { - DBF_ERROR("%4x EQBS ERROR", SCH_NO(q)); - DBF_ERROR("%3d%3d%2d", count, tmp_count, nr); - q->handler(q->irq_ptr->cdev, - QDIO_ERROR_ACTIVATE_CHECK_CONDITION, - 0, -1, -1, q->irq_ptr->int_parm); - return 0; + if (rc == 2) { + qperf_inc(q, eqbs_partial); + DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS part:%02x", + tmp_count); + /* + * Retry once, if that fails bail out and process the + * extracted buffers before trying again. 
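The reworked qdio_check_ccq() contract used above is: 0 for done (ccq 0/32), 1 for "no buffer processed" (ccq 97), 2 for a partial extract (ccq 96), and -EIO otherwise. A compact, self-contained sketch of the resulting caller policy; do_extract() is a hypothetical stand-in for the real do_eqbs() primitive and is stubbed out so the sketch compiles:

    /* Stand-in for do_eqbs(): pretend everything completes at once. */
    static int do_extract(int *tmp_count)
    {
        *tmp_count = 0;
        return 0;
    }

    /*
     * Caller policy mirroring qdio_do_eqbs() above: retry rc 1
     * immediately, retry rc 2 (partial) once, then hand back the
     * buffers already extracted.
     */
    static int extract_buf_states(int count)
    {
        int rc, tmp_count = count, retried = 0;

    again:
        rc = do_extract(&tmp_count);
        if (rc == 0)
            return count - tmp_count;   /* all requested states fetched */
        if (rc == 1)
            goto again;                 /* nothing processed yet */
        if (rc == 2 && !retried++)
            goto again;                 /* partial: one more attempt */
        if (rc == 2)
            return count - tmp_count;   /* work with the partial result */
        return -5;                      /* -EIO: report to the handler */
    }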
+ */ + if (!retried++) + goto again; + else + return count - tmp_count; } - return count - tmp_count; + + DBF_ERROR("%4x EQBS ERROR", SCH_NO(q)); + DBF_ERROR("%3d%3d%2d", count, tmp_count, nr); + q->handler(q->irq_ptr->cdev, QDIO_ERROR_GET_BUF_STATE, + q->nr, q->first_to_kick, count, q->irq_ptr->int_parm); + return 0; } /** @@ -179,8 +187,6 @@ static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start, if (!count) return 0; - - BUG_ON(!q->irq_ptr->sch_token); qperf_inc(q, sqbs); if (!q->is_input_q) @@ -188,41 +194,44 @@ static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start, again: ccq = do_sqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count); rc = qdio_check_ccq(q, ccq); - if (rc == 1) { + if (!rc) { + WARN_ON_ONCE(tmp_count); + return count - tmp_count; + } + + if (rc == 1 || rc == 2) { DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "SQBS again:%2d", ccq); qperf_inc(q, sqbs_partial); goto again; } - if (rc < 0) { - DBF_ERROR("%4x SQBS ERROR", SCH_NO(q)); - DBF_ERROR("%3d%3d%2d", count, tmp_count, nr); - q->handler(q->irq_ptr->cdev, - QDIO_ERROR_ACTIVATE_CHECK_CONDITION, - 0, -1, -1, q->irq_ptr->int_parm); - return 0; - } - WARN_ON(tmp_count); - return count - tmp_count; + + DBF_ERROR("%4x SQBS ERROR", SCH_NO(q)); + DBF_ERROR("%3d%3d%2d", count, tmp_count, nr); + q->handler(q->irq_ptr->cdev, QDIO_ERROR_SET_BUF_STATE, + q->nr, q->first_to_kick, count, q->irq_ptr->int_parm); + return 0; } /* returns number of examined buffers and their common state in *state */ static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr, unsigned char *state, unsigned int count, - int auto_ack) + int auto_ack, int merge_pending) { unsigned char __state = 0; int i; - BUG_ON(bufnr > QDIO_MAX_BUFFERS_MASK); - BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q); - if (is_qebsm(q)) return qdio_do_eqbs(q, state, bufnr, count, auto_ack); for (i = 0; i < count; i++) { - if (!__state) + if (!__state) { __state = q->slsb.val[bufnr]; - else if (q->slsb.val[bufnr] != __state) + if (merge_pending && __state == SLSB_P_OUTPUT_PENDING) + __state = SLSB_P_OUTPUT_EMPTY; + } else if (merge_pending) { + if ((q->slsb.val[bufnr] & __state) != __state) + break; + } else if (q->slsb.val[bufnr] != __state) break; bufnr = next_buf(bufnr); } @@ -233,7 +242,7 @@ static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr, static inline int get_buf_state(struct qdio_q *q, unsigned int bufnr, unsigned char *state, int auto_ack) { - return get_buf_states(q, bufnr, state, 1, auto_ack); + return get_buf_states(q, bufnr, state, 1, auto_ack, 0); } /* wrap-around safe setting of slsb states, returns number of changed buffers */ @@ -242,9 +251,6 @@ static inline int set_buf_states(struct qdio_q *q, int bufnr, { int i; - BUG_ON(bufnr > QDIO_MAX_BUFFERS_MASK); - BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q); - if (is_qebsm(q)) return qdio_do_sqbs(q, state, bufnr, count); @@ -262,7 +268,7 @@ static inline int set_buf_state(struct qdio_q *q, int bufnr, } /* set slsb states to initial state */ -void qdio_init_buf_states(struct qdio_irq *irq_ptr) +static void qdio_init_buf_states(struct qdio_irq *irq_ptr) { struct qdio_q *q; int i; @@ -278,18 +284,22 @@ void qdio_init_buf_states(struct qdio_irq *irq_ptr) static inline int qdio_siga_sync(struct qdio_q *q, unsigned int output, unsigned int input) { + unsigned long schid = *((u32 *) &q->irq_ptr->schid); + unsigned int fc = QDIO_SIGA_SYNC; int cc; - if (!need_siga_sync(q)) - return 0; - DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-s:%1d", q->nr); qperf_inc(q, siga_sync); - cc 
= do_siga_sync(q->irq_ptr->schid, output, input); - if (cc) + if (is_qebsm(q)) { + schid = q->irq_ptr->sch_token; + fc |= QDIO_SIGA_QEBSM_FLAG; + } + + cc = do_siga_sync(schid, output, input, fc); + if (unlikely(cc)) DBF_ERROR("%4x SIGA-S:%2d", SCH_NO(q), cc); - return cc; + return (cc) ? -EIO : 0; } static inline int qdio_siga_sync_q(struct qdio_q *q) @@ -300,79 +310,86 @@ static inline int qdio_siga_sync_q(struct qdio_q *q) return qdio_siga_sync(q, q->mask, 0); } -static inline int qdio_siga_sync_out(struct qdio_q *q) +static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit, + unsigned long aob) { - return qdio_siga_sync(q, ~0U, 0); -} - -static inline int qdio_siga_sync_all(struct qdio_q *q) -{ - return qdio_siga_sync(q, ~0U, ~0U); -} - -static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit) -{ - unsigned long schid; - unsigned int fc = 0; + unsigned long schid = *((u32 *) &q->irq_ptr->schid); + unsigned int fc = QDIO_SIGA_WRITE; u64 start_time = 0; - int cc; + int retries = 0, cc; + unsigned long laob = 0; - if (q->u.out.use_enh_siga) - fc = 3; + if (q->u.out.use_cq && aob != 0) { + fc = QDIO_SIGA_WRITEQ; + laob = aob; + } if (is_qebsm(q)) { schid = q->irq_ptr->sch_token; - fc |= 0x80; + fc |= QDIO_SIGA_QEBSM_FLAG; } - else - schid = *((u32 *)&q->irq_ptr->schid); - again: - cc = do_siga_output(schid, q->mask, busy_bit, fc); + WARN_ON_ONCE((aob && queue_type(q) != QDIO_IQDIO_QFMT) || + (aob && fc != QDIO_SIGA_WRITEQ)); + cc = do_siga_output(schid, q->mask, busy_bit, fc, laob); /* hipersocket busy condition */ - if (*busy_bit) { - WARN_ON(queue_type(q) != QDIO_IQDIO_QFMT || cc != 2); + if (unlikely(*busy_bit)) { + retries++; if (!start_time) { - start_time = get_usecs(); + start_time = get_tod_clock_fast(); goto again; } - if ((get_usecs() - start_time) < QDIO_BUSY_BIT_PATIENCE) + if (get_tod_clock_fast() - start_time < QDIO_BUSY_BIT_PATIENCE) goto again; } + if (retries) { + DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, + "%4x cc2 BB1:%1d", SCH_NO(q), q->nr); + DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "count:%u", retries); + } return cc; } static inline int qdio_siga_input(struct qdio_q *q) { + unsigned long schid = *((u32 *) &q->irq_ptr->schid); + unsigned int fc = QDIO_SIGA_READ; int cc; DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-r:%1d", q->nr); qperf_inc(q, siga_read); - cc = do_siga_input(q->irq_ptr->schid, q->mask); - if (cc) + if (is_qebsm(q)) { + schid = q->irq_ptr->sch_token; + fc |= QDIO_SIGA_QEBSM_FLAG; + } + + cc = do_siga_input(schid, q->mask, fc); + if (unlikely(cc)) DBF_ERROR("%4x SIGA-R:%2d", SCH_NO(q), cc); - return cc; + return (cc) ? 
-EIO : 0; } -static inline void qdio_sync_after_thinint(struct qdio_q *q) +#define qdio_siga_sync_out(q) qdio_siga_sync(q, ~0U, 0) +#define qdio_siga_sync_all(q) qdio_siga_sync(q, ~0U, ~0U) + +static inline void qdio_sync_queues(struct qdio_q *q) { - if (pci_out_supported(q)) { - if (need_siga_sync_thinint(q)) - qdio_siga_sync_all(q); - else if (need_siga_sync_out_thinint(q)) - qdio_siga_sync_out(q); - } else + /* PCI capable outbound queues will also be scanned so sync them too */ + if (pci_out_supported(q)) + qdio_siga_sync_all(q); + else qdio_siga_sync_q(q); } int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr, unsigned char *state) { - qdio_siga_sync_q(q); - return get_buf_states(q, bufnr, state, 1, 0); + if (need_siga_sync(q)) + qdio_siga_sync_q(q); + return get_buf_states(q, bufnr, state, 1, 0, 0); } static inline void qdio_stop_polling(struct qdio_q *q) @@ -392,39 +409,48 @@ static inline void qdio_stop_polling(struct qdio_q *q) set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT); } -static inline void account_sbals(struct qdio_q *q, int count) +static inline void account_sbals(struct qdio_q *q, unsigned int count) { - int pos = 0; + int pos; q->q_stats.nr_sbal_total += count; if (count == QDIO_MAX_BUFFERS_MASK) { q->q_stats.nr_sbals[7]++; return; } - while (count >>= 1) - pos++; + pos = ilog2(count); q->q_stats.nr_sbals[pos]++; } -static void announce_buffer_error(struct qdio_q *q, int count) +static void process_buffer_error(struct qdio_q *q, int count) { - q->qdio_error |= QDIO_ERROR_SLSB_STATE; + unsigned char state = (q->is_input_q) ? SLSB_P_INPUT_NOT_INIT : + SLSB_P_OUTPUT_NOT_INIT; + + q->qdio_error = QDIO_ERROR_SLSB_STATE; /* special handling for no target buffer empty */ if ((!q->is_input_q && - (q->sbal[q->first_to_check]->element[15].flags & 0xff) == 0x10)) { + (q->sbal[q->first_to_check]->element[15].sflags) == 0x10)) { qperf_inc(q, target_full); DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x", q->first_to_check); - return; + goto set; } DBF_ERROR("%4x BUF ERROR", SCH_NO(q)); DBF_ERROR((q->is_input_q) ? "IN:%2d" : "OUT:%2d", q->nr); DBF_ERROR("FTC:%3d C:%3d", q->first_to_check, count); DBF_ERROR("F14:%2x F15:%2x", - q->sbal[q->first_to_check]->element[14].flags & 0xff, - q->sbal[q->first_to_check]->element[15].flags & 0xff); + q->sbal[q->first_to_check]->element[14].sflags, + q->sbal[q->first_to_check]->element[15].sflags); + +set: + /* + * Interrupts may be avoided as long as the error is present + * so change the buffer state immediately to avoid starvation. + */ + set_buf_states(q, q->first_to_check, state, count); } static inline void inbound_primed(struct qdio_q *q, int count) @@ -475,7 +501,9 @@ static inline void inbound_primed(struct qdio_q *q, int count) static int get_inbound_buffer_frontier(struct qdio_q *q) { int count, stop; - unsigned char state; + unsigned char state = 0; + + q->timestamp = get_tod_clock_fast(); /* * Don't check 128 buffers, as otherwise qdio_inbound_q_moved @@ -491,7 +519,7 @@ static int get_inbound_buffer_frontier(struct qdio_q *q) * No siga sync here, as a PCI or we after a thin interrupt * already sync'ed the queues. 
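account_sbals() above now derives the histogram bucket with ilog2() instead of the old open-coded shift loop. A userspace stand-in that demonstrates the bucketing; note that 127, i.e. QDIO_MAX_BUFFERS_MASK, is special-cased into the top bucket:

    #include <stdio.h>

    /* Userspace stand-in for the kernel's ilog2() on non-zero input. */
    static int ilog2_(unsigned int v)
    {
        return 31 - __builtin_clz(v);
    }

    /* Bucket selection as in account_sbals(); count is 1..127. */
    static int sbal_bucket(unsigned int count)
    {
        if (count == 127)       /* QDIO_MAX_BUFFERS_MASK */
            return 7;
        return ilog2_(count);
    }

    int main(void)
    {
        /* counts 1, 3, 64 and 127 land in buckets 0, 1, 6 and 7 */
        printf("%d %d %d %d\n", sbal_bucket(1), sbal_bucket(3),
               sbal_bucket(64), sbal_bucket(127));
        return 0;
    }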
*/ - count = get_buf_states(q, q->first_to_check, &state, count, 1); + count = get_buf_states(q, q->first_to_check, &state, count, 1, 0); if (!count) goto out; @@ -499,14 +527,13 @@ static int get_inbound_buffer_frontier(struct qdio_q *q) case SLSB_P_INPUT_PRIMED: inbound_primed(q, count); q->first_to_check = add_buf(q->first_to_check, count); - if (atomic_sub(count, &q->nr_buf_used) == 0) + if (atomic_sub_return(count, &q->nr_buf_used) == 0) qperf_inc(q, inbound_queue_full); if (q->irq_ptr->perf_stat_enabled) account_sbals(q, count); break; case SLSB_P_INPUT_ERROR: - announce_buffer_error(q, count); - /* process the buffer, the upper layer will take care of it */ + process_buffer_error(q, count); q->first_to_check = add_buf(q->first_to_check, count); atomic_sub(count, &q->nr_buf_used); if (q->irq_ptr->perf_stat_enabled) @@ -520,7 +547,7 @@ static int get_inbound_buffer_frontier(struct qdio_q *q) DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop"); break; default: - BUG(); + WARN_ON_ONCE(1); } out: return q->first_to_check; @@ -532,10 +559,10 @@ static int qdio_inbound_q_moved(struct qdio_q *q) bufnr = get_inbound_buffer_frontier(q); - if ((bufnr != q->last_move) || q->qdio_error) { + if (bufnr != q->last_move) { q->last_move = bufnr; if (!is_thinint_irq(q->irq_ptr) && MACHINE_IS_LPAR) - q->u.in.timestamp = get_usecs(); + q->u.in.timestamp = get_tod_clock(); return 1; } else return 0; @@ -548,7 +575,8 @@ static inline int qdio_inbound_q_done(struct qdio_q *q) if (!atomic_read(&q->nr_buf_used)) return 1; - qdio_siga_sync_q(q); + if (need_siga_sync(q)) + qdio_siga_sync_q(q); get_buf_state(q, q->first_to_check, &state, 0); if (state == SLSB_P_INPUT_PRIMED || state == SLSB_P_INPUT_ERROR) @@ -566,7 +594,7 @@ static inline int qdio_inbound_q_done(struct qdio_q *q) * At this point we know, that inbound first_to_check * has (probably) not moved (see qdio_inbound_processing). 
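The timestamp comparisons in these hunks work in raw TOD-clock units: bit 51 of the s390 TOD clock ticks once per microsecond, so shifting a microsecond value left by 12 converts it to TOD units. That is why QDIO_BUSY_BIT_PATIENCE and QDIO_INPUT_THRESHOLD are now defined as (100 << 12) and (500 << 12), and why get_usecs() (which shifted right by 12) could be dropped. A minimal restatement:

    /* Bit 51 of the TOD clock ticks once per microsecond. */
    #define US_TO_TOD(us)   ((unsigned long long)(us) << 12)
    #define TOD_TO_US(tod)  ((tod) >> 12)

    /*
     * Equivalent reading of the inbound-done check that follows:
     * "more than 500us have passed since the timestamp was taken".
     */
    static int input_threshold_expired(unsigned long long now,
                                       unsigned long long stamp)
    {
        return now > stamp + US_TO_TOD(500);    /* QDIO_INPUT_THRESHOLD */
    }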
*/ - if (get_usecs() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) { + if (get_tod_clock_fast() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) { DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%02x", q->first_to_check); return 1; @@ -574,6 +602,60 @@ static inline int qdio_inbound_q_done(struct qdio_q *q) return 0; } +static inline int contains_aobs(struct qdio_q *q) +{ + return !q->is_input_q && q->u.out.use_cq; +} + +static inline void qdio_handle_aobs(struct qdio_q *q, int start, int count) +{ + unsigned char state = 0; + int j, b = start; + + if (!contains_aobs(q)) + return; + + for (j = 0; j < count; ++j) { + get_buf_state(q, b, &state, 0); + if (state == SLSB_P_OUTPUT_PENDING) { + struct qaob *aob = q->u.out.aobs[b]; + if (aob == NULL) + continue; + + q->u.out.sbal_state[b].flags |= + QDIO_OUTBUF_STATE_FLAG_PENDING; + q->u.out.aobs[b] = NULL; + } else if (state == SLSB_P_OUTPUT_EMPTY) { + q->u.out.sbal_state[b].aob = NULL; + } + b = next_buf(b); + } +} + +static inline unsigned long qdio_aob_for_buffer(struct qdio_output_q *q, + int bufnr) +{ + unsigned long phys_aob = 0; + + if (!q->use_cq) + goto out; + + if (!q->aobs[bufnr]) { + struct qaob *aob = qdio_allocate_aob(); + q->aobs[bufnr] = aob; + } + if (q->aobs[bufnr]) { + q->sbal_state[bufnr].flags = QDIO_OUTBUF_STATE_FLAG_NONE; + q->sbal_state[bufnr].aob = q->aobs[bufnr]; + q->aobs[bufnr]->user1 = (u64) q->sbal_state[bufnr].user; + phys_aob = virt_to_phys(q->aobs[bufnr]); + WARN_ON_ONCE(phys_aob & 0xFF); + } + +out: + return phys_aob; +} + static void qdio_kick_handler(struct qdio_q *q) { int start = q->first_to_kick; @@ -594,6 +676,8 @@ static void qdio_kick_handler(struct qdio_q *q) start, count); } + qdio_handle_aobs(q, start, count); + q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, start, count, q->irq_ptr->int_parm); @@ -605,7 +689,7 @@ static void qdio_kick_handler(struct qdio_q *q) static void __qdio_inbound_processing(struct qdio_q *q) { qperf_inc(q, tasklet_inbound); -again: + if (!qdio_inbound_q_moved(q)) return; @@ -614,7 +698,10 @@ again: if (!qdio_inbound_q_done(q)) { /* means poll time is not yet over */ qperf_inc(q, tasklet_inbound_resched); - goto again; + if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) { + tasklet_schedule(&q->tasklet); + return; + } } qdio_stop_polling(q); @@ -624,7 +711,8 @@ again: */ if (!qdio_inbound_q_done(q)) { qperf_inc(q, tasklet_inbound_resched2); - goto again; + if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) + tasklet_schedule(&q->tasklet); } } @@ -637,11 +725,16 @@ void qdio_inbound_processing(unsigned long data) static int get_outbound_buffer_frontier(struct qdio_q *q) { int count, stop; - unsigned char state; + unsigned char state = 0; - if (((queue_type(q) != QDIO_IQDIO_QFMT) && !pci_out_supported(q)) || - (queue_type(q) == QDIO_IQDIO_QFMT && multicast_outbound(q))) - qdio_siga_sync_q(q); + q->timestamp = get_tod_clock_fast(); + + if (need_siga_sync(q)) + if (((queue_type(q) != QDIO_IQDIO_QFMT) && + !pci_out_supported(q)) || + (queue_type(q) == QDIO_IQDIO_QFMT && + multicast_outbound(q))) + qdio_siga_sync_q(q); /* * Don't check 128 buffers, as otherwise qdio_inbound_q_moved @@ -649,27 +742,27 @@ static int get_outbound_buffer_frontier(struct qdio_q *q) */ count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK); stop = add_buf(q->first_to_check, count); - if (q->first_to_check == stop) - return q->first_to_check; + goto out; - count = get_buf_states(q, q->first_to_check, &state, count, 0); + count = get_buf_states(q, q->first_to_check, &state, count, 0, 1); if 
(!count) - return q->first_to_check; + goto out; switch (state) { case SLSB_P_OUTPUT_EMPTY: /* the adapter got it */ - DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out empty:%1d %02x", q->nr, count); + DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, + "out empty:%1d %02x", q->nr, count); atomic_sub(count, &q->nr_buf_used); q->first_to_check = add_buf(q->first_to_check, count); if (q->irq_ptr->perf_stat_enabled) account_sbals(q, count); + break; case SLSB_P_OUTPUT_ERROR: - announce_buffer_error(q, count); - /* process the buffer, the upper layer will take care of it */ + process_buffer_error(q, count); q->first_to_check = add_buf(q->first_to_check, count); atomic_sub(count, &q->nr_buf_used); if (q->irq_ptr->perf_stat_enabled) @@ -679,14 +772,17 @@ static int get_outbound_buffer_frontier(struct qdio_q *q) /* the adapter has not fetched the output yet */ if (q->irq_ptr->perf_stat_enabled) q->q_stats.nr_sbal_nop++; - DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d", q->nr); + DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d", + q->nr); break; case SLSB_P_OUTPUT_NOT_INIT: case SLSB_P_OUTPUT_HALTED: break; default: - BUG(); + WARN_ON_ONCE(1); } + +out: return q->first_to_check; } @@ -702,7 +798,7 @@ static inline int qdio_outbound_q_moved(struct qdio_q *q) bufnr = get_outbound_buffer_frontier(q); - if ((bufnr != q->last_move) || q->qdio_error) { + if (bufnr != q->last_move) { q->last_move = bufnr; DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out moved:%1d", q->nr); return 1; @@ -710,40 +806,52 @@ static inline int qdio_outbound_q_moved(struct qdio_q *q) return 0; } -static int qdio_kick_outbound_q(struct qdio_q *q) +static int qdio_kick_outbound_q(struct qdio_q *q, unsigned long aob) { + int retries = 0, cc; unsigned int busy_bit; - int cc; if (!need_siga_out(q)) return 0; DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w:%1d", q->nr); +retry: qperf_inc(q, siga_write); - cc = qdio_siga_output(q, &busy_bit); + cc = qdio_siga_output(q, &busy_bit, aob); switch (cc) { case 0: break; case 2: if (busy_bit) { - DBF_ERROR("%4x cc2 REP:%1d", SCH_NO(q), q->nr); - cc |= QDIO_ERROR_SIGA_BUSY; - } else + while (++retries < QDIO_BUSY_BIT_RETRIES) { + mdelay(QDIO_BUSY_BIT_RETRY_DELAY); + goto retry; + } + DBF_ERROR("%4x cc2 BBC:%1d", SCH_NO(q), q->nr); + cc = -EBUSY; + } else { DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w cc2:%1d", q->nr); + cc = -ENOBUFS; + } break; case 1: case 3: DBF_ERROR("%4x SIGA-W:%1d", SCH_NO(q), cc); + cc = -EIO; break; } + if (retries) { + DBF_ERROR("%4x cc2 BB2:%1d", SCH_NO(q), q->nr); + DBF_ERROR("count:%u", retries); + } return cc; } static void __qdio_outbound_processing(struct qdio_q *q) { qperf_inc(q, tasklet_outbound); - BUG_ON(atomic_read(&q->nr_buf_used) < 0); + WARN_ON_ONCE(atomic_read(&q->nr_buf_used) < 0); if (qdio_outbound_q_moved(q)) qdio_kick_handler(q); @@ -752,21 +860,13 @@ static void __qdio_outbound_processing(struct qdio_q *q) if (!pci_out_supported(q) && !qdio_outbound_q_done(q)) goto sched; - /* bail out for HiperSockets unicast queues */ - if (queue_type(q) == QDIO_IQDIO_QFMT && !multicast_outbound(q)) - return; - - if ((queue_type(q) == QDIO_IQDIO_QFMT) && - (atomic_read(&q->nr_buf_used)) > QDIO_IQDIO_POLL_LVL) - goto sched; - if (q->u.out.pci_out_enabled) return; /* * Now we know that queue type is either qeth without pci enabled - * or HiperSockets multicast. Make sure buffer switch from PRIMED to - * EMPTY is noticed and outbound_handler is called after some time. + * or HiperSockets. 
Make sure buffer switch from PRIMED to EMPTY + * is noticed and outbound_handler is called after some time. */ if (qdio_outbound_q_done(q)) del_timer(&q->u.out.timer); @@ -813,7 +913,8 @@ static inline void qdio_check_outbound_after_thinint(struct qdio_q *q) static void __tiqdio_inbound_processing(struct qdio_q *q) { qperf_inc(q, tasklet_inbound); - qdio_sync_after_thinint(q); + if (need_siga_sync(q) && need_siga_sync_after_ai(q)) + qdio_sync_queues(q); /* * The interrupt could be caused by a PCI request. Check the @@ -879,8 +980,20 @@ static void qdio_int_handler_pci(struct qdio_irq *irq_ptr) if (unlikely(irq_ptr->state == QDIO_IRQ_STATE_STOPPED)) return; - for_each_input_queue(irq_ptr, q, i) - tasklet_schedule(&q->tasklet); + for_each_input_queue(irq_ptr, q, i) { + if (q->u.in.queue_start_poll) { + /* skip if polling is enabled or already in work */ + if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED, + &q->u.in.queue_irq_state)) { + qperf_inc(q, int_discarded); + continue; + } + q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr, + q->irq_ptr->int_parm); + } else { + tasklet_schedule(&q->tasklet); + } + } if (!(irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED)) return; @@ -888,10 +1001,8 @@ static void qdio_int_handler_pci(struct qdio_irq *irq_ptr) for_each_output_queue(irq_ptr, q, i) { if (qdio_outbound_q_done(q)) continue; - - if (!siga_syncs_out_pci(q)) + if (need_siga_sync(q) && need_siga_sync_out_after_pci(q)) qdio_siga_sync_q(q); - tasklet_schedule(&q->tasklet); } } @@ -901,6 +1012,7 @@ static void qdio_handle_activate_check(struct ccw_device *cdev, { struct qdio_irq *irq_ptr = cdev->private->qdio_data; struct qdio_q *q; + int count; DBF_ERROR("%4x ACT CHECK", irq_ptr->schid.sch_no); DBF_ERROR("intp :%lx", intparm); @@ -914,10 +1026,17 @@ static void qdio_handle_activate_check(struct ccw_device *cdev, dump_stack(); goto no_handler; } - q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE_CHECK_CONDITION, - 0, -1, -1, irq_ptr->int_parm); + + count = sub_buf(q->first_to_check, q->first_to_kick); + q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE, + q->nr, q->first_to_kick, count, irq_ptr->int_parm); no_handler: qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED); + /* + * In case of z/VM LGR (Live Guest Migration) QDIO recovery will happen. + * Therefore we call the LGR detection function here. 
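The count passed to the handler in qdio_handle_activate_check() above comes from sub_buf(), the wrap-around-safe ring arithmetic defined earlier in qdio.h. A standalone worked example over the 128-entry SBAL ring:

    #include <stdio.h>

    #define QDIO_MAX_BUFFERS_MASK 127   /* ring of 128 SBALs */

    #define add_buf(bufnr, inc) (((bufnr) + (inc)) & QDIO_MAX_BUFFERS_MASK)
    #define sub_buf(bufnr, dec) (((bufnr) - (dec)) & QDIO_MAX_BUFFERS_MASK)

    int main(void)
    {
        /* distance from kick point 120 to check point 5, across the wrap */
        printf("%d\n", sub_buf(5, 120));    /* -> 13 */
        printf("%d\n", add_buf(120, 13));   /* -> 5 */
        return 0;
    }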
+ */ + lgr_info_log(); } static void qdio_establish_handle_irq(struct ccw_device *cdev, int cstat, @@ -954,17 +1073,14 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm, return; } + if (irq_ptr->perf_stat_enabled) + irq_ptr->perf_stat.qdio_int++; + if (IS_ERR(irb)) { - switch (PTR_ERR(irb)) { - case -EIO: - DBF_ERROR("%4x IO error", irq_ptr->schid.sch_no); - qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR); - wake_up(&cdev->private->wait_q); - return; - default: - WARN_ON(1); - return; - } + DBF_ERROR("%4x IO error", irq_ptr->schid.sch_no); + qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR); + wake_up(&cdev->private->wait_q); + return; } qdio_irq_check_sense(irq_ptr, irb); cstat = irb->scsw.cmd.cstat; @@ -990,7 +1106,7 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm, case QDIO_IRQ_STATE_STOPPED: break; default: - WARN_ON(1); + WARN_ON_ONCE(1); } wake_up(&cdev->private->wait_q); } @@ -1015,30 +1131,6 @@ int qdio_get_ssqd_desc(struct ccw_device *cdev, } EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc); -/** - * qdio_cleanup - shutdown queues and free data structures - * @cdev: associated ccw device - * @how: use halt or clear to shutdown - * - * This function calls qdio_shutdown() for @cdev with method @how. - * and qdio_free(). The qdio_free() return value is ignored since - * !irq_ptr is already checked. - */ -int qdio_cleanup(struct ccw_device *cdev, int how) -{ - struct qdio_irq *irq_ptr = cdev->private->qdio_data; - int rc; - - if (!irq_ptr) - return -ENODEV; - - rc = qdio_shutdown(cdev, how); - - qdio_free(cdev); - return rc; -} -EXPORT_SYMBOL_GPL(qdio_cleanup); - static void qdio_shutdown_queues(struct ccw_device *cdev) { struct qdio_irq *irq_ptr = cdev->private->qdio_data; @@ -1068,7 +1160,7 @@ int qdio_shutdown(struct ccw_device *cdev, int how) if (!irq_ptr) return -ENODEV; - BUG_ON(irqs_disabled()); + WARN_ON_ONCE(irqs_disabled()); DBF_EVENT("qshutdown:%4x", cdev->private->schid.sch_no); mutex_lock(&irq_ptr->setup_mutex); @@ -1089,7 +1181,7 @@ int qdio_shutdown(struct ccw_device *cdev, int how) tiqdio_remove_input_queues(irq_ptr); qdio_shutdown_queues(cdev); - qdio_shutdown_debug_entries(irq_ptr, cdev); + qdio_shutdown_debug_entries(irq_ptr); /* cleanup subchannel */ spin_lock_irqsave(get_ccwdev_lock(cdev), flags); @@ -1141,12 +1233,10 @@ int qdio_free(struct ccw_device *cdev) return -ENODEV; DBF_EVENT("qfree:%4x", cdev->private->schid.sch_no); + DBF_DEV_EVENT(DBF_ERR, irq_ptr, "dbf abandoned"); mutex_lock(&irq_ptr->setup_mutex); - if (irq_ptr->debug_area != NULL) { - debug_unregister(irq_ptr->debug_area); - irq_ptr->debug_area = NULL; - } + irq_ptr->debug_area = NULL; cdev->private->qdio_data = NULL; mutex_unlock(&irq_ptr->setup_mutex); @@ -1156,28 +1246,6 @@ int qdio_free(struct ccw_device *cdev) EXPORT_SYMBOL_GPL(qdio_free); /** - * qdio_initialize - allocate and establish queues for a qdio subchannel - * @init_data: initialization data - * - * This function first allocates queues via qdio_allocate() and on success - * establishes them via qdio_establish(). 
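With qdio_cleanup() and qdio_initialize() removed here, callers are expected to open-code both sequences from the remaining exports. A hedged sketch of the replacement; my_qdio_bringup() and my_qdio_teardown() are illustrative names, kernel context assumed:

    #include <asm/qdio.h>   /* qdio_allocate() and friends */

    /* Open-coded replacement for the removed qdio_initialize(). */
    static int my_qdio_bringup(struct qdio_initialize *init_data)
    {
        int rc;

        rc = qdio_allocate(init_data);
        if (rc)
            return rc;
        rc = qdio_establish(init_data);
        if (rc)
            qdio_free(init_data->cdev);
        return rc;
    }

    /* Open-coded replacement for the removed qdio_cleanup(). */
    static void my_qdio_teardown(struct ccw_device *cdev, int how)
    {
        qdio_shutdown(cdev, how);   /* e.g. QDIO_FLAG_CLEANUP_USING_CLEAR */
        qdio_free(cdev);
    }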
- */ -int qdio_initialize(struct qdio_initialize *init_data) -{ - int rc; - - rc = qdio_allocate(init_data); - if (rc) - return rc; - - rc = qdio_establish(init_data); - if (rc) - qdio_free(init_data->cdev); - return rc; -} -EXPORT_SYMBOL_GPL(qdio_initialize); - -/** * qdio_allocate - allocate qdio queues and associated data * @init_data: initialization data */ @@ -1205,7 +1273,8 @@ int qdio_allocate(struct qdio_initialize *init_data) goto out_err; mutex_init(&irq_ptr->setup_mutex); - qdio_allocate_dbf(init_data, irq_ptr); + if (qdio_allocate_dbf(init_data, irq_ptr)) + goto out_rel; /* * Allocate a page for the chsc calls in qdio_establish. @@ -1221,7 +1290,6 @@ int qdio_allocate(struct qdio_initialize *init_data) irq_ptr->qdr = (struct qdr *) get_zeroed_page(GFP_KERNEL | GFP_DMA); if (!irq_ptr->qdr) goto out_rel; - WARN_ON((unsigned long)irq_ptr->qdr & 0xfff); if (qdio_allocate_qs(irq_ptr, init_data->no_input_qs, init_data->no_output_qs)) @@ -1237,6 +1305,26 @@ out_err: } EXPORT_SYMBOL_GPL(qdio_allocate); +static void qdio_detect_hsicq(struct qdio_irq *irq_ptr) +{ + struct qdio_q *q = irq_ptr->input_qs[0]; + int i, use_cq = 0; + + if (irq_ptr->nr_input_qs > 1 && queue_type(q) == QDIO_IQDIO_QFMT) + use_cq = 1; + + for_each_output_queue(irq_ptr, q, i) { + if (use_cq) { + if (qdio_enable_async_operation(&q->u.out) < 0) { + use_cq = 0; + continue; + } + } else + qdio_disable_async_operation(&q->u.out); + } + DBF_EVENT("use_cq:%d", use_cq); +} + /** * qdio_establish - establish queues on a qdio subchannel * @init_data: initialization data @@ -1300,8 +1388,8 @@ int qdio_establish(struct qdio_initialize *init_data) } qdio_setup_ssqd_info(irq_ptr); - DBF_EVENT("qDmmwc:%2x", irq_ptr->ssqd_desc.mmwc); - DBF_EVENT("qib ac:%4x", irq_ptr->qib.ac); + + qdio_detect_hsicq(irq_ptr); /* qebsm is now setup if available, initialize buffer states */ qdio_init_buf_states(irq_ptr); @@ -1407,7 +1495,7 @@ static inline int buf_in_between(int bufnr, int start, int count) static int handle_inbound(struct qdio_q *q, unsigned int callflags, int bufnr, int count) { - int used, diff; + int diff; qperf_inc(q, inbound_call); @@ -1440,16 +1528,11 @@ static int handle_inbound(struct qdio_q *q, unsigned int callflags, set: count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count); - - used = atomic_add_return(count, &q->nr_buf_used) - count; - BUG_ON(used + count > QDIO_MAX_BUFFERS_PER_Q); - - /* no need to signal as long as the adapter had free buffers */ - if (used) - return 0; + atomic_add(count, &q->nr_buf_used); if (need_siga_in(q)) return qdio_siga_input(q); + return 0; } @@ -1463,61 +1546,50 @@ set: static int handle_outbound(struct qdio_q *q, unsigned int callflags, int bufnr, int count) { - unsigned char state; + unsigned char state = 0; int used, rc = 0; qperf_inc(q, outbound_call); count = set_buf_states(q, bufnr, SLSB_CU_OUTPUT_PRIMED, count); used = atomic_add_return(count, &q->nr_buf_used); - BUG_ON(used > QDIO_MAX_BUFFERS_PER_Q); + + if (used == QDIO_MAX_BUFFERS_PER_Q) + qperf_inc(q, outbound_queue_full); if (callflags & QDIO_FLAG_PCI_OUT) { q->u.out.pci_out_enabled = 1; qperf_inc(q, pci_request_int); - } - else + } else q->u.out.pci_out_enabled = 0; if (queue_type(q) == QDIO_IQDIO_QFMT) { - if (multicast_outbound(q)) - rc = qdio_kick_outbound_q(q); - else - if ((q->irq_ptr->ssqd_desc.mmwc > 1) && - (count > 1) && - (count <= q->irq_ptr->ssqd_desc.mmwc)) { - /* exploit enhanced SIGA */ - q->u.out.use_enh_siga = 1; - rc = qdio_kick_outbound_q(q); - } else { - /* - * One siga-w per buffer required for 
unicast - * HiperSockets. - */ - q->u.out.use_enh_siga = 0; - while (count--) { - rc = qdio_kick_outbound_q(q); - if (rc) - goto out; - } - } - goto out; - } + unsigned long phys_aob = 0; - if (need_siga_sync(q)) { - qdio_siga_sync_q(q); - goto out; + /* One SIGA-W per buffer required for unicast HSI */ + WARN_ON_ONCE(count > 1 && !multicast_outbound(q)); + + phys_aob = qdio_aob_for_buffer(&q->u.out, bufnr); + + rc = qdio_kick_outbound_q(q, phys_aob); + } else if (need_siga_sync(q)) { + rc = qdio_siga_sync_q(q); + } else { + /* try to fast requeue buffers */ + get_buf_state(q, prev_buf(bufnr), &state, 0); + if (state != SLSB_CU_OUTPUT_PRIMED) + rc = qdio_kick_outbound_q(q, 0); + else + qperf_inc(q, fast_requeue); } - /* try to fast requeue buffers */ - get_buf_state(q, prev_buf(bufnr), &state, 0); - if (state != SLSB_CU_OUTPUT_PRIMED) - rc = qdio_kick_outbound_q(q); + /* in case of SIGA errors we must process the error immediately */ + if (used >= q->u.out.scan_threshold || rc) + tasklet_schedule(&q->tasklet); else - qperf_inc(q, fast_requeue); - -out: - tasklet_schedule(&q->tasklet); + /* free the SBALs in case of no further traffic */ + if (!timer_pending(&q->u.out.timer)) + mod_timer(&q->u.out.timer, jiffies + HZ); return rc; } @@ -1545,8 +1617,9 @@ int do_QDIO(struct ccw_device *cdev, unsigned int callflags, "do%02x b:%02x c:%02x", callflags, bufnr, count); if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE) - return -EBUSY; - + return -EIO; + if (!count) + return 0; if (callflags & QDIO_FLAG_SYNC_INPUT) return handle_inbound(irq_ptr->input_qs[q_nr], callflags, bufnr, count); @@ -1557,30 +1630,241 @@ int do_QDIO(struct ccw_device *cdev, unsigned int callflags, } EXPORT_SYMBOL_GPL(do_QDIO); +/** + * qdio_start_irq - process input buffers + * @cdev: associated ccw_device for the qdio subchannel + * @nr: input queue number + * + * Return codes + * 0 - success + * 1 - irqs not started since new data is available + */ +int qdio_start_irq(struct ccw_device *cdev, int nr) +{ + struct qdio_q *q; + struct qdio_irq *irq_ptr = cdev->private->qdio_data; + + if (!irq_ptr) + return -ENODEV; + q = irq_ptr->input_qs[nr]; + + clear_nonshared_ind(irq_ptr); + qdio_stop_polling(q); + clear_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state); + + /* + * We need to check again to not lose initiative after + * resetting the ACK state. + */ + if (test_nonshared_ind(irq_ptr)) + goto rescan; + if (!qdio_inbound_q_done(q)) + goto rescan; + return 0; + +rescan: + if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED, + &q->u.in.queue_irq_state)) + return 0; + else + return 1; + +} +EXPORT_SYMBOL(qdio_start_irq); + +/** + * qdio_get_next_buffers - process input buffers + * @cdev: associated ccw_device for the qdio subchannel + * @nr: input queue number + * @bufnr: first filled buffer number + * @error: buffers are in error state + * + * Return codes + * < 0 - error + * = 0 - no new buffers found + * > 0 - number of processed buffers + */ +int qdio_get_next_buffers(struct ccw_device *cdev, int nr, int *bufnr, + int *error) +{ + struct qdio_q *q; + int start, end; + struct qdio_irq *irq_ptr = cdev->private->qdio_data; + + if (!irq_ptr) + return -ENODEV; + q = irq_ptr->input_qs[nr]; + + /* + * Cannot rely on automatic sync after interrupt since queues may + * also be examined without interrupt. + */ + if (need_siga_sync(q)) + qdio_sync_queues(q); + + /* check the PCI capable outbound queues. 
*/ + qdio_check_outbound_after_thinint(q); + + if (!qdio_inbound_q_moved(q)) + return 0; + + /* Note: upper-layer MUST stop processing immediately here ... */ + if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)) + return -EIO; + + start = q->first_to_kick; + end = q->first_to_check; + *bufnr = start; + *error = q->qdio_error; + + /* for the next time */ + q->first_to_kick = end; + q->qdio_error = 0; + return sub_buf(end, start); +} +EXPORT_SYMBOL(qdio_get_next_buffers); + +/** + * qdio_stop_irq - disable interrupt processing for the device + * @cdev: associated ccw_device for the qdio subchannel + * @nr: input queue number + * + * Return codes + * 0 - interrupts were already disabled + * 1 - interrupts successfully disabled + */ +int qdio_stop_irq(struct ccw_device *cdev, int nr) +{ + struct qdio_q *q; + struct qdio_irq *irq_ptr = cdev->private->qdio_data; + + if (!irq_ptr) + return -ENODEV; + q = irq_ptr->input_qs[nr]; + + if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED, + &q->u.in.queue_irq_state)) + return 0; + else + return 1; +} +EXPORT_SYMBOL(qdio_stop_irq); + +/** + * qdio_pnso_brinfo() - perform network subchannel op #0 - bridge info. + * @schid: Subchannel ID. + * @cnc: Boolean Change-Notification Control + * @response: Response code will be stored at this address + * @cb: Callback function will be executed for each element + * of the address list + * @priv: Pointer passed from the caller to qdio_pnso_brinfo(), + * and handed on to @cb for each entry + * @type: Type of the address entry passed to the callback + * @entry: Entry containing the address of the specified type + * + * Performs "Store-network-bridging-information list" operation and calls + * the callback function for every entry in the list. If "change- + * notification-control" is set, further changes in the address list + * will be reported via the IPA command.
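Taken together, qdio_start_irq(), qdio_get_next_buffers() and qdio_stop_irq(), all added above, give upper layers a NAPI-style poll cycle. A sketch of how a driver might drive it; my_poll() and my_process() are hypothetical names and the budget handling is illustrative, kernel context assumed:

    #include <asm/qdio.h>   /* the three exports above */

    static void my_process(int bufnr, int count, int error); /* driver-specific */

    /* Hypothetical upper-layer poll cycle for one input queue. */
    static int my_poll(struct ccw_device *cdev, int nr, int budget)
    {
        int bufnr, error, n, done = 0;

        for (;;) {
            while (done < budget) {
                n = qdio_get_next_buffers(cdev, nr, &bufnr, &error);
                if (n < 0)
                    return n;       /* device gone or stopped */
                if (n == 0)
                    break;          /* ring is empty for now */
                my_process(bufnr, n, error);
                done += n;
            }
            if (done >= budget)
                break;              /* out of budget: stay in polling mode */
            if (qdio_start_irq(cdev, nr) != 1)
                break;              /* interrupts re-armed (or error) */
            /* new data raced in while re-arming: poll another round */
        }
        return done;
    }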
+ */ +int qdio_pnso_brinfo(struct subchannel_id schid, + int cnc, u16 *response, + void (*cb)(void *priv, enum qdio_brinfo_entry_type type, + void *entry), + void *priv) +{ + struct chsc_pnso_area *rr; + int rc; + u32 prev_instance = 0; + int isfirstblock = 1; + int i, size, elems; + + rr = (struct chsc_pnso_area *)get_zeroed_page(GFP_KERNEL); + if (rr == NULL) + return -ENOMEM; + do { + /* on the first iteration, naihdr.resume_token will be zero */ + rc = chsc_pnso_brinfo(schid, rr, rr->naihdr.resume_token, cnc); + if (rc != 0 && rc != -EBUSY) + goto out; + if (rr->response.code != 1) { + rc = -EIO; + continue; + } else + rc = 0; + + if (cb == NULL) + continue; + + size = rr->naihdr.naids; + elems = (rr->response.length - + sizeof(struct chsc_header) - + sizeof(struct chsc_brinfo_naihdr)) / + size; + + if (!isfirstblock && (rr->naihdr.instance != prev_instance)) { + /* Inform the caller that they need to scrap */ + /* the data that was already reported via cb */ + rc = -EAGAIN; + break; + } + isfirstblock = 0; + prev_instance = rr->naihdr.instance; + for (i = 0; i < elems; i++) + switch (size) { + case sizeof(struct qdio_brinfo_entry_l3_ipv6): + (*cb)(priv, l3_ipv6_addr, + &rr->entries.l3_ipv6[i]); + break; + case sizeof(struct qdio_brinfo_entry_l3_ipv4): + (*cb)(priv, l3_ipv4_addr, + &rr->entries.l3_ipv4[i]); + break; + case sizeof(struct qdio_brinfo_entry_l2): + (*cb)(priv, l2_addr_lnid, + &rr->entries.l2[i]); + break; + default: + WARN_ON_ONCE(1); + rc = -EIO; + goto out; + } + } while (rr->response.code == 0x0107 || /* channel busy */ + (rr->response.code == 1 && /* list stored */ + /* resume token is non-zero => list incomplete */ + (rr->naihdr.resume_token.t1 || rr->naihdr.resume_token.t2))); + (*response) = rr->response.code; + +out: + free_page((unsigned long)rr); + return rc; +} +EXPORT_SYMBOL_GPL(qdio_pnso_brinfo); + static int __init init_QDIO(void) { int rc; - rc = qdio_setup_init(); + rc = qdio_debug_init(); if (rc) return rc; + rc = qdio_setup_init(); + if (rc) + goto out_debug; rc = tiqdio_allocate_memory(); if (rc) goto out_cache; - rc = qdio_debug_init(); - if (rc) - goto out_ti; rc = tiqdio_register_thinints(); if (rc) - goto out_debug; + goto out_ti; return 0; -out_debug: - qdio_debug_exit(); out_ti: tiqdio_free_memory(); out_cache: qdio_setup_exit(); +out_debug: + qdio_debug_exit(); return rc; } @@ -1588,8 +1872,8 @@ static void __exit exit_QDIO(void) { tiqdio_unregister_thinints(); tiqdio_free_memory(); - qdio_debug_exit(); qdio_setup_exit(); + qdio_debug_exit(); } module_init(init_QDIO); diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c index 7f4a7546514..f5f4a91fab4 100644 --- a/drivers/s390/cio/qdio_setup.c +++ b/drivers/s390/cio/qdio_setup.c @@ -1,13 +1,12 @@ /* - * driver/s390/cio/qdio_setup.c - * * qdio queue initialization * - * Copyright (C) IBM Corp. 2008 + * Copyright IBM Corp. 
2008 * Author(s): Jan Glauber <jang@linux.vnet.ibm.com> */ #include <linux/kernel.h> #include <linux/slab.h> +#include <linux/export.h> #include <asm/qdio.h> #include "cio.h" @@ -19,6 +18,19 @@ #include "qdio_debug.h" static struct kmem_cache *qdio_q_cache; +static struct kmem_cache *qdio_aob_cache; + +struct qaob *qdio_allocate_aob(void) +{ + return kmem_cache_zalloc(qdio_aob_cache, GFP_ATOMIC); +} +EXPORT_SYMBOL_GPL(qdio_allocate_aob); + +void qdio_release_aob(struct qaob *aob) +{ + kmem_cache_free(qdio_aob_cache, aob); +} +EXPORT_SYMBOL_GPL(qdio_release_aob); /* * qebsm is only available under 64bit but the adapter sets the feature @@ -106,10 +118,12 @@ int qdio_allocate_qs(struct qdio_irq *irq_ptr, int nr_input_qs, int nr_output_qs static void setup_queues_misc(struct qdio_q *q, struct qdio_irq *irq_ptr, qdio_handler_t *handler, int i) { - /* must be cleared by every qdio_establish */ - memset(q, 0, ((char *)&q->slib) - ((char *)q)); - memset(q->slib, 0, PAGE_SIZE); + struct slib *slib = q->slib; + /* queue must be cleared for qdio_establish */ + memset(q, 0, sizeof(*q)); + memset(slib, 0, PAGE_SIZE); + q->slib = slib; q->irq_ptr = irq_ptr; q->mask = 1 << (31 - i); q->nr = i; @@ -126,10 +140,8 @@ static void setup_storage_lists(struct qdio_q *q, struct qdio_irq *irq_ptr, q->sl = (struct sl *)((char *)q->slib + PAGE_SIZE / 2); /* fill in sbal */ - for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++) { + for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++) q->sbal[j] = *sbals_array++; - BUG_ON((unsigned long)q->sbal[j] & 0xff); - } /* fill in slib */ if (i > 0) { @@ -152,29 +164,39 @@ static void setup_queues(struct qdio_irq *irq_ptr, struct qdio_q *q; void **input_sbal_array = qdio_init->input_sbal_addr_array; void **output_sbal_array = qdio_init->output_sbal_addr_array; + struct qdio_outbuf_state *output_sbal_state_array = + qdio_init->output_sbal_state_array; int i; for_each_input_queue(irq_ptr, q, i) { - DBF_EVENT("in-q:%1d", i); + DBF_EVENT("inq:%1d", i); setup_queues_misc(q, irq_ptr, qdio_init->input_handler, i); q->is_input_q = 1; + q->u.in.queue_start_poll = qdio_init->queue_start_poll_array ? 
+ qdio_init->queue_start_poll_array[i] : NULL; + setup_storage_lists(q, irq_ptr, input_sbal_array, i); input_sbal_array += QDIO_MAX_BUFFERS_PER_Q; - if (is_thinint_irq(irq_ptr)) + if (is_thinint_irq(irq_ptr)) { tasklet_init(&q->tasklet, tiqdio_inbound_processing, (unsigned long) q); - else + } else { tasklet_init(&q->tasklet, qdio_inbound_processing, (unsigned long) q); + } } for_each_output_queue(irq_ptr, q, i) { DBF_EVENT("outq:%1d", i); setup_queues_misc(q, irq_ptr, qdio_init->output_handler, i); + q->u.out.sbal_state = output_sbal_state_array; + output_sbal_state_array += QDIO_MAX_BUFFERS_PER_Q; + q->is_input_q = 0; + q->u.out.scan_threshold = qdio_init->scan_threshold; setup_storage_lists(q, irq_ptr, output_sbal_array, i); output_sbal_array += QDIO_MAX_BUFFERS_PER_Q; @@ -193,14 +215,10 @@ static void process_ac_flags(struct qdio_irq *irq_ptr, unsigned char qdioac) irq_ptr->siga_flag.output = 1; if (qdioac & AC1_SIGA_SYNC_NEEDED) irq_ptr->siga_flag.sync = 1; - if (qdioac & AC1_AUTOMATIC_SYNC_ON_THININT) - irq_ptr->siga_flag.no_sync_ti = 1; - if (qdioac & AC1_AUTOMATIC_SYNC_ON_OUT_PCI) - irq_ptr->siga_flag.no_sync_out_pci = 1; - - if (irq_ptr->siga_flag.no_sync_out_pci && - irq_ptr->siga_flag.no_sync_ti) - irq_ptr->siga_flag.no_sync_out_ti = 1; + if (!(qdioac & AC1_AUTOMATIC_SYNC_ON_THININT)) + irq_ptr->siga_flag.sync_after_ai = 1; + if (!(qdioac & AC1_AUTOMATIC_SYNC_ON_OUT_PCI)) + irq_ptr->siga_flag.sync_out_after_pci = 1; } static void check_and_setup_qebsm(struct qdio_irq *irq_ptr, @@ -236,40 +254,31 @@ int qdio_setup_get_ssqd(struct qdio_irq *irq_ptr, int rc; DBF_EVENT("getssqd:%4x", schid->sch_no); - if (irq_ptr != NULL) - ssqd = (struct chsc_ssqd_area *)irq_ptr->chsc_page; - else + if (!irq_ptr) { ssqd = (struct chsc_ssqd_area *)__get_free_page(GFP_KERNEL); - memset(ssqd, 0, PAGE_SIZE); - - ssqd->request = (struct chsc_header) { - .length = 0x0010, - .code = 0x0024, - }; - ssqd->first_sch = schid->sch_no; - ssqd->last_sch = schid->sch_no; - ssqd->ssid = schid->ssid; - - if (chsc(ssqd)) - return -EIO; - rc = chsc_error_from_response(ssqd->response.code); + if (!ssqd) + return -ENOMEM; + } else { + ssqd = (struct chsc_ssqd_area *)irq_ptr->chsc_page; + } + + rc = chsc_ssqd(*schid, ssqd); if (rc) - return rc; + goto out; if (!(ssqd->qdio_ssqd.flags & CHSC_FLAG_QDIO_CAPABILITY) || !(ssqd->qdio_ssqd.flags & CHSC_FLAG_VALIDITY) || (ssqd->qdio_ssqd.sch != schid->sch_no)) - return -EINVAL; - - if (irq_ptr != NULL) - memcpy(&irq_ptr->ssqd_desc, &ssqd->qdio_ssqd, - sizeof(struct qdio_ssqd_desc)); - else { - memcpy(data, &ssqd->qdio_ssqd, - sizeof(struct qdio_ssqd_desc)); + rc = -EINVAL; + + if (!rc) + memcpy(data, &ssqd->qdio_ssqd, sizeof(*data)); + +out: + if (!irq_ptr) free_page((unsigned long)ssqd); - } - return 0; + + return rc; } void qdio_setup_ssqd_info(struct qdio_irq *irq_ptr) @@ -277,7 +286,7 @@ void qdio_setup_ssqd_info(struct qdio_irq *irq_ptr) unsigned char qdioac; int rc; - rc = qdio_setup_get_ssqd(irq_ptr, &irq_ptr->schid, NULL); + rc = qdio_setup_get_ssqd(irq_ptr, &irq_ptr->schid, &irq_ptr->ssqd_desc); if (rc) { DBF_ERROR("%4x ssqd ERR", irq_ptr->schid.sch_no); DBF_ERROR("rc:%x", rc); @@ -289,7 +298,8 @@ void qdio_setup_ssqd_info(struct qdio_irq *irq_ptr) check_and_setup_qebsm(irq_ptr, qdioac, irq_ptr->ssqd_desc.sch_token); process_ac_flags(irq_ptr, qdioac); - DBF_EVENT("qdioac:%4x", qdioac); + DBF_EVENT("ac 1:%2x 2:%4x", qdioac, irq_ptr->ssqd_desc.qdioac2); + DBF_EVENT("3:%4x qib:%4x", irq_ptr->ssqd_desc.qdioac3, irq_ptr->qib.ac); } void qdio_release_memory(struct qdio_irq 
*irq_ptr) @@ -311,6 +321,19 @@ void qdio_release_memory(struct qdio_irq *irq_ptr) for (i = 0; i < QDIO_MAX_QUEUES_PER_IRQ; i++) { q = irq_ptr->output_qs[i]; if (q) { + if (q->u.out.use_cq) { + int n; + + for (n = 0; n < QDIO_MAX_BUFFERS_PER_Q; ++n) { + struct qaob *aob = q->u.out.aobs[n]; + if (aob) { + qdio_release_aob(aob); + q->u.out.aobs[n] = NULL; + } + } + + qdio_disable_async_operation(&q->u.out); + } free_page((unsigned long) q->slib); kmem_cache_free(qdio_q_cache, q); } @@ -345,6 +368,7 @@ static void setup_qdr(struct qdio_irq *irq_ptr, int i; irq_ptr->qdr->qfmt = qdio_init->q_format; + irq_ptr->qdr->ac = qdio_init->qdr_ac; irq_ptr->qdr->iqdcnt = qdio_init->no_input_qs; irq_ptr->qdr->oqdcnt = qdio_init->no_output_qs; irq_ptr->qdr->iqdsz = sizeof(struct qdesfmt0) / 4; /* size in words */ @@ -366,6 +390,8 @@ static void setup_qib(struct qdio_irq *irq_ptr, if (qebsm_possible()) irq_ptr->qib.rflags |= QIB_RFLAGS_ENABLE_QEBSM; + irq_ptr->qib.rflags |= init_data->qib_rflags; + irq_ptr->qib.qfmt = init_data->q_format; if (init_data->no_input_qs) irq_ptr->qib.isliba = @@ -397,9 +423,8 @@ int qdio_setup_irq(struct qdio_initialize *init_data) irq_ptr->int_parm = init_data->int_parm; irq_ptr->nr_input_qs = init_data->no_input_qs; irq_ptr->nr_output_qs = init_data->no_output_qs; - - irq_ptr->schid = ccw_device_get_subchannel_id(init_data->cdev); irq_ptr->cdev = init_data->cdev; + ccw_device_get_schid(irq_ptr->cdev, &irq_ptr->schid); setup_queues(irq_ptr, init_data); setup_qib(irq_ptr, init_data); @@ -446,7 +471,7 @@ void qdio_print_subchannel_info(struct qdio_irq *irq_ptr, char s[80]; snprintf(s, 80, "qdio: %s %s on SC %x using " - "AI:%d QEBSM:%d PCI:%d TDD:%d SIGA:%s%s%s%s%s%s\n", + "AI:%d QEBSM:%d PRI:%d TDD:%d SIGA:%s%s%s%s%s\n", dev_name(&cdev->dev), (irq_ptr->qib.qfmt == QDIO_QETH_QFMT) ? "OSA" : ((irq_ptr->qib.qfmt == QDIO_ZFCP_QFMT) ? "ZFCP" : "HS"), @@ -458,29 +483,65 @@ void qdio_print_subchannel_info(struct qdio_irq *irq_ptr, (irq_ptr->siga_flag.input) ? "R" : " ", (irq_ptr->siga_flag.output) ? "W" : " ", (irq_ptr->siga_flag.sync) ? "S" : " ", - (!irq_ptr->siga_flag.no_sync_ti) ? "A" : " ", - (!irq_ptr->siga_flag.no_sync_out_ti) ? "O" : " ", - (!irq_ptr->siga_flag.no_sync_out_pci) ? "P" : " "); + (irq_ptr->siga_flag.sync_after_ai) ? "A" : " ", + (irq_ptr->siga_flag.sync_out_after_pci) ? "P" : " "); printk(KERN_INFO "%s", s); } +int qdio_enable_async_operation(struct qdio_output_q *outq) +{ + outq->aobs = kzalloc(sizeof(struct qaob *) * QDIO_MAX_BUFFERS_PER_Q, + GFP_ATOMIC); + if (!outq->aobs) { + outq->use_cq = 0; + return -ENOMEM; + } + outq->use_cq = 1; + return 0; +} + +void qdio_disable_async_operation(struct qdio_output_q *q) +{ + kfree(q->aobs); + q->aobs = NULL; + q->use_cq = 0; +} + int __init qdio_setup_init(void) { + int rc; + qdio_q_cache = kmem_cache_create("qdio_q", sizeof(struct qdio_q), 256, 0, NULL); if (!qdio_q_cache) return -ENOMEM; + qdio_aob_cache = kmem_cache_create("qdio_aob", + sizeof(struct qaob), + sizeof(struct qaob), + 0, + NULL); + if (!qdio_aob_cache) { + rc = -ENOMEM; + goto free_qdio_q_cache; + } + /* Check for OSA/FCP thin interrupts (bit 67). */ DBF_EVENT("thinint:%1d", (css_general_characteristics.aif_osa) ? 1 : 0); /* Check for QEBSM support in general (bit 58). */ DBF_EVENT("cssQEBSM:%1d", (qebsm_possible()) ? 
1 : 0); - return 0; + rc = 0; +out: + return rc; +free_qdio_q_cache: + kmem_cache_destroy(qdio_q_cache); + goto out; } void qdio_setup_exit(void) { + kmem_cache_destroy(qdio_aob_cache); kmem_cache_destroy(qdio_q_cache); } diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c index 9942c1031b2..5d06253c2a7 100644 --- a/drivers/s390/cio/qdio_thinint.c +++ b/drivers/s390/cio/qdio_thinint.c @@ -1,13 +1,13 @@ /* - * linux/drivers/s390/cio/thinint_qdio.c - * - * Copyright 2000,2009 IBM Corp. + * Copyright IBM Corp. 2000, 2009 * Author(s): Utz Bacher <utz.bacher@de.ibm.com> * Cornelia Huck <cornelia.huck@de.ibm.com> * Jan Glauber <jang@linux.vnet.ibm.com> */ #include <linux/io.h> -#include <asm/atomic.h> +#include <linux/slab.h> +#include <linux/kernel_stat.h> +#include <linux/atomic.h> #include <asm/debug.h> #include <asm/qdio.h> #include <asm/airq.h> @@ -26,33 +26,27 @@ #define TIQDIO_NR_INDICATORS (TIQDIO_NR_NONSHARED_IND + 1) #define TIQDIO_SHARED_IND 63 -/* list of thin interrupt input queues */ -static LIST_HEAD(tiq_list); -DEFINE_MUTEX(tiq_list_lock); - -/* adapter local summary indicator */ -static unsigned char *tiqdio_alsi; - /* device state change indicators */ struct indicator_t { u32 ind; /* u32 because of compare-and-swap performance */ atomic_t count; /* use count, 0 or 1 for non-shared indicators */ }; -static struct indicator_t *q_indicators; -static int css_qdio_omit_svs; +/* list of thin interrupt input queues */ +static LIST_HEAD(tiq_list); +static DEFINE_MUTEX(tiq_list_lock); -static inline unsigned long do_clear_global_summary(void) -{ - register unsigned long __fn asm("1") = 3; - register unsigned long __tmp asm("2"); - register unsigned long __time asm("3"); - - asm volatile( - " .insn rre,0xb2650000,2,0" - : "+d" (__fn), "=d" (__tmp), "=d" (__time)); - return __time; -} +/* Adapter interrupt definitions */ +static void tiqdio_thinint_handler(struct airq_struct *airq); + +static struct airq_struct tiqdio_airq = { + .handler = tiqdio_thinint_handler, + .isc = QDIO_AIRQ_ISC, +}; + +static struct indicator_t *q_indicators; + +u64 last_ai_time; /* returns addr for the device state change indicator */ static u32 *get_indicator(void) @@ -83,146 +77,167 @@ static void put_indicator(u32 *addr) void tiqdio_add_input_queues(struct qdio_irq *irq_ptr) { - struct qdio_q *q; - int i; - - /* No TDD facility? If we must use SIGA-s we can also omit SVS. 
*/ - if (!css_qdio_omit_svs && irq_ptr->siga_flag.sync) - css_qdio_omit_svs = 1; - mutex_lock(&tiq_list_lock); - for_each_input_queue(irq_ptr, q, i) - list_add_rcu(&q->entry, &tiq_list); + list_add_rcu(&irq_ptr->input_qs[0]->entry, &tiq_list); mutex_unlock(&tiq_list_lock); - xchg(irq_ptr->dsci, 1); + xchg(irq_ptr->dsci, 1 << 7); } void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr) { struct qdio_q *q; - int i; - for (i = 0; i < irq_ptr->nr_input_qs; i++) { - q = irq_ptr->input_qs[i]; - /* if establish triggered an error */ - if (!q || !q->entry.prev || !q->entry.next) - continue; + q = irq_ptr->input_qs[0]; + /* if establish triggered an error */ + if (!q || !q->entry.prev || !q->entry.next) + return; - mutex_lock(&tiq_list_lock); - list_del_rcu(&q->entry); - mutex_unlock(&tiq_list_lock); - synchronize_rcu(); - } + mutex_lock(&tiq_list_lock); + list_del_rcu(&q->entry); + mutex_unlock(&tiq_list_lock); + synchronize_rcu(); } -static inline int shared_ind(struct qdio_irq *irq_ptr) +static inline int has_multiple_inq_on_dsci(struct qdio_irq *irq_ptr) +{ + return irq_ptr->nr_input_qs > 1; +} + +static inline int references_shared_dsci(struct qdio_irq *irq_ptr) { return irq_ptr->dsci == &q_indicators[TIQDIO_SHARED_IND].ind; } -/** - * tiqdio_thinint_handler - thin interrupt handler for qdio - * @ind: pointer to adapter local summary indicator - * @drv_data: NULL - */ -static void tiqdio_thinint_handler(void *ind, void *drv_data) +static inline int shared_ind(struct qdio_irq *irq_ptr) { - struct qdio_q *q; + return references_shared_dsci(irq_ptr) || + has_multiple_inq_on_dsci(irq_ptr); +} - /* - * SVS only when needed: issue SVS to benefit from iqdio interrupt - * avoidance (SVS clears adapter interrupt suppression overwrite) - */ - if (!css_qdio_omit_svs) - do_clear_global_summary(); +void clear_nonshared_ind(struct qdio_irq *irq_ptr) +{ + if (!is_thinint_irq(irq_ptr)) + return; + if (shared_ind(irq_ptr)) + return; + xchg(irq_ptr->dsci, 0); +} - /* - * reset local summary indicator (tiqdio_alsi) to stop adapter - * interrupts for now - */ - xchg((u8 *)ind, 0); +int test_nonshared_ind(struct qdio_irq *irq_ptr) +{ + if (!is_thinint_irq(irq_ptr)) + return 0; + if (shared_ind(irq_ptr)) + return 0; + if (*irq_ptr->dsci) + return 1; + else + return 0; +} - /* protect tiq_list entries, only changed in activate or shutdown */ - rcu_read_lock(); +static inline u32 clear_shared_ind(void) +{ + if (!atomic_read(&q_indicators[TIQDIO_SHARED_IND].count)) + return 0; + return xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 0); +} - /* check for work on all inbound thinint queues */ - list_for_each_entry_rcu(q, &tiq_list, entry) - /* only process queues from changed sets */ - if (*q->irq_ptr->dsci) { - qperf_inc(q, adapter_int); +static inline void tiqdio_call_inq_handlers(struct qdio_irq *irq) +{ + struct qdio_q *q; + int i; - /* only clear it if the indicator is non-shared */ + for_each_input_queue(irq, q, i) { + if (!references_shared_dsci(irq) && + has_multiple_inq_on_dsci(irq)) + xchg(q->irq_ptr->dsci, 0); + + if (q->u.in.queue_start_poll) { + /* skip if polling is enabled or already in work */ + if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED, + &q->u.in.queue_irq_state)) { + qperf_inc(q, int_discarded); + continue; + } + + /* avoid dsci clear here, done after processing */ + q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr, + q->irq_ptr->int_parm); + } else { if (!shared_ind(q->irq_ptr)) xchg(q->irq_ptr->dsci, 0); + /* - * don't call inbound processing directly since - * that could starve other thinint 
queues + * Call inbound processing but not directly + * since that could starve other thinint queues. */ tasklet_schedule(&q->tasklet); } + } +} - rcu_read_unlock(); +/** + * tiqdio_thinint_handler - thin interrupt handler for qdio + * @alsi: pointer to adapter local summary indicator + * @data: NULL + */ +static void tiqdio_thinint_handler(struct airq_struct *airq) +{ + u32 si_used = clear_shared_ind(); + struct qdio_q *q; - /* - * if we used the shared indicator clear it now after all queues - * were processed - */ - if (atomic_read(&q_indicators[TIQDIO_SHARED_IND].count)) { - xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 0); + last_ai_time = S390_lowcore.int_clock; + inc_irq_stat(IRQIO_QAI); - /* prevent racing */ - if (*tiqdio_alsi) - xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 1); + /* protect tiq_list entries, only changed in activate or shutdown */ + rcu_read_lock(); + + /* check for work on all inbound thinint queues */ + list_for_each_entry_rcu(q, &tiq_list, entry) { + struct qdio_irq *irq; + + /* only process queues from changed sets */ + irq = q->irq_ptr; + if (unlikely(references_shared_dsci(irq))) { + if (!si_used) + continue; + } else if (!*irq->dsci) + continue; + + tiqdio_call_inq_handlers(irq); + + qperf_inc(q, adapter_int); } + rcu_read_unlock(); } static int set_subchannel_ind(struct qdio_irq *irq_ptr, int reset) { - struct scssc_area *scssc_area; + struct chsc_scssc_area *scssc = (void *)irq_ptr->chsc_page; + u64 summary_indicator_addr, subchannel_indicator_addr; int rc; - scssc_area = (struct scssc_area *)irq_ptr->chsc_page; - memset(scssc_area, 0, PAGE_SIZE); - if (reset) { - scssc_area->summary_indicator_addr = 0; - scssc_area->subchannel_indicator_addr = 0; + summary_indicator_addr = 0; + subchannel_indicator_addr = 0; } else { - scssc_area->summary_indicator_addr = virt_to_phys(tiqdio_alsi); - scssc_area->subchannel_indicator_addr = - virt_to_phys(irq_ptr->dsci); + summary_indicator_addr = virt_to_phys(tiqdio_airq.lsi_ptr); + subchannel_indicator_addr = virt_to_phys(irq_ptr->dsci); } - scssc_area->request = (struct chsc_header) { - .length = 0x0fe0, - .code = 0x0021, - }; - scssc_area->operation_code = 0; - scssc_area->ks = PAGE_DEFAULT_KEY >> 4; - scssc_area->kc = PAGE_DEFAULT_KEY >> 4; - scssc_area->isc = QDIO_AIRQ_ISC; - scssc_area->schid = irq_ptr->schid; - - /* enable the time delay disablement facility */ - if (css_general_characteristics.aif_tdd) - scssc_area->word_with_d_bit = 0x10000000; - - rc = chsc(scssc_area); - if (rc) - return -EIO; - - rc = chsc_error_from_response(scssc_area->response.code); + rc = chsc_sadc(irq_ptr->schid, scssc, summary_indicator_addr, + subchannel_indicator_addr); if (rc) { DBF_ERROR("%4x SSI r:%4x", irq_ptr->schid.sch_no, - scssc_area->response.code); - DBF_ERROR_HEX(&scssc_area->response, sizeof(void *)); - return rc; + scssc->response.code); + goto out; } DBF_EVENT("setscind"); - DBF_HEX(&scssc_area->summary_indicator_addr, sizeof(unsigned long)); - DBF_HEX(&scssc_area->subchannel_indicator_addr, sizeof(unsigned long)); - return 0; + DBF_HEX(&summary_indicator_addr, sizeof(summary_indicator_addr)); + DBF_HEX(&subchannel_indicator_addr, sizeof(subchannel_indicator_addr)); +out: + return rc; } /* allocate non-shared indicators and shared indicator */ @@ -242,14 +257,12 @@ void tiqdio_free_memory(void) int __init tiqdio_register_thinints(void) { - isc_register(QDIO_AIRQ_ISC); - tiqdio_alsi = s390_register_adapter_interrupt(&tiqdio_thinint_handler, - NULL, QDIO_AIRQ_ISC); - if (IS_ERR(tiqdio_alsi)) { - DBF_EVENT("RTI:%lx", 
PTR_ERR(tiqdio_alsi)); - tiqdio_alsi = NULL; - isc_unregister(QDIO_AIRQ_ISC); - return -ENOMEM; + int rc; + + rc = register_adapter_interrupt(&tiqdio_airq); + if (rc) { + DBF_EVENT("RTI:%x", rc); + return rc; } return 0; } @@ -258,12 +271,6 @@ int qdio_establish_thinint(struct qdio_irq *irq_ptr) { if (!is_thinint_irq(irq_ptr)) return 0; - - /* Check for aif time delay disablement. If installed, - * omit SVS even under LPAR - */ - if (css_general_characteristics.aif_tdd) - css_qdio_omit_svs = 1; return set_subchannel_ind(irq_ptr, 0); } @@ -281,16 +288,12 @@ void qdio_shutdown_thinint(struct qdio_irq *irq_ptr) return; /* reset adapter interrupt indicators */ - put_indicator(irq_ptr->dsci); set_subchannel_ind(irq_ptr, 1); + put_indicator(irq_ptr->dsci); } void __exit tiqdio_unregister_thinints(void) { WARN_ON(!list_empty(&tiq_list)); - - if (tiqdio_alsi) { - s390_unregister_adapter_interrupt(tiqdio_alsi, QDIO_AIRQ_ISC); - isc_unregister(QDIO_AIRQ_ISC); - } + unregister_adapter_interrupt(&tiqdio_airq); } diff --git a/drivers/s390/cio/scm.c b/drivers/s390/cio/scm.c new file mode 100644 index 00000000000..15268edc54a --- /dev/null +++ b/drivers/s390/cio/scm.c @@ -0,0 +1,288 @@ +/* + * Recognize and maintain s390 storage class memory. + * + * Copyright IBM Corp. 2012 + * Author(s): Sebastian Ott <sebott@linux.vnet.ibm.com> + */ + +#include <linux/device.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/slab.h> +#include <linux/init.h> +#include <linux/err.h> +#include <asm/eadm.h> +#include "chsc.h" + +static struct device *scm_root; + +#define to_scm_dev(n) container_of(n, struct scm_device, dev) +#define to_scm_drv(d) container_of(d, struct scm_driver, drv) + +static int scmdev_probe(struct device *dev) +{ + struct scm_device *scmdev = to_scm_dev(dev); + struct scm_driver *scmdrv = to_scm_drv(dev->driver); + + return scmdrv->probe ? scmdrv->probe(scmdev) : -ENODEV; +} + +static int scmdev_remove(struct device *dev) +{ + struct scm_device *scmdev = to_scm_dev(dev); + struct scm_driver *scmdrv = to_scm_drv(dev->driver); + + return scmdrv->remove ? 
scmdrv->remove(scmdev) : -ENODEV; +} + +static int scmdev_uevent(struct device *dev, struct kobj_uevent_env *env) +{ + return add_uevent_var(env, "MODALIAS=scm:scmdev"); +} + +static struct bus_type scm_bus_type = { + .name = "scm", + .probe = scmdev_probe, + .remove = scmdev_remove, + .uevent = scmdev_uevent, +}; + +/** + * scm_driver_register() - register a scm driver + * @scmdrv: driver to be registered + */ +int scm_driver_register(struct scm_driver *scmdrv) +{ + struct device_driver *drv = &scmdrv->drv; + + drv->bus = &scm_bus_type; + + return driver_register(drv); +} +EXPORT_SYMBOL_GPL(scm_driver_register); + +/** + * scm_driver_unregister() - deregister a scm driver + * @scmdrv: driver to be deregistered + */ +void scm_driver_unregister(struct scm_driver *scmdrv) +{ + driver_unregister(&scmdrv->drv); +} +EXPORT_SYMBOL_GPL(scm_driver_unregister); + +void scm_irq_handler(struct aob *aob, int error) +{ + struct aob_rq_header *aobrq = (void *) aob->request.data; + struct scm_device *scmdev = aobrq->scmdev; + struct scm_driver *scmdrv = to_scm_drv(scmdev->dev.driver); + + scmdrv->handler(scmdev, aobrq->data, error); +} +EXPORT_SYMBOL_GPL(scm_irq_handler); + +#define scm_attr(name) \ +static ssize_t show_##name(struct device *dev, \ + struct device_attribute *attr, char *buf) \ +{ \ + struct scm_device *scmdev = to_scm_dev(dev); \ + int ret; \ + \ + device_lock(dev); \ + ret = sprintf(buf, "%u\n", scmdev->attrs.name); \ + device_unlock(dev); \ + \ + return ret; \ +} \ +static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL); + +scm_attr(persistence); +scm_attr(oper_state); +scm_attr(data_state); +scm_attr(rank); +scm_attr(release); +scm_attr(res_id); + +static struct attribute *scmdev_attrs[] = { + &dev_attr_persistence.attr, + &dev_attr_oper_state.attr, + &dev_attr_data_state.attr, + &dev_attr_rank.attr, + &dev_attr_release.attr, + &dev_attr_res_id.attr, + NULL, +}; + +static struct attribute_group scmdev_attr_group = { + .attrs = scmdev_attrs, +}; + +static const struct attribute_group *scmdev_attr_groups[] = { + &scmdev_attr_group, + NULL, +}; + +static void scmdev_release(struct device *dev) +{ + struct scm_device *scmdev = to_scm_dev(dev); + + kfree(scmdev); +} + +static void scmdev_setup(struct scm_device *scmdev, struct sale *sale, + unsigned int size, unsigned int max_blk_count) +{ + dev_set_name(&scmdev->dev, "%016llx", (unsigned long long) sale->sa); + scmdev->nr_max_block = max_blk_count; + scmdev->address = sale->sa; + scmdev->size = 1UL << size; + scmdev->attrs.rank = sale->rank; + scmdev->attrs.persistence = sale->p; + scmdev->attrs.oper_state = sale->op_state; + scmdev->attrs.data_state = sale->data_state; + scmdev->attrs.rank = sale->rank; + scmdev->attrs.release = sale->r; + scmdev->attrs.res_id = sale->rid; + scmdev->dev.parent = scm_root; + scmdev->dev.bus = &scm_bus_type; + scmdev->dev.release = scmdev_release; + scmdev->dev.groups = scmdev_attr_groups; +} + +/* + * Check for state-changes, notify the driver and userspace. 
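+ *
+ * A change here means that the rank or the operational state reported
+ * for this increment differs from the cached attributes. The bound
+ * driver is then notified with SCM_CHANGE and a KOBJ_CHANGE uevent is
+ * emitted for userspace.
+ *
+ * For illustration, a driver's notify callback might look like the
+ * sketch below (my_notify is a made-up name; the event type follows
+ * the SCM_CHANGE/SCM_AVAIL constants used in this file):
+ *
+ *	static void my_notify(struct scm_device *scmdev, enum scm_event event)
+ *	{
+ *		if (event == SCM_CHANGE)
+ *			dev_info(&scmdev->dev, "rank or state changed\n");
+ *	}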
+ */ +static void scmdev_update(struct scm_device *scmdev, struct sale *sale) +{ + struct scm_driver *scmdrv; + bool changed; + + device_lock(&scmdev->dev); + changed = scmdev->attrs.rank != sale->rank || + scmdev->attrs.oper_state != sale->op_state; + scmdev->attrs.rank = sale->rank; + scmdev->attrs.oper_state = sale->op_state; + if (!scmdev->dev.driver) + goto out; + scmdrv = to_scm_drv(scmdev->dev.driver); + if (changed && scmdrv->notify) + scmdrv->notify(scmdev, SCM_CHANGE); +out: + device_unlock(&scmdev->dev); + if (changed) + kobject_uevent(&scmdev->dev.kobj, KOBJ_CHANGE); +} + +static int check_address(struct device *dev, void *data) +{ + struct scm_device *scmdev = to_scm_dev(dev); + struct sale *sale = data; + + return scmdev->address == sale->sa; +} + +static struct scm_device *scmdev_find(struct sale *sale) +{ + struct device *dev; + + dev = bus_find_device(&scm_bus_type, NULL, sale, check_address); + + return dev ? to_scm_dev(dev) : NULL; +} + +static int scm_add(struct chsc_scm_info *scm_info, size_t num) +{ + struct sale *sale, *scmal = scm_info->scmal; + struct scm_device *scmdev; + int ret; + + for (sale = scmal; sale < scmal + num; sale++) { + scmdev = scmdev_find(sale); + if (scmdev) { + scmdev_update(scmdev, sale); + /* Release reference from scm_find(). */ + put_device(&scmdev->dev); + continue; + } + scmdev = kzalloc(sizeof(*scmdev), GFP_KERNEL); + if (!scmdev) + return -ENODEV; + scmdev_setup(scmdev, sale, scm_info->is, scm_info->mbc); + ret = device_register(&scmdev->dev); + if (ret) { + /* Release reference from device_initialize(). */ + put_device(&scmdev->dev); + return ret; + } + } + + return 0; +} + +int scm_update_information(void) +{ + struct chsc_scm_info *scm_info; + u64 token = 0; + size_t num; + int ret; + + scm_info = (void *)__get_free_page(GFP_KERNEL | GFP_DMA); + if (!scm_info) + return -ENOMEM; + + do { + ret = chsc_scm_info(scm_info, token); + if (ret) + break; + + num = (scm_info->response.length - + (offsetof(struct chsc_scm_info, scmal) - + offsetof(struct chsc_scm_info, response)) + ) / sizeof(struct sale); + + ret = scm_add(scm_info, num); + if (ret) + break; + + token = scm_info->restok; + } while (token); + + free_page((unsigned long)scm_info); + + return ret; +} + +static int scm_dev_avail(struct device *dev, void *unused) +{ + struct scm_driver *scmdrv = to_scm_drv(dev->driver); + struct scm_device *scmdev = to_scm_dev(dev); + + if (dev->driver && scmdrv->notify) + scmdrv->notify(scmdev, SCM_AVAIL); + + return 0; +} + +int scm_process_availability_information(void) +{ + return bus_for_each_dev(&scm_bus_type, NULL, NULL, scm_dev_avail); +} + +static int __init scm_init(void) +{ + int ret; + + ret = bus_register(&scm_bus_type); + if (ret) + return ret; + + scm_root = root_device_register("scm"); + if (IS_ERR(scm_root)) { + bus_unregister(&scm_bus_type); + return PTR_ERR(scm_root); + } + + scm_update_information(); + return 0; +} +subsys_initcall_sync(scm_init); diff --git a/drivers/s390/crypto/Makefile b/drivers/s390/crypto/Makefile index f0a12d2eb78..771faf7094d 100644 --- a/drivers/s390/crypto/Makefile +++ b/drivers/s390/crypto/Makefile @@ -2,16 +2,7 @@ # S/390 crypto devices # -ifdef CONFIG_ZCRYPT_MONOLITHIC - -z90crypt-objs := zcrypt_mono.o ap_bus.o zcrypt_api.o \ - zcrypt_pcica.o zcrypt_pcicc.o zcrypt_pcixcc.o zcrypt_cex2a.o -obj-$(CONFIG_ZCRYPT) += z90crypt.o - -else - ap-objs := ap_bus.o obj-$(CONFIG_ZCRYPT) += ap.o zcrypt_api.o zcrypt_pcicc.o zcrypt_pcixcc.o -obj-$(CONFIG_ZCRYPT) += zcrypt_pcica.o zcrypt_cex2a.o - -endif 
+obj-$(CONFIG_ZCRYPT) += zcrypt_pcica.o zcrypt_cex2a.o zcrypt_cex4.o +obj-$(CONFIG_ZCRYPT) += zcrypt_msgtype6.o zcrypt_msgtype50.o diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c index 20836eff88c..4038437ff03 100644 --- a/drivers/s390/crypto/ap_bus.c +++ b/drivers/s390/crypto/ap_bus.c @@ -1,11 +1,10 @@ /* - * linux/drivers/s390/crypto/ap_bus.c - * - * Copyright (C) 2006 IBM Corporation + * Copyright IBM Corp. 2006, 2012 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com> * Martin Schwidefsky <schwidefsky@de.ibm.com> * Ralph Wuerthner <rwuerthn@de.ibm.com> * Felix Beck <felix.beck@de.ibm.com> + * Holger Dengler <hd@linux.vnet.ibm.com> * * Adjunct processor bus. * @@ -27,22 +26,24 @@ #define KMSG_COMPONENT "ap" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt +#include <linux/kernel_stat.h> #include <linux/module.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/err.h> #include <linux/interrupt.h> #include <linux/workqueue.h> +#include <linux/slab.h> #include <linux/notifier.h> #include <linux/kthread.h> #include <linux/mutex.h> #include <asm/reset.h> #include <asm/airq.h> -#include <asm/atomic.h> -#include <asm/system.h> +#include <linux/atomic.h> #include <asm/isc.h> #include <linux/hrtimer.h> #include <linux/ktime.h> +#include <asm/facility.h> #include "ap_bus.h" @@ -57,32 +58,35 @@ static inline void ap_schedule_poll_timer(void); static int __ap_poll_device(struct ap_device *ap_dev, unsigned long *flags); static int ap_device_remove(struct device *dev); static int ap_device_probe(struct device *dev); -static void ap_interrupt_handler(void *unused1, void *unused2); +static void ap_interrupt_handler(struct airq_struct *airq); static void ap_reset(struct ap_device *ap_dev); static void ap_config_timeout(unsigned long ptr); static int ap_select_domain(void); +static void ap_query_configuration(void); /* * Module description. */ MODULE_AUTHOR("IBM Corporation"); -MODULE_DESCRIPTION("Adjunct Processor Bus driver, " - "Copyright 2006 IBM Corporation"); +MODULE_DESCRIPTION("Adjunct Processor Bus driver, " \ + "Copyright IBM Corp. 2006, 2012"); MODULE_LICENSE("GPL"); +MODULE_ALIAS("z90crypt"); /* * Module parameter */ int ap_domain_index = -1; /* Adjunct Processor Domain Index */ -module_param_named(domain, ap_domain_index, int, 0000); +module_param_named(domain, ap_domain_index, int, S_IRUSR|S_IRGRP); MODULE_PARM_DESC(domain, "domain index for ap devices"); EXPORT_SYMBOL(ap_domain_index); static int ap_thread_flag = 0; -module_param_named(poll_thread, ap_thread_flag, int, 0000); +module_param_named(poll_thread, ap_thread_flag, int, S_IRUSR|S_IRGRP); MODULE_PARM_DESC(poll_thread, "Turn on/off poll thread, default is 0 (off)."); static struct device *ap_root_device = NULL; +static struct ap_config_info *ap_configuration; static DEFINE_SPINLOCK(ap_device_list_lock); static LIST_HEAD(ap_device_list); @@ -103,7 +107,6 @@ static DECLARE_WAIT_QUEUE_HEAD(ap_poll_wait); static struct task_struct *ap_poll_kthread = NULL; static DEFINE_MUTEX(ap_poll_thread_mutex); static DEFINE_SPINLOCK(ap_poll_timer_lock); -static void *ap_interrupt_indicator; static struct hrtimer ap_poll_timer; /* In LPAR poll with 4kHz frequency. Poll every 250000 nanoseconds. 
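 * (4 kHz corresponds to one poll per 1 s / 4000 = 250000 ns.)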
* If z/VM change to 1500000 nanoseconds to adjust to z/VM polling.*/ @@ -117,13 +120,21 @@ static int ap_suspend_flag; static int user_set_domain = 0; static struct bus_type ap_bus_type; +/* Adapter interrupt definitions */ +static int ap_airq_flag; + +static struct airq_struct ap_airq = { + .handler = ap_interrupt_handler, + .isc = AP_ISC, +}; + /** * ap_using_interrupts() - Returns non-zero if interrupt support is * available. */ static inline int ap_using_interrupts(void) { - return ap_interrupt_indicator != NULL; + return ap_airq_flag; } /** @@ -153,15 +164,21 @@ static inline int ap_instructions_available(void) */ static int ap_interrupts_available(void) { - unsigned long long facility_bits[2]; + return test_facility(2) && test_facility(65); +} - if (stfle(facility_bits, 2) <= 1) - return 0; - if (!(facility_bits[0] & (1ULL << 61)) || - !(facility_bits[1] & (1ULL << 62))) - return 0; - return 1; +/** + * ap_configuration_available(): Test if AP configuration + * information is available. + * + * Returns 1 if AP configuration information is available. + */ +#ifdef CONFIG_64BIT +static int ap_configuration_available(void) +{ + return test_facility(2) && test_facility(12); } +#endif /** * ap_test_queue(): Test adjunct processor queue. @@ -219,7 +236,7 @@ ap_queue_interruption_control(ap_qid_t qid, void *ind) register struct ap_queue_status reg1_out asm ("1"); register void *reg2 asm ("2") = ind; asm volatile( - ".long 0xb2af0000" /* PQAP(RAPQ) */ + ".long 0xb2af0000" /* PQAP(AQIC) */ : "+d" (reg0), "+d" (reg1_in), "=d" (reg1_out), "+d" (reg2) : : "cc" ); @@ -227,6 +244,96 @@ ap_queue_interruption_control(ap_qid_t qid, void *ind) } #endif +#ifdef CONFIG_64BIT +static inline struct ap_queue_status +__ap_query_functions(ap_qid_t qid, unsigned int *functions) +{ + register unsigned long reg0 asm ("0") = 0UL | qid | (1UL << 23); + register struct ap_queue_status reg1 asm ("1") = AP_QUEUE_STATUS_INVALID; + register unsigned long reg2 asm ("2"); + + asm volatile( + ".long 0xb2af0000\n" /* PQAP(TAPQ) */ + "0:\n" + EX_TABLE(0b, 0b) + : "+d" (reg0), "+d" (reg1), "=d" (reg2) + : + : "cc"); + + *functions = (unsigned int)(reg2 >> 32); + return reg1; +} +#endif + +#ifdef CONFIG_64BIT +static inline int __ap_query_configuration(struct ap_config_info *config) +{ + register unsigned long reg0 asm ("0") = 0x04000000UL; + register unsigned long reg1 asm ("1") = -EINVAL; + register unsigned char *reg2 asm ("2") = (unsigned char *)config; + + asm volatile( + ".long 0xb2af0000\n" /* PQAP(QCI) */ + "0: la %1,0\n" + "1:\n" + EX_TABLE(0b, 1b) + : "+d" (reg0), "+d" (reg1), "+d" (reg2) + : + : "cc"); + + return reg1; +} +#endif + +/** + * ap_query_functions(): Query supported functions. + * @qid: The AP queue number + * @functions: Pointer to functions field. + * + * Returns + * 0 on success. + * -ENODEV if queue not valid. + * -EBUSY if device busy. 
+ * -EINVAL if query function is not supported + */ +static int ap_query_functions(ap_qid_t qid, unsigned int *functions) +{ +#ifdef CONFIG_64BIT + struct ap_queue_status status; + int i; + status = __ap_query_functions(qid, functions); + + for (i = 0; i < AP_MAX_RESET; i++) { + if (ap_queue_status_invalid_test(&status)) + return -ENODEV; + + switch (status.response_code) { + case AP_RESPONSE_NORMAL: + return 0; + case AP_RESPONSE_RESET_IN_PROGRESS: + case AP_RESPONSE_BUSY: + break; + case AP_RESPONSE_Q_NOT_AVAIL: + case AP_RESPONSE_DECONFIGURED: + case AP_RESPONSE_CHECKSTOPPED: + case AP_RESPONSE_INVALID_ADDRESS: + return -ENODEV; + case AP_RESPONSE_OTHERWISE_CHANGED: + break; + default: + break; + } + if (i < AP_MAX_RESET - 1) { + udelay(5); + status = __ap_query_functions(qid, functions); + } + } + return -EBUSY; +#else + return -EINVAL; +#endif +} + /** * ap_queue_enable_interruption(): Enable interruption on an AP. * @qid: The AP queue number @@ -253,6 +360,12 @@ static int ap_queue_enable_interruption(ap_qid_t qid, void *ind) break; case AP_RESPONSE_RESET_IN_PROGRESS: case AP_RESPONSE_BUSY: + if (i < AP_MAX_RESET - 1) { + udelay(5); + status = ap_queue_interruption_control(qid, + ind); + continue; + } break; case AP_RESPONSE_Q_NOT_AVAIL: case AP_RESPONSE_DECONFIGURED: @@ -300,13 +413,13 @@ __ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length, register unsigned long reg2 asm ("2") = (unsigned long) msg; register unsigned long reg3 asm ("3") = (unsigned long) length; register unsigned long reg4 asm ("4") = (unsigned int) (psmid >> 32); - register unsigned long reg5 asm ("5") = (unsigned int) psmid; + register unsigned long reg5 asm ("5") = psmid & 0xffffffff; if (special == 1) reg0 |= 0x400000UL; asm volatile ( - "0: .long 0xb2ad0042\n" /* DQAP */ + "0: .long 0xb2ad0042\n" /* NQAP */ " brc 2,0b" : "+d" (reg0), "=d" (reg1), "+d" (reg2), "+d" (reg3) : "d" (reg4), "d" (reg5), "m" (*(msgblock *) msg) @@ -365,7 +478,7 @@ __ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length) asm volatile( - "0: .long 0xb2ae0064\n" + "0: .long 0xb2ae0064\n" /* DQAP */ " brc 6,0b\n" : "+d" (reg0), "=d" (reg1), "+d" (reg2), "+d" (reg4), "+d" (reg5), "+d" (reg6), "+d" (reg7), @@ -478,12 +591,18 @@ static int ap_init_queue(ap_qid_t qid) if (rc != -ENODEV && rc != -EBUSY) break; if (i < AP_MAX_RESET - 1) { - udelay(5); + /* Time we are waiting until we give up (0.7sec * 90). + * Since the actual request (in progress) will not + * interrupted immediately for the reset command, + * we have to be patient. In worst case we have to + * wait 60sec + reset time (some msec). + */ + schedule_timeout(AP_RESET_TIMEOUT); status = ap_test_queue(qid, &dummy, &dummy); } } if (rc == 0 && ap_using_interrupts()) { - rc = ap_queue_enable_interruption(qid, ap_interrupt_indicator); + rc = ap_queue_enable_interruption(qid, ap_airq.lsi_ptr); /* If interruption mode is supported by the machine, * but an AP can not be enabled for interruption then * the AP will be discarded. 
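		 * ap_queue_enable_interruption() reports that case through
		 * rc, which ap_init_queue() then hands back to its caller.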
*/ @@ -568,6 +687,34 @@ static ssize_t ap_request_count_show(struct device *dev, static DEVICE_ATTR(request_count, 0444, ap_request_count_show, NULL); +static ssize_t ap_requestq_count_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ap_device *ap_dev = to_ap_dev(dev); + int rc; + + spin_lock_bh(&ap_dev->lock); + rc = snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->requestq_count); + spin_unlock_bh(&ap_dev->lock); + return rc; +} + +static DEVICE_ATTR(requestq_count, 0444, ap_requestq_count_show, NULL); + +static ssize_t ap_pendingq_count_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ap_device *ap_dev = to_ap_dev(dev); + int rc; + + spin_lock_bh(&ap_dev->lock); + rc = snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->pendingq_count); + spin_unlock_bh(&ap_dev->lock); + return rc; +} + +static DEVICE_ATTR(pendingq_count, 0444, ap_pendingq_count_show, NULL); + static ssize_t ap_modalias_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -576,11 +723,23 @@ static ssize_t ap_modalias_show(struct device *dev, static DEVICE_ATTR(modalias, 0444, ap_modalias_show, NULL); +static ssize_t ap_functions_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ap_device *ap_dev = to_ap_dev(dev); + return snprintf(buf, PAGE_SIZE, "0x%08X\n", ap_dev->functions); +} + +static DEVICE_ATTR(ap_functions, 0444, ap_functions_show, NULL); + static struct attribute *ap_dev_attrs[] = { &dev_attr_hwtype.attr, &dev_attr_depth.attr, &dev_attr_request_count.attr, + &dev_attr_requestq_count.attr, + &dev_attr_pendingq_count.attr, &dev_attr_modalias.attr, + &dev_attr_ap_functions.attr, NULL }; static struct attribute_group ap_dev_attr_group = { @@ -676,13 +835,23 @@ static int ap_bus_suspend(struct device *dev, pm_message_t state) static int ap_bus_resume(struct device *dev) { - int rc = 0; struct ap_device *ap_dev = to_ap_dev(dev); + int rc; if (ap_suspend_flag) { ap_suspend_flag = 0; - if (!ap_interrupts_available()) - ap_interrupt_indicator = NULL; + if (ap_interrupts_available()) { + if (!ap_using_interrupts()) { + rc = register_adapter_interrupt(&ap_airq); + ap_airq_flag = (rc == 0); + } + } else { + if (ap_using_interrupts()) { + unregister_adapter_interrupt(&ap_airq); + ap_airq_flag = 0; + } + } + ap_query_configuration(); if (!user_set_domain) { ap_domain_index = -1; ap_select_domain(); @@ -702,7 +871,10 @@ static int ap_bus_resume(struct device *dev) tasklet_schedule(&ap_tasklet); if (ap_thread_flag) rc = ap_poll_thread_start(); - } + else + rc = 0; + } else + rc = 0; if (AP_QID_QUEUE(ap_dev->qid) != ap_domain_index) { spin_lock_bh(&ap_dev->lock); ap_dev->qid = AP_MKQID(AP_QID_DEVICE(ap_dev->qid), @@ -729,10 +901,15 @@ static int ap_device_probe(struct device *dev) int rc; ap_dev->drv = ap_drv; + + spin_lock_bh(&ap_device_list_lock); + list_add(&ap_dev->list, &ap_device_list); + spin_unlock_bh(&ap_device_list_lock); + rc = ap_drv->probe ? 
ap_drv->probe(ap_dev) : -ENODEV; - if (!rc) { + if (rc) { spin_lock_bh(&ap_device_list_lock); - list_add(&ap_dev->list, &ap_device_list); + list_del_init(&ap_dev->list); spin_unlock_bh(&ap_device_list_lock); } return rc; @@ -751,12 +928,12 @@ static void __ap_flush_queue(struct ap_device *ap_dev) list_for_each_entry_safe(ap_msg, next, &ap_dev->pendingq, list) { list_del_init(&ap_msg->list); ap_dev->pendingq_count--; - ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV)); + ap_msg->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV)); } list_for_each_entry_safe(ap_msg, next, &ap_dev->requestq, list) { list_del_init(&ap_msg->list); ap_dev->requestq_count--; - ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV)); + ap_msg->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV)); } } @@ -806,6 +983,16 @@ void ap_driver_unregister(struct ap_driver *ap_drv) } EXPORT_SYMBOL(ap_driver_unregister); +void ap_bus_force_rescan(void) +{ + /* reconfigure the AP bus rescan timer. */ + mod_timer(&ap_config_timer, jiffies + ap_config_time * HZ); + /* processing a asynchronous bus rescan */ + queue_work(ap_work_queue, &ap_config_work); + flush_work(&ap_config_work); +} +EXPORT_SYMBOL(ap_bus_force_rescan); + /* * AP bus attributes. */ @@ -816,6 +1003,28 @@ static ssize_t ap_domain_show(struct bus_type *bus, char *buf) static BUS_ATTR(ap_domain, 0444, ap_domain_show, NULL); +static ssize_t ap_control_domain_mask_show(struct bus_type *bus, char *buf) +{ + if (ap_configuration != NULL) { /* QCI not supported */ + if (test_facility(76)) { /* format 1 - 256 bit domain field */ + return snprintf(buf, PAGE_SIZE, + "0x%08x%08x%08x%08x%08x%08x%08x%08x\n", + ap_configuration->adm[0], ap_configuration->adm[1], + ap_configuration->adm[2], ap_configuration->adm[3], + ap_configuration->adm[4], ap_configuration->adm[5], + ap_configuration->adm[6], ap_configuration->adm[7]); + } else { /* format 0 - 16 bit domain field */ + return snprintf(buf, PAGE_SIZE, "%08x%08x\n", + ap_configuration->adm[0], ap_configuration->adm[1]); + } + } else { + return snprintf(buf, PAGE_SIZE, "not supported\n"); + } +} + +static BUS_ATTR(ap_control_domain_mask, 0444, + ap_control_domain_mask_show, NULL); + static ssize_t ap_config_time_show(struct bus_type *bus, char *buf) { return snprintf(buf, PAGE_SIZE, "%d\n", ap_config_time); @@ -901,6 +1110,7 @@ static BUS_ATTR(poll_timeout, 0644, poll_timeout_show, poll_timeout_store); static struct bus_attribute *const ap_bus_attrs[] = { &bus_attr_ap_domain, + &bus_attr_ap_control_domain_mask, &bus_attr_config_time, &bus_attr_poll_thread, &bus_attr_ap_interrupts, @@ -908,6 +1118,65 @@ static struct bus_attribute *const ap_bus_attrs[] = { NULL, }; +static inline int ap_test_config(unsigned int *field, unsigned int nr) +{ + if (nr > 0xFFu) + return 0; + return ap_test_bit((field + (nr >> 5)), (nr & 0x1f)); +} + +/* + * ap_test_config_card_id(): Test, whether an AP card ID is configured. + * @id AP card ID + * + * Returns 0 if the card is not configured + * 1 if the card is configured or + * if the configuration information is not available + */ +static inline int ap_test_config_card_id(unsigned int id) +{ + if (!ap_configuration) + return 1; + return ap_test_config(ap_configuration->apm, id); +} + +/* + * ap_test_config_domain(): Test, whether an AP usage domain is configured. 
+ * @domain AP usage domain ID + * + * Returns 0 if the usage domain is not configured + * 1 if the usage domain is configured or + * if the configuration information is not available + */ +static inline int ap_test_config_domain(unsigned int domain) +{ + if (!ap_configuration) + return 1; + return ap_test_config(ap_configuration->aqm, domain); +} + +/** + * ap_query_configuration(): Query AP configuration information. + * + * Query information of installed cards and configured domains from AP. + */ +static void ap_query_configuration(void) +{ +#ifdef CONFIG_64BIT + if (ap_configuration_available()) { + if (!ap_configuration) + ap_configuration = + kzalloc(sizeof(struct ap_config_info), + GFP_KERNEL); + if (ap_configuration) + __ap_query_configuration(ap_configuration); + } else + ap_configuration = NULL; +#else + ap_configuration = NULL; +#endif +} + /** * ap_select_domain(): Select an AP domain. * @@ -916,6 +1185,7 @@ static struct bus_attribute *const ap_bus_attrs[] = { static int ap_select_domain(void) { int queue_depth, device_type, count, max_count, best_domain; + ap_qid_t qid; int rc, i, j; /* @@ -929,9 +1199,13 @@ static int ap_select_domain(void) best_domain = -1; max_count = 0; for (i = 0; i < AP_DOMAINS; i++) { + if (!ap_test_config_domain(i)) + continue; count = 0; for (j = 0; j < AP_DEVICES; j++) { - ap_qid_t qid = AP_MKQID(j, i); + if (!ap_test_config_card_id(j)) + continue; + qid = AP_MKQID(j, i); rc = ap_query_queue(qid, &queue_depth, &device_type); if (rc) continue; @@ -1046,8 +1320,9 @@ out: return rc; } -static void ap_interrupt_handler(void *unused1, void *unused2) +static void ap_interrupt_handler(struct airq_struct *airq) { + inc_irq_stat(IRQIO_APB); tasklet_schedule(&ap_tasklet); } @@ -1076,16 +1351,22 @@ static void ap_scan_bus(struct work_struct *unused) struct device *dev; ap_qid_t qid; int queue_depth, device_type; + unsigned int device_functions; int rc, i; - if (ap_select_domain() != 0) + ap_query_configuration(); + if (ap_select_domain() != 0) { return; + } for (i = 0; i < AP_DEVICES; i++) { qid = AP_MKQID(i, ap_domain_index); dev = bus_find_device(&ap_bus_type, NULL, (void *)(unsigned long)qid, __ap_scan_bus); - rc = ap_query_queue(qid, &queue_depth, &device_type); + if (ap_test_config_card_id(i)) + rc = ap_query_queue(qid, &queue_depth, &device_type); + else + rc = -ENODEV; if (dev) { if (rc == -EBUSY) { set_current_state(TASK_UNINTERRUPTIBLE); @@ -1124,10 +1405,23 @@ static void ap_scan_bus(struct work_struct *unused) INIT_LIST_HEAD(&ap_dev->list); setup_timer(&ap_dev->timeout, ap_request_timeout, (unsigned long) ap_dev); - if (device_type == 0) - ap_probe_device_type(ap_dev); - else + switch (device_type) { + case 0: + /* device type probing for old cards */ + if (ap_probe_device_type(ap_dev)) { + kfree(ap_dev); + continue; + } + break; + default: ap_dev->device_type = device_type; + } + + rc = ap_query_functions(qid, &device_functions); + if (!rc) + ap_dev->functions = device_functions; + else + ap_dev->functions = 0u; ap_dev->device.bus = &ap_bus_type; ap_dev->device.parent = ap_root_device; @@ -1164,18 +1458,16 @@ ap_config_timeout(unsigned long ptr) } /** - * ap_schedule_poll_timer(): Schedule poll timer. + * __ap_schedule_poll_timer(): Schedule poll timer. 
* * Set up the timer to run the poll tasklet */ -static inline void ap_schedule_poll_timer(void) +static inline void __ap_schedule_poll_timer(void) { ktime_t hr_time; spin_lock_bh(&ap_poll_timer_lock); - if (ap_using_interrupts() || ap_suspend_flag) - goto out; - if (hrtimer_is_queued(&ap_poll_timer)) + if (hrtimer_is_queued(&ap_poll_timer) || ap_suspend_flag) goto out; if (ktime_to_ns(hrtimer_expires_remaining(&ap_poll_timer)) <= 0) { hr_time = ktime_set(0, poll_timeout); @@ -1187,6 +1479,18 @@ out: } /** + * ap_schedule_poll_timer(): Schedule poll timer. + * + * Set up the timer to run the poll tasklet + */ +static inline void ap_schedule_poll_timer(void) +{ + if (ap_using_interrupts()) + return; + __ap_schedule_poll_timer(); +} + +/** * ap_poll_read(): Receive pending reply messages from an AP device. * @ap_dev: pointer to the AP device * @flags: pointer to control flags, bit 2^0 is set if another poll is @@ -1212,7 +1516,7 @@ static int ap_poll_read(struct ap_device *ap_dev, unsigned long *flags) continue; list_del_init(&ap_msg->list); ap_dev->pendingq_count--; - ap_dev->drv->receive(ap_dev, ap_msg, ap_dev->reply); + ap_msg->receive(ap_dev, ap_msg, ap_dev->reply); break; } if (ap_dev->queue_count > 0) @@ -1267,8 +1571,9 @@ static int ap_poll_write(struct ap_device *ap_dev, unsigned long *flags) *flags |= 1; *flags |= 2; break; - case AP_RESPONSE_Q_FULL: case AP_RESPONSE_RESET_IN_PROGRESS: + __ap_schedule_poll_timer(); + case AP_RESPONSE_Q_FULL: *flags |= 2; break; case AP_RESPONSE_MESSAGE_TOO_BIG: @@ -1332,10 +1637,10 @@ static int __ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_ms return -EBUSY; case AP_RESPONSE_REQ_FAC_NOT_INST: case AP_RESPONSE_MESSAGE_TOO_BIG: - ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-EINVAL)); + ap_msg->receive(ap_dev, ap_msg, ERR_PTR(-EINVAL)); return -EINVAL; default: /* Device is gone. */ - ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV)); + ap_msg->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV)); return -ENODEV; } } else { @@ -1353,6 +1658,10 @@ void ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg) unsigned long flags; int rc; + /* For asynchronous message handling a valid receive-callback + * is required. */ + BUG_ON(!ap_msg->receive); + spin_lock_bh(&ap_dev->lock); if (!ap_dev->unregistered) { /* Make room on the queue by polling for finished requests. */ @@ -1364,7 +1673,7 @@ void ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg) if (rc == -ENODEV) ap_dev->unregistered = 1; } else { - ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV)); + ap_msg->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV)); rc = -ENODEV; } spin_unlock_bh(&ap_dev->lock); @@ -1434,6 +1743,8 @@ static void ap_reset(struct ap_device *ap_dev) rc = ap_init_queue(ap_dev->qid); if (rc == -ENODEV) ap_dev->unregistered = 1; + else + __ap_schedule_poll_timer(); } static int __ap_poll_device(struct ap_device *ap_dev, unsigned long *flags) @@ -1465,7 +1776,7 @@ static void ap_poll_all(unsigned long dummy) * important that no requests on any AP get lost. 
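	 * Clearing the indicator before scanning means an interrupt that
	 * arrives while the queues are being polled costs at most one
	 * extra pass instead of a lost wakeup.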
*/ if (ap_using_interrupts()) - xchg((u8 *)ap_interrupt_indicator, 0); + xchg(ap_airq.lsi_ptr, 0); do { flags = 0; spin_lock(&ap_device_list_lock); @@ -1497,7 +1808,7 @@ static int ap_poll_thread(void *data) int requests; struct ap_device *ap_dev; - set_user_nice(current, 19); + set_user_nice(current, MAX_NICE); while (1) { if (ap_suspend_flag) return 0; @@ -1538,7 +1849,7 @@ static int ap_poll_thread_start(void) mutex_lock(&ap_poll_thread_mutex); if (!ap_poll_kthread) { ap_poll_kthread = kthread_run(ap_poll_thread, NULL, "appoll"); - rc = IS_ERR(ap_poll_kthread) ? PTR_ERR(ap_poll_kthread) : 0; + rc = PTR_RET(ap_poll_kthread); if (rc) ap_poll_kthread = NULL; } @@ -1624,13 +1935,8 @@ int __init ap_module_init(void) return -ENODEV; } if (ap_interrupts_available()) { - isc_register(AP_ISC); - ap_interrupt_indicator = s390_register_adapter_interrupt( - &ap_interrupt_handler, NULL, AP_ISC); - if (IS_ERR(ap_interrupt_indicator)) { - ap_interrupt_indicator = NULL; - isc_unregister(AP_ISC); - } + rc = register_adapter_interrupt(&ap_airq); + ap_airq_flag = (rc == 0); } register_reset_call(&ap_reset_call); @@ -1647,7 +1953,7 @@ int __init ap_module_init(void) /* Create /sys/devices/ap. */ ap_root_device = root_device_register("ap"); - rc = IS_ERR(ap_root_device) ? PTR_ERR(ap_root_device) : 0; + rc = PTR_RET(ap_root_device); if (rc) goto out_bus; @@ -1657,6 +1963,7 @@ int __init ap_module_init(void) goto out_root; } + ap_query_configuration(); if (ap_select_domain() == 0) ap_scan_bus(NULL); @@ -1697,10 +2004,8 @@ out_bus: bus_unregister(&ap_bus_type); out: unregister_reset_call(&ap_reset_call); - if (ap_using_interrupts()) { - s390_unregister_adapter_interrupt(ap_interrupt_indicator, AP_ISC); - isc_unregister(AP_ISC); - } + if (ap_using_interrupts()) + unregister_adapter_interrupt(&ap_airq); return rc; } @@ -1736,13 +2041,9 @@ void ap_module_exit(void) bus_remove_file(&ap_bus_type, ap_bus_attrs[i]); bus_unregister(&ap_bus_type); unregister_reset_call(&ap_reset_call); - if (ap_using_interrupts()) { - s390_unregister_adapter_interrupt(ap_interrupt_indicator, AP_ISC); - isc_unregister(AP_ISC); - } + if (ap_using_interrupts()) + unregister_adapter_interrupt(&ap_airq); } -#ifndef CONFIG_ZCRYPT_MONOLITHIC module_init(ap_module_init); module_exit(ap_module_exit); -#endif diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h index 4785d07cd44..6405ae24a7a 100644 --- a/drivers/s390/crypto/ap_bus.h +++ b/drivers/s390/crypto/ap_bus.h @@ -1,11 +1,10 @@ /* - * linux/drivers/s390/crypto/ap_bus.h - * - * Copyright (C) 2006 IBM Corporation + * Copyright IBM Corp. 2006, 2012 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com> * Martin Schwidefsky <schwidefsky@de.ibm.com> * Ralph Wuerthner <rwuerthn@de.ibm.com> * Felix Beck <felix.beck@de.ibm.com> + * Holger Dengler <hd@linux.vnet.ibm.com> * * Adjunct processor bus header file. * @@ -34,7 +33,7 @@ #define AP_DEVICES 64 /* Number of AP devices. */ #define AP_DOMAINS 16 /* Number of AP domains. */ #define AP_MAX_RESET 90 /* Maximum number of resets. */ -#define AP_RESET_TIMEOUT (HZ/2) /* Time in ticks for reset timeouts. */ +#define AP_RESET_TIMEOUT (HZ*0.7) /* Time in ticks for reset timeouts. */ #define AP_CONFIG_TIME 30 /* Time in seconds between AP bus rescans. */ #define AP_POLL_TIME 1 /* Time in ticks between receive polls. 
*/ @@ -72,7 +71,25 @@ struct ap_queue_status { unsigned int int_enabled : 1; unsigned int response_code : 8; unsigned int pad2 : 16; -}; +} __packed; + +#define AP_QUEUE_STATUS_INVALID \ + { 1, 1, 1, 0xF, 1, 0xFF, 0xFFFF } + +static inline +int ap_queue_status_invalid_test(struct ap_queue_status *status) +{ + struct ap_queue_status invalid = AP_QUEUE_STATUS_INVALID; + return !(memcmp(status, &invalid, sizeof(struct ap_queue_status))); +} + +#define AP_MAX_BITS 31 +static inline int ap_test_bit(unsigned int *ptr, unsigned int nr) +{ + if (nr > AP_MAX_BITS) + return 0; + return (*ptr & (0x80000000u >> nr)) != 0; +} #define AP_RESPONSE_NORMAL 0x00 #define AP_RESPONSE_Q_NOT_AVAIL 0x01 @@ -99,6 +116,17 @@ struct ap_queue_status { #define AP_DEVICE_TYPE_CEX2C 7 #define AP_DEVICE_TYPE_CEX3A 8 #define AP_DEVICE_TYPE_CEX3C 9 +#define AP_DEVICE_TYPE_CEX4 10 + +/* + * Known function facilities + */ +#define AP_FUNC_MEX4K 1 +#define AP_FUNC_CRT4K 2 +#define AP_FUNC_COPRO 3 +#define AP_FUNC_ACCEL 4 +#define AP_FUNC_EP11 5 +#define AP_FUNC_APXA 6 /* * AP reset flag states @@ -116,9 +144,6 @@ struct ap_driver { int (*probe)(struct ap_device *); void (*remove)(struct ap_device *); - /* receive is called from tasklet context */ - void (*receive)(struct ap_device *, struct ap_message *, - struct ap_message *); int request_timeout; /* request timeout in jiffies */ }; @@ -136,6 +161,7 @@ struct ap_device { ap_qid_t qid; /* AP queue id. */ int queue_depth; /* AP queue depth.*/ int device_type; /* AP device type. */ + unsigned int functions; /* AP device function bitfield. */ int unregistered; /* marks AP device as unregistered */ struct timer_list timeout; /* Timer for request timeouts. */ int reset; /* Reset required after req. timeout. */ @@ -163,8 +189,22 @@ struct ap_message { void *private; /* ap driver private pointer. */ unsigned int special:1; /* Used for special commands. */ + /* receive is called from tasklet context */ + void (*receive)(struct ap_device *, struct ap_message *, + struct ap_message *); }; +struct ap_config_info { + unsigned int special_command:1; + unsigned int ap_extended:1; + unsigned char reserved1:6; + unsigned char reserved2[15]; + unsigned int apm[8]; /* AP ID mask */ + unsigned int aqm[8]; /* AP queue mask */ + unsigned int adm[8]; /* AP domain mask */ + unsigned char reserved4[16]; +} __packed; + #define AP_DEVICE(dt) \ .dev_type=(dt), \ .match_flags=AP_DEVICE_ID_MATCH_DEVICE_TYPE, @@ -179,6 +219,7 @@ static inline void ap_init_message(struct ap_message *ap_msg) ap_msg->psmid = 0; ap_msg->length = 0; ap_msg->special = 0; + ap_msg->receive = NULL; } /* @@ -192,6 +233,7 @@ int ap_recv(ap_qid_t, unsigned long long *, void *, size_t); void ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg); void ap_cancel_message(struct ap_device *ap_dev, struct ap_message *ap_msg); void ap_flush_queue(struct ap_device *ap_dev); +void ap_bus_force_rescan(void); int ap_module_init(void); void ap_module_exit(void); diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c index ba50fe02e57..0e18c5dcd91 100644 --- a/drivers/s390/crypto/zcrypt_api.c +++ b/drivers/s390/crypto/zcrypt_api.c @@ -1,9 +1,7 @@ /* - * linux/drivers/s390/crypto/zcrypt_api.c - * * zcrypt 2.1.0 * - * Copyright (C) 2001, 2006 IBM Corporation + * Copyright IBM Corp. 
2001, 2012 * Author(s): Robert Burroughs * Eric Rossman (edrossma@us.ibm.com) * Cornelia Huck <cornelia.huck@de.ibm.com> @@ -11,6 +9,7 @@ * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com) * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com> * Ralph Wuerthner <rwuerthn@de.ibm.com> + * MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -35,29 +34,45 @@ #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/compat.h> -#include <linux/smp_lock.h> -#include <asm/atomic.h> +#include <linux/slab.h> +#include <linux/atomic.h> #include <asm/uaccess.h> #include <linux/hw_random.h> +#include <linux/debugfs.h> +#include <asm/debug.h> +#include "zcrypt_debug.h" #include "zcrypt_api.h" +#include "zcrypt_msgtype6.h" + /* * Module description. */ MODULE_AUTHOR("IBM Corporation"); -MODULE_DESCRIPTION("Cryptographic Coprocessor interface, " - "Copyright 2001, 2006 IBM Corporation"); +MODULE_DESCRIPTION("Cryptographic Coprocessor interface, " \ + "Copyright IBM Corp. 2001, 2012"); MODULE_LICENSE("GPL"); static DEFINE_SPINLOCK(zcrypt_device_lock); static LIST_HEAD(zcrypt_device_list); static int zcrypt_device_count = 0; static atomic_t zcrypt_open_count = ATOMIC_INIT(0); +static atomic_t zcrypt_rescan_count = ATOMIC_INIT(0); + +atomic_t zcrypt_rescan_req = ATOMIC_INIT(0); +EXPORT_SYMBOL(zcrypt_rescan_req); static int zcrypt_rng_device_add(void); static void zcrypt_rng_device_remove(void); +static DEFINE_SPINLOCK(zcrypt_ops_list_lock); +static LIST_HEAD(zcrypt_ops_list); + +static debug_info_t *zcrypt_dbf_common; +static debug_info_t *zcrypt_dbf_devices; +static struct dentry *debugfs_root; + /* * Device attributes common for all crypto devices. */ @@ -87,6 +102,8 @@ static ssize_t zcrypt_online_store(struct device *dev, if (sscanf(buf, "%d\n", &online) != 1 || online < 0 || online > 1) return -EINVAL; zdev->online = online; + ZCRYPT_DBF_DEV(DBF_INFO, zdev, "dev%04xo%dman", zdev->ap_dev->qid, + zdev->online); if (!online) ap_flush_queue(zdev->ap_dev); return count; @@ -105,6 +122,24 @@ static struct attribute_group zcrypt_device_attr_group = { }; /** + * Process a rescan of the transport layer. + * + * Returns 1, if the rescan has been processed, otherwise 0. + */ +static inline int zcrypt_process_rescan(void) +{ + if (atomic_read(&zcrypt_rescan_req)) { + atomic_set(&zcrypt_rescan_req, 0); + atomic_inc(&zcrypt_rescan_count); + ap_bus_force_rescan(); + ZCRYPT_DBF_COMMON(DBF_INFO, "rescan%07d", + atomic_inc_return(&zcrypt_rescan_count)); + return 1; + } + return 0; +} + +/** * __zcrypt_increase_preference(): Increase preference of a crypto device. * @zdev: Pointer the crypto device * @@ -192,6 +227,7 @@ struct zcrypt_device *zcrypt_device_alloc(size_t max_response_size) zdev->reply.length = max_response_size; spin_lock_init(&zdev->lock); INIT_LIST_HEAD(&zdev->list); + zdev->dbf_area = zcrypt_dbf_devices; return zdev; out_free: @@ -217,6 +253,8 @@ int zcrypt_device_register(struct zcrypt_device *zdev) { int rc; + if (!zdev->ops) + return -ENODEV; rc = sysfs_create_group(&zdev->ap_dev->device.kobj, &zcrypt_device_attr_group); if (rc) @@ -225,6 +263,8 @@ int zcrypt_device_register(struct zcrypt_device *zdev) kref_init(&zdev->refcount); spin_lock_bh(&zcrypt_device_lock); zdev->online = 1; /* New devices are online by default. 
*/ + ZCRYPT_DBF_DEV(DBF_INFO, zdev, "dev%04xo%dreg", zdev->ap_dev->qid, + zdev->online); list_add_tail(&zdev->list, &zcrypt_device_list); __zcrypt_increase_preference(zdev); zcrypt_device_count++; @@ -271,6 +311,67 @@ void zcrypt_device_unregister(struct zcrypt_device *zdev) } EXPORT_SYMBOL(zcrypt_device_unregister); +void zcrypt_msgtype_register(struct zcrypt_ops *zops) +{ + if (zops->owner) { + spin_lock_bh(&zcrypt_ops_list_lock); + list_add_tail(&zops->list, &zcrypt_ops_list); + spin_unlock_bh(&zcrypt_ops_list_lock); + } +} +EXPORT_SYMBOL(zcrypt_msgtype_register); + +void zcrypt_msgtype_unregister(struct zcrypt_ops *zops) +{ + spin_lock_bh(&zcrypt_ops_list_lock); + list_del_init(&zops->list); + spin_unlock_bh(&zcrypt_ops_list_lock); +} +EXPORT_SYMBOL(zcrypt_msgtype_unregister); + +static inline +struct zcrypt_ops *__ops_lookup(unsigned char *name, int variant) +{ + struct zcrypt_ops *zops; + int found = 0; + + spin_lock_bh(&zcrypt_ops_list_lock); + list_for_each_entry(zops, &zcrypt_ops_list, list) { + if ((zops->variant == variant) && + (!strncmp(zops->owner->name, name, MODULE_NAME_LEN))) { + found = 1; + break; + } + } + spin_unlock_bh(&zcrypt_ops_list_lock); + + if (!found) + return NULL; + return zops; +} + +struct zcrypt_ops *zcrypt_msgtype_request(unsigned char *name, int variant) +{ + struct zcrypt_ops *zops = NULL; + + zops = __ops_lookup(name, variant); + if (!zops) { + request_module("%s", name); + zops = __ops_lookup(name, variant); + } + if ((!zops) || (!try_module_get(zops->owner))) + return NULL; + return zops; +} +EXPORT_SYMBOL(zcrypt_msgtype_request); + +void zcrypt_msgtype_release(struct zcrypt_ops *zops) +{ + if (zops) + module_put(zops->owner); +} +EXPORT_SYMBOL(zcrypt_msgtype_release); + /** * zcrypt_read (): Not supported beyond zcrypt 1.3.1. * @@ -301,7 +402,7 @@ static ssize_t zcrypt_write(struct file *filp, const char __user *buf, static int zcrypt_open(struct inode *inode, struct file *filp) { atomic_inc(&zcrypt_open_count); - return 0; + return nonseekable_open(inode, filp); } /** @@ -396,8 +497,15 @@ static long zcrypt_rsa_crt(struct ica_rsa_modexpo_crt *crt) if (copied == 0) { unsigned int len; spin_unlock_bh(&zcrypt_device_lock); - /* len is max 256 / 2 - 120 = 8 */ - len = crt->inputdatalength / 2 - 120; + /* len is max 256 / 2 - 120 = 8 + * For bigger device just assume len of leading + * 0s is 8 as stated in the requirements for + * ica_rsa_modexpo_crt struct in zcrypt.h. 
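The zcrypt_msgtype_register()/zcrypt_msgtype_request() helpers added above implement a small lookup-or-load registry: a card driver asks for a message type by name and variant, and on a miss the kernel module loader is asked to bring in the provider before one retry. A minimal userspace sketch of that pattern, assuming stand-in registry entries and a stubbed request_module(), not the kernel API:

#include <stdio.h>
#include <string.h>

struct ops {
	const char *name;
	int variant;
	int registered;
};

static struct ops registry[] = {
	{ "zcrypt_msgtype50", 0, 0 },	/* provider not "loaded" yet */
	{ "zcrypt_msgtype6",  0, 1 },
};

static struct ops *lookup(const char *name, int variant)
{
	for (size_t i = 0; i < sizeof(registry) / sizeof(registry[0]); i++)
		if (registry[i].registered &&
		    registry[i].variant == variant &&
		    !strcmp(registry[i].name, name))
			return &registry[i];
	return NULL;
}

/* Stand-in for request_module(): pretend the module init ran and
 * registered its ops with the registry. */
static void request_module_stub(const char *name)
{
	for (size_t i = 0; i < sizeof(registry) / sizeof(registry[0]); i++)
		if (!strcmp(registry[i].name, name))
			registry[i].registered = 1;
}

static struct ops *msgtype_request(const char *name, int variant)
{
	struct ops *ops = lookup(name, variant);

	if (!ops) {			/* miss: load the provider, retry once */
		request_module_stub(name);
		ops = lookup(name, variant);
	}
	return ops;
}

int main(void)
{
	struct ops *ops = msgtype_request("zcrypt_msgtype50", 0);

	printf("got ops: %s\n", ops ? ops->name : "(none)");
	return 0;
}

The real code additionally pins the provider with try_module_get() on the ops owner, so a msgtype module cannot be unloaded while a card driver still holds its ops; zcrypt_msgtype_release() drops that reference.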
+ */ + if (crt->inputdatalength <= 256) + len = crt->inputdatalength / 2 - 120; + else + len = 8; if (len > sizeof(z1)) return -EFAULT; z1 = z2 = z3 = 0; @@ -405,6 +513,7 @@ static long zcrypt_rsa_crt(struct ica_rsa_modexpo_crt *crt) copy_from_user(&z2, crt->bp_key, len) || copy_from_user(&z3, crt->u_mult_inv, len)) return -EFAULT; + z1 = z2 = z3 = 0; copied = 1; /* * We have to restart device lookup - @@ -447,9 +556,9 @@ static long zcrypt_send_cprb(struct ica_xcRB *xcRB) spin_lock_bh(&zcrypt_device_lock); list_for_each_entry(zdev, &zcrypt_device_list, list) { if (!zdev->online || !zdev->ops->send_cprb || - (xcRB->user_defined != AUTOSELECT && - AP_QID_DEVICE(zdev->ap_dev->qid) != xcRB->user_defined) - ) + (zdev->ops->variant == MSGTYPE06_VARIANT_EP11) || + (xcRB->user_defined != AUTOSELECT && + AP_QID_DEVICE(zdev->ap_dev->qid) != xcRB->user_defined)) continue; zcrypt_device_get(zdev); get_device(&zdev->ap_dev->device); @@ -474,6 +583,90 @@ static long zcrypt_send_cprb(struct ica_xcRB *xcRB) return -ENODEV; } +struct ep11_target_dev_list { + unsigned short targets_num; + struct ep11_target_dev *targets; +}; + +static bool is_desired_ep11dev(unsigned int dev_qid, + struct ep11_target_dev_list dev_list) +{ + int n; + + for (n = 0; n < dev_list.targets_num; n++, dev_list.targets++) { + if ((AP_QID_DEVICE(dev_qid) == dev_list.targets->ap_id) && + (AP_QID_QUEUE(dev_qid) == dev_list.targets->dom_id)) { + return true; + } + } + return false; +} + +static long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb) +{ + struct zcrypt_device *zdev; + bool autoselect = false; + int rc; + struct ep11_target_dev_list ep11_dev_list = { + .targets_num = 0x00, + .targets = NULL, + }; + + ep11_dev_list.targets_num = (unsigned short) xcrb->targets_num; + + /* empty list indicates autoselect (all available targets) */ + if (ep11_dev_list.targets_num == 0) + autoselect = true; + else { + ep11_dev_list.targets = kcalloc((unsigned short) + xcrb->targets_num, + sizeof(struct ep11_target_dev), + GFP_KERNEL); + if (!ep11_dev_list.targets) + return -ENOMEM; + + if (copy_from_user(ep11_dev_list.targets, + (struct ep11_target_dev __force __user *) + xcrb->targets, xcrb->targets_num * + sizeof(struct ep11_target_dev))) + return -EFAULT; + } + + spin_lock_bh(&zcrypt_device_lock); + list_for_each_entry(zdev, &zcrypt_device_list, list) { + /* check if device is eligible */ + if (!zdev->online || + zdev->ops->variant != MSGTYPE06_VARIANT_EP11) + continue; + + /* check if device is selected as valid target */ + if (!is_desired_ep11dev(zdev->ap_dev->qid, ep11_dev_list) && + !autoselect) + continue; + + zcrypt_device_get(zdev); + get_device(&zdev->ap_dev->device); + zdev->request_count++; + __zcrypt_decrease_preference(zdev); + if (try_module_get(zdev->ap_dev->drv->driver.owner)) { + spin_unlock_bh(&zcrypt_device_lock); + rc = zdev->ops->send_ep11_cprb(zdev, xcrb); + spin_lock_bh(&zcrypt_device_lock); + module_put(zdev->ap_dev->drv->driver.owner); + } else { + rc = -EAGAIN; + } + zdev->request_count--; + __zcrypt_increase_preference(zdev); + put_device(&zdev->ap_dev->device); + zcrypt_device_put(zdev); + spin_unlock_bh(&zcrypt_device_lock); + return rc; + } + spin_unlock_bh(&zcrypt_device_lock); + return -ENODEV; +} + static long zcrypt_rng(char *buffer) { struct zcrypt_device *zdev; @@ -634,6 +827,11 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd, do { rc = zcrypt_rsa_modexpo(&mex); } while (rc == -EAGAIN); + /* on failure: retry once again after a requested rescan */ + if ((rc == -ENODEV) && 
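zcrypt_send_ep11_cprb() above matches a queue against the user-supplied target list by card and domain index, with an empty list meaning autoselect. A userspace sketch of that matching; the AP_QID_* macros are reproduced here from ap_bus.h as assumptions (device index in bits 8..13, domain in the low byte), so treat the exact bit widths as illustrative:

#include <stdbool.h>
#include <stdio.h>

#define AP_MKQID(dev, queue)	((((dev) & 63) << 8) | ((queue) & 255))
#define AP_QID_DEVICE(qid)	(((qid) >> 8) & 63)
#define AP_QID_QUEUE(qid)	((qid) & 255)

struct ep11_target_dev {
	unsigned short ap_id;	/* card index */
	unsigned short dom_id;	/* domain index */
};

static bool is_desired_ep11dev(unsigned int qid,
			       const struct ep11_target_dev *targets,
			       unsigned short n)
{
	for (unsigned short i = 0; i < n; i++)
		if (AP_QID_DEVICE(qid) == targets[i].ap_id &&
		    AP_QID_QUEUE(qid) == targets[i].dom_id)
			return true;
	return false;
}

int main(void)
{
	struct ep11_target_dev targets[] = { { 2, 6 }, { 5, 6 } };
	unsigned int qid = AP_MKQID(5, 6);

	/* targets_num == 0 would mean autoselect: every EP11 queue is
	 * eligible, as in the patch above. */
	printf("match: %d\n", is_desired_ep11dev(qid, targets, 2));
	return 0;
}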
(zcrypt_process_rescan())) + do { + rc = zcrypt_rsa_modexpo(&mex); + } while (rc == -EAGAIN); if (rc) return rc; return put_user(mex.outputdatalength, &umex->outputdatalength); @@ -646,6 +844,11 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd, do { rc = zcrypt_rsa_crt(&crt); } while (rc == -EAGAIN); + /* on failure: retry once again after a requested rescan */ + if ((rc == -ENODEV) && (zcrypt_process_rescan())) + do { + rc = zcrypt_rsa_crt(&crt); + } while (rc == -EAGAIN); if (rc) return rc; return put_user(crt.outputdatalength, &ucrt->outputdatalength); @@ -658,10 +861,32 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd, do { rc = zcrypt_send_cprb(&xcRB); } while (rc == -EAGAIN); + /* on failure: retry once again after a requested rescan */ + if ((rc == -ENODEV) && (zcrypt_process_rescan())) + do { + rc = zcrypt_send_cprb(&xcRB); + } while (rc == -EAGAIN); if (copy_to_user(uxcRB, &xcRB, sizeof(xcRB))) return -EFAULT; return rc; } + case ZSENDEP11CPRB: { + struct ep11_urb __user *uxcrb = (void __user *)arg; + struct ep11_urb xcrb; + if (copy_from_user(&xcrb, uxcrb, sizeof(xcrb))) + return -EFAULT; + do { + rc = zcrypt_send_ep11_cprb(&xcrb); + } while (rc == -EAGAIN); + /* on failure: retry once again after a requested rescan */ + if ((rc == -ENODEV) && (zcrypt_process_rescan())) + do { + rc = zcrypt_send_ep11_cprb(&xcrb); + } while (rc == -EAGAIN); + if (copy_to_user(uxcrb, &xcrb, sizeof(xcrb))) + return -EFAULT; + return rc; + } case Z90STAT_STATUS_MASK: { char status[AP_DEVICES]; zcrypt_status_mask(status); @@ -764,10 +989,15 @@ static long trans_modexpo32(struct file *filp, unsigned int cmd, do { rc = zcrypt_rsa_modexpo(&mex64); } while (rc == -EAGAIN); - if (!rc) - rc = put_user(mex64.outputdatalength, - &umex32->outputdatalength); - return rc; + /* on failure: retry once again after a requested rescan */ + if ((rc == -ENODEV) && (zcrypt_process_rescan())) + do { + rc = zcrypt_rsa_modexpo(&mex64); + } while (rc == -EAGAIN); + if (rc) + return rc; + return put_user(mex64.outputdatalength, + &umex32->outputdatalength); } struct compat_ica_rsa_modexpo_crt { @@ -804,10 +1034,15 @@ static long trans_modexpo_crt32(struct file *filp, unsigned int cmd, do { rc = zcrypt_rsa_crt(&crt64); } while (rc == -EAGAIN); - if (!rc) - rc = put_user(crt64.outputdatalength, - &ucrt32->outputdatalength); - return rc; + /* on failure: retry once again after a requested rescan */ + if ((rc == -ENODEV) && (zcrypt_process_rescan())) + do { + rc = zcrypt_rsa_crt(&crt64); + } while (rc == -EAGAIN); + if (rc) + return rc; + return put_user(crt64.outputdatalength, + &ucrt32->outputdatalength); } struct compat_ica_xcRB { @@ -863,6 +1098,11 @@ static long trans_xcRB32(struct file *filp, unsigned int cmd, do { rc = zcrypt_send_cprb(&xcRB64); } while (rc == -EAGAIN); + /* on failure: retry once again after a requested rescan */ + if ((rc == -ENODEV) && (zcrypt_process_rescan())) + do { + rc = zcrypt_send_cprb(&xcRB64); + } while (rc == -EAGAIN); xcRB32.reply_control_blk_length = xcRB64.reply_control_blk_length; xcRB32.reply_data_length = xcRB64.reply_data_length; xcRB32.status = xcRB64.status; @@ -896,7 +1136,8 @@ static const struct file_operations zcrypt_fops = { .compat_ioctl = zcrypt_compat_ioctl, #endif .open = zcrypt_open, - .release = zcrypt_release + .release = zcrypt_release, + .llseek = no_llseek, }; /* @@ -1119,6 +1360,9 @@ static int zcrypt_rng_data_read(struct hwrng *rng, u32 *data) */ if (zcrypt_rng_buffer_index == 0) { rc = zcrypt_rng((char *) 
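Every ioctl path in this patch gains the same two-stage retry: spin on -EAGAIN (request can be repeated on another device), and after a final -ENODEV give the transport layer one forced rescan plus exactly one second attempt, as the repeated "on failure: retry once again after a requested rescan" comments say. Factored out as a standalone sketch, where do_op() and process_rescan() are stand-ins for zcrypt_rsa_modexpo() and zcrypt_process_rescan():

#include <errno.h>
#include <stdio.h>

static int attempts;

/* Stand-in op: fails until a "rescan" brings a device back. */
static int do_op(void)
{
	return ++attempts < 2 ? -ENODEV : 0;
}

static int process_rescan(void)
{
	puts("forcing AP bus rescan");
	return 1;	/* a rescan request was pending and got processed */
}

static int run_with_rescan_retry(void)
{
	int rc;

	do {
		rc = do_op();
	} while (rc == -EAGAIN);
	/* on failure: retry once again after a requested rescan */
	if (rc == -ENODEV && process_rescan())
		do {
			rc = do_op();
		} while (rc == -EAGAIN);
	return rc;
}

int main(void)
{
	printf("rc = %d after %d attempts\n",
	       run_with_rescan_retry(), attempts);
	return 0;
}

The single retry matters: the rescan flag is only set by error paths that took a device offline, so looping forever on -ENODEV would spin when no hardware is present at all.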
zcrypt_rng_buffer); + /* on failure: retry once again after a requested rescan */ + if ((rc == -ENODEV) && (zcrypt_process_rescan())) + rc = zcrypt_rng((char *) zcrypt_rng_buffer); if (rc < 0) return -EIO; zcrypt_rng_buffer_index = rc / sizeof *data; @@ -1171,6 +1415,30 @@ static void zcrypt_rng_device_remove(void) mutex_unlock(&zcrypt_rng_mutex); } +int __init zcrypt_debug_init(void) +{ + debugfs_root = debugfs_create_dir("zcrypt", NULL); + + zcrypt_dbf_common = debug_register("zcrypt_common", 1, 1, 16); + debug_register_view(zcrypt_dbf_common, &debug_hex_ascii_view); + debug_set_level(zcrypt_dbf_common, DBF_ERR); + + zcrypt_dbf_devices = debug_register("zcrypt_devices", 1, 1, 16); + debug_register_view(zcrypt_dbf_devices, &debug_hex_ascii_view); + debug_set_level(zcrypt_dbf_devices, DBF_ERR); + + return 0; +} + +void zcrypt_debug_exit(void) +{ + debugfs_remove(debugfs_root); + if (zcrypt_dbf_common) + debug_unregister(zcrypt_dbf_common); + if (zcrypt_dbf_devices) + debug_unregister(zcrypt_dbf_devices); +} + /** * zcrypt_api_init(): Module initialization. * @@ -1180,6 +1448,12 @@ int __init zcrypt_api_init(void) { int rc; + rc = zcrypt_debug_init(); + if (rc) + goto out; + + atomic_set(&zcrypt_rescan_req, 0); + /* Register the request sprayer. */ rc = misc_register(&zcrypt_misc_device); if (rc < 0) @@ -1209,9 +1483,8 @@ void zcrypt_api_exit(void) { remove_proc_entry("driver/z90crypt", NULL); misc_deregister(&zcrypt_misc_device); + zcrypt_debug_exit(); } -#ifndef CONFIG_ZCRYPT_MONOLITHIC module_init(zcrypt_api_init); module_exit(zcrypt_api_exit); -#endif diff --git a/drivers/s390/crypto/zcrypt_api.h b/drivers/s390/crypto/zcrypt_api.h index 8e7ffbf2466..b3d496bfaa7 100644 --- a/drivers/s390/crypto/zcrypt_api.h +++ b/drivers/s390/crypto/zcrypt_api.h @@ -1,9 +1,7 @@ /* - * linux/drivers/s390/crypto/zcrypt_api.h - * * zcrypt 2.1.0 * - * Copyright (C) 2001, 2006 IBM Corporation + * Copyright IBM Corp. 2001, 2012 * Author(s): Robert Burroughs * Eric Rossman (edrossma@us.ibm.com) * Cornelia Huck <cornelia.huck@de.ibm.com> @@ -11,6 +9,7 @@ * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com) * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com> * Ralph Wuerthner <rwuerthn@de.ibm.com> + * MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -30,8 +29,10 @@ #ifndef _ZCRYPT_API_H_ #define _ZCRYPT_API_H_ -#include "ap_bus.h" +#include <linux/atomic.h> +#include <asm/debug.h> #include <asm/zcrypt.h> +#include "ap_bus.h" /* deprecated status calls */ #define ICAZ90STATUS _IOR(ZCRYPT_IOCTL_MAGIC, 0x10, struct ica_z90_status) @@ -73,10 +74,11 @@ struct ica_z90_status { #define ZCRYPT_CEX2A 6 #define ZCRYPT_CEX3C 7 #define ZCRYPT_CEX3A 8 +#define ZCRYPT_CEX4 10 /** * Large random numbers are pulled in 4096 byte chunks from the crypto cards - * and stored in a page. Be carefull when increasing this buffer due to size + * and stored in a page. Be careful when increasing this buffer due to size * limitations for AP requests. */ #define ZCRYPT_RNG_BUFFER_SIZE 4096 @@ -88,7 +90,11 @@ struct zcrypt_ops { long (*rsa_modexpo_crt)(struct zcrypt_device *, struct ica_rsa_modexpo_crt *); long (*send_cprb)(struct zcrypt_device *, struct ica_xcRB *); + long (*send_ep11_cprb)(struct zcrypt_device *, struct ep11_urb *); long (*rng)(struct zcrypt_device *, char *); + struct list_head list; /* zcrypt ops list. 
*/ + struct module *owner; + int variant; }; struct zcrypt_device { @@ -109,14 +115,24 @@ struct zcrypt_device { int request_count; /* # current requests. */ struct ap_message reply; /* Per-device reply structure. */ + int max_exp_bit_length; + + debug_info_t *dbf_area; /* debugging */ }; +/* transport layer rescanning */ +extern atomic_t zcrypt_rescan_req; + struct zcrypt_device *zcrypt_device_alloc(size_t); void zcrypt_device_free(struct zcrypt_device *); void zcrypt_device_get(struct zcrypt_device *); int zcrypt_device_put(struct zcrypt_device *); int zcrypt_device_register(struct zcrypt_device *); void zcrypt_device_unregister(struct zcrypt_device *); +void zcrypt_msgtype_register(struct zcrypt_ops *); +void zcrypt_msgtype_unregister(struct zcrypt_ops *); +struct zcrypt_ops *zcrypt_msgtype_request(unsigned char *, int); +void zcrypt_msgtype_release(struct zcrypt_ops *); int zcrypt_api_init(void); void zcrypt_api_exit(void); diff --git a/drivers/s390/crypto/zcrypt_cca_key.h b/drivers/s390/crypto/zcrypt_cca_key.h index ed82f2f59b1..1f42f103c76 100644 --- a/drivers/s390/crypto/zcrypt_cca_key.h +++ b/drivers/s390/crypto/zcrypt_cca_key.h @@ -1,9 +1,7 @@ /* - * linux/drivers/s390/crypto/zcrypt_cca_key.h - * * zcrypt 2.1.0 * - * Copyright (C) 2001, 2006 IBM Corporation + * Copyright IBM Corp. 2001, 2006 * Author(s): Robert Burroughs * Eric Rossman (edrossma@us.ibm.com) * diff --git a/drivers/s390/crypto/zcrypt_cex2a.c b/drivers/s390/crypto/zcrypt_cex2a.c index c6fb0aa8950..1e849d6e1df 100644 --- a/drivers/s390/crypto/zcrypt_cex2a.c +++ b/drivers/s390/crypto/zcrypt_cex2a.c @@ -1,15 +1,14 @@ /* - * linux/drivers/s390/crypto/zcrypt_cex2a.c - * * zcrypt 2.1.0 * - * Copyright (C) 2001, 2006 IBM Corporation + * Copyright IBM Corp. 2001, 2012 * Author(s): Robert Burroughs * Eric Rossman (edrossma@us.ibm.com) * * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com) * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com> * Ralph Wuerthner <rwuerthn@de.ibm.com> + * MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -27,20 +26,22 @@ */ #include <linux/module.h> +#include <linux/slab.h> #include <linux/init.h> #include <linux/err.h> -#include <asm/atomic.h> +#include <linux/atomic.h> #include <asm/uaccess.h> #include "ap_bus.h" #include "zcrypt_api.h" #include "zcrypt_error.h" #include "zcrypt_cex2a.h" +#include "zcrypt_msgtype50.h" #define CEX2A_MIN_MOD_SIZE 1 /* 8 bits */ #define CEX2A_MAX_MOD_SIZE 256 /* 2048 bits */ #define CEX3A_MIN_MOD_SIZE CEX2A_MIN_MOD_SIZE -#define CEX3A_MAX_MOD_SIZE CEX2A_MAX_MOD_SIZE +#define CEX3A_MAX_MOD_SIZE 512 /* 4096 bits */ #define CEX2A_SPEED_RATING 970 #define CEX3A_SPEED_RATING 900 /* Fixme: Needs finetuning */ @@ -48,8 +49,10 @@ #define CEX2A_MAX_MESSAGE_SIZE 0x390 /* sizeof(struct type50_crb2_msg) */ #define CEX2A_MAX_RESPONSE_SIZE 0x110 /* max outputdatalength + type80_hdr */ -#define CEX3A_MAX_MESSAGE_SIZE CEX2A_MAX_MESSAGE_SIZE -#define CEX3A_MAX_RESPONSE_SIZE CEX2A_MAX_RESPONSE_SIZE +#define CEX3A_MAX_RESPONSE_SIZE 0x210 /* 512 bit modulus + * (max outputdatalength) + + * type80_hdr*/ +#define CEX3A_MAX_MESSAGE_SIZE sizeof(struct type50_crb3_msg) #define CEX2A_CLEANUP_TIME (15*HZ) #define CEX3A_CLEANUP_TIME CEX2A_CLEANUP_TIME @@ -60,322 +63,23 @@ static struct ap_device_id zcrypt_cex2a_ids[] = { { /* end of list */ }, }; -#ifndef CONFIG_ZCRYPT_MONOLITHIC MODULE_DEVICE_TABLE(ap, 
zcrypt_cex2a_ids); MODULE_AUTHOR("IBM Corporation"); -MODULE_DESCRIPTION("CEX2A Cryptographic Coprocessor device driver, " - "Copyright 2001, 2006 IBM Corporation"); +MODULE_DESCRIPTION("CEX2A Cryptographic Coprocessor device driver, " \ + "Copyright IBM Corp. 2001, 2012"); MODULE_LICENSE("GPL"); -#endif static int zcrypt_cex2a_probe(struct ap_device *ap_dev); static void zcrypt_cex2a_remove(struct ap_device *ap_dev); -static void zcrypt_cex2a_receive(struct ap_device *, struct ap_message *, - struct ap_message *); static struct ap_driver zcrypt_cex2a_driver = { .probe = zcrypt_cex2a_probe, .remove = zcrypt_cex2a_remove, - .receive = zcrypt_cex2a_receive, .ids = zcrypt_cex2a_ids, .request_timeout = CEX2A_CLEANUP_TIME, }; /** - * Convert a ICAMEX message to a type50 MEX message. - * - * @zdev: crypto device pointer - * @zreq: crypto request pointer - * @mex: pointer to user input data - * - * Returns 0 on success or -EFAULT. - */ -static int ICAMEX_msg_to_type50MEX_msg(struct zcrypt_device *zdev, - struct ap_message *ap_msg, - struct ica_rsa_modexpo *mex) -{ - unsigned char *mod, *exp, *inp; - int mod_len; - - mod_len = mex->inputdatalength; - - if (mod_len <= 128) { - struct type50_meb1_msg *meb1 = ap_msg->message; - memset(meb1, 0, sizeof(*meb1)); - ap_msg->length = sizeof(*meb1); - meb1->header.msg_type_code = TYPE50_TYPE_CODE; - meb1->header.msg_len = sizeof(*meb1); - meb1->keyblock_type = TYPE50_MEB1_FMT; - mod = meb1->modulus + sizeof(meb1->modulus) - mod_len; - exp = meb1->exponent + sizeof(meb1->exponent) - mod_len; - inp = meb1->message + sizeof(meb1->message) - mod_len; - } else { - struct type50_meb2_msg *meb2 = ap_msg->message; - memset(meb2, 0, sizeof(*meb2)); - ap_msg->length = sizeof(*meb2); - meb2->header.msg_type_code = TYPE50_TYPE_CODE; - meb2->header.msg_len = sizeof(*meb2); - meb2->keyblock_type = TYPE50_MEB2_FMT; - mod = meb2->modulus + sizeof(meb2->modulus) - mod_len; - exp = meb2->exponent + sizeof(meb2->exponent) - mod_len; - inp = meb2->message + sizeof(meb2->message) - mod_len; - } - - if (copy_from_user(mod, mex->n_modulus, mod_len) || - copy_from_user(exp, mex->b_key, mod_len) || - copy_from_user(inp, mex->inputdata, mod_len)) - return -EFAULT; - return 0; -} - -/** - * Convert a ICACRT message to a type50 CRT message. - * - * @zdev: crypto device pointer - * @zreq: crypto request pointer - * @crt: pointer to user input data - * - * Returns 0 on success or -EFAULT. - */ -static int ICACRT_msg_to_type50CRT_msg(struct zcrypt_device *zdev, - struct ap_message *ap_msg, - struct ica_rsa_modexpo_crt *crt) -{ - int mod_len, short_len, long_len, long_offset; - unsigned char *p, *q, *dp, *dq, *u, *inp; - - mod_len = crt->inputdatalength; - short_len = mod_len / 2; - long_len = mod_len / 2 + 8; - - /* - * CEX2A cannot handle p, dp, or U > 128 bytes. - * If we have one of these, we need to do extra checking. - */ - if (long_len > 128) { - /* - * zcrypt_rsa_crt already checked for the leading - * zeroes of np_prime, bp_key and u_mult_inc. - */ - long_offset = long_len - 128; - long_len = 128; - } else - long_offset = 0; - - /* - * Instead of doing extra work for p, dp, U > 64 bytes, we'll just use - * the larger message structure. 
- */ - if (long_len <= 64) { - struct type50_crb1_msg *crb1 = ap_msg->message; - memset(crb1, 0, sizeof(*crb1)); - ap_msg->length = sizeof(*crb1); - crb1->header.msg_type_code = TYPE50_TYPE_CODE; - crb1->header.msg_len = sizeof(*crb1); - crb1->keyblock_type = TYPE50_CRB1_FMT; - p = crb1->p + sizeof(crb1->p) - long_len; - q = crb1->q + sizeof(crb1->q) - short_len; - dp = crb1->dp + sizeof(crb1->dp) - long_len; - dq = crb1->dq + sizeof(crb1->dq) - short_len; - u = crb1->u + sizeof(crb1->u) - long_len; - inp = crb1->message + sizeof(crb1->message) - mod_len; - } else { - struct type50_crb2_msg *crb2 = ap_msg->message; - memset(crb2, 0, sizeof(*crb2)); - ap_msg->length = sizeof(*crb2); - crb2->header.msg_type_code = TYPE50_TYPE_CODE; - crb2->header.msg_len = sizeof(*crb2); - crb2->keyblock_type = TYPE50_CRB2_FMT; - p = crb2->p + sizeof(crb2->p) - long_len; - q = crb2->q + sizeof(crb2->q) - short_len; - dp = crb2->dp + sizeof(crb2->dp) - long_len; - dq = crb2->dq + sizeof(crb2->dq) - short_len; - u = crb2->u + sizeof(crb2->u) - long_len; - inp = crb2->message + sizeof(crb2->message) - mod_len; - } - - if (copy_from_user(p, crt->np_prime + long_offset, long_len) || - copy_from_user(q, crt->nq_prime, short_len) || - copy_from_user(dp, crt->bp_key + long_offset, long_len) || - copy_from_user(dq, crt->bq_key, short_len) || - copy_from_user(u, crt->u_mult_inv + long_offset, long_len) || - copy_from_user(inp, crt->inputdata, mod_len)) - return -EFAULT; - - - return 0; -} - -/** - * Copy results from a type 80 reply message back to user space. - * - * @zdev: crypto device pointer - * @reply: reply AP message. - * @data: pointer to user output data - * @length: size of user output data - * - * Returns 0 on success or -EFAULT. - */ -static int convert_type80(struct zcrypt_device *zdev, - struct ap_message *reply, - char __user *outputdata, - unsigned int outputdatalength) -{ - struct type80_hdr *t80h = reply->message; - unsigned char *data; - - if (t80h->len < sizeof(*t80h) + outputdatalength) { - /* The result is too short, the CEX2A card may not do that.. */ - zdev->online = 0; - return -EAGAIN; /* repeat the request on a different device. */ - } - BUG_ON(t80h->len > CEX2A_MAX_RESPONSE_SIZE); - data = reply->message + t80h->len - outputdatalength; - if (copy_to_user(outputdata, data, outputdatalength)) - return -EFAULT; - return 0; -} - -static int convert_response(struct zcrypt_device *zdev, - struct ap_message *reply, - char __user *outputdata, - unsigned int outputdatalength) -{ - /* Response type byte is the second byte in the response. */ - switch (((unsigned char *) reply->message)[1]) { - case TYPE82_RSP_CODE: - case TYPE88_RSP_CODE: - return convert_error(zdev, reply); - case TYPE80_RSP_CODE: - return convert_type80(zdev, reply, - outputdata, outputdatalength); - default: /* Unknown response type, this should NEVER EVER happen */ - zdev->online = 0; - return -EAGAIN; /* repeat the request on a different device. */ - } -} - -/** - * This function is called from the AP bus code after a crypto request - * "msg" has finished with the reply message "reply". - * It is called from tasklet context. 
- * @ap_dev: pointer to the AP device - * @msg: pointer to the AP message - * @reply: pointer to the AP reply message - */ -static void zcrypt_cex2a_receive(struct ap_device *ap_dev, - struct ap_message *msg, - struct ap_message *reply) -{ - static struct error_hdr error_reply = { - .type = TYPE82_RSP_CODE, - .reply_code = REP82_ERROR_MACHINE_FAILURE, - }; - struct type80_hdr *t80h; - int length; - - /* Copy the reply message to the request message buffer. */ - if (IS_ERR(reply)) { - memcpy(msg->message, &error_reply, sizeof(error_reply)); - goto out; - } - t80h = reply->message; - if (t80h->type == TYPE80_RSP_CODE) { - length = min(CEX2A_MAX_RESPONSE_SIZE, (int) t80h->len); - memcpy(msg->message, reply->message, length); - } else - memcpy(msg->message, reply->message, sizeof error_reply); -out: - complete((struct completion *) msg->private); -} - -static atomic_t zcrypt_step = ATOMIC_INIT(0); - -/** - * The request distributor calls this function if it picked the CEX2A - * device to handle a modexpo request. - * @zdev: pointer to zcrypt_device structure that identifies the - * CEX2A device to the request distributor - * @mex: pointer to the modexpo request buffer - */ -static long zcrypt_cex2a_modexpo(struct zcrypt_device *zdev, - struct ica_rsa_modexpo *mex) -{ - struct ap_message ap_msg; - struct completion work; - int rc; - - ap_init_message(&ap_msg); - ap_msg.message = kmalloc(CEX2A_MAX_MESSAGE_SIZE, GFP_KERNEL); - if (!ap_msg.message) - return -ENOMEM; - ap_msg.psmid = (((unsigned long long) current->pid) << 32) + - atomic_inc_return(&zcrypt_step); - ap_msg.private = &work; - rc = ICAMEX_msg_to_type50MEX_msg(zdev, &ap_msg, mex); - if (rc) - goto out_free; - init_completion(&work); - ap_queue_message(zdev->ap_dev, &ap_msg); - rc = wait_for_completion_interruptible(&work); - if (rc == 0) - rc = convert_response(zdev, &ap_msg, mex->outputdata, - mex->outputdatalength); - else - /* Signal pending. */ - ap_cancel_message(zdev->ap_dev, &ap_msg); -out_free: - kfree(ap_msg.message); - return rc; -} - -/** - * The request distributor calls this function if it picked the CEX2A - * device to handle a modexpo_crt request. - * @zdev: pointer to zcrypt_device structure that identifies the - * CEX2A device to the request distributor - * @crt: pointer to the modexpoc_crt request buffer - */ -static long zcrypt_cex2a_modexpo_crt(struct zcrypt_device *zdev, - struct ica_rsa_modexpo_crt *crt) -{ - struct ap_message ap_msg; - struct completion work; - int rc; - - ap_init_message(&ap_msg); - ap_msg.message = kmalloc(CEX2A_MAX_MESSAGE_SIZE, GFP_KERNEL); - if (!ap_msg.message) - return -ENOMEM; - ap_msg.psmid = (((unsigned long long) current->pid) << 32) + - atomic_inc_return(&zcrypt_step); - ap_msg.private = &work; - rc = ICACRT_msg_to_type50CRT_msg(zdev, &ap_msg, crt); - if (rc) - goto out_free; - init_completion(&work); - ap_queue_message(zdev->ap_dev, &ap_msg); - rc = wait_for_completion_interruptible(&work); - if (rc == 0) - rc = convert_response(zdev, &ap_msg, crt->outputdata, - crt->outputdatalength); - else - /* Signal pending. */ - ap_cancel_message(zdev->ap_dev, &ap_msg); -out_free: - kfree(ap_msg.message); - return rc; -} - -/** - * The crypto operations for a CEX2A card. - */ -static struct zcrypt_ops zcrypt_cex2a_ops = { - .rsa_modexpo = zcrypt_cex2a_modexpo, - .rsa_modexpo_crt = zcrypt_cex2a_modexpo_crt, -}; - -/** * Probe function for CEX2A cards. It always accepts the AP device * since the bus_match already checked the hardware type. * @ap_dev: pointer to the AP device. 
@@ -396,6 +100,7 @@ static int zcrypt_cex2a_probe(struct ap_device *ap_dev) zdev->max_mod_size = CEX2A_MAX_MOD_SIZE; zdev->short_crt = 1; zdev->speed_rating = CEX2A_SPEED_RATING; + zdev->max_exp_bit_length = CEX2A_MAX_MOD_SIZE; break; case AP_DEVICE_TYPE_CEX3A: zdev = zcrypt_device_alloc(CEX3A_MAX_RESPONSE_SIZE); @@ -403,22 +108,30 @@ static int zcrypt_cex2a_probe(struct ap_device *ap_dev) return -ENOMEM; zdev->user_space_type = ZCRYPT_CEX3A; zdev->type_string = "CEX3A"; - zdev->min_mod_size = CEX3A_MIN_MOD_SIZE; - zdev->max_mod_size = CEX3A_MAX_MOD_SIZE; + zdev->min_mod_size = CEX2A_MIN_MOD_SIZE; + zdev->max_mod_size = CEX2A_MAX_MOD_SIZE; + zdev->max_exp_bit_length = CEX2A_MAX_MOD_SIZE; + if (ap_test_bit(&ap_dev->functions, AP_FUNC_MEX4K) && + ap_test_bit(&ap_dev->functions, AP_FUNC_CRT4K)) { + zdev->max_mod_size = CEX3A_MAX_MOD_SIZE; + zdev->max_exp_bit_length = CEX3A_MAX_MOD_SIZE; + } zdev->short_crt = 1; zdev->speed_rating = CEX3A_SPEED_RATING; break; } - if (zdev != NULL) { - zdev->ap_dev = ap_dev; - zdev->ops = &zcrypt_cex2a_ops; - zdev->online = 1; - ap_dev->reply = &zdev->reply; - ap_dev->private = zdev; - rc = zcrypt_device_register(zdev); - } + if (!zdev) + return -ENODEV; + zdev->ops = zcrypt_msgtype_request(MSGTYPE50_NAME, + MSGTYPE50_VARIANT_DEFAULT); + zdev->ap_dev = ap_dev; + zdev->online = 1; + ap_dev->reply = &zdev->reply; + ap_dev->private = zdev; + rc = zcrypt_device_register(zdev); if (rc) { ap_dev->private = NULL; + zcrypt_msgtype_release(zdev->ops); zcrypt_device_free(zdev); } return rc; @@ -431,8 +144,10 @@ static int zcrypt_cex2a_probe(struct ap_device *ap_dev) static void zcrypt_cex2a_remove(struct ap_device *ap_dev) { struct zcrypt_device *zdev = ap_dev->private; + struct zcrypt_ops *zops = zdev->ops; zcrypt_device_unregister(zdev); + zcrypt_msgtype_release(zops); } int __init zcrypt_cex2a_init(void) @@ -445,7 +160,5 @@ void __exit zcrypt_cex2a_exit(void) ap_driver_unregister(&zcrypt_cex2a_driver); } -#ifndef CONFIG_ZCRYPT_MONOLITHIC module_init(zcrypt_cex2a_init); module_exit(zcrypt_cex2a_exit); -#endif diff --git a/drivers/s390/crypto/zcrypt_cex2a.h b/drivers/s390/crypto/zcrypt_cex2a.h index 8f69d1dacab..0dce4b9af18 100644 --- a/drivers/s390/crypto/zcrypt_cex2a.h +++ b/drivers/s390/crypto/zcrypt_cex2a.h @@ -1,9 +1,7 @@ /* - * linux/drivers/s390/crypto/zcrypt_cex2a.h - * * zcrypt 2.1.0 * - * Copyright (C) 2001, 2006 IBM Corporation + * Copyright IBM Corp. 
2001, 2006 * Author(s): Robert Burroughs * Eric Rossman (edrossma@us.ibm.com) * @@ -51,8 +49,10 @@ struct type50_hdr { #define TYPE50_MEB1_FMT 0x0001 #define TYPE50_MEB2_FMT 0x0002 +#define TYPE50_MEB3_FMT 0x0003 #define TYPE50_CRB1_FMT 0x0011 #define TYPE50_CRB2_FMT 0x0012 +#define TYPE50_CRB3_FMT 0x0013 /* Mod-Exp, with a small modulus */ struct type50_meb1_msg { @@ -74,6 +74,16 @@ struct type50_meb2_msg { unsigned char message[256]; } __attribute__((packed)); +/* Mod-Exp, with a larger modulus */ +struct type50_meb3_msg { + struct type50_hdr header; + unsigned short keyblock_type; /* 0x0003 */ + unsigned char reserved[6]; + unsigned char exponent[512]; + unsigned char modulus[512]; + unsigned char message[512]; +} __attribute__((packed)); + /* CRT, with a small modulus */ struct type50_crb1_msg { struct type50_hdr header; @@ -100,6 +110,19 @@ struct type50_crb2_msg { unsigned char message[256]; } __attribute__((packed)); +/* CRT, with a larger modulus */ +struct type50_crb3_msg { + struct type50_hdr header; + unsigned short keyblock_type; /* 0x0013 */ + unsigned char reserved[6]; + unsigned char p[256]; + unsigned char q[256]; + unsigned char dp[256]; + unsigned char dq[256]; + unsigned char u[256]; + unsigned char message[512]; +} __attribute__((packed)); + /** * The type 80 response family is associated with a CEX2A card. * diff --git a/drivers/s390/crypto/zcrypt_cex4.c b/drivers/s390/crypto/zcrypt_cex4.c new file mode 100644 index 00000000000..569f8b1d86c --- /dev/null +++ b/drivers/s390/crypto/zcrypt_cex4.c @@ -0,0 +1,167 @@ +/* + * Copyright IBM Corp. 2012 + * Author(s): Holger Dengler <hd@linux.vnet.ibm.com> + */ + +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/init.h> +#include <linux/err.h> +#include <linux/atomic.h> +#include <linux/uaccess.h> + +#include "ap_bus.h" +#include "zcrypt_api.h" +#include "zcrypt_msgtype6.h" +#include "zcrypt_msgtype50.h" +#include "zcrypt_error.h" +#include "zcrypt_cex4.h" + +#define CEX4A_MIN_MOD_SIZE 1 /* 8 bits */ +#define CEX4A_MAX_MOD_SIZE_2K 256 /* 2048 bits */ +#define CEX4A_MAX_MOD_SIZE_4K 512 /* 4096 bits */ + +#define CEX4C_MIN_MOD_SIZE 16 /* 256 bits */ +#define CEX4C_MAX_MOD_SIZE 512 /* 4096 bits */ + +#define CEX4A_SPEED_RATING 900 /* TODO new card, new speed rating */ +#define CEX4C_SPEED_RATING 6500 /* TODO new card, new speed rating */ + +#define CEX4A_MAX_MESSAGE_SIZE MSGTYPE50_CRB3_MAX_MSG_SIZE +#define CEX4C_MAX_MESSAGE_SIZE MSGTYPE06_MAX_MSG_SIZE + +/* Waiting time for requests to be processed. + * Currently there are some types of request which are not deterministic. + * But the maximum time limit managed by the stomper code is set to 60sec. + * Hence we have to wait at least that time period. + */ +#define CEX4_CLEANUP_TIME (61*HZ) + +static struct ap_device_id zcrypt_cex4_ids[] = { + { AP_DEVICE(AP_DEVICE_TYPE_CEX4) }, + { /* end of list */ }, +}; + +MODULE_DEVICE_TABLE(ap, zcrypt_cex4_ids); +MODULE_AUTHOR("IBM Corporation"); +MODULE_DESCRIPTION("CEX4 Cryptographic Card device driver, " \ + "Copyright IBM Corp. 2012"); +MODULE_LICENSE("GPL"); + +static int zcrypt_cex4_probe(struct ap_device *ap_dev); +static void zcrypt_cex4_remove(struct ap_device *ap_dev); + +static struct ap_driver zcrypt_cex4_driver = { + .probe = zcrypt_cex4_probe, + .remove = zcrypt_cex4_remove, + .ids = zcrypt_cex4_ids, + .request_timeout = CEX4_CLEANUP_TIME, +}; + +/** + * Probe function for CEX4 cards. It always accepts the AP device + * since the bus_match already checked the hardware type. 
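The new MEB3/CRB3 formats double every operand field to carry 4096-bit keys, and the size macros used elsewhere (CEX2A_MAX_MESSAGE_SIZE 0x390, MSGTYPE50_CRB3_MAX_MSG_SIZE 0x710) are just the packed sizes of these structs: an 8-byte type50 header plus the keyblock type, reserved bytes, five operand fields and the message. A quick standalone check of that arithmetic, with the structs copied from the header (packing matters, hence the attribute):

#include <stdio.h>

struct type50_hdr {
	unsigned char reserved1;
	unsigned char msg_type_code;	/* 0x50 */
	unsigned short msg_len;
	unsigned char reserved2;
	unsigned char ignored;
	unsigned short reserved3;
} __attribute__((packed));

struct type50_crb2_msg {
	struct type50_hdr header;
	unsigned short keyblock_type;	/* 0x0012 */
	unsigned char reserved[6];
	unsigned char p[128], q[128], dp[128], dq[128], u[128];
	unsigned char message[256];
} __attribute__((packed));

struct type50_crb3_msg {
	struct type50_hdr header;
	unsigned short keyblock_type;	/* 0x0013 */
	unsigned char reserved[6];
	unsigned char p[256], q[256], dp[256], dq[256], u[256];
	unsigned char message[512];
} __attribute__((packed));

/* 8 + 2 + 6 + 5*128 + 256 = 0x390; 8 + 2 + 6 + 5*256 + 512 = 0x710 */
_Static_assert(sizeof(struct type50_crb2_msg) == 0x390, "CRB2 size");
_Static_assert(sizeof(struct type50_crb3_msg) == 0x710, "CRB3 size");

int main(void)
{
	printf("crb2: %#zx, crb3: %#zx\n",
	       sizeof(struct type50_crb2_msg),
	       sizeof(struct type50_crb3_msg));
	return 0;
}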
+ * @ap_dev: pointer to the AP device. + */ +static int zcrypt_cex4_probe(struct ap_device *ap_dev) +{ + struct zcrypt_device *zdev = NULL; + int rc = 0; + + switch (ap_dev->device_type) { + case AP_DEVICE_TYPE_CEX4: + if (ap_test_bit(&ap_dev->functions, AP_FUNC_ACCEL)) { + zdev = zcrypt_device_alloc(CEX4A_MAX_MESSAGE_SIZE); + if (!zdev) + return -ENOMEM; + zdev->type_string = "CEX4A"; + zdev->user_space_type = ZCRYPT_CEX3A; + zdev->min_mod_size = CEX4A_MIN_MOD_SIZE; + if (ap_test_bit(&ap_dev->functions, AP_FUNC_MEX4K) && + ap_test_bit(&ap_dev->functions, AP_FUNC_CRT4K)) { + zdev->max_mod_size = + CEX4A_MAX_MOD_SIZE_4K; + zdev->max_exp_bit_length = + CEX4A_MAX_MOD_SIZE_4K; + } else { + zdev->max_mod_size = + CEX4A_MAX_MOD_SIZE_2K; + zdev->max_exp_bit_length = + CEX4A_MAX_MOD_SIZE_2K; + } + zdev->short_crt = 1; + zdev->speed_rating = CEX4A_SPEED_RATING; + zdev->ops = zcrypt_msgtype_request(MSGTYPE50_NAME, + MSGTYPE50_VARIANT_DEFAULT); + } else if (ap_test_bit(&ap_dev->functions, AP_FUNC_COPRO)) { + zdev = zcrypt_device_alloc(CEX4C_MAX_MESSAGE_SIZE); + if (!zdev) + return -ENOMEM; + zdev->type_string = "CEX4C"; + zdev->user_space_type = ZCRYPT_CEX3C; + zdev->min_mod_size = CEX4C_MIN_MOD_SIZE; + zdev->max_mod_size = CEX4C_MAX_MOD_SIZE; + zdev->max_exp_bit_length = CEX4C_MAX_MOD_SIZE; + zdev->short_crt = 0; + zdev->speed_rating = CEX4C_SPEED_RATING; + zdev->ops = zcrypt_msgtype_request(MSGTYPE06_NAME, + MSGTYPE06_VARIANT_DEFAULT); + } else if (ap_test_bit(&ap_dev->functions, AP_FUNC_EP11)) { + zdev = zcrypt_device_alloc(CEX4C_MAX_MESSAGE_SIZE); + if (!zdev) + return -ENOMEM; + zdev->type_string = "CEX4P"; + zdev->user_space_type = ZCRYPT_CEX4; + zdev->min_mod_size = CEX4C_MIN_MOD_SIZE; + zdev->max_mod_size = CEX4C_MAX_MOD_SIZE; + zdev->max_exp_bit_length = CEX4C_MAX_MOD_SIZE; + zdev->short_crt = 0; + zdev->speed_rating = CEX4C_SPEED_RATING; + zdev->ops = zcrypt_msgtype_request(MSGTYPE06_NAME, + MSGTYPE06_VARIANT_EP11); + } + break; + } + if (!zdev) + return -ENODEV; + zdev->ap_dev = ap_dev; + zdev->online = 1; + ap_dev->reply = &zdev->reply; + ap_dev->private = zdev; + rc = zcrypt_device_register(zdev); + if (rc) { + zcrypt_msgtype_release(zdev->ops); + ap_dev->private = NULL; + zcrypt_device_free(zdev); + } + return rc; +} + +/** + * This is called to remove the extended CEX4 driver information + * if an AP device is removed. + */ +static void zcrypt_cex4_remove(struct ap_device *ap_dev) +{ + struct zcrypt_device *zdev = ap_dev->private; + struct zcrypt_ops *zops; + + if (zdev) { + zops = zdev->ops; + zcrypt_device_unregister(zdev); + zcrypt_msgtype_release(zops); + } +} + +int __init zcrypt_cex4_init(void) +{ + return ap_driver_register(&zcrypt_cex4_driver, THIS_MODULE, "cex4"); +} + +void __exit zcrypt_cex4_exit(void) +{ + ap_driver_unregister(&zcrypt_cex4_driver); +} + +module_init(zcrypt_cex4_init); +module_exit(zcrypt_cex4_exit); diff --git a/drivers/s390/crypto/zcrypt_cex4.h b/drivers/s390/crypto/zcrypt_cex4.h new file mode 100644 index 00000000000..719571375cc --- /dev/null +++ b/drivers/s390/crypto/zcrypt_cex4.h @@ -0,0 +1,12 @@ +/* + * Copyright IBM Corp. 
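The CEX4 probe above keys the whole device personality off the function facility bits: ACCEL selects a CEX4A with the type 50 accelerator msgtype, COPRO a CEX4C with the type 6 coprocessor msgtype, and EP11 a CEX4P with the type 6 EP11 variant. A table-driven sketch of that decision; the names and bit numbers are taken from the patch, while the variant values are assumptions for illustration (MSGTYPE06_VARIANT_* are not shown here):

#include <stdio.h>

#define AP_FUNC_COPRO 3
#define AP_FUNC_ACCEL 4
#define AP_FUNC_EP11  5

static int ap_test_bit(const unsigned int *ptr, unsigned int nr)
{
	return (*ptr & (0x80000000u >> nr)) != 0;
}

struct personality {
	unsigned int func_bit;
	const char *type_string;
	const char *msgtype;
	int variant;		/* assumed values, for illustration */
};

static const struct personality cex4_personalities[] = {
	{ AP_FUNC_ACCEL, "CEX4A", "zcrypt_msgtype50", 0 },
	{ AP_FUNC_COPRO, "CEX4C", "zcrypt_msgtype6",  0 },
	{ AP_FUNC_EP11,  "CEX4P", "zcrypt_msgtype6",  1 /* EP11 */ },
};

int main(void)
{
	unsigned int functions = 0x80000000u >> AP_FUNC_EP11;

	for (size_t i = 0; i < 3; i++)
		if (ap_test_bit(&functions, cex4_personalities[i].func_bit)) {
			printf("%s via %s (variant %d)\n",
			       cex4_personalities[i].type_string,
			       cex4_personalities[i].msgtype,
			       cex4_personalities[i].variant);
			break;
		}
	return 0;
}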
2012 + * Author(s): Holger Dengler <hd@linux.vnet.ibm.com> + */ + +#ifndef _ZCRYPT_CEX4_H_ +#define _ZCRYPT_CEX4_H_ + +int zcrypt_cex4_init(void); +void zcrypt_cex4_exit(void); + +#endif /* _ZCRYPT_CEX4_H_ */ diff --git a/drivers/s390/crypto/zcrypt_debug.h b/drivers/s390/crypto/zcrypt_debug.h new file mode 100644 index 00000000000..28d9349de1a --- /dev/null +++ b/drivers/s390/crypto/zcrypt_debug.h @@ -0,0 +1,53 @@ +/* + * Copyright IBM Corp. 2012 + * Author(s): Holger Dengler (hd@linux.vnet.ibm.com) + */ +#ifndef ZCRYPT_DEBUG_H +#define ZCRYPT_DEBUG_H + +#include <asm/debug.h> +#include "zcrypt_api.h" + +/* that gives us 15 characters in the text event views */ +#define ZCRYPT_DBF_LEN 16 + +#define DBF_ERR 3 /* error conditions */ +#define DBF_WARN 4 /* warning conditions */ +#define DBF_INFO 6 /* informational */ + +#define RC2WARN(rc) ((rc) ? DBF_WARN : DBF_INFO) + +#define ZCRYPT_DBF_COMMON(level, text...) \ + do { \ + if (debug_level_enabled(zcrypt_dbf_common, level)) { \ + char debug_buffer[ZCRYPT_DBF_LEN]; \ + snprintf(debug_buffer, ZCRYPT_DBF_LEN, text); \ + debug_text_event(zcrypt_dbf_common, level, \ + debug_buffer); \ + } \ + } while (0) + +#define ZCRYPT_DBF_DEVICES(level, text...) \ + do { \ + if (debug_level_enabled(zcrypt_dbf_devices, level)) { \ + char debug_buffer[ZCRYPT_DBF_LEN]; \ + snprintf(debug_buffer, ZCRYPT_DBF_LEN, text); \ + debug_text_event(zcrypt_dbf_devices, level, \ + debug_buffer); \ + } \ + } while (0) + +#define ZCRYPT_DBF_DEV(level, device, text...) \ + do { \ + if (debug_level_enabled(device->dbf_area, level)) { \ + char debug_buffer[ZCRYPT_DBF_LEN]; \ + snprintf(debug_buffer, ZCRYPT_DBF_LEN, text); \ + debug_text_event(device->dbf_area, level, \ + debug_buffer); \ + } \ + } while (0) + +int zcrypt_debug_init(void); +void zcrypt_debug_exit(void); + +#endif /* ZCRYPT_DEBUG_H */ diff --git a/drivers/s390/crypto/zcrypt_error.h b/drivers/s390/crypto/zcrypt_error.h index 03ba27f05f9..7b23f43c7b0 100644 --- a/drivers/s390/crypto/zcrypt_error.h +++ b/drivers/s390/crypto/zcrypt_error.h @@ -1,9 +1,7 @@ /* - * linux/drivers/s390/crypto/zcrypt_error.h - * * zcrypt 2.1.0 * - * Copyright (C) 2001, 2006 IBM Corporation + * Copyright IBM Corp. 2001, 2006 * Author(s): Robert Burroughs * Eric Rossman (edrossma@us.ibm.com) * @@ -28,6 +26,8 @@ #ifndef _ZCRYPT_ERROR_H_ #define _ZCRYPT_ERROR_H_ +#include <linux/atomic.h> +#include "zcrypt_debug.h" #include "zcrypt_api.h" /** @@ -106,20 +106,33 @@ static inline int convert_error(struct zcrypt_device *zdev, // REP88_ERROR_MESSAGE_TYPE // '20' CEX2A /* * To sent a message of the wrong type is a bug in the - * device driver. Warn about it, disable the device + * device driver. Send error msg, disable the device * and then repeat the request. */ - WARN_ON(1); + atomic_set(&zcrypt_rescan_req, 1); zdev->online = 0; + pr_err("Cryptographic device %x failed and was set offline\n", + zdev->ap_dev->qid); + ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d", + zdev->ap_dev->qid, zdev->online, ehdr->reply_code); return -EAGAIN; case REP82_ERROR_TRANSPORT_FAIL: case REP82_ERROR_MACHINE_FAILURE: // REP88_ERROR_MODULE_FAILURE // '10' CEX2A /* If a card fails disable it and repeat the request. 
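The debug macros above explain their own sizing: ZCRYPT_DBF_LEN is 16 but "gives us 15 characters", because snprintf() always reserves one byte of the event buffer for the NUL terminator before debug_text_event() records it. A standalone demonstration of that truncation, using one of the format strings from the error paths:

#include <stdio.h>

#define ZCRYPT_DBF_LEN 16

int main(void)
{
	char buf[ZCRYPT_DBF_LEN];

	/* "dev%04xo%drc%d" as used in the ZCRYPT_DBF_DEV() call sites */
	snprintf(buf, ZCRYPT_DBF_LEN, "dev%04xo%drc%d", 0x4a06, 0, 10);
	printf("event text: \"%s\" (%zu chars max)\n", buf, sizeof(buf) - 1);
	return 0;
}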
*/ + atomic_set(&zcrypt_rescan_req, 1); zdev->online = 0; + pr_err("Cryptographic device %x failed and was set offline\n", + zdev->ap_dev->qid); + ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d", + zdev->ap_dev->qid, zdev->online, ehdr->reply_code); return -EAGAIN; default: zdev->online = 0; + pr_err("Cryptographic device %x failed and was set offline\n", + zdev->ap_dev->qid); + ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d", + zdev->ap_dev->qid, zdev->online, ehdr->reply_code); return -EAGAIN; /* repeat the request on a different device. */ } } diff --git a/drivers/s390/crypto/zcrypt_mono.c b/drivers/s390/crypto/zcrypt_mono.c deleted file mode 100644 index 44253fdd413..00000000000 --- a/drivers/s390/crypto/zcrypt_mono.c +++ /dev/null @@ -1,100 +0,0 @@ -/* - * linux/drivers/s390/crypto/zcrypt_mono.c - * - * zcrypt 2.1.0 - * - * Copyright (C) 2001, 2006 IBM Corporation - * Author(s): Robert Burroughs - * Eric Rossman (edrossma@us.ibm.com) - * - * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com) - * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. - */ - -#include <linux/module.h> -#include <linux/init.h> -#include <linux/interrupt.h> -#include <linux/miscdevice.h> -#include <linux/fs.h> -#include <linux/proc_fs.h> -#include <linux/compat.h> -#include <asm/atomic.h> -#include <asm/uaccess.h> - -#include "ap_bus.h" -#include "zcrypt_api.h" -#include "zcrypt_pcica.h" -#include "zcrypt_pcicc.h" -#include "zcrypt_pcixcc.h" -#include "zcrypt_cex2a.h" - -/** - * The module initialization code. - */ -static int __init zcrypt_init(void) -{ - int rc; - - rc = ap_module_init(); - if (rc) - goto out; - rc = zcrypt_api_init(); - if (rc) - goto out_ap; - rc = zcrypt_pcica_init(); - if (rc) - goto out_api; - rc = zcrypt_pcicc_init(); - if (rc) - goto out_pcica; - rc = zcrypt_pcixcc_init(); - if (rc) - goto out_pcicc; - rc = zcrypt_cex2a_init(); - if (rc) - goto out_pcixcc; - return 0; - -out_pcixcc: - zcrypt_pcixcc_exit(); -out_pcicc: - zcrypt_pcicc_exit(); -out_pcica: - zcrypt_pcica_exit(); -out_api: - zcrypt_api_exit(); -out_ap: - ap_module_exit(); -out: - return rc; -} - -/** - * The module termination code. - */ -static void __exit zcrypt_exit(void) -{ - zcrypt_cex2a_exit(); - zcrypt_pcixcc_exit(); - zcrypt_pcicc_exit(); - zcrypt_pcica_exit(); - zcrypt_api_exit(); - ap_module_exit(); -} - -module_init(zcrypt_init); -module_exit(zcrypt_exit); diff --git a/drivers/s390/crypto/zcrypt_msgtype50.c b/drivers/s390/crypto/zcrypt_msgtype50.c new file mode 100644 index 00000000000..334e282f255 --- /dev/null +++ b/drivers/s390/crypto/zcrypt_msgtype50.c @@ -0,0 +1,529 @@ +/* + * zcrypt 2.1.0 + * + * Copyright IBM Corp. 
2001, 2012 + * Author(s): Robert Burroughs + * Eric Rossman (edrossma@us.ibm.com) + * + * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com) + * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com> + * Ralph Wuerthner <rwuerthn@de.ibm.com> + * MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#define KMSG_COMPONENT "zcrypt" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/init.h> +#include <linux/err.h> +#include <linux/atomic.h> +#include <linux/uaccess.h> + +#include "ap_bus.h" +#include "zcrypt_api.h" +#include "zcrypt_error.h" +#include "zcrypt_msgtype50.h" + +#define CEX3A_MAX_MOD_SIZE 512 /* 4096 bits */ + +#define CEX2A_MAX_RESPONSE_SIZE 0x110 /* max outputdatalength + type80_hdr */ + +#define CEX3A_MAX_RESPONSE_SIZE 0x210 /* 512 bit modulus + * (max outputdatalength) + + * type80_hdr*/ + +MODULE_AUTHOR("IBM Corporation"); +MODULE_DESCRIPTION("Cryptographic Accelerator (message type 50), " \ + "Copyright IBM Corp. 2001, 2012"); +MODULE_LICENSE("GPL"); + +static void zcrypt_cex2a_receive(struct ap_device *, struct ap_message *, + struct ap_message *); + +/** + * The type 50 message family is associated with a CEX2A card. + * + * The four members of the family are described below. + * + * Note that all unsigned char arrays are right-justified and left-padded + * with zeroes. + * + * Note that all reserved fields must be zeroes. 
+ */ +struct type50_hdr { + unsigned char reserved1; + unsigned char msg_type_code; /* 0x50 */ + unsigned short msg_len; + unsigned char reserved2; + unsigned char ignored; + unsigned short reserved3; +} __packed; + +#define TYPE50_TYPE_CODE 0x50 + +#define TYPE50_MEB1_FMT 0x0001 +#define TYPE50_MEB2_FMT 0x0002 +#define TYPE50_MEB3_FMT 0x0003 +#define TYPE50_CRB1_FMT 0x0011 +#define TYPE50_CRB2_FMT 0x0012 +#define TYPE50_CRB3_FMT 0x0013 + +/* Mod-Exp, with a small modulus */ +struct type50_meb1_msg { + struct type50_hdr header; + unsigned short keyblock_type; /* 0x0001 */ + unsigned char reserved[6]; + unsigned char exponent[128]; + unsigned char modulus[128]; + unsigned char message[128]; +} __packed; + +/* Mod-Exp, with a large modulus */ +struct type50_meb2_msg { + struct type50_hdr header; + unsigned short keyblock_type; /* 0x0002 */ + unsigned char reserved[6]; + unsigned char exponent[256]; + unsigned char modulus[256]; + unsigned char message[256]; +} __packed; + +/* Mod-Exp, with a larger modulus */ +struct type50_meb3_msg { + struct type50_hdr header; + unsigned short keyblock_type; /* 0x0003 */ + unsigned char reserved[6]; + unsigned char exponent[512]; + unsigned char modulus[512]; + unsigned char message[512]; +} __packed; + +/* CRT, with a small modulus */ +struct type50_crb1_msg { + struct type50_hdr header; + unsigned short keyblock_type; /* 0x0011 */ + unsigned char reserved[6]; + unsigned char p[64]; + unsigned char q[64]; + unsigned char dp[64]; + unsigned char dq[64]; + unsigned char u[64]; + unsigned char message[128]; +} __packed; + +/* CRT, with a large modulus */ +struct type50_crb2_msg { + struct type50_hdr header; + unsigned short keyblock_type; /* 0x0012 */ + unsigned char reserved[6]; + unsigned char p[128]; + unsigned char q[128]; + unsigned char dp[128]; + unsigned char dq[128]; + unsigned char u[128]; + unsigned char message[256]; +} __packed; + +/* CRT, with a larger modulus */ +struct type50_crb3_msg { + struct type50_hdr header; + unsigned short keyblock_type; /* 0x0013 */ + unsigned char reserved[6]; + unsigned char p[256]; + unsigned char q[256]; + unsigned char dp[256]; + unsigned char dq[256]; + unsigned char u[256]; + unsigned char message[512]; +} __packed; + +/** + * The type 80 response family is associated with a CEX2A card. + * + * Note that all unsigned char arrays are right-justified and left-padded + * with zeroes. + * + * Note that all reserved fields must be zeroes. + */ + +#define TYPE80_RSP_CODE 0x80 + +struct type80_hdr { + unsigned char reserved1; + unsigned char type; /* 0x80 */ + unsigned short len; + unsigned char code; /* 0x00 */ + unsigned char reserved2[3]; + unsigned char reserved3[8]; +} __packed; + +/** + * Convert a ICAMEX message to a type50 MEX message. + * + * @zdev: crypto device pointer + * @zreq: crypto request pointer + * @mex: pointer to user input data + * + * Returns 0 on success or -EFAULT. 
+ */ +static int ICAMEX_msg_to_type50MEX_msg(struct zcrypt_device *zdev, + struct ap_message *ap_msg, + struct ica_rsa_modexpo *mex) +{ + unsigned char *mod, *exp, *inp; + int mod_len; + + mod_len = mex->inputdatalength; + + if (mod_len <= 128) { + struct type50_meb1_msg *meb1 = ap_msg->message; + memset(meb1, 0, sizeof(*meb1)); + ap_msg->length = sizeof(*meb1); + meb1->header.msg_type_code = TYPE50_TYPE_CODE; + meb1->header.msg_len = sizeof(*meb1); + meb1->keyblock_type = TYPE50_MEB1_FMT; + mod = meb1->modulus + sizeof(meb1->modulus) - mod_len; + exp = meb1->exponent + sizeof(meb1->exponent) - mod_len; + inp = meb1->message + sizeof(meb1->message) - mod_len; + } else if (mod_len <= 256) { + struct type50_meb2_msg *meb2 = ap_msg->message; + memset(meb2, 0, sizeof(*meb2)); + ap_msg->length = sizeof(*meb2); + meb2->header.msg_type_code = TYPE50_TYPE_CODE; + meb2->header.msg_len = sizeof(*meb2); + meb2->keyblock_type = TYPE50_MEB2_FMT; + mod = meb2->modulus + sizeof(meb2->modulus) - mod_len; + exp = meb2->exponent + sizeof(meb2->exponent) - mod_len; + inp = meb2->message + sizeof(meb2->message) - mod_len; + } else { + /* mod_len > 256 = 4096 bit RSA Key */ + struct type50_meb3_msg *meb3 = ap_msg->message; + memset(meb3, 0, sizeof(*meb3)); + ap_msg->length = sizeof(*meb3); + meb3->header.msg_type_code = TYPE50_TYPE_CODE; + meb3->header.msg_len = sizeof(*meb3); + meb3->keyblock_type = TYPE50_MEB3_FMT; + mod = meb3->modulus + sizeof(meb3->modulus) - mod_len; + exp = meb3->exponent + sizeof(meb3->exponent) - mod_len; + inp = meb3->message + sizeof(meb3->message) - mod_len; + } + + if (copy_from_user(mod, mex->n_modulus, mod_len) || + copy_from_user(exp, mex->b_key, mod_len) || + copy_from_user(inp, mex->inputdata, mod_len)) + return -EFAULT; + return 0; +} + +/** + * Convert a ICACRT message to a type50 CRT message. + * + * @zdev: crypto device pointer + * @zreq: crypto request pointer + * @crt: pointer to user input data + * + * Returns 0 on success or -EFAULT. + */ +static int ICACRT_msg_to_type50CRT_msg(struct zcrypt_device *zdev, + struct ap_message *ap_msg, + struct ica_rsa_modexpo_crt *crt) +{ + int mod_len, short_len; + unsigned char *p, *q, *dp, *dq, *u, *inp; + + mod_len = crt->inputdatalength; + short_len = mod_len / 2; + + /* + * CEX2A and CEX3A w/o FW update can handle requests up to + * 256 byte modulus (2k keys). + * CEX3A with FW update and CEX4A cards are able to handle + * 512 byte modulus (4k keys). 
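A detail worth noticing in the conversion above: the copies never target the start of a field. The operands are big-endian and the type50 fields are fixed-size, so each value is right-justified and the memset()-supplied zeroes become the required left padding, exactly as the "right-justified and left-padded with zeroes" note in the type 50 description demands. Minimal userspace illustration of the pointer arithmetic:

#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned char modulus[16] = { 0 };	/* stands in for meb->modulus */
	unsigned char user_mod[] = { 0xca, 0xfe, 0xba, 0xbe };
	size_t mod_len = sizeof(user_mod);

	/* mod = meb->modulus + sizeof(meb->modulus) - mod_len; */
	memcpy(modulus + sizeof(modulus) - mod_len, user_mod, mod_len);

	for (size_t i = 0; i < sizeof(modulus); i++)
		printf("%02x%c", modulus[i], i == 15 ? '\n' : ' ');
	return 0;
}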
+ */ + if (mod_len <= 128) { /* up to 1024 bit key size */ + struct type50_crb1_msg *crb1 = ap_msg->message; + memset(crb1, 0, sizeof(*crb1)); + ap_msg->length = sizeof(*crb1); + crb1->header.msg_type_code = TYPE50_TYPE_CODE; + crb1->header.msg_len = sizeof(*crb1); + crb1->keyblock_type = TYPE50_CRB1_FMT; + p = crb1->p + sizeof(crb1->p) - short_len; + q = crb1->q + sizeof(crb1->q) - short_len; + dp = crb1->dp + sizeof(crb1->dp) - short_len; + dq = crb1->dq + sizeof(crb1->dq) - short_len; + u = crb1->u + sizeof(crb1->u) - short_len; + inp = crb1->message + sizeof(crb1->message) - mod_len; + } else if (mod_len <= 256) { /* up to 2048 bit key size */ + struct type50_crb2_msg *crb2 = ap_msg->message; + memset(crb2, 0, sizeof(*crb2)); + ap_msg->length = sizeof(*crb2); + crb2->header.msg_type_code = TYPE50_TYPE_CODE; + crb2->header.msg_len = sizeof(*crb2); + crb2->keyblock_type = TYPE50_CRB2_FMT; + p = crb2->p + sizeof(crb2->p) - short_len; + q = crb2->q + sizeof(crb2->q) - short_len; + dp = crb2->dp + sizeof(crb2->dp) - short_len; + dq = crb2->dq + sizeof(crb2->dq) - short_len; + u = crb2->u + sizeof(crb2->u) - short_len; + inp = crb2->message + sizeof(crb2->message) - mod_len; + } else if ((mod_len <= 512) && /* up to 4096 bit key size */ + (zdev->max_mod_size == CEX3A_MAX_MOD_SIZE)) { /* >= CEX3A */ + struct type50_crb3_msg *crb3 = ap_msg->message; + memset(crb3, 0, sizeof(*crb3)); + ap_msg->length = sizeof(*crb3); + crb3->header.msg_type_code = TYPE50_TYPE_CODE; + crb3->header.msg_len = sizeof(*crb3); + crb3->keyblock_type = TYPE50_CRB3_FMT; + p = crb3->p + sizeof(crb3->p) - short_len; + q = crb3->q + sizeof(crb3->q) - short_len; + dp = crb3->dp + sizeof(crb3->dp) - short_len; + dq = crb3->dq + sizeof(crb3->dq) - short_len; + u = crb3->u + sizeof(crb3->u) - short_len; + inp = crb3->message + sizeof(crb3->message) - mod_len; + } else + return -EINVAL; + + /* + * correct the offset of p, bp and mult_inv according zcrypt.h + * block size right aligned (skip the first byte) + */ + if (copy_from_user(p, crt->np_prime + MSGTYPE_ADJUSTMENT, short_len) || + copy_from_user(q, crt->nq_prime, short_len) || + copy_from_user(dp, crt->bp_key + MSGTYPE_ADJUSTMENT, short_len) || + copy_from_user(dq, crt->bq_key, short_len) || + copy_from_user(u, crt->u_mult_inv + MSGTYPE_ADJUSTMENT, short_len) || + copy_from_user(inp, crt->inputdata, mod_len)) + return -EFAULT; + + return 0; +} + +/** + * Copy results from a type 80 reply message back to user space. + * + * @zdev: crypto device pointer + * @reply: reply AP message. + * @data: pointer to user output data + * @length: size of user output data + * + * Returns 0 on success or -EFAULT. + */ +static int convert_type80(struct zcrypt_device *zdev, + struct ap_message *reply, + char __user *outputdata, + unsigned int outputdatalength) +{ + struct type80_hdr *t80h = reply->message; + unsigned char *data; + + if (t80h->len < sizeof(*t80h) + outputdatalength) { + /* The result is too short, the CEX2A card may not do that.. */ + zdev->online = 0; + pr_err("Cryptographic device %x failed and was set offline\n", + zdev->ap_dev->qid); + ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d", + zdev->ap_dev->qid, zdev->online, t80h->code); + + return -EAGAIN; /* repeat the request on a different device. 
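The MSGTYPE_ADJUSTMENT offsets in the CRT copies come from the user-space layout: per the comment above (and the ica_rsa_modexpo_crt description in zcrypt.h), np_prime, bp_key and u_mult_inv each carry 8 extra leading bytes that the type50 key block does not use, so the kernel copy starts 0x08 bytes in. A short sketch of skipping that type04 extension, with a fabricated operand for illustration:

#include <stdio.h>
#include <string.h>

#define MSGTYPE_ADJUSTMENT 0x08	/* type04 extension, unused by type50 */

int main(void)
{
	/* 8 pad bytes as delivered by user space, then the real operand */
	unsigned char np_prime[8 + 4] = {
		0, 0, 0, 0, 0, 0, 0, 0, 0xde, 0xad, 0xbe, 0xef
	};
	unsigned char p[4];

	memcpy(p, np_prime + MSGTYPE_ADJUSTMENT, sizeof(p));
	printf("p = %02x%02x%02x%02x\n", p[0], p[1], p[2], p[3]);
	return 0;
}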
*/ + } + if (zdev->user_space_type == ZCRYPT_CEX2A) + BUG_ON(t80h->len > CEX2A_MAX_RESPONSE_SIZE); + else + BUG_ON(t80h->len > CEX3A_MAX_RESPONSE_SIZE); + data = reply->message + t80h->len - outputdatalength; + if (copy_to_user(outputdata, data, outputdatalength)) + return -EFAULT; + return 0; +} + +static int convert_response(struct zcrypt_device *zdev, + struct ap_message *reply, + char __user *outputdata, + unsigned int outputdatalength) +{ + /* Response type byte is the second byte in the response. */ + switch (((unsigned char *) reply->message)[1]) { + case TYPE82_RSP_CODE: + case TYPE88_RSP_CODE: + return convert_error(zdev, reply); + case TYPE80_RSP_CODE: + return convert_type80(zdev, reply, + outputdata, outputdatalength); + default: /* Unknown response type, this should NEVER EVER happen */ + zdev->online = 0; + pr_err("Cryptographic device %x failed and was set offline\n", + zdev->ap_dev->qid); + ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail", + zdev->ap_dev->qid, zdev->online); + return -EAGAIN; /* repeat the request on a different device. */ + } +} + +/** + * This function is called from the AP bus code after a crypto request + * "msg" has finished with the reply message "reply". + * It is called from tasklet context. + * @ap_dev: pointer to the AP device + * @msg: pointer to the AP message + * @reply: pointer to the AP reply message + */ +static void zcrypt_cex2a_receive(struct ap_device *ap_dev, + struct ap_message *msg, + struct ap_message *reply) +{ + static struct error_hdr error_reply = { + .type = TYPE82_RSP_CODE, + .reply_code = REP82_ERROR_MACHINE_FAILURE, + }; + struct type80_hdr *t80h; + int length; + + /* Copy the reply message to the request message buffer. */ + if (IS_ERR(reply)) { + memcpy(msg->message, &error_reply, sizeof(error_reply)); + goto out; + } + t80h = reply->message; + if (t80h->type == TYPE80_RSP_CODE) { + if (ap_dev->device_type == AP_DEVICE_TYPE_CEX2A) + length = min_t(int, + CEX2A_MAX_RESPONSE_SIZE, t80h->len); + else + length = min_t(int, + CEX3A_MAX_RESPONSE_SIZE, t80h->len); + memcpy(msg->message, reply->message, length); + } else + memcpy(msg->message, reply->message, sizeof(error_reply)); +out: + complete((struct completion *) msg->private); +} + +static atomic_t zcrypt_step = ATOMIC_INIT(0); + +/** + * The request distributor calls this function if it picked the CEX2A + * device to handle a modexpo request. + * @zdev: pointer to zcrypt_device structure that identifies the + * CEX2A device to the request distributor + * @mex: pointer to the modexpo request buffer + */ +static long zcrypt_cex2a_modexpo(struct zcrypt_device *zdev, + struct ica_rsa_modexpo *mex) +{ + struct ap_message ap_msg; + struct completion work; + int rc; + + ap_init_message(&ap_msg); + if (zdev->user_space_type == ZCRYPT_CEX2A) + ap_msg.message = kmalloc(MSGTYPE50_CRB2_MAX_MSG_SIZE, + GFP_KERNEL); + else + ap_msg.message = kmalloc(MSGTYPE50_CRB3_MAX_MSG_SIZE, + GFP_KERNEL); + if (!ap_msg.message) + return -ENOMEM; + ap_msg.receive = zcrypt_cex2a_receive; + ap_msg.psmid = (((unsigned long long) current->pid) << 32) + + atomic_inc_return(&zcrypt_step); + ap_msg.private = &work; + rc = ICAMEX_msg_to_type50MEX_msg(zdev, &ap_msg, mex); + if (rc) + goto out_free; + init_completion(&work); + ap_queue_message(zdev->ap_dev, &ap_msg); + rc = wait_for_completion_interruptible(&work); + if (rc == 0) + rc = convert_response(zdev, &ap_msg, mex->outputdata, + mex->outputdatalength); + else + /* Signal pending. 
*/ + ap_cancel_message(zdev->ap_dev, &ap_msg); +out_free: + kfree(ap_msg.message); + return rc; +} + +/** + * The request distributor calls this function if it picked the CEX2A + * device to handle a modexpo_crt request. + * @zdev: pointer to zcrypt_device structure that identifies the + * CEX2A device to the request distributor + * @crt: pointer to the modexpoc_crt request buffer + */ +static long zcrypt_cex2a_modexpo_crt(struct zcrypt_device *zdev, + struct ica_rsa_modexpo_crt *crt) +{ + struct ap_message ap_msg; + struct completion work; + int rc; + + ap_init_message(&ap_msg); + if (zdev->user_space_type == ZCRYPT_CEX2A) + ap_msg.message = kmalloc(MSGTYPE50_CRB2_MAX_MSG_SIZE, + GFP_KERNEL); + else + ap_msg.message = kmalloc(MSGTYPE50_CRB3_MAX_MSG_SIZE, + GFP_KERNEL); + if (!ap_msg.message) + return -ENOMEM; + ap_msg.receive = zcrypt_cex2a_receive; + ap_msg.psmid = (((unsigned long long) current->pid) << 32) + + atomic_inc_return(&zcrypt_step); + ap_msg.private = &work; + rc = ICACRT_msg_to_type50CRT_msg(zdev, &ap_msg, crt); + if (rc) + goto out_free; + init_completion(&work); + ap_queue_message(zdev->ap_dev, &ap_msg); + rc = wait_for_completion_interruptible(&work); + if (rc == 0) + rc = convert_response(zdev, &ap_msg, crt->outputdata, + crt->outputdatalength); + else + /* Signal pending. */ + ap_cancel_message(zdev->ap_dev, &ap_msg); +out_free: + kfree(ap_msg.message); + return rc; +} + +/** + * The crypto operations for message type 50. + */ +static struct zcrypt_ops zcrypt_msgtype50_ops = { + .rsa_modexpo = zcrypt_cex2a_modexpo, + .rsa_modexpo_crt = zcrypt_cex2a_modexpo_crt, + .owner = THIS_MODULE, + .variant = MSGTYPE50_VARIANT_DEFAULT, +}; + +int __init zcrypt_msgtype50_init(void) +{ + zcrypt_msgtype_register(&zcrypt_msgtype50_ops); + return 0; +} + +void __exit zcrypt_msgtype50_exit(void) +{ + zcrypt_msgtype_unregister(&zcrypt_msgtype50_ops); +} + +module_init(zcrypt_msgtype50_init); +module_exit(zcrypt_msgtype50_exit); diff --git a/drivers/s390/crypto/zcrypt_msgtype50.h b/drivers/s390/crypto/zcrypt_msgtype50.h new file mode 100644 index 00000000000..0a66e4aeeb5 --- /dev/null +++ b/drivers/s390/crypto/zcrypt_msgtype50.h @@ -0,0 +1,41 @@ +/* + * zcrypt 2.1.0 + * + * Copyright IBM Corp. 2001, 2012 + * Author(s): Robert Burroughs + * Eric Rossman (edrossma@us.ibm.com) + * + * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com) + * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com> + * MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
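Both zcrypt_cex2a_modexpo() and zcrypt_cex2a_modexpo_crt() above share one lifecycle: allocate the message buffer, set the receive callback, queue the request, sleep until the tasklet completes the work item, then either convert the reply or cancel the request if a signal interrupted the wait. A userspace analogue of that wait/complete handshake, using pthreads in place of the kernel's completion API:

#include <pthread.h>
#include <stdio.h>

struct work {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	int done;
};

static void complete_work(struct work *w)	/* receive-callback side */
{
	pthread_mutex_lock(&w->lock);
	w->done = 1;
	pthread_cond_signal(&w->cond);
	pthread_mutex_unlock(&w->lock);
}

static void *device_side(void *arg)
{
	complete_work(arg);	/* reply arrived: wake the submitter */
	return NULL;
}

int main(void)
{
	struct work w = { PTHREAD_MUTEX_INITIALIZER,
			  PTHREAD_COND_INITIALIZER, 0 };
	pthread_t t;

	pthread_create(&t, NULL, device_side, &w);
	pthread_mutex_lock(&w.lock);
	while (!w.done)		/* wait_for_completion() analogue */
		pthread_cond_wait(&w.cond, &w.lock);
	pthread_mutex_unlock(&w.lock);
	pthread_join(t, NULL);
	puts("reply received");
	return 0;
}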
+ */ + +#ifndef _ZCRYPT_MSGTYPE50_H_ +#define _ZCRYPT_MSGTYPE50_H_ + +#define MSGTYPE50_NAME "zcrypt_msgtype50" +#define MSGTYPE50_VARIANT_DEFAULT 0 + +#define MSGTYPE50_CRB2_MAX_MSG_SIZE 0x390 /*sizeof(struct type50_crb2_msg)*/ +#define MSGTYPE50_CRB3_MAX_MSG_SIZE 0x710 /*sizeof(struct type50_crb3_msg)*/ + +#define MSGTYPE_ADJUSTMENT 0x08 /*type04 extension (not needed in type50)*/ + +int zcrypt_msgtype50_init(void); +void zcrypt_msgtype50_exit(void); + +#endif /* _ZCRYPT_MSGTYPE50_H_ */ diff --git a/drivers/s390/crypto/zcrypt_msgtype6.c b/drivers/s390/crypto/zcrypt_msgtype6.c new file mode 100644 index 00000000000..46b324ce6c7 --- /dev/null +++ b/drivers/s390/crypto/zcrypt_msgtype6.c @@ -0,0 +1,1152 @@ +/* + * zcrypt 2.1.0 + * + * Copyright IBM Corp. 2001, 2012 + * Author(s): Robert Burroughs + * Eric Rossman (edrossma@us.ibm.com) + * + * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com) + * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com> + * Ralph Wuerthner <rwuerthn@de.ibm.com> + * MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#define KMSG_COMPONENT "zcrypt" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/err.h> +#include <linux/delay.h> +#include <linux/slab.h> +#include <linux/atomic.h> +#include <linux/uaccess.h> + +#include "ap_bus.h" +#include "zcrypt_api.h" +#include "zcrypt_error.h" +#include "zcrypt_msgtype6.h" +#include "zcrypt_cca_key.h" + +#define PCIXCC_MIN_MOD_SIZE_OLD 64 /* 512 bits */ +#define PCIXCC_MAX_ICA_RESPONSE_SIZE 0x77c /* max size type86 v2 reply */ + +#define CEIL4(x) ((((x)+3)/4)*4) + +struct response_type { + struct completion work; + int type; +}; +#define PCIXCC_RESPONSE_TYPE_ICA 0 +#define PCIXCC_RESPONSE_TYPE_XCRB 1 +#define PCIXCC_RESPONSE_TYPE_EP11 2 + +MODULE_AUTHOR("IBM Corporation"); +MODULE_DESCRIPTION("Cryptographic Coprocessor (message type 6), " \ + "Copyright IBM Corp. 2001, 2012"); +MODULE_LICENSE("GPL"); + +static void zcrypt_msgtype6_receive(struct ap_device *, struct ap_message *, + struct ap_message *); + +/** + * CPRB + * Note that all shorts, ints and longs are little-endian. + * All pointer fields are 32-bits long, and mean nothing + * + * A request CPRB is followed by a request_parameter_block. + * + * The request (or reply) parameter block is organized thus: + * function code + * VUD block + * key block + */ +struct CPRB { + unsigned short cprb_len; /* CPRB length */ + unsigned char cprb_ver_id; /* CPRB version id. */ + unsigned char pad_000; /* Alignment pad byte. 
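CEIL4() rounds a length up to the next multiple of four with plain integer arithmetic, so callers must separately check that the rounding did not wrap: for values within three of the type's maximum, (x)+3 overflows and the "aligned" result comes out smaller than the input, which is exactly the condition the message builders below reject. A sketch, assuming 32-bit unsigned lengths as in the ioctl structures:

#include <stdio.h>
#include <stdint.h>

#define CEIL4(x) ((((x) + 3) / 4) * 4)

int main(void)
{
	uint32_t ok = 10, evil = UINT32_MAX - 1;

	printf("CEIL4(%u) = %u\n", ok, (uint32_t)CEIL4(ok));	/* 12 */

	/* Overflow: the rounded value is smaller than the input, which
	 * is the condition the message builders test and reject. */
	if ((uint32_t)CEIL4(evil) < evil)
		printf("CEIL4(%u) wrapped to %u -> -EINVAL\n",
		       evil, (uint32_t)CEIL4(evil));
	return 0;
}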
*/ + unsigned char srpi_rtcode[4]; /* SRPI return code LELONG */ + unsigned char srpi_verb; /* SRPI verb type */ + unsigned char flags; /* flags */ + unsigned char func_id[2]; /* function id */ + unsigned char checkpoint_flag; /* */ + unsigned char resv2; /* reserved */ + unsigned short req_parml; /* request parameter buffer */ + /* length 16-bit little endian */ + unsigned char req_parmp[4]; /* request parameter buffer * + * pointer (means nothing: the * + * parameter buffer follows * + * the CPRB). */ + unsigned char req_datal[4]; /* request data buffer */ + /* length ULELONG */ + unsigned char req_datap[4]; /* request data buffer */ + /* pointer */ + unsigned short rpl_parml; /* reply parameter buffer */ + /* length 16-bit little endian */ + unsigned char pad_001[2]; /* Alignment pad bytes. ULESHORT */ + unsigned char rpl_parmp[4]; /* reply parameter buffer * + * pointer (means nothing: the * + * parameter buffer follows * + * the CPRB). */ + unsigned char rpl_datal[4]; /* reply data buffer len ULELONG */ + unsigned char rpl_datap[4]; /* reply data buffer */ + /* pointer */ + unsigned short ccp_rscode; /* server reason code ULESHORT */ + unsigned short ccp_rtcode; /* server return code ULESHORT */ + unsigned char repd_parml[2]; /* replied parameter len ULESHORT*/ + unsigned char mac_data_len[2]; /* Mac Data Length ULESHORT */ + unsigned char repd_datal[4]; /* replied data length ULELONG */ + unsigned char req_pc[2]; /* PC identifier */ + unsigned char res_origin[8]; /* resource origin */ + unsigned char mac_value[8]; /* Mac Value */ + unsigned char logon_id[8]; /* Logon Identifier */ + unsigned char usage_domain[2]; /* cdx */ + unsigned char resv3[18]; /* reserved for requestor */ + unsigned short svr_namel; /* server name length ULESHORT */ + unsigned char svr_name[8]; /* server name */ +} __packed; + +struct function_and_rules_block { + unsigned char function_code[2]; + unsigned short ulen; + unsigned char only_rule[8]; +} __packed; + +/** + * The following is used to initialize the CPRBX passed to the PCIXCC/CEX2C + * card in a type6 message. The 3 fields that must be filled in at execution + * time are req_parml, rpl_parml and usage_domain. + * Everything about this interface is ascii/big-endian, since the + * device does *not* have 'Intel inside'. + * + * The CPRBX is followed immediately by the parm block. + * The parm block contains: + * - function code ('PD' 0x5044 or 'PK' 0x504B) + * - rule block (one of:) + * + 0x000A 'PKCS-1.2' (MCL2 'PD') + * + 0x000A 'ZERO-PAD' (MCL2 'PK') + * + 0x000A 'ZERO-PAD' (MCL3 'PD' or CEX2C 'PD') + * + 0x000A 'MRP ' (MCL3 'PK' or CEX2C 'PK') + * - VUD block + */ +static struct CPRBX static_cprbx = { + .cprb_len = 0x00DC, + .cprb_ver_id = 0x02, + .func_id = {0x54, 0x32}, +}; + +/** + * Convert a ICAMEX message to a type6 MEX message. + * + * @zdev: crypto device pointer + * @ap_msg: pointer to AP message + * @mex: pointer to user input data + * + * Returns 0 on success or -EFAULT. 
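The CPRB above keeps multi-byte quantities in unsigned char arrays annotated LELONG/ULESHORT because the card format is little-endian while s390 is big-endian, so values must be assembled bytewise rather than read through a cast. A sketch of decoding such a field (get_unaligned_le16() would be the in-kernel helper; this standalone version makes the byte order explicit):

#include <stdio.h>
#include <stdint.h>

/* Assemble a 16-bit little-endian value from a raw byte field,
 * independent of host endianness. */
static uint16_t read_le16(const unsigned char b[2])
{
	return (uint16_t)(b[0] | (b[1] << 8));
}

int main(void)
{
	unsigned char repd_parml[2] = { 0x34, 0x12 };	/* 0x1234, stored LE */

	printf("replied parameter length: 0x%04x\n", read_le16(repd_parml));
	return 0;
}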
+ */ +static int ICAMEX_msg_to_type6MEX_msgX(struct zcrypt_device *zdev, + struct ap_message *ap_msg, + struct ica_rsa_modexpo *mex) +{ + static struct type6_hdr static_type6_hdrX = { + .type = 0x06, + .offset1 = 0x00000058, + .agent_id = {'C', 'A',}, + .function_code = {'P', 'K'}, + }; + static struct function_and_rules_block static_pke_fnr = { + .function_code = {'P', 'K'}, + .ulen = 10, + .only_rule = {'M', 'R', 'P', ' ', ' ', ' ', ' ', ' '} + }; + static struct function_and_rules_block static_pke_fnr_MCL2 = { + .function_code = {'P', 'K'}, + .ulen = 10, + .only_rule = {'Z', 'E', 'R', 'O', '-', 'P', 'A', 'D'} + }; + struct { + struct type6_hdr hdr; + struct CPRBX cprbx; + struct function_and_rules_block fr; + unsigned short length; + char text[0]; + } __packed * msg = ap_msg->message; + int size; + + /* VUD.ciphertext */ + msg->length = mex->inputdatalength + 2; + if (copy_from_user(msg->text, mex->inputdata, mex->inputdatalength)) + return -EFAULT; + + /* Set up key which is located after the variable length text. */ + size = zcrypt_type6_mex_key_en(mex, msg->text+mex->inputdatalength, 1); + if (size < 0) + return size; + size += sizeof(*msg) + mex->inputdatalength; + + /* message header, cprbx and f&r */ + msg->hdr = static_type6_hdrX; + msg->hdr.ToCardLen1 = size - sizeof(msg->hdr); + msg->hdr.FromCardLen1 = PCIXCC_MAX_ICA_RESPONSE_SIZE - sizeof(msg->hdr); + + msg->cprbx = static_cprbx; + msg->cprbx.domain = AP_QID_QUEUE(zdev->ap_dev->qid); + msg->cprbx.rpl_msgbl = msg->hdr.FromCardLen1; + + msg->fr = (zdev->user_space_type == ZCRYPT_PCIXCC_MCL2) ? + static_pke_fnr_MCL2 : static_pke_fnr; + + msg->cprbx.req_parml = size - sizeof(msg->hdr) - sizeof(msg->cprbx); + + ap_msg->length = size; + return 0; +} + +/** + * Convert a ICACRT message to a type6 CRT message. + * + * @zdev: crypto device pointer + * @ap_msg: pointer to AP message + * @crt: pointer to user input data + * + * Returns 0 on success or -EFAULT. + */ +static int ICACRT_msg_to_type6CRT_msgX(struct zcrypt_device *zdev, + struct ap_message *ap_msg, + struct ica_rsa_modexpo_crt *crt) +{ + static struct type6_hdr static_type6_hdrX = { + .type = 0x06, + .offset1 = 0x00000058, + .agent_id = {'C', 'A',}, + .function_code = {'P', 'D'}, + }; + static struct function_and_rules_block static_pkd_fnr = { + .function_code = {'P', 'D'}, + .ulen = 10, + .only_rule = {'Z', 'E', 'R', 'O', '-', 'P', 'A', 'D'} + }; + + static struct function_and_rules_block static_pkd_fnr_MCL2 = { + .function_code = {'P', 'D'}, + .ulen = 10, + .only_rule = {'P', 'K', 'C', 'S', '-', '1', '.', '2'} + }; + struct { + struct type6_hdr hdr; + struct CPRBX cprbx; + struct function_and_rules_block fr; + unsigned short length; + char text[0]; + } __packed * msg = ap_msg->message; + int size; + + /* VUD.ciphertext */ + msg->length = crt->inputdatalength + 2; + if (copy_from_user(msg->text, crt->inputdata, crt->inputdatalength)) + return -EFAULT; + + /* Set up key which is located after the variable length text. 
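ICAMEX_msg_to_type6MEX_msgX() above lays the request out as header, CPRBX, function-and-rules block, a two-byte VUD length, the ciphertext, and finally the key token, then derives ToCardLen1 and req_parml by subtracting the fixed parts from the running total. A sketch of that bookkeeping with purely hypothetical component sizes (the real ones come from the packed structs):

#include <stdio.h>

#define HDR_LEN   88	/* hypothetical sizeof(msg->hdr) */
#define CPRBX_LEN 220	/* hypothetical sizeof(msg->cprbx) */
#define FNR_LEN   12	/* hypothetical sizeof(msg->fr) */

int main(void)
{
	int inputdatalength = 256;	/* ciphertext bytes */
	int key_len = 300;		/* key token built after the text */
	int size;

	/* fixed parts + 2-byte VUD length + text, then the key on top */
	size = HDR_LEN + CPRBX_LEN + FNR_LEN + 2 + inputdatalength + key_len;

	printf("ToCardLen1 = %d\n", size - HDR_LEN);
	printf("req_parml  = %d\n", size - HDR_LEN - CPRBX_LEN);
	return 0;
}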
*/ + size = zcrypt_type6_crt_key(crt, msg->text + crt->inputdatalength, 1); + if (size < 0) + return size; + size += sizeof(*msg) + crt->inputdatalength; /* total size of msg */ + + /* message header, cprbx and f&r */ + msg->hdr = static_type6_hdrX; + msg->hdr.ToCardLen1 = size - sizeof(msg->hdr); + msg->hdr.FromCardLen1 = PCIXCC_MAX_ICA_RESPONSE_SIZE - sizeof(msg->hdr); + + msg->cprbx = static_cprbx; + msg->cprbx.domain = AP_QID_QUEUE(zdev->ap_dev->qid); + msg->cprbx.req_parml = msg->cprbx.rpl_msgbl = + size - sizeof(msg->hdr) - sizeof(msg->cprbx); + + msg->fr = (zdev->user_space_type == ZCRYPT_PCIXCC_MCL2) ? + static_pkd_fnr_MCL2 : static_pkd_fnr; + + ap_msg->length = size; + return 0; +} + +/** + * Convert a XCRB message to a type6 CPRB message. + * + * @zdev: crypto device pointer + * @ap_msg: pointer to AP message + * @xcRB: pointer to user input data + * + * Returns 0 on success or -EFAULT, -EINVAL. + */ +struct type86_fmt2_msg { + struct type86_hdr hdr; + struct type86_fmt2_ext fmt2; +} __packed; + +static int XCRB_msg_to_type6CPRB_msgX(struct zcrypt_device *zdev, + struct ap_message *ap_msg, + struct ica_xcRB *xcRB) +{ + static struct type6_hdr static_type6_hdrX = { + .type = 0x06, + .offset1 = 0x00000058, + }; + struct { + struct type6_hdr hdr; + struct CPRBX cprbx; + } __packed * msg = ap_msg->message; + + int rcblen = CEIL4(xcRB->request_control_blk_length); + int replylen, req_sumlen, resp_sumlen; + char *req_data = ap_msg->message + sizeof(struct type6_hdr) + rcblen; + char *function_code; + + if (CEIL4(xcRB->request_control_blk_length) < + xcRB->request_control_blk_length) + return -EINVAL; /* overflow after alignment*/ + + /* length checks */ + ap_msg->length = sizeof(struct type6_hdr) + + CEIL4(xcRB->request_control_blk_length) + + xcRB->request_data_length; + if (ap_msg->length > MSGTYPE06_MAX_MSG_SIZE) + return -EINVAL; + + /* Overflow check + sum must be greater (or equal) than the largest operand */ + req_sumlen = CEIL4(xcRB->request_control_blk_length) + + xcRB->request_data_length; + if ((CEIL4(xcRB->request_control_blk_length) <= + xcRB->request_data_length) ? + (req_sumlen < xcRB->request_data_length) : + (req_sumlen < CEIL4(xcRB->request_control_blk_length))) { + return -EINVAL; + } + + if (CEIL4(xcRB->reply_control_blk_length) < + xcRB->reply_control_blk_length) + return -EINVAL; /* overflow after alignment*/ + + replylen = sizeof(struct type86_fmt2_msg) + + CEIL4(xcRB->reply_control_blk_length) + + xcRB->reply_data_length; + if (replylen > MSGTYPE06_MAX_MSG_SIZE) + return -EINVAL; + + /* Overflow check + sum must be greater (or equal) than the largest operand */ + resp_sumlen = CEIL4(xcRB->reply_control_blk_length) + + xcRB->reply_data_length; + if ((CEIL4(xcRB->reply_control_blk_length) <= xcRB->reply_data_length) ? 
+ (resp_sumlen < xcRB->reply_data_length) : + (resp_sumlen < CEIL4(xcRB->reply_control_blk_length))) { + return -EINVAL; + } + + /* prepare type6 header */ + msg->hdr = static_type6_hdrX; + memcpy(msg->hdr.agent_id , &(xcRB->agent_ID), sizeof(xcRB->agent_ID)); + msg->hdr.ToCardLen1 = xcRB->request_control_blk_length; + if (xcRB->request_data_length) { + msg->hdr.offset2 = msg->hdr.offset1 + rcblen; + msg->hdr.ToCardLen2 = xcRB->request_data_length; + } + msg->hdr.FromCardLen1 = xcRB->reply_control_blk_length; + msg->hdr.FromCardLen2 = xcRB->reply_data_length; + + /* prepare CPRB */ + if (copy_from_user(&(msg->cprbx), xcRB->request_control_blk_addr, + xcRB->request_control_blk_length)) + return -EFAULT; + if (msg->cprbx.cprb_len + sizeof(msg->hdr.function_code) > + xcRB->request_control_blk_length) + return -EINVAL; + function_code = ((unsigned char *)&msg->cprbx) + msg->cprbx.cprb_len; + memcpy(msg->hdr.function_code, function_code, + sizeof(msg->hdr.function_code)); + + if (memcmp(function_code, "US", 2) == 0) + ap_msg->special = 1; + else + ap_msg->special = 0; + + /* copy data block */ + if (xcRB->request_data_length && + copy_from_user(req_data, xcRB->request_data_address, + xcRB->request_data_length)) + return -EFAULT; + return 0; +} + +static int xcrb_msg_to_type6_ep11cprb_msgx(struct zcrypt_device *zdev, + struct ap_message *ap_msg, + struct ep11_urb *xcRB) +{ + unsigned int lfmt; + + static struct type6_hdr static_type6_ep11_hdr = { + .type = 0x06, + .rqid = {0x00, 0x01}, + .function_code = {0x00, 0x00}, + .agent_id[0] = 0x58, /* {'X'} */ + .agent_id[1] = 0x43, /* {'C'} */ + .offset1 = 0x00000058, + }; + + struct { + struct type6_hdr hdr; + struct ep11_cprb cprbx; + unsigned char pld_tag; /* fixed value 0x30 */ + unsigned char pld_lenfmt; /* payload length format */ + } __packed * msg = ap_msg->message; + + struct pld_hdr { + unsigned char func_tag; /* fixed value 0x4 */ + unsigned char func_len; /* fixed value 0x4 */ + unsigned int func_val; /* function ID */ + unsigned char dom_tag; /* fixed value 0x4 */ + unsigned char dom_len; /* fixed value 0x4 */ + unsigned int dom_val; /* domain id */ + } __packed * payload_hdr; + + if (CEIL4(xcRB->req_len) < xcRB->req_len) + return -EINVAL; /* overflow after alignment*/ + + /* length checks */ + ap_msg->length = sizeof(struct type6_hdr) + xcRB->req_len; + if (CEIL4(xcRB->req_len) > MSGTYPE06_MAX_MSG_SIZE - + (sizeof(struct type6_hdr))) + return -EINVAL; + + if (CEIL4(xcRB->resp_len) < xcRB->resp_len) + return -EINVAL; /* overflow after alignment*/ + + if (CEIL4(xcRB->resp_len) > MSGTYPE06_MAX_MSG_SIZE - + (sizeof(struct type86_fmt2_msg))) + return -EINVAL; + + /* prepare type6 header */ + msg->hdr = static_type6_ep11_hdr; + msg->hdr.ToCardLen1 = xcRB->req_len; + msg->hdr.FromCardLen1 = xcRB->resp_len; + + /* Import CPRB data from the ioctl input parameter */ + if (copy_from_user(&(msg->cprbx.cprb_len), + (char __force __user *)xcRB->req, xcRB->req_len)) { + return -EFAULT; + } + + /* + The target domain field within the cprb body/payload block will be + replaced by the usage domain for non-management commands only. + Therefore we check the first bit of the 'flags' parameter for + management command indication. 
+ 0 - non management command + 1 - management command + */ + if (!((msg->cprbx.flags & 0x80) == 0x80)) { + msg->cprbx.target_id = (unsigned int) + AP_QID_QUEUE(zdev->ap_dev->qid); + + if ((msg->pld_lenfmt & 0x80) == 0x80) { /*ext.len.fmt 2 or 3*/ + switch (msg->pld_lenfmt & 0x03) { + case 1: + lfmt = 2; + break; + case 2: + lfmt = 3; + break; + default: + return -EINVAL; + } + } else { + lfmt = 1; /* length format #1 */ + } + payload_hdr = (struct pld_hdr *)((&(msg->pld_lenfmt))+lfmt); + payload_hdr->dom_val = (unsigned int) + AP_QID_QUEUE(zdev->ap_dev->qid); + } + return 0; +} + +/** + * Copy results from a type 86 ICA reply message back to user space. + * + * @zdev: crypto device pointer + * @reply: reply AP message. + * @data: pointer to user output data + * @length: size of user output data + * + * Returns 0 on success or -EINVAL, -EFAULT, -EAGAIN in case of an error. + */ +struct type86x_reply { + struct type86_hdr hdr; + struct type86_fmt2_ext fmt2; + struct CPRBX cprbx; + unsigned char pad[4]; /* 4 byte function code/rules block ? */ + unsigned short length; + char text[0]; +} __packed; + +struct type86_ep11_reply { + struct type86_hdr hdr; + struct type86_fmt2_ext fmt2; + struct ep11_cprb cprbx; +} __packed; + +static int convert_type86_ica(struct zcrypt_device *zdev, + struct ap_message *reply, + char __user *outputdata, + unsigned int outputdatalength) +{ + static unsigned char static_pad[] = { + 0x00, 0x02, + 0x1B, 0x7B, 0x5D, 0xB5, 0x75, 0x01, 0x3D, 0xFD, + 0x8D, 0xD1, 0xC7, 0x03, 0x2D, 0x09, 0x23, 0x57, + 0x89, 0x49, 0xB9, 0x3F, 0xBB, 0x99, 0x41, 0x5B, + 0x75, 0x21, 0x7B, 0x9D, 0x3B, 0x6B, 0x51, 0x39, + 0xBB, 0x0D, 0x35, 0xB9, 0x89, 0x0F, 0x93, 0xA5, + 0x0B, 0x47, 0xF1, 0xD3, 0xBB, 0xCB, 0xF1, 0x9D, + 0x23, 0x73, 0x71, 0xFF, 0xF3, 0xF5, 0x45, 0xFB, + 0x61, 0x29, 0x23, 0xFD, 0xF1, 0x29, 0x3F, 0x7F, + 0x17, 0xB7, 0x1B, 0xA9, 0x19, 0xBD, 0x57, 0xA9, + 0xD7, 0x95, 0xA3, 0xCB, 0xED, 0x1D, 0xDB, 0x45, + 0x7D, 0x11, 0xD1, 0x51, 0x1B, 0xED, 0x71, 0xE9, + 0xB1, 0xD1, 0xAB, 0xAB, 0x21, 0x2B, 0x1B, 0x9F, + 0x3B, 0x9F, 0xF7, 0xF7, 0xBD, 0x63, 0xEB, 0xAD, + 0xDF, 0xB3, 0x6F, 0x5B, 0xDB, 0x8D, 0xA9, 0x5D, + 0xE3, 0x7D, 0x77, 0x49, 0x47, 0xF5, 0xA7, 0xFD, + 0xAB, 0x2F, 0x27, 0x35, 0x77, 0xD3, 0x49, 0xC9, + 0x09, 0xEB, 0xB1, 0xF9, 0xBF, 0x4B, 0xCB, 0x2B, + 0xEB, 0xEB, 0x05, 0xFF, 0x7D, 0xC7, 0x91, 0x8B, + 0x09, 0x83, 0xB9, 0xB9, 0x69, 0x33, 0x39, 0x6B, + 0x79, 0x75, 0x19, 0xBF, 0xBB, 0x07, 0x1D, 0xBD, + 0x29, 0xBF, 0x39, 0x95, 0x93, 0x1D, 0x35, 0xC7, + 0xC9, 0x4D, 0xE5, 0x97, 0x0B, 0x43, 0x9B, 0xF1, + 0x16, 0x93, 0x03, 0x1F, 0xA5, 0xFB, 0xDB, 0xF3, + 0x27, 0x4F, 0x27, 0x61, 0x05, 0x1F, 0xB9, 0x23, + 0x2F, 0xC3, 0x81, 0xA9, 0x23, 0x71, 0x55, 0x55, + 0xEB, 0xED, 0x41, 0xE5, 0xF3, 0x11, 0xF1, 0x43, + 0x69, 0x03, 0xBD, 0x0B, 0x37, 0x0F, 0x51, 0x8F, + 0x0B, 0xB5, 0x89, 0x5B, 0x67, 0xA9, 0xD9, 0x4F, + 0x01, 0xF9, 0x21, 0x77, 0x37, 0x73, 0x79, 0xC5, + 0x7F, 0x51, 0xC1, 0xCF, 0x97, 0xA1, 0x75, 0xAD, + 0x35, 0x9D, 0xD3, 0xD3, 0xA7, 0x9D, 0x5D, 0x41, + 0x6F, 0x65, 0x1B, 0xCF, 0xA9, 0x87, 0x91, 0x09 + }; + struct type86x_reply *msg = reply->message; + unsigned short service_rc, service_rs; + unsigned int reply_len, pad_len; + char *data; + + service_rc = msg->cprbx.ccp_rtcode; + if (unlikely(service_rc != 0)) { + service_rs = msg->cprbx.ccp_rscode; + if (service_rc == 8 && service_rs == 66) + return -EINVAL; + if (service_rc == 8 && service_rs == 65) + return -EINVAL; + if (service_rc == 8 && service_rs == 770) + return -EINVAL; + if (service_rc == 8 && service_rs == 783) { + zdev->min_mod_size = 
PCIXCC_MIN_MOD_SIZE_OLD; + return -EAGAIN; + } + if (service_rc == 12 && service_rs == 769) + return -EINVAL; + if (service_rc == 8 && service_rs == 72) + return -EINVAL; + zdev->online = 0; + pr_err("Cryptographic device %x failed and was set offline\n", + zdev->ap_dev->qid); + ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d", + zdev->ap_dev->qid, zdev->online, + msg->hdr.reply_code); + return -EAGAIN; /* repeat the request on a different device. */ + } + data = msg->text; + reply_len = msg->length - 2; + if (reply_len > outputdatalength) + return -EINVAL; + /* + * For all encipher requests, the length of the ciphertext (reply_len) + * will always equal the modulus length. For MEX decipher requests + * the output needs to get padded. Minimum pad size is 10. + * + * Currently, the cases where padding will be added is for: + * - PCIXCC_MCL2 using a CRT form token (since PKD didn't support + * ZERO-PAD and CRT is only supported for PKD requests) + * - PCICC, always + */ + pad_len = outputdatalength - reply_len; + if (pad_len > 0) { + if (pad_len < 10) + return -EINVAL; + /* 'restore' padding left in the PCICC/PCIXCC card. */ + if (copy_to_user(outputdata, static_pad, pad_len - 1)) + return -EFAULT; + if (put_user(0, outputdata + pad_len - 1)) + return -EFAULT; + } + /* Copy the crypto response to user space. */ + if (copy_to_user(outputdata + pad_len, data, reply_len)) + return -EFAULT; + return 0; +} + +/** + * Copy results from a type 86 XCRB reply message back to user space. + * + * @zdev: crypto device pointer + * @reply: reply AP message. + * @xcRB: pointer to XCRB + * + * Returns 0 on success or -EINVAL, -EFAULT, -EAGAIN in case of an error. + */ +static int convert_type86_xcrb(struct zcrypt_device *zdev, + struct ap_message *reply, + struct ica_xcRB *xcRB) +{ + struct type86_fmt2_msg *msg = reply->message; + char *data = reply->message; + + /* Copy CPRB to user */ + if (copy_to_user(xcRB->reply_control_blk_addr, + data + msg->fmt2.offset1, msg->fmt2.count1)) + return -EFAULT; + xcRB->reply_control_blk_length = msg->fmt2.count1; + + /* Copy data buffer to user */ + if (msg->fmt2.count2) + if (copy_to_user(xcRB->reply_data_addr, + data + msg->fmt2.offset2, msg->fmt2.count2)) + return -EFAULT; + xcRB->reply_data_length = msg->fmt2.count2; + return 0; +} + +/** + * Copy results from a type 86 EP11 XCRB reply message back to user space. + * + * @zdev: crypto device pointer + * @reply: reply AP message. + * @xcRB: pointer to EP11 user request block + * + * Returns 0 on success or -EINVAL, -EFAULT, -EAGAIN in case of an error. 
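convert_type86_ica() above reconstructs the padding the card stripped from a MEX decipher reply: pad_len-1 bytes of a fixed non-zero pattern, one zero terminator, then the plaintext, with pads shorter than the 10-byte minimum rejected. A sketch of the same reconstruction into a local buffer instead of user space:

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* Stand-ins: a 16-byte "modulus" and a 4-byte decrypted reply. */
	unsigned char out[16];
	unsigned char reply[4] = { 0xDE, 0xAD, 0xBE, 0xEF };
	unsigned int outlen = sizeof(out), reply_len = sizeof(reply);
	unsigned int pad_len = outlen - reply_len;

	if (pad_len > 0) {
		if (pad_len < 10)
			return 1;		/* -EINVAL in the driver */
		memset(out, 0xFF, pad_len - 1);	/* non-zero filler pattern */
		out[pad_len - 1] = 0x00;	/* pad terminator */
	}
	memcpy(out + pad_len, reply, reply_len);

	printf("plaintext starts at offset %u\n", pad_len);
	return 0;
}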
+ */ +static int convert_type86_ep11_xcrb(struct zcrypt_device *zdev, + struct ap_message *reply, + struct ep11_urb *xcRB) +{ + struct type86_fmt2_msg *msg = reply->message; + char *data = reply->message; + + if (xcRB->resp_len < msg->fmt2.count1) + return -EINVAL; + + /* Copy response CPRB to user */ + if (copy_to_user((char __force __user *)xcRB->resp, + data + msg->fmt2.offset1, msg->fmt2.count1)) + return -EFAULT; + xcRB->resp_len = msg->fmt2.count1; + return 0; +} + +static int convert_type86_rng(struct zcrypt_device *zdev, + struct ap_message *reply, + char *buffer) +{ + struct { + struct type86_hdr hdr; + struct type86_fmt2_ext fmt2; + struct CPRBX cprbx; + } __packed * msg = reply->message; + char *data = reply->message; + + if (msg->cprbx.ccp_rtcode != 0 || msg->cprbx.ccp_rscode != 0) + return -EINVAL; + memcpy(buffer, data + msg->fmt2.offset2, msg->fmt2.count2); + return msg->fmt2.count2; +} + +static int convert_response_ica(struct zcrypt_device *zdev, + struct ap_message *reply, + char __user *outputdata, + unsigned int outputdatalength) +{ + struct type86x_reply *msg = reply->message; + + /* Response type byte is the second byte in the response. */ + switch (((unsigned char *) reply->message)[1]) { + case TYPE82_RSP_CODE: + case TYPE88_RSP_CODE: + return convert_error(zdev, reply); + case TYPE86_RSP_CODE: + if (msg->cprbx.ccp_rtcode && + (msg->cprbx.ccp_rscode == 0x14f) && + (outputdatalength > 256)) { + if (zdev->max_exp_bit_length <= 17) { + zdev->max_exp_bit_length = 17; + return -EAGAIN; + } else + return -EINVAL; + } + if (msg->hdr.reply_code) + return convert_error(zdev, reply); + if (msg->cprbx.cprb_ver_id == 0x02) + return convert_type86_ica(zdev, reply, + outputdata, outputdatalength); + /* Fall through, no break, incorrect cprb version is an unknown + * response */ + default: /* Unknown response type, this should NEVER EVER happen */ + zdev->online = 0; + pr_err("Cryptographic device %x failed and was set offline\n", + zdev->ap_dev->qid); + ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail", + zdev->ap_dev->qid, zdev->online); + return -EAGAIN; /* repeat the request on a different device. */ + } +} + +static int convert_response_xcrb(struct zcrypt_device *zdev, + struct ap_message *reply, + struct ica_xcRB *xcRB) +{ + struct type86x_reply *msg = reply->message; + + /* Response type byte is the second byte in the response. */ + switch (((unsigned char *) reply->message)[1]) { + case TYPE82_RSP_CODE: + case TYPE88_RSP_CODE: + xcRB->status = 0x0008044DL; /* HDD_InvalidParm */ + return convert_error(zdev, reply); + case TYPE86_RSP_CODE: + if (msg->hdr.reply_code) { + memcpy(&(xcRB->status), msg->fmt2.apfs, sizeof(u32)); + return convert_error(zdev, reply); + } + if (msg->cprbx.cprb_ver_id == 0x02) + return convert_type86_xcrb(zdev, reply, xcRB); + /* Fall through, no break, incorrect cprb version is an unknown + * response */ + default: /* Unknown response type, this should NEVER EVER happen */ + xcRB->status = 0x0008044DL; /* HDD_InvalidParm */ + zdev->online = 0; + pr_err("Cryptographic device %x failed and was set offline\n", + zdev->ap_dev->qid); + ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail", + zdev->ap_dev->qid, zdev->online); + return -EAGAIN; /* repeat the request on a different device. */ + } +} + +static int convert_response_ep11_xcrb(struct zcrypt_device *zdev, + struct ap_message *reply, struct ep11_urb *xcRB) +{ + struct type86_ep11_reply *msg = reply->message; + + /* Response type byte is the second byte in the response. 
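As the recurring comment notes, every convert_response_*() variant keys off byte [1] of the raw reply, the type code shared by the type 82/86/87/88 frame formats. A compact standalone sketch of the idiom, mirroring the response-code values from the zcrypt headers:

#include <stdio.h>
#include <stddef.h>

/* Mirrors the TYPE82/TYPE86/TYPE88 response codes from the headers. */
#define RSP_OK      0x86
#define RSP_ERR_OLD 0x82
#define RSP_ERR_NEW 0x88

static int dispatch_reply(const unsigned char *reply, size_t len)
{
	if (len < 2)
		return -1;
	switch (reply[1]) {	/* type code: second byte of the frame */
	case RSP_ERR_OLD:
	case RSP_ERR_NEW:
		return -2;	/* hand off to the error converter */
	case RSP_OK:
		return 0;	/* payload conversion happens here */
	default:
		return -3;	/* unknown type: flag the device offline */
	}
}

int main(void)
{
	unsigned char good[] = { 0x00, RSP_OK };

	printf("rc=%d\n", dispatch_reply(good, sizeof(good)));
	return 0;
}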
*/ + switch (((unsigned char *)reply->message)[1]) { + case TYPE82_RSP_CODE: + case TYPE87_RSP_CODE: + return convert_error(zdev, reply); + case TYPE86_RSP_CODE: + if (msg->hdr.reply_code) + return convert_error(zdev, reply); + if (msg->cprbx.cprb_ver_id == 0x04) + return convert_type86_ep11_xcrb(zdev, reply, xcRB); + /* Fall through, no break, incorrect cprb version is an unknown resp.*/ + default: /* Unknown response type, this should NEVER EVER happen */ + zdev->online = 0; + pr_err("Cryptographic device %x failed and was set offline\n", + zdev->ap_dev->qid); + ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail", + zdev->ap_dev->qid, zdev->online); + return -EAGAIN; /* repeat the request on a different device. */ + } +} + +static int convert_response_rng(struct zcrypt_device *zdev, + struct ap_message *reply, + char *data) +{ + struct type86x_reply *msg = reply->message; + + switch (msg->hdr.type) { + case TYPE82_RSP_CODE: + case TYPE88_RSP_CODE: + return -EINVAL; + case TYPE86_RSP_CODE: + if (msg->hdr.reply_code) + return -EINVAL; + if (msg->cprbx.cprb_ver_id == 0x02) + return convert_type86_rng(zdev, reply, data); + /* Fall through, no break, incorrect cprb version is an unknown + * response */ + default: /* Unknown response type, this should NEVER EVER happen */ + zdev->online = 0; + pr_err("Cryptographic device %x failed and was set offline\n", + zdev->ap_dev->qid); + ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail", + zdev->ap_dev->qid, zdev->online); + return -EAGAIN; /* repeat the request on a different device. */ + } +} + +/** + * This function is called from the AP bus code after a crypto request + * "msg" has finished with the reply message "reply". + * It is called from tasklet context. + * @ap_dev: pointer to the AP device + * @msg: pointer to the AP message + * @reply: pointer to the AP reply message + */ +static void zcrypt_msgtype6_receive(struct ap_device *ap_dev, + struct ap_message *msg, + struct ap_message *reply) +{ + static struct error_hdr error_reply = { + .type = TYPE82_RSP_CODE, + .reply_code = REP82_ERROR_MACHINE_FAILURE, + }; + struct response_type *resp_type = + (struct response_type *) msg->private; + struct type86x_reply *t86r; + int length; + + /* Copy the reply message to the request message buffer. */ + if (IS_ERR(reply)) { + memcpy(msg->message, &error_reply, sizeof(error_reply)); + goto out; + } + t86r = reply->message; + if (t86r->hdr.type == TYPE86_RSP_CODE && + t86r->cprbx.cprb_ver_id == 0x02) { + switch (resp_type->type) { + case PCIXCC_RESPONSE_TYPE_ICA: + length = sizeof(struct type86x_reply) + + t86r->length - 2; + length = min(PCIXCC_MAX_ICA_RESPONSE_SIZE, length); + memcpy(msg->message, reply->message, length); + break; + case PCIXCC_RESPONSE_TYPE_XCRB: + length = t86r->fmt2.offset2 + t86r->fmt2.count2; + length = min(MSGTYPE06_MAX_MSG_SIZE, length); + memcpy(msg->message, reply->message, length); + break; + default: + memcpy(msg->message, &error_reply, + sizeof(error_reply)); + } + } else + memcpy(msg->message, reply->message, sizeof(error_reply)); +out: + complete(&(resp_type->work)); +} + +/** + * This function is called from the AP bus code after a crypto request + * "msg" has finished with the reply message "reply". + * It is called from tasklet context. 
+ * @ap_dev: pointer to the AP device + * @msg: pointer to the AP message + * @reply: pointer to the AP reply message + */ +static void zcrypt_msgtype6_receive_ep11(struct ap_device *ap_dev, + struct ap_message *msg, + struct ap_message *reply) +{ + static struct error_hdr error_reply = { + .type = TYPE82_RSP_CODE, + .reply_code = REP82_ERROR_MACHINE_FAILURE, + }; + struct response_type *resp_type = + (struct response_type *)msg->private; + struct type86_ep11_reply *t86r; + int length; + + /* Copy the reply message to the request message buffer. */ + if (IS_ERR(reply)) { + memcpy(msg->message, &error_reply, sizeof(error_reply)); + goto out; + } + t86r = reply->message; + if (t86r->hdr.type == TYPE86_RSP_CODE && + t86r->cprbx.cprb_ver_id == 0x04) { + switch (resp_type->type) { + case PCIXCC_RESPONSE_TYPE_EP11: + length = t86r->fmt2.offset1 + t86r->fmt2.count1; + length = min(MSGTYPE06_MAX_MSG_SIZE, length); + memcpy(msg->message, reply->message, length); + break; + default: + memcpy(msg->message, &error_reply, sizeof(error_reply)); + } + } else { + memcpy(msg->message, reply->message, sizeof(error_reply)); + } +out: + complete(&(resp_type->work)); +} + +static atomic_t zcrypt_step = ATOMIC_INIT(0); + +/** + * The request distributor calls this function if it picked the PCIXCC/CEX2C + * device to handle a modexpo request. + * @zdev: pointer to zcrypt_device structure that identifies the + * PCIXCC/CEX2C device to the request distributor + * @mex: pointer to the modexpo request buffer + */ +static long zcrypt_msgtype6_modexpo(struct zcrypt_device *zdev, + struct ica_rsa_modexpo *mex) +{ + struct ap_message ap_msg; + struct response_type resp_type = { + .type = PCIXCC_RESPONSE_TYPE_ICA, + }; + int rc; + + ap_init_message(&ap_msg); + ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL); + if (!ap_msg.message) + return -ENOMEM; + ap_msg.receive = zcrypt_msgtype6_receive; + ap_msg.psmid = (((unsigned long long) current->pid) << 32) + + atomic_inc_return(&zcrypt_step); + ap_msg.private = &resp_type; + rc = ICAMEX_msg_to_type6MEX_msgX(zdev, &ap_msg, mex); + if (rc) + goto out_free; + init_completion(&resp_type.work); + ap_queue_message(zdev->ap_dev, &ap_msg); + rc = wait_for_completion_interruptible(&resp_type.work); + if (rc == 0) + rc = convert_response_ica(zdev, &ap_msg, mex->outputdata, + mex->outputdatalength); + else + /* Signal pending. */ + ap_cancel_message(zdev->ap_dev, &ap_msg); +out_free: + free_page((unsigned long) ap_msg.message); + return rc; +} + +/** + * The request distributor calls this function if it picked the PCIXCC/CEX2C + * device to handle a modexpo_crt request. 
+ * @zdev: pointer to zcrypt_device structure that identifies the + * PCIXCC/CEX2C device to the request distributor + * @crt: pointer to the modexpoc_crt request buffer + */ +static long zcrypt_msgtype6_modexpo_crt(struct zcrypt_device *zdev, + struct ica_rsa_modexpo_crt *crt) +{ + struct ap_message ap_msg; + struct response_type resp_type = { + .type = PCIXCC_RESPONSE_TYPE_ICA, + }; + int rc; + + ap_init_message(&ap_msg); + ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL); + if (!ap_msg.message) + return -ENOMEM; + ap_msg.receive = zcrypt_msgtype6_receive; + ap_msg.psmid = (((unsigned long long) current->pid) << 32) + + atomic_inc_return(&zcrypt_step); + ap_msg.private = &resp_type; + rc = ICACRT_msg_to_type6CRT_msgX(zdev, &ap_msg, crt); + if (rc) + goto out_free; + init_completion(&resp_type.work); + ap_queue_message(zdev->ap_dev, &ap_msg); + rc = wait_for_completion_interruptible(&resp_type.work); + if (rc == 0) + rc = convert_response_ica(zdev, &ap_msg, crt->outputdata, + crt->outputdatalength); + else + /* Signal pending. */ + ap_cancel_message(zdev->ap_dev, &ap_msg); +out_free: + free_page((unsigned long) ap_msg.message); + return rc; +} + +/** + * The request distributor calls this function if it picked the PCIXCC/CEX2C + * device to handle a send_cprb request. + * @zdev: pointer to zcrypt_device structure that identifies the + * PCIXCC/CEX2C device to the request distributor + * @xcRB: pointer to the send_cprb request buffer + */ +static long zcrypt_msgtype6_send_cprb(struct zcrypt_device *zdev, + struct ica_xcRB *xcRB) +{ + struct ap_message ap_msg; + struct response_type resp_type = { + .type = PCIXCC_RESPONSE_TYPE_XCRB, + }; + int rc; + + ap_init_message(&ap_msg); + ap_msg.message = kmalloc(MSGTYPE06_MAX_MSG_SIZE, GFP_KERNEL); + if (!ap_msg.message) + return -ENOMEM; + ap_msg.receive = zcrypt_msgtype6_receive; + ap_msg.psmid = (((unsigned long long) current->pid) << 32) + + atomic_inc_return(&zcrypt_step); + ap_msg.private = &resp_type; + rc = XCRB_msg_to_type6CPRB_msgX(zdev, &ap_msg, xcRB); + if (rc) + goto out_free; + init_completion(&resp_type.work); + ap_queue_message(zdev->ap_dev, &ap_msg); + rc = wait_for_completion_interruptible(&resp_type.work); + if (rc == 0) + rc = convert_response_xcrb(zdev, &ap_msg, xcRB); + else + /* Signal pending. */ + ap_cancel_message(zdev->ap_dev, &ap_msg); +out_free: + kzfree(ap_msg.message); + return rc; +} + +/** + * The request distributor calls this function if it picked the CEX4P + * device to handle a send_ep11_cprb request. + * @zdev: pointer to zcrypt_device structure that identifies the + * CEX4P device to the request distributor + * @xcRB: pointer to the ep11 user request block + */ +static long zcrypt_msgtype6_send_ep11_cprb(struct zcrypt_device *zdev, + struct ep11_urb *xcrb) +{ + struct ap_message ap_msg; + struct response_type resp_type = { + .type = PCIXCC_RESPONSE_TYPE_EP11, + }; + int rc; + + ap_init_message(&ap_msg); + ap_msg.message = kmalloc(MSGTYPE06_MAX_MSG_SIZE, GFP_KERNEL); + if (!ap_msg.message) + return -ENOMEM; + ap_msg.receive = zcrypt_msgtype6_receive_ep11; + ap_msg.psmid = (((unsigned long long) current->pid) << 32) + + atomic_inc_return(&zcrypt_step); + ap_msg.private = &resp_type; + rc = xcrb_msg_to_type6_ep11cprb_msgx(zdev, &ap_msg, xcrb); + if (rc) + goto out_free; + init_completion(&resp_type.work); + ap_queue_message(zdev->ap_dev, &ap_msg); + rc = wait_for_completion_interruptible(&resp_type.work); + if (rc == 0) + rc = convert_response_ep11_xcrb(zdev, &ap_msg, xcrb); + else /* Signal pending. 
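One detail worth noting in the CPRB paths above: the request buffer is released with kzfree() rather than kfree(), since the message may still contain key material and should be zeroed before it goes back to the allocator. A userspace sketch of the same idea (a hardened implementation would use explicit_bzero() or equivalent so the compiler cannot elide the wipe):

#include <stdlib.h>
#include <string.h>

/* Illustrative zeroizing free; kzfree() is the in-kernel equivalent. */
static void zfree(void *p, size_t len)
{
	if (!p)
		return;
	memset(p, 0, len);	/* scrub key material before freeing */
	free(p);
}

int main(void)
{
	char *msg = malloc(64);

	if (!msg)
		return 1;
	/* ... build and send a request carrying secrets ... */
	zfree(msg, 64);
	return 0;
}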
*/ + ap_cancel_message(zdev->ap_dev, &ap_msg); + +out_free: + kzfree(ap_msg.message); + return rc; +} + +/** + * The request distributor calls this function if it picked the PCIXCC/CEX2C + * device to generate random data. + * @zdev: pointer to zcrypt_device structure that identifies the + * PCIXCC/CEX2C device to the request distributor + * @buffer: pointer to a memory page to return random data + */ + +static long zcrypt_msgtype6_rng(struct zcrypt_device *zdev, + char *buffer) +{ + struct ap_message ap_msg; + struct response_type resp_type = { + .type = PCIXCC_RESPONSE_TYPE_XCRB, + }; + int rc; + + ap_init_message(&ap_msg); + ap_msg.message = kmalloc(MSGTYPE06_MAX_MSG_SIZE, GFP_KERNEL); + if (!ap_msg.message) + return -ENOMEM; + ap_msg.receive = zcrypt_msgtype6_receive; + ap_msg.psmid = (((unsigned long long) current->pid) << 32) + + atomic_inc_return(&zcrypt_step); + ap_msg.private = &resp_type; + rng_type6CPRB_msgX(zdev->ap_dev, &ap_msg, ZCRYPT_RNG_BUFFER_SIZE); + init_completion(&resp_type.work); + ap_queue_message(zdev->ap_dev, &ap_msg); + rc = wait_for_completion_interruptible(&resp_type.work); + if (rc == 0) + rc = convert_response_rng(zdev, &ap_msg, buffer); + else + /* Signal pending. */ + ap_cancel_message(zdev->ap_dev, &ap_msg); + kfree(ap_msg.message); + return rc; +} + +/** + * The crypto operations for a PCIXCC/CEX2C card. + */ +static struct zcrypt_ops zcrypt_msgtype6_norng_ops = { + .owner = THIS_MODULE, + .variant = MSGTYPE06_VARIANT_NORNG, + .rsa_modexpo = zcrypt_msgtype6_modexpo, + .rsa_modexpo_crt = zcrypt_msgtype6_modexpo_crt, + .send_cprb = zcrypt_msgtype6_send_cprb, +}; + +static struct zcrypt_ops zcrypt_msgtype6_ops = { + .owner = THIS_MODULE, + .variant = MSGTYPE06_VARIANT_DEFAULT, + .rsa_modexpo = zcrypt_msgtype6_modexpo, + .rsa_modexpo_crt = zcrypt_msgtype6_modexpo_crt, + .send_cprb = zcrypt_msgtype6_send_cprb, + .rng = zcrypt_msgtype6_rng, +}; + +static struct zcrypt_ops zcrypt_msgtype6_ep11_ops = { + .owner = THIS_MODULE, + .variant = MSGTYPE06_VARIANT_EP11, + .rsa_modexpo = NULL, + .rsa_modexpo_crt = NULL, + .send_ep11_cprb = zcrypt_msgtype6_send_ep11_cprb, +}; + +int __init zcrypt_msgtype6_init(void) +{ + zcrypt_msgtype_register(&zcrypt_msgtype6_norng_ops); + zcrypt_msgtype_register(&zcrypt_msgtype6_ops); + zcrypt_msgtype_register(&zcrypt_msgtype6_ep11_ops); + return 0; +} + +void __exit zcrypt_msgtype6_exit(void) +{ + zcrypt_msgtype_unregister(&zcrypt_msgtype6_norng_ops); + zcrypt_msgtype_unregister(&zcrypt_msgtype6_ops); + zcrypt_msgtype_unregister(&zcrypt_msgtype6_ep11_ops); +} + +module_init(zcrypt_msgtype6_init); +module_exit(zcrypt_msgtype6_exit); diff --git a/drivers/s390/crypto/zcrypt_msgtype6.h b/drivers/s390/crypto/zcrypt_msgtype6.h new file mode 100644 index 00000000000..20724757062 --- /dev/null +++ b/drivers/s390/crypto/zcrypt_msgtype6.h @@ -0,0 +1,171 @@ +/* + * zcrypt 2.1.0 + * + * Copyright IBM Corp. 2001, 2012 + * Author(s): Robert Burroughs + * Eric Rossman (edrossma@us.ibm.com) + * + * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com) + * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com> + * MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#ifndef _ZCRYPT_MSGTYPE6_H_ +#define _ZCRYPT_MSGTYPE6_H_ + +#include <asm/zcrypt.h> + +#define MSGTYPE06_NAME "zcrypt_msgtype6" +#define MSGTYPE06_VARIANT_DEFAULT 0 +#define MSGTYPE06_VARIANT_NORNG 1 +#define MSGTYPE06_VARIANT_EP11 2 + +#define MSGTYPE06_MAX_MSG_SIZE (12*1024) + +/** + * The type 6 message family is associated with PCICC or PCIXCC cards. + * + * It contains a message header followed by a CPRB, both of which + * are described below. + * + * Note that all reserved fields must be zeroes. + */ +struct type6_hdr { + unsigned char reserved1; /* 0x00 */ + unsigned char type; /* 0x06 */ + unsigned char reserved2[2]; /* 0x0000 */ + unsigned char right[4]; /* 0x00000000 */ + unsigned char reserved3[2]; /* 0x0000 */ + unsigned char reserved4[2]; /* 0x0000 */ + unsigned char apfs[4]; /* 0x00000000 */ + unsigned int offset1; /* 0x00000058 (offset to CPRB) */ + unsigned int offset2; /* 0x00000000 */ + unsigned int offset3; /* 0x00000000 */ + unsigned int offset4; /* 0x00000000 */ + unsigned char agent_id[16]; /* PCICC: */ + /* 0x0100 */ + /* 0x4343412d4150504c202020 */ + /* 0x010101 */ + /* PCIXCC: */ + /* 0x4341000000000000 */ + /* 0x0000000000000000 */ + unsigned char rqid[2]; /* rqid. internal to 603 */ + unsigned char reserved5[2]; /* 0x0000 */ + unsigned char function_code[2]; /* for PKD, 0x5044 (ascii 'PD') */ + unsigned char reserved6[2]; /* 0x0000 */ + unsigned int ToCardLen1; /* (request CPRB len + 3) & -4 */ + unsigned int ToCardLen2; /* db len 0x00000000 for PKD */ + unsigned int ToCardLen3; /* 0x00000000 */ + unsigned int ToCardLen4; /* 0x00000000 */ + unsigned int FromCardLen1; /* response buffer length */ + unsigned int FromCardLen2; /* db len 0x00000000 for PKD */ + unsigned int FromCardLen3; /* 0x00000000 */ + unsigned int FromCardLen4; /* 0x00000000 */ +} __packed; + +/** + * The type 86 message family is associated with PCICC and PCIXCC cards. + * + * It contains a message header followed by a CPRB. The CPRB is + * the same as the request CPRB, which is described above. + * + * If format is 1, an error condition exists and no data beyond + * the 8-byte message header is of interest. + * + * The non-error message is shown below. + * + * Note that all reserved fields must be zeroes. 
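type6_hdr is consumed by the card at fixed byte offsets (offset1 is hard-wired to 0x58, the size of the header itself), so the structure is declared __packed and any compiler-inserted padding would corrupt the wire format. A sketch of pinning such layout invariants down at build time, using a simplified hypothetical header rather than the real one:

#include <stddef.h>
#include <stdio.h>

/* Simplified stand-in for a fixed-layout wire header. */
struct wire_hdr {
	unsigned char reserved1;
	unsigned char type;		/* must sit at byte 1 */
	unsigned char reserved2[2];
	unsigned int  offset1;		/* must sit at byte 4 */
} __attribute__((packed));

/* Fail the build, not the card, if the layout ever drifts. */
_Static_assert(offsetof(struct wire_hdr, type) == 1, "type byte moved");
_Static_assert(offsetof(struct wire_hdr, offset1) == 4, "offset1 moved");
_Static_assert(sizeof(struct wire_hdr) == 8, "header size changed");

int main(void)
{
	printf("wire_hdr is %zu bytes\n", sizeof(struct wire_hdr));
	return 0;
}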
+ */ +struct type86_hdr { + unsigned char reserved1; /* 0x00 */ + unsigned char type; /* 0x86 */ + unsigned char format; /* 0x01 (error) or 0x02 (ok) */ + unsigned char reserved2; /* 0x00 */ + unsigned char reply_code; /* reply code (see above) */ + unsigned char reserved3[3]; /* 0x000000 */ +} __packed; + +#define TYPE86_RSP_CODE 0x86 +#define TYPE87_RSP_CODE 0x87 +#define TYPE86_FMT2 0x02 + +struct type86_fmt2_ext { + unsigned char reserved[4]; /* 0x00000000 */ + unsigned char apfs[4]; /* final status */ + unsigned int count1; /* length of CPRB + parameters */ + unsigned int offset1; /* offset to CPRB */ + unsigned int count2; /* 0x00000000 */ + unsigned int offset2; /* db offset 0x00000000 for PKD */ + unsigned int count3; /* 0x00000000 */ + unsigned int offset3; /* 0x00000000 */ + unsigned int count4; /* 0x00000000 */ + unsigned int offset4; /* 0x00000000 */ +} __packed; + +/** + * Prepare a type6 CPRB message for random number generation + * + * @ap_dev: AP device pointer + * @ap_msg: pointer to AP message + */ +static inline void rng_type6CPRB_msgX(struct ap_device *ap_dev, + struct ap_message *ap_msg, + unsigned random_number_length) +{ + struct { + struct type6_hdr hdr; + struct CPRBX cprbx; + char function_code[2]; + short int rule_length; + char rule[8]; + short int verb_length; + short int key_length; + } __packed * msg = ap_msg->message; + static struct type6_hdr static_type6_hdrX = { + .type = 0x06, + .offset1 = 0x00000058, + .agent_id = {'C', 'A'}, + .function_code = {'R', 'L'}, + .ToCardLen1 = sizeof(*msg) - sizeof(msg->hdr), + .FromCardLen1 = sizeof(*msg) - sizeof(msg->hdr), + }; + static struct CPRBX local_cprbx = { + .cprb_len = 0x00dc, + .cprb_ver_id = 0x02, + .func_id = {0x54, 0x32}, + .req_parml = sizeof(*msg) - sizeof(msg->hdr) - + sizeof(msg->cprbx), + .rpl_msgbl = sizeof(*msg) - sizeof(msg->hdr), + }; + + msg->hdr = static_type6_hdrX; + msg->hdr.FromCardLen2 = random_number_length, + msg->cprbx = local_cprbx; + msg->cprbx.rpl_datal = random_number_length, + msg->cprbx.domain = AP_QID_QUEUE(ap_dev->qid); + memcpy(msg->function_code, msg->hdr.function_code, 0x02); + msg->rule_length = 0x0a; + memcpy(msg->rule, "RANDOM ", 8); + msg->verb_length = 0x02; + msg->key_length = 0x02; + ap_msg->length = sizeof(*msg); +} + +int zcrypt_msgtype6_init(void); +void zcrypt_msgtype6_exit(void); + +#endif /* _ZCRYPT_MSGTYPE6_H_ */ diff --git a/drivers/s390/crypto/zcrypt_pcica.c b/drivers/s390/crypto/zcrypt_pcica.c index e78df3671ca..7a743f4c646 100644 --- a/drivers/s390/crypto/zcrypt_pcica.c +++ b/drivers/s390/crypto/zcrypt_pcica.c @@ -1,9 +1,7 @@ /* - * linux/drivers/s390/crypto/zcrypt_pcica.c - * * zcrypt 2.1.0 * - * Copyright (C) 2001, 2006 IBM Corporation + * Copyright IBM Corp. 2001, 2006 * Author(s): Robert Burroughs * Eric Rossman (edrossma@us.ibm.com) * @@ -26,10 +24,14 @@ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ +#define KMSG_COMPONENT "zcrypt" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + #include <linux/module.h> +#include <linux/slab.h> #include <linux/init.h> #include <linux/err.h> -#include <asm/atomic.h> +#include <linux/atomic.h> #include <asm/uaccess.h> #include "ap_bus.h" @@ -52,13 +54,11 @@ static struct ap_device_id zcrypt_pcica_ids[] = { { /* end of list */ }, }; -#ifndef CONFIG_ZCRYPT_MONOLITHIC MODULE_DEVICE_TABLE(ap, zcrypt_pcica_ids); MODULE_AUTHOR("IBM Corporation"); MODULE_DESCRIPTION("PCICA Cryptographic Coprocessor device driver, " - "Copyright 2001, 2006 IBM Corporation"); + "Copyright IBM Corp. 
2001, 2006"); MODULE_LICENSE("GPL"); -#endif static int zcrypt_pcica_probe(struct ap_device *ap_dev); static void zcrypt_pcica_remove(struct ap_device *ap_dev); @@ -68,7 +68,6 @@ static void zcrypt_pcica_receive(struct ap_device *, struct ap_message *, static struct ap_driver zcrypt_pcica_driver = { .probe = zcrypt_pcica_probe, .remove = zcrypt_pcica_remove, - .receive = zcrypt_pcica_receive, .ids = zcrypt_pcica_ids, .request_timeout = PCICA_CLEANUP_TIME, }; @@ -203,6 +202,10 @@ static int convert_type84(struct zcrypt_device *zdev, if (t84h->len < sizeof(*t84h) + outputdatalength) { /* The result is too short, the PCICA card may not do that.. */ zdev->online = 0; + pr_err("Cryptographic device %x failed and was set offline\n", + zdev->ap_dev->qid); + ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d", + zdev->ap_dev->qid, zdev->online, t84h->code); return -EAGAIN; /* repeat the request on a different device. */ } BUG_ON(t84h->len > PCICA_MAX_RESPONSE_SIZE); @@ -227,6 +230,10 @@ static int convert_response(struct zcrypt_device *zdev, outputdata, outputdatalength); default: /* Unknown response type, this should NEVER EVER happen */ zdev->online = 0; + pr_err("Cryptographic device %x failed and was set offline\n", + zdev->ap_dev->qid); + ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail", + zdev->ap_dev->qid, zdev->online); return -EAGAIN; /* repeat the request on a different device. */ } } @@ -285,6 +292,7 @@ static long zcrypt_pcica_modexpo(struct zcrypt_device *zdev, ap_msg.message = kmalloc(PCICA_MAX_MESSAGE_SIZE, GFP_KERNEL); if (!ap_msg.message) return -ENOMEM; + ap_msg.receive = zcrypt_pcica_receive; ap_msg.psmid = (((unsigned long long) current->pid) << 32) + atomic_inc_return(&zcrypt_step); ap_msg.private = &work; @@ -323,6 +331,7 @@ static long zcrypt_pcica_modexpo_crt(struct zcrypt_device *zdev, ap_msg.message = kmalloc(PCICA_MAX_MESSAGE_SIZE, GFP_KERNEL); if (!ap_msg.message) return -ENOMEM; + ap_msg.receive = zcrypt_pcica_receive; ap_msg.psmid = (((unsigned long long) current->pid) << 32) + atomic_inc_return(&zcrypt_step); ap_msg.private = &work; @@ -372,6 +381,7 @@ static int zcrypt_pcica_probe(struct ap_device *ap_dev) zdev->min_mod_size = PCICA_MIN_MOD_SIZE; zdev->max_mod_size = PCICA_MAX_MOD_SIZE; zdev->speed_rating = PCICA_SPEED_RATING; + zdev->max_exp_bit_length = PCICA_MAX_MOD_SIZE; ap_dev->reply = &zdev->reply; ap_dev->private = zdev; rc = zcrypt_device_register(zdev); @@ -406,7 +416,5 @@ void zcrypt_pcica_exit(void) ap_driver_unregister(&zcrypt_pcica_driver); } -#ifndef CONFIG_ZCRYPT_MONOLITHIC module_init(zcrypt_pcica_init); module_exit(zcrypt_pcica_exit); -#endif diff --git a/drivers/s390/crypto/zcrypt_pcica.h b/drivers/s390/crypto/zcrypt_pcica.h index 3be11187f6d..9a59155cad5 100644 --- a/drivers/s390/crypto/zcrypt_pcica.h +++ b/drivers/s390/crypto/zcrypt_pcica.h @@ -1,9 +1,7 @@ /* - * linux/drivers/s390/crypto/zcrypt_pcica.h - * * zcrypt 2.1.0 * - * Copyright (C) 2001, 2006 IBM Corporation + * Copyright IBM Corp. 2001, 2006 * Author(s): Robert Burroughs * Eric Rossman (edrossma@us.ibm.com) * diff --git a/drivers/s390/crypto/zcrypt_pcicc.c b/drivers/s390/crypto/zcrypt_pcicc.c index 142f72a2ca5..4d14c04b746 100644 --- a/drivers/s390/crypto/zcrypt_pcicc.c +++ b/drivers/s390/crypto/zcrypt_pcicc.c @@ -1,9 +1,7 @@ /* - * linux/drivers/s390/crypto/zcrypt_pcicc.c - * * zcrypt 2.1.0 * - * Copyright (C) 2001, 2006 IBM Corporation + * Copyright IBM Corp. 
2001, 2006 * Author(s): Robert Burroughs * Eric Rossman (edrossma@us.ibm.com) * @@ -26,10 +24,14 @@ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ +#define KMSG_COMPONENT "zcrypt" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + #include <linux/module.h> #include <linux/init.h> +#include <linux/gfp.h> #include <linux/err.h> -#include <asm/atomic.h> +#include <linux/atomic.h> #include <asm/uaccess.h> #include "ap_bus.h" @@ -64,13 +66,11 @@ static struct ap_device_id zcrypt_pcicc_ids[] = { { /* end of list */ }, }; -#ifndef CONFIG_ZCRYPT_MONOLITHIC MODULE_DEVICE_TABLE(ap, zcrypt_pcicc_ids); MODULE_AUTHOR("IBM Corporation"); MODULE_DESCRIPTION("PCICC Cryptographic Coprocessor device driver, " - "Copyright 2001, 2006 IBM Corporation"); + "Copyright IBM Corp. 2001, 2006"); MODULE_LICENSE("GPL"); -#endif static int zcrypt_pcicc_probe(struct ap_device *ap_dev); static void zcrypt_pcicc_remove(struct ap_device *ap_dev); @@ -80,7 +80,6 @@ static void zcrypt_pcicc_receive(struct ap_device *, struct ap_message *, static struct ap_driver zcrypt_pcicc_driver = { .probe = zcrypt_pcicc_probe, .remove = zcrypt_pcicc_remove, - .receive = zcrypt_pcicc_receive, .ids = zcrypt_pcicc_ids, .request_timeout = PCICC_CLEANUP_TIME, }; @@ -376,6 +375,11 @@ static int convert_type86(struct zcrypt_device *zdev, if (service_rc == 8 && service_rs == 72) return -EINVAL; zdev->online = 0; + pr_err("Cryptographic device %x failed and was set offline\n", + zdev->ap_dev->qid); + ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d", + zdev->ap_dev->qid, zdev->online, + msg->hdr.reply_code); return -EAGAIN; /* repeat the request on a different device. */ } data = msg->text; @@ -429,6 +433,10 @@ static int convert_response(struct zcrypt_device *zdev, /* no break, incorrect cprb version is an unknown response */ default: /* Unknown response type, this should NEVER EVER happen */ zdev->online = 0; + pr_err("Cryptographic device %x failed and was set offline\n", + zdev->ap_dev->qid); + ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail", + zdev->ap_dev->qid, zdev->online); return -EAGAIN; /* repeat the request on a different device. 
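The KMSG_COMPONENT/pr_fmt() pair being added at the top of these files prefixes every pr_err() in the driver with "zcrypt: " without repeating the component name at each call site; the printk wrappers expand pr_fmt() around the format string. A reduced model of the mechanism (GNU ##__VA_ARGS__ assumed):

#include <stdio.h>

#define KMSG_COMPONENT "zcrypt"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

/* Toy stand-in for the kernel's pr_err(), which applies pr_fmt(). */
#define pr_err(fmt, ...) printf(pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
	/* Prints: "zcrypt: Cryptographic device 4 failed ..." */
	pr_err("Cryptographic device %x failed and was set offline\n", 4);
	return 0;
}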
*/ } } @@ -489,6 +497,7 @@ static long zcrypt_pcicc_modexpo(struct zcrypt_device *zdev, ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL); if (!ap_msg.message) return -ENOMEM; + ap_msg.receive = zcrypt_pcicc_receive; ap_msg.length = PAGE_SIZE; ap_msg.psmid = (((unsigned long long) current->pid) << 32) + atomic_inc_return(&zcrypt_step); @@ -528,6 +537,7 @@ static long zcrypt_pcicc_modexpo_crt(struct zcrypt_device *zdev, ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL); if (!ap_msg.message) return -ENOMEM; + ap_msg.receive = zcrypt_pcicc_receive; ap_msg.length = PAGE_SIZE; ap_msg.psmid = (((unsigned long long) current->pid) << 32) + atomic_inc_return(&zcrypt_step); @@ -578,6 +588,7 @@ static int zcrypt_pcicc_probe(struct ap_device *ap_dev) zdev->min_mod_size = PCICC_MIN_MOD_SIZE; zdev->max_mod_size = PCICC_MAX_MOD_SIZE; zdev->speed_rating = PCICC_SPEED_RATING; + zdev->max_exp_bit_length = PCICC_MAX_MOD_SIZE; ap_dev->reply = &zdev->reply; ap_dev->private = zdev; rc = zcrypt_device_register(zdev); @@ -612,7 +623,5 @@ void zcrypt_pcicc_exit(void) ap_driver_unregister(&zcrypt_pcicc_driver); } -#ifndef CONFIG_ZCRYPT_MONOLITHIC module_init(zcrypt_pcicc_init); module_exit(zcrypt_pcicc_exit); -#endif diff --git a/drivers/s390/crypto/zcrypt_pcicc.h b/drivers/s390/crypto/zcrypt_pcicc.h index 6d4454846c8..7fe27e15075 100644 --- a/drivers/s390/crypto/zcrypt_pcicc.h +++ b/drivers/s390/crypto/zcrypt_pcicc.h @@ -1,9 +1,7 @@ /* - * linux/drivers/s390/crypto/zcrypt_pcicc.h - * * zcrypt 2.1.0 * - * Copyright (C) 2001, 2006 IBM Corporation + * Copyright IBM Corp. 2001, 2006 * Author(s): Robert Burroughs * Eric Rossman (edrossma@us.ibm.com) * diff --git a/drivers/s390/crypto/zcrypt_pcixcc.c b/drivers/s390/crypto/zcrypt_pcixcc.c index 68f3e6204db..899ffa19f5e 100644 --- a/drivers/s390/crypto/zcrypt_pcixcc.c +++ b/drivers/s390/crypto/zcrypt_pcixcc.c @@ -1,15 +1,14 @@ /* - * linux/drivers/s390/crypto/zcrypt_pcixcc.c - * * zcrypt 2.1.0 * - * Copyright (C) 2001, 2006 IBM Corporation + * Copyright IBM Corp. 
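All of these error paths mark the device offline and return -EAGAIN so that the request distributor can resubmit the identical request on another healthy device. A hedged sketch of what such a retry loop might look like; the function names and device selection here are hypothetical, not the actual zcrypt_api internals:

#include <stdio.h>
#include <errno.h>

#define MAX_RETRIES 3

/* Hypothetical device-side call: fails with -EAGAIN on the first try. */
static int submit_to_device(int dev, const char *req)
{
	(void)req;
	return dev == 0 ? -EAGAIN : 0;
}

static int distribute_request(const char *req)
{
	int dev, rc = -ENODEV;

	for (dev = 0; dev < MAX_RETRIES; dev++) {
		rc = submit_to_device(dev, req);
		if (rc != -EAGAIN)
			break;	/* success or a hard error */
		/* -EAGAIN: device went offline, pick the next one */
	}
	return rc;
}

int main(void)
{
	printf("rc=%d\n", distribute_request("modexpo"));
	return 0;
}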
2001, 2012 * Author(s): Robert Burroughs * Eric Rossman (edrossma@us.ibm.com) * * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com) * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com> * Ralph Wuerthner <rwuerthn@de.ibm.com> + * MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -30,13 +29,14 @@ #include <linux/init.h> #include <linux/err.h> #include <linux/delay.h> -#include <asm/atomic.h> +#include <linux/slab.h> +#include <linux/atomic.h> #include <asm/uaccess.h> #include "ap_bus.h" #include "zcrypt_api.h" #include "zcrypt_error.h" -#include "zcrypt_pcicc.h" +#include "zcrypt_msgtype6.h" #include "zcrypt_pcixcc.h" #include "zcrypt_cca_key.h" @@ -44,22 +44,17 @@ #define PCIXCC_MIN_MOD_SIZE_OLD 64 /* 512 bits */ #define PCIXCC_MAX_MOD_SIZE 256 /* 2048 bits */ #define CEX3C_MIN_MOD_SIZE PCIXCC_MIN_MOD_SIZE -#define CEX3C_MAX_MOD_SIZE PCIXCC_MAX_MOD_SIZE +#define CEX3C_MAX_MOD_SIZE 512 /* 4096 bits */ #define PCIXCC_MCL2_SPEED_RATING 7870 #define PCIXCC_MCL3_SPEED_RATING 7870 #define CEX2C_SPEED_RATING 7000 -#define CEX3C_SPEED_RATING 6500 /* FIXME: needs finetuning */ +#define CEX3C_SPEED_RATING 6500 #define PCIXCC_MAX_ICA_MESSAGE_SIZE 0x77c /* max size type6 v2 crt message */ #define PCIXCC_MAX_ICA_RESPONSE_SIZE 0x77c /* max size type86 v2 reply */ #define PCIXCC_MAX_XCRB_MESSAGE_SIZE (12*1024) -#define PCIXCC_MAX_XCRB_RESPONSE_SIZE PCIXCC_MAX_XCRB_MESSAGE_SIZE -#define PCIXCC_MAX_XCRB_DATA_SIZE (11*1024) -#define PCIXCC_MAX_XCRB_REPLY_SIZE (5*1024) - -#define PCIXCC_MAX_RESPONSE_SIZE PCIXCC_MAX_XCRB_RESPONSE_SIZE #define PCIXCC_CLEANUP_TIME (15*HZ) @@ -79,782 +74,23 @@ static struct ap_device_id zcrypt_pcixcc_ids[] = { { /* end of list */ }, }; -#ifndef CONFIG_ZCRYPT_MONOLITHIC MODULE_DEVICE_TABLE(ap, zcrypt_pcixcc_ids); MODULE_AUTHOR("IBM Corporation"); -MODULE_DESCRIPTION("PCIXCC Cryptographic Coprocessor device driver, " - "Copyright 2001, 2006 IBM Corporation"); +MODULE_DESCRIPTION("PCIXCC Cryptographic Coprocessor device driver, " \ + "Copyright IBM Corp. 2001, 2012"); MODULE_LICENSE("GPL"); -#endif static int zcrypt_pcixcc_probe(struct ap_device *ap_dev); static void zcrypt_pcixcc_remove(struct ap_device *ap_dev); -static void zcrypt_pcixcc_receive(struct ap_device *, struct ap_message *, - struct ap_message *); static struct ap_driver zcrypt_pcixcc_driver = { .probe = zcrypt_pcixcc_probe, .remove = zcrypt_pcixcc_remove, - .receive = zcrypt_pcixcc_receive, .ids = zcrypt_pcixcc_ids, .request_timeout = PCIXCC_CLEANUP_TIME, }; /** - * The following is used to initialize the CPRBX passed to the PCIXCC/CEX2C - * card in a type6 message. The 3 fields that must be filled in at execution - * time are req_parml, rpl_parml and usage_domain. - * Everything about this interface is ascii/big-endian, since the - * device does *not* have 'Intel inside'. - * - * The CPRBX is followed immediately by the parm block. - * The parm block contains: - * - function code ('PD' 0x5044 or 'PK' 0x504B) - * - rule block (one of:) - * + 0x000A 'PKCS-1.2' (MCL2 'PD') - * + 0x000A 'ZERO-PAD' (MCL2 'PK') - * + 0x000A 'ZERO-PAD' (MCL3 'PD' or CEX2C 'PD') - * + 0x000A 'MRP ' (MCL3 'PK' or CEX2C 'PK') - * - VUD block - */ -static struct CPRBX static_cprbx = { - .cprb_len = 0x00DC, - .cprb_ver_id = 0x02, - .func_id = {0x54,0x32}, -}; - -/** - * Convert a ICAMEX message to a type6 MEX message. 
- * - * @zdev: crypto device pointer - * @ap_msg: pointer to AP message - * @mex: pointer to user input data - * - * Returns 0 on success or -EFAULT. - */ -static int ICAMEX_msg_to_type6MEX_msgX(struct zcrypt_device *zdev, - struct ap_message *ap_msg, - struct ica_rsa_modexpo *mex) -{ - static struct type6_hdr static_type6_hdrX = { - .type = 0x06, - .offset1 = 0x00000058, - .agent_id = {'C','A',}, - .function_code = {'P','K'}, - }; - static struct function_and_rules_block static_pke_fnr = { - .function_code = {'P','K'}, - .ulen = 10, - .only_rule = {'M','R','P',' ',' ',' ',' ',' '} - }; - static struct function_and_rules_block static_pke_fnr_MCL2 = { - .function_code = {'P','K'}, - .ulen = 10, - .only_rule = {'Z','E','R','O','-','P','A','D'} - }; - struct { - struct type6_hdr hdr; - struct CPRBX cprbx; - struct function_and_rules_block fr; - unsigned short length; - char text[0]; - } __attribute__((packed)) *msg = ap_msg->message; - int size; - - /* VUD.ciphertext */ - msg->length = mex->inputdatalength + 2; - if (copy_from_user(msg->text, mex->inputdata, mex->inputdatalength)) - return -EFAULT; - - /* Set up key which is located after the variable length text. */ - size = zcrypt_type6_mex_key_en(mex, msg->text+mex->inputdatalength, 1); - if (size < 0) - return size; - size += sizeof(*msg) + mex->inputdatalength; - - /* message header, cprbx and f&r */ - msg->hdr = static_type6_hdrX; - msg->hdr.ToCardLen1 = size - sizeof(msg->hdr); - msg->hdr.FromCardLen1 = PCIXCC_MAX_ICA_RESPONSE_SIZE - sizeof(msg->hdr); - - msg->cprbx = static_cprbx; - msg->cprbx.domain = AP_QID_QUEUE(zdev->ap_dev->qid); - msg->cprbx.rpl_msgbl = msg->hdr.FromCardLen1; - - msg->fr = (zdev->user_space_type == ZCRYPT_PCIXCC_MCL2) ? - static_pke_fnr_MCL2 : static_pke_fnr; - - msg->cprbx.req_parml = size - sizeof(msg->hdr) - sizeof(msg->cprbx); - - ap_msg->length = size; - return 0; -} - -/** - * Convert a ICACRT message to a type6 CRT message. - * - * @zdev: crypto device pointer - * @ap_msg: pointer to AP message - * @crt: pointer to user input data - * - * Returns 0 on success or -EFAULT. - */ -static int ICACRT_msg_to_type6CRT_msgX(struct zcrypt_device *zdev, - struct ap_message *ap_msg, - struct ica_rsa_modexpo_crt *crt) -{ - static struct type6_hdr static_type6_hdrX = { - .type = 0x06, - .offset1 = 0x00000058, - .agent_id = {'C','A',}, - .function_code = {'P','D'}, - }; - static struct function_and_rules_block static_pkd_fnr = { - .function_code = {'P','D'}, - .ulen = 10, - .only_rule = {'Z','E','R','O','-','P','A','D'} - }; - - static struct function_and_rules_block static_pkd_fnr_MCL2 = { - .function_code = {'P','D'}, - .ulen = 10, - .only_rule = {'P','K','C','S','-','1','.','2'} - }; - struct { - struct type6_hdr hdr; - struct CPRBX cprbx; - struct function_and_rules_block fr; - unsigned short length; - char text[0]; - } __attribute__((packed)) *msg = ap_msg->message; - int size; - - /* VUD.ciphertext */ - msg->length = crt->inputdatalength + 2; - if (copy_from_user(msg->text, crt->inputdata, crt->inputdatalength)) - return -EFAULT; - - /* Set up key which is located after the variable length text. 
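
The converters deleted here (they continue below and now live in zcrypt_msgtype6.c) all build the same on-the-wire layout: a type6 header, an extended CPRB, a function-and-rules block, then a two-byte VUD length followed by the ciphertext and the key token. The length fields are derived backwards from the total size. A condensed sketch of that arithmetic, using the struct names from the deleted code; illustration only:

    struct type6_request {
            struct type6_hdr hdr;               /* parm block at offset 0x58 */
            struct CPRBX cprbx;                 /* 0xDC-byte extended CPRB */
            struct function_and_rules_block fr;
            unsigned short length;              /* VUD length, counts itself */
            char text[0];                       /* ciphertext, then the key */
    } __attribute__((packed));

    static int build_mex_sizes(struct type6_request *msg,
                               unsigned int inputdatalength, int key_size)
    {
            int size = sizeof(*msg) + inputdatalength + key_size;

            msg->length = inputdatalength + 2;  /* +2: the field itself */
            msg->hdr.ToCardLen1 = size - sizeof(msg->hdr);
            msg->cprbx.req_parml = size - sizeof(msg->hdr)
                                   - sizeof(msg->cprbx);
            return size;                        /* becomes ap_msg->length */
    }
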
*/ - size = zcrypt_type6_crt_key(crt, msg->text + crt->inputdatalength, 1); - if (size < 0) - return size; - size += sizeof(*msg) + crt->inputdatalength; /* total size of msg */ - - /* message header, cprbx and f&r */ - msg->hdr = static_type6_hdrX; - msg->hdr.ToCardLen1 = size - sizeof(msg->hdr); - msg->hdr.FromCardLen1 = PCIXCC_MAX_ICA_RESPONSE_SIZE - sizeof(msg->hdr); - - msg->cprbx = static_cprbx; - msg->cprbx.domain = AP_QID_QUEUE(zdev->ap_dev->qid); - msg->cprbx.req_parml = msg->cprbx.rpl_msgbl = - size - sizeof(msg->hdr) - sizeof(msg->cprbx); - - msg->fr = (zdev->user_space_type == ZCRYPT_PCIXCC_MCL2) ? - static_pkd_fnr_MCL2 : static_pkd_fnr; - - ap_msg->length = size; - return 0; -} - -/** - * Convert a XCRB message to a type6 CPRB message. - * - * @zdev: crypto device pointer - * @ap_msg: pointer to AP message - * @xcRB: pointer to user input data - * - * Returns 0 on success or -EFAULT. - */ -struct type86_fmt2_msg { - struct type86_hdr hdr; - struct type86_fmt2_ext fmt2; -} __attribute__((packed)); - -static int XCRB_msg_to_type6CPRB_msgX(struct zcrypt_device *zdev, - struct ap_message *ap_msg, - struct ica_xcRB *xcRB) -{ - static struct type6_hdr static_type6_hdrX = { - .type = 0x06, - .offset1 = 0x00000058, - }; - struct { - struct type6_hdr hdr; - struct CPRBX cprbx; - } __attribute__((packed)) *msg = ap_msg->message; - - int rcblen = CEIL4(xcRB->request_control_blk_length); - int replylen; - char *req_data = ap_msg->message + sizeof(struct type6_hdr) + rcblen; - char *function_code; - - /* length checks */ - ap_msg->length = sizeof(struct type6_hdr) + - CEIL4(xcRB->request_control_blk_length) + - xcRB->request_data_length; - if (ap_msg->length > PCIXCC_MAX_XCRB_MESSAGE_SIZE) - return -EFAULT; - if (CEIL4(xcRB->reply_control_blk_length) > PCIXCC_MAX_XCRB_REPLY_SIZE) - return -EFAULT; - if (CEIL4(xcRB->reply_data_length) > PCIXCC_MAX_XCRB_DATA_SIZE) - return -EFAULT; - replylen = CEIL4(xcRB->reply_control_blk_length) + - CEIL4(xcRB->reply_data_length) + - sizeof(struct type86_fmt2_msg); - if (replylen > PCIXCC_MAX_XCRB_RESPONSE_SIZE) { - xcRB->reply_control_blk_length = PCIXCC_MAX_XCRB_RESPONSE_SIZE - - (sizeof(struct type86_fmt2_msg) + - CEIL4(xcRB->reply_data_length)); - } - - /* prepare type6 header */ - msg->hdr = static_type6_hdrX; - memcpy(msg->hdr.agent_id , &(xcRB->agent_ID), sizeof(xcRB->agent_ID)); - msg->hdr.ToCardLen1 = xcRB->request_control_blk_length; - if (xcRB->request_data_length) { - msg->hdr.offset2 = msg->hdr.offset1 + rcblen; - msg->hdr.ToCardLen2 = xcRB->request_data_length; - } - msg->hdr.FromCardLen1 = xcRB->reply_control_blk_length; - msg->hdr.FromCardLen2 = xcRB->reply_data_length; - - /* prepare CPRB */ - if (copy_from_user(&(msg->cprbx), xcRB->request_control_blk_addr, - xcRB->request_control_blk_length)) - return -EFAULT; - if (msg->cprbx.cprb_len + sizeof(msg->hdr.function_code) > - xcRB->request_control_blk_length) - return -EFAULT; - function_code = ((unsigned char *)&msg->cprbx) + msg->cprbx.cprb_len; - memcpy(msg->hdr.function_code, function_code, sizeof(msg->hdr.function_code)); - - if (memcmp(function_code, "US", 2) == 0) - ap_msg->special = 1; - else - ap_msg->special = 0; - - /* copy data block */ - if (xcRB->request_data_length && - copy_from_user(req_data, xcRB->request_data_address, - xcRB->request_data_length)) - return -EFAULT; - return 0; -} - -/** - * Prepare a type6 CPRB message for random number generation - * - * @ap_dev: AP device pointer - * @ap_msg: pointer to AP message - */ -static void rng_type6CPRB_msgX(struct ap_device 
*ap_dev, - struct ap_message *ap_msg, - unsigned random_number_length) -{ - struct { - struct type6_hdr hdr; - struct CPRBX cprbx; - char function_code[2]; - short int rule_length; - char rule[8]; - short int verb_length; - short int key_length; - } __attribute__((packed)) *msg = ap_msg->message; - static struct type6_hdr static_type6_hdrX = { - .type = 0x06, - .offset1 = 0x00000058, - .agent_id = {'C', 'A'}, - .function_code = {'R', 'L'}, - .ToCardLen1 = sizeof *msg - sizeof(msg->hdr), - .FromCardLen1 = sizeof *msg - sizeof(msg->hdr), - }; - static struct CPRBX local_cprbx = { - .cprb_len = 0x00dc, - .cprb_ver_id = 0x02, - .func_id = {0x54, 0x32}, - .req_parml = sizeof *msg - sizeof(msg->hdr) - - sizeof(msg->cprbx), - .rpl_msgbl = sizeof *msg - sizeof(msg->hdr), - }; - - msg->hdr = static_type6_hdrX; - msg->hdr.FromCardLen2 = random_number_length, - msg->cprbx = local_cprbx; - msg->cprbx.rpl_datal = random_number_length, - msg->cprbx.domain = AP_QID_QUEUE(ap_dev->qid); - memcpy(msg->function_code, msg->hdr.function_code, 0x02); - msg->rule_length = 0x0a; - memcpy(msg->rule, "RANDOM ", 8); - msg->verb_length = 0x02; - msg->key_length = 0x02; - ap_msg->length = sizeof *msg; -} - -/** - * Copy results from a type 86 ICA reply message back to user space. - * - * @zdev: crypto device pointer - * @reply: reply AP message. - * @data: pointer to user output data - * @length: size of user output data - * - * Returns 0 on success or -EINVAL, -EFAULT, -EAGAIN in case of an error. - */ -struct type86x_reply { - struct type86_hdr hdr; - struct type86_fmt2_ext fmt2; - struct CPRBX cprbx; - unsigned char pad[4]; /* 4 byte function code/rules block ? */ - unsigned short length; - char text[0]; -} __attribute__((packed)); - -static int convert_type86_ica(struct zcrypt_device *zdev, - struct ap_message *reply, - char __user *outputdata, - unsigned int outputdatalength) -{ - static unsigned char static_pad[] = { - 0x00,0x02, - 0x1B,0x7B,0x5D,0xB5,0x75,0x01,0x3D,0xFD, - 0x8D,0xD1,0xC7,0x03,0x2D,0x09,0x23,0x57, - 0x89,0x49,0xB9,0x3F,0xBB,0x99,0x41,0x5B, - 0x75,0x21,0x7B,0x9D,0x3B,0x6B,0x51,0x39, - 0xBB,0x0D,0x35,0xB9,0x89,0x0F,0x93,0xA5, - 0x0B,0x47,0xF1,0xD3,0xBB,0xCB,0xF1,0x9D, - 0x23,0x73,0x71,0xFF,0xF3,0xF5,0x45,0xFB, - 0x61,0x29,0x23,0xFD,0xF1,0x29,0x3F,0x7F, - 0x17,0xB7,0x1B,0xA9,0x19,0xBD,0x57,0xA9, - 0xD7,0x95,0xA3,0xCB,0xED,0x1D,0xDB,0x45, - 0x7D,0x11,0xD1,0x51,0x1B,0xED,0x71,0xE9, - 0xB1,0xD1,0xAB,0xAB,0x21,0x2B,0x1B,0x9F, - 0x3B,0x9F,0xF7,0xF7,0xBD,0x63,0xEB,0xAD, - 0xDF,0xB3,0x6F,0x5B,0xDB,0x8D,0xA9,0x5D, - 0xE3,0x7D,0x77,0x49,0x47,0xF5,0xA7,0xFD, - 0xAB,0x2F,0x27,0x35,0x77,0xD3,0x49,0xC9, - 0x09,0xEB,0xB1,0xF9,0xBF,0x4B,0xCB,0x2B, - 0xEB,0xEB,0x05,0xFF,0x7D,0xC7,0x91,0x8B, - 0x09,0x83,0xB9,0xB9,0x69,0x33,0x39,0x6B, - 0x79,0x75,0x19,0xBF,0xBB,0x07,0x1D,0xBD, - 0x29,0xBF,0x39,0x95,0x93,0x1D,0x35,0xC7, - 0xC9,0x4D,0xE5,0x97,0x0B,0x43,0x9B,0xF1, - 0x16,0x93,0x03,0x1F,0xA5,0xFB,0xDB,0xF3, - 0x27,0x4F,0x27,0x61,0x05,0x1F,0xB9,0x23, - 0x2F,0xC3,0x81,0xA9,0x23,0x71,0x55,0x55, - 0xEB,0xED,0x41,0xE5,0xF3,0x11,0xF1,0x43, - 0x69,0x03,0xBD,0x0B,0x37,0x0F,0x51,0x8F, - 0x0B,0xB5,0x89,0x5B,0x67,0xA9,0xD9,0x4F, - 0x01,0xF9,0x21,0x77,0x37,0x73,0x79,0xC5, - 0x7F,0x51,0xC1,0xCF,0x97,0xA1,0x75,0xAD, - 0x35,0x9D,0xD3,0xD3,0xA7,0x9D,0x5D,0x41, - 0x6F,0x65,0x1B,0xCF,0xA9,0x87,0x91,0x09 - }; - struct type86x_reply *msg = reply->message; - unsigned short service_rc, service_rs; - unsigned int reply_len, pad_len; - char *data; - - service_rc = msg->cprbx.ccp_rtcode; - if (unlikely(service_rc != 0)) { - service_rs = 
msg->cprbx.ccp_rscode; - if (service_rc == 8 && service_rs == 66) - return -EINVAL; - if (service_rc == 8 && service_rs == 65) - return -EINVAL; - if (service_rc == 8 && service_rs == 770) - return -EINVAL; - if (service_rc == 8 && service_rs == 783) { - zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE_OLD; - return -EAGAIN; - } - if (service_rc == 12 && service_rs == 769) - return -EINVAL; - if (service_rc == 8 && service_rs == 72) - return -EINVAL; - zdev->online = 0; - return -EAGAIN; /* repeat the request on a different device. */ - } - data = msg->text; - reply_len = msg->length - 2; - if (reply_len > outputdatalength) - return -EINVAL; - /* - * For all encipher requests, the length of the ciphertext (reply_len) - * will always equal the modulus length. For MEX decipher requests - * the output needs to get padded. Minimum pad size is 10. - * - * Currently, the cases where padding will be added is for: - * - PCIXCC_MCL2 using a CRT form token (since PKD didn't support - * ZERO-PAD and CRT is only supported for PKD requests) - * - PCICC, always - */ - pad_len = outputdatalength - reply_len; - if (pad_len > 0) { - if (pad_len < 10) - return -EINVAL; - /* 'restore' padding left in the PCICC/PCIXCC card. */ - if (copy_to_user(outputdata, static_pad, pad_len - 1)) - return -EFAULT; - if (put_user(0, outputdata + pad_len - 1)) - return -EFAULT; - } - /* Copy the crypto response to user space. */ - if (copy_to_user(outputdata + pad_len, data, reply_len)) - return -EFAULT; - return 0; -} - -/** - * Copy results from a type 86 XCRB reply message back to user space. - * - * @zdev: crypto device pointer - * @reply: reply AP message. - * @xcRB: pointer to XCRB - * - * Returns 0 on success or -EINVAL, -EFAULT, -EAGAIN in case of an error. - */ -static int convert_type86_xcrb(struct zcrypt_device *zdev, - struct ap_message *reply, - struct ica_xcRB *xcRB) -{ - struct type86_fmt2_msg *msg = reply->message; - char *data = reply->message; - - /* Copy CPRB to user */ - if (copy_to_user(xcRB->reply_control_blk_addr, - data + msg->fmt2.offset1, msg->fmt2.count1)) - return -EFAULT; - xcRB->reply_control_blk_length = msg->fmt2.count1; - - /* Copy data buffer to user */ - if (msg->fmt2.count2) - if (copy_to_user(xcRB->reply_data_addr, - data + msg->fmt2.offset2, msg->fmt2.count2)) - return -EFAULT; - xcRB->reply_data_length = msg->fmt2.count2; - return 0; -} - -static int convert_type86_rng(struct zcrypt_device *zdev, - struct ap_message *reply, - char *buffer) -{ - struct { - struct type86_hdr hdr; - struct type86_fmt2_ext fmt2; - struct CPRBX cprbx; - } __attribute__((packed)) *msg = reply->message; - char *data = reply->message; - - if (msg->cprbx.ccp_rtcode != 0 || msg->cprbx.ccp_rscode != 0) - return -EINVAL; - memcpy(buffer, data + msg->fmt2.offset2, msg->fmt2.count2); - return msg->fmt2.count2; -} - -static int convert_response_ica(struct zcrypt_device *zdev, - struct ap_message *reply, - char __user *outputdata, - unsigned int outputdatalength) -{ - struct type86x_reply *msg = reply->message; - - /* Response type byte is the second byte in the response. 
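
convert_type86_ica(), deleted above, folds the card's CCA return/reason codes into errnos. The policy restated compactly (this helper is an illustration distilled from the removed code, not part of the patch):

    static int cca_status_to_errno(struct zcrypt_device *zdev,
                                   unsigned short rc, unsigned short rs)
    {
            /* malformed requests: the caller's fault, don't retry */
            if (rc == 8 && (rs == 65 || rs == 66 || rs == 72 || rs == 770))
                    return -EINVAL;
            if (rc == 12 && rs == 769)
                    return -EINVAL;
            /* 8/783: old microcode level, raise the minimum and retry */
            if (rc == 8 && rs == 783) {
                    zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE_OLD;
                    return -EAGAIN;
            }
            zdev->online = 0;       /* anything else: fence this device */
            return -EAGAIN;         /* and retry on another one */
    }
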
*/ - switch (((unsigned char *) reply->message)[1]) { - case TYPE82_RSP_CODE: - case TYPE88_RSP_CODE: - return convert_error(zdev, reply); - case TYPE86_RSP_CODE: - if (msg->hdr.reply_code) - return convert_error(zdev, reply); - if (msg->cprbx.cprb_ver_id == 0x02) - return convert_type86_ica(zdev, reply, - outputdata, outputdatalength); - /* Fall through, no break, incorrect cprb version is an unknown - * response */ - default: /* Unknown response type, this should NEVER EVER happen */ - zdev->online = 0; - return -EAGAIN; /* repeat the request on a different device. */ - } -} - -static int convert_response_xcrb(struct zcrypt_device *zdev, - struct ap_message *reply, - struct ica_xcRB *xcRB) -{ - struct type86x_reply *msg = reply->message; - - /* Response type byte is the second byte in the response. */ - switch (((unsigned char *) reply->message)[1]) { - case TYPE82_RSP_CODE: - case TYPE88_RSP_CODE: - xcRB->status = 0x0008044DL; /* HDD_InvalidParm */ - return convert_error(zdev, reply); - case TYPE86_RSP_CODE: - if (msg->hdr.reply_code) { - memcpy(&(xcRB->status), msg->fmt2.apfs, sizeof(u32)); - return convert_error(zdev, reply); - } - if (msg->cprbx.cprb_ver_id == 0x02) - return convert_type86_xcrb(zdev, reply, xcRB); - /* Fall through, no break, incorrect cprb version is an unknown - * response */ - default: /* Unknown response type, this should NEVER EVER happen */ - xcRB->status = 0x0008044DL; /* HDD_InvalidParm */ - zdev->online = 0; - return -EAGAIN; /* repeat the request on a different device. */ - } -} - -static int convert_response_rng(struct zcrypt_device *zdev, - struct ap_message *reply, - char *data) -{ - struct type86x_reply *msg = reply->message; - - switch (msg->hdr.type) { - case TYPE82_RSP_CODE: - case TYPE88_RSP_CODE: - return -EINVAL; - case TYPE86_RSP_CODE: - if (msg->hdr.reply_code) - return -EINVAL; - if (msg->cprbx.cprb_ver_id == 0x02) - return convert_type86_rng(zdev, reply, data); - /* Fall through, no break, incorrect cprb version is an unknown - * response */ - default: /* Unknown response type, this should NEVER EVER happen */ - zdev->online = 0; - return -EAGAIN; /* repeat the request on a different device. */ - } -} - -/** - * This function is called from the AP bus code after a crypto request - * "msg" has finished with the reply message "reply". - * It is called from tasklet context. - * @ap_dev: pointer to the AP device - * @msg: pointer to the AP message - * @reply: pointer to the AP reply message - */ -static void zcrypt_pcixcc_receive(struct ap_device *ap_dev, - struct ap_message *msg, - struct ap_message *reply) -{ - static struct error_hdr error_reply = { - .type = TYPE82_RSP_CODE, - .reply_code = REP82_ERROR_MACHINE_FAILURE, - }; - struct response_type *resp_type = - (struct response_type *) msg->private; - struct type86x_reply *t86r; - int length; - - /* Copy the reply message to the request message buffer. 
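
The three convert_response_*() helpers deleted in this region share one dispatch idiom: the second byte of the reply selects the message type (types 82 and 88 are error replies, type 86 is a normal reply), and within type 86 only CPRB version 0x02 is understood; anything else fences the device so the request distributor retries elsewhere. The common skeleton, extracted for illustration only:

    static int dispatch_reply(struct zcrypt_device *zdev,
                              struct ap_message *reply)
    {
            struct type86x_reply *msg = reply->message;

            switch (((unsigned char *) reply->message)[1]) {
            case TYPE82_RSP_CODE:
            case TYPE88_RSP_CODE:
                    return convert_error(zdev, reply);
            case TYPE86_RSP_CODE:
                    if (msg->hdr.reply_code)
                            return convert_error(zdev, reply);
                    if (msg->cprbx.cprb_ver_id == 0x02)
                            return 0;   /* type-specific copy-out goes here */
                    /* fall through: unknown CPRB version */
            default:
                    zdev->online = 0;
                    return -EAGAIN;     /* retry on a different device */
            }
    }
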
*/ - if (IS_ERR(reply)) { - memcpy(msg->message, &error_reply, sizeof(error_reply)); - goto out; - } - t86r = reply->message; - if (t86r->hdr.type == TYPE86_RSP_CODE && - t86r->cprbx.cprb_ver_id == 0x02) { - switch (resp_type->type) { - case PCIXCC_RESPONSE_TYPE_ICA: - length = sizeof(struct type86x_reply) - + t86r->length - 2; - length = min(PCIXCC_MAX_ICA_RESPONSE_SIZE, length); - memcpy(msg->message, reply->message, length); - break; - case PCIXCC_RESPONSE_TYPE_XCRB: - length = t86r->fmt2.offset2 + t86r->fmt2.count2; - length = min(PCIXCC_MAX_XCRB_RESPONSE_SIZE, length); - memcpy(msg->message, reply->message, length); - break; - default: - memcpy(msg->message, &error_reply, sizeof error_reply); - } - } else - memcpy(msg->message, reply->message, sizeof error_reply); -out: - complete(&(resp_type->work)); -} - -static atomic_t zcrypt_step = ATOMIC_INIT(0); - -/** - * The request distributor calls this function if it picked the PCIXCC/CEX2C - * device to handle a modexpo request. - * @zdev: pointer to zcrypt_device structure that identifies the - * PCIXCC/CEX2C device to the request distributor - * @mex: pointer to the modexpo request buffer - */ -static long zcrypt_pcixcc_modexpo(struct zcrypt_device *zdev, - struct ica_rsa_modexpo *mex) -{ - struct ap_message ap_msg; - struct response_type resp_type = { - .type = PCIXCC_RESPONSE_TYPE_ICA, - }; - int rc; - - ap_init_message(&ap_msg); - ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL); - if (!ap_msg.message) - return -ENOMEM; - ap_msg.psmid = (((unsigned long long) current->pid) << 32) + - atomic_inc_return(&zcrypt_step); - ap_msg.private = &resp_type; - rc = ICAMEX_msg_to_type6MEX_msgX(zdev, &ap_msg, mex); - if (rc) - goto out_free; - init_completion(&resp_type.work); - ap_queue_message(zdev->ap_dev, &ap_msg); - rc = wait_for_completion_interruptible(&resp_type.work); - if (rc == 0) - rc = convert_response_ica(zdev, &ap_msg, mex->outputdata, - mex->outputdatalength); - else - /* Signal pending. */ - ap_cancel_message(zdev->ap_dev, &ap_msg); -out_free: - free_page((unsigned long) ap_msg.message); - return rc; -} - -/** - * The request distributor calls this function if it picked the PCIXCC/CEX2C - * device to handle a modexpo_crt request. - * @zdev: pointer to zcrypt_device structure that identifies the - * PCIXCC/CEX2C device to the request distributor - * @crt: pointer to the modexpoc_crt request buffer - */ -static long zcrypt_pcixcc_modexpo_crt(struct zcrypt_device *zdev, - struct ica_rsa_modexpo_crt *crt) -{ - struct ap_message ap_msg; - struct response_type resp_type = { - .type = PCIXCC_RESPONSE_TYPE_ICA, - }; - int rc; - - ap_init_message(&ap_msg); - ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL); - if (!ap_msg.message) - return -ENOMEM; - ap_msg.psmid = (((unsigned long long) current->pid) << 32) + - atomic_inc_return(&zcrypt_step); - ap_msg.private = &resp_type; - rc = ICACRT_msg_to_type6CRT_msgX(zdev, &ap_msg, crt); - if (rc) - goto out_free; - init_completion(&resp_type.work); - ap_queue_message(zdev->ap_dev, &ap_msg); - rc = wait_for_completion_interruptible(&resp_type.work); - if (rc == 0) - rc = convert_response_ica(zdev, &ap_msg, crt->outputdata, - crt->outputdatalength); - else - /* Signal pending. */ - ap_cancel_message(zdev->ap_dev, &ap_msg); -out_free: - free_page((unsigned long) ap_msg.message); - return rc; -} - -/** - * The request distributor calls this function if it picked the PCIXCC/CEX2C - * device to handle a send_cprb request. 
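
All four request entry points deleted here (modexpo, modexpo_crt, and the send_cprb and rng paths that follow) share one synchronous skeleton, now provided by the msgtype module. The psmid packs the caller's pid into the high 32 bits and a global step counter into the low 32 bits so the receive tasklet can match replies to requests. The shared pattern, condensed for illustration:

    static atomic_t zcrypt_step = ATOMIC_INIT(0);

    static long sync_ap_request(struct zcrypt_device *zdev,
                                struct ap_message *ap_msg,
                                struct response_type *resp)
    {
            long rc;

            ap_msg->psmid = (((unsigned long long) current->pid) << 32) +
                            atomic_inc_return(&zcrypt_step);
            ap_msg->private = resp;
            init_completion(&resp->work);
            ap_queue_message(zdev->ap_dev, ap_msg);
            rc = wait_for_completion_interruptible(&resp->work);
            if (rc)         /* signal arrived: take the request back */
                    ap_cancel_message(zdev->ap_dev, ap_msg);
            return rc;      /* 0: the reply is in ap_msg->message */
    }
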
- * @zdev: pointer to zcrypt_device structure that identifies the - * PCIXCC/CEX2C device to the request distributor - * @xcRB: pointer to the send_cprb request buffer - */ -static long zcrypt_pcixcc_send_cprb(struct zcrypt_device *zdev, - struct ica_xcRB *xcRB) -{ - struct ap_message ap_msg; - struct response_type resp_type = { - .type = PCIXCC_RESPONSE_TYPE_XCRB, - }; - int rc; - - ap_init_message(&ap_msg); - ap_msg.message = kmalloc(PCIXCC_MAX_XCRB_MESSAGE_SIZE, GFP_KERNEL); - if (!ap_msg.message) - return -ENOMEM; - ap_msg.psmid = (((unsigned long long) current->pid) << 32) + - atomic_inc_return(&zcrypt_step); - ap_msg.private = &resp_type; - rc = XCRB_msg_to_type6CPRB_msgX(zdev, &ap_msg, xcRB); - if (rc) - goto out_free; - init_completion(&resp_type.work); - ap_queue_message(zdev->ap_dev, &ap_msg); - rc = wait_for_completion_interruptible(&resp_type.work); - if (rc == 0) - rc = convert_response_xcrb(zdev, &ap_msg, xcRB); - else - /* Signal pending. */ - ap_cancel_message(zdev->ap_dev, &ap_msg); -out_free: - kzfree(ap_msg.message); - return rc; -} - -/** - * The request distributor calls this function if it picked the PCIXCC/CEX2C - * device to generate random data. - * @zdev: pointer to zcrypt_device structure that identifies the - * PCIXCC/CEX2C device to the request distributor - * @buffer: pointer to a memory page to return random data - */ - -static long zcrypt_pcixcc_rng(struct zcrypt_device *zdev, - char *buffer) -{ - struct ap_message ap_msg; - struct response_type resp_type = { - .type = PCIXCC_RESPONSE_TYPE_XCRB, - }; - int rc; - - ap_init_message(&ap_msg); - ap_msg.message = kmalloc(PCIXCC_MAX_XCRB_MESSAGE_SIZE, GFP_KERNEL); - if (!ap_msg.message) - return -ENOMEM; - ap_msg.psmid = (((unsigned long long) current->pid) << 32) + - atomic_inc_return(&zcrypt_step); - ap_msg.private = &resp_type; - rng_type6CPRB_msgX(zdev->ap_dev, &ap_msg, ZCRYPT_RNG_BUFFER_SIZE); - init_completion(&resp_type.work); - ap_queue_message(zdev->ap_dev, &ap_msg); - rc = wait_for_completion_interruptible(&resp_type.work); - if (rc == 0) - rc = convert_response_rng(zdev, &ap_msg, buffer); - else - /* Signal pending. */ - ap_cancel_message(zdev->ap_dev, &ap_msg); - kfree(ap_msg.message); - return rc; -} - -/** - * The crypto operations for a PCIXCC/CEX2C card. - */ -static struct zcrypt_ops zcrypt_pcixcc_ops = { - .rsa_modexpo = zcrypt_pcixcc_modexpo, - .rsa_modexpo_crt = zcrypt_pcixcc_modexpo_crt, - .send_cprb = zcrypt_pcixcc_send_cprb, -}; - -static struct zcrypt_ops zcrypt_pcixcc_with_rng_ops = { - .rsa_modexpo = zcrypt_pcixcc_modexpo, - .rsa_modexpo_crt = zcrypt_pcixcc_modexpo_crt, - .send_cprb = zcrypt_pcixcc_send_cprb, - .rng = zcrypt_pcixcc_rng, -}; - -/** * Micro-code detection function. Its sends a message to a pcixcc card * to find out the microcode level. * @ap_dev: pointer to the AP device. 
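
With the conversion helpers gone, the probe hunks below no longer point zdev->ops at static tables; they request the ops from the new message-type registry, which also takes a module reference on the msgtype module. The lifecycle, sketched from the calls visible in this patch (rng_supported is a placeholder for the microcode probe result, and the NULL check is an assumption about the registry's failure mode):

    /* probe: pick the msgtype06 variant, with or without RNG support */
    zdev->ops = zcrypt_msgtype_request(MSGTYPE06_NAME,
                                       rng_supported ?
                                       MSGTYPE06_VARIANT_DEFAULT :
                                       MSGTYPE06_VARIANT_NORNG);
    if (!zdev->ops)
            return -ENODEV;

    /* remove: drop the ops and the module reference */
    zcrypt_msgtype_release(zdev->ops);
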
@@ -1033,7 +269,7 @@ static int zcrypt_pcixcc_probe(struct ap_device *ap_dev) struct zcrypt_device *zdev; int rc = 0; - zdev = zcrypt_device_alloc(PCIXCC_MAX_RESPONSE_SIZE); + zdev = zcrypt_device_alloc(PCIXCC_MAX_XCRB_MESSAGE_SIZE); if (!zdev) return -ENOMEM; zdev->ap_dev = ap_dev; @@ -1051,11 +287,13 @@ static int zcrypt_pcixcc_probe(struct ap_device *ap_dev) zdev->speed_rating = PCIXCC_MCL2_SPEED_RATING; zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE_OLD; zdev->max_mod_size = PCIXCC_MAX_MOD_SIZE; + zdev->max_exp_bit_length = PCIXCC_MAX_MOD_SIZE; } else { zdev->type_string = "PCIXCC_MCL3"; zdev->speed_rating = PCIXCC_MCL3_SPEED_RATING; zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE; zdev->max_mod_size = PCIXCC_MAX_MOD_SIZE; + zdev->max_exp_bit_length = PCIXCC_MAX_MOD_SIZE; } break; case AP_DEVICE_TYPE_CEX2C: @@ -1064,6 +302,7 @@ static int zcrypt_pcixcc_probe(struct ap_device *ap_dev) zdev->speed_rating = CEX2C_SPEED_RATING; zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE; zdev->max_mod_size = PCIXCC_MAX_MOD_SIZE; + zdev->max_exp_bit_length = PCIXCC_MAX_MOD_SIZE; break; case AP_DEVICE_TYPE_CEX3C: zdev->user_space_type = ZCRYPT_CEX3C; @@ -1071,6 +310,7 @@ static int zcrypt_pcixcc_probe(struct ap_device *ap_dev) zdev->speed_rating = CEX3C_SPEED_RATING; zdev->min_mod_size = CEX3C_MIN_MOD_SIZE; zdev->max_mod_size = CEX3C_MAX_MOD_SIZE; + zdev->max_exp_bit_length = CEX3C_MAX_MOD_SIZE; break; default: goto out_free; @@ -1082,9 +322,11 @@ static int zcrypt_pcixcc_probe(struct ap_device *ap_dev) return rc; } if (rc) - zdev->ops = &zcrypt_pcixcc_with_rng_ops; + zdev->ops = zcrypt_msgtype_request(MSGTYPE06_NAME, + MSGTYPE06_VARIANT_DEFAULT); else - zdev->ops = &zcrypt_pcixcc_ops; + zdev->ops = zcrypt_msgtype_request(MSGTYPE06_NAME, + MSGTYPE06_VARIANT_NORNG); ap_dev->reply = &zdev->reply; ap_dev->private = zdev; rc = zcrypt_device_register(zdev); @@ -1094,6 +336,7 @@ static int zcrypt_pcixcc_probe(struct ap_device *ap_dev) out_free: ap_dev->private = NULL; + zcrypt_msgtype_release(zdev->ops); zcrypt_device_free(zdev); return rc; } @@ -1105,8 +348,10 @@ static int zcrypt_pcixcc_probe(struct ap_device *ap_dev) static void zcrypt_pcixcc_remove(struct ap_device *ap_dev) { struct zcrypt_device *zdev = ap_dev->private; + struct zcrypt_ops *zops = zdev->ops; zcrypt_device_unregister(zdev); + zcrypt_msgtype_release(zops); } int __init zcrypt_pcixcc_init(void) @@ -1119,7 +364,5 @@ void zcrypt_pcixcc_exit(void) ap_driver_unregister(&zcrypt_pcixcc_driver); } -#ifndef CONFIG_ZCRYPT_MONOLITHIC module_init(zcrypt_pcixcc_init); module_exit(zcrypt_pcixcc_exit); -#endif diff --git a/drivers/s390/crypto/zcrypt_pcixcc.h b/drivers/s390/crypto/zcrypt_pcixcc.h index 8cb7d7a6973..eacafc8962f 100644 --- a/drivers/s390/crypto/zcrypt_pcixcc.h +++ b/drivers/s390/crypto/zcrypt_pcixcc.h @@ -1,14 +1,13 @@ /* - * linux/drivers/s390/crypto/zcrypt_pcixcc.h - * * zcrypt 2.1.0 * - * Copyright (C) 2001, 2006 IBM Corporation + * Copyright IBM Corp. 
2001, 2012 * Author(s): Robert Burroughs * Eric Rossman (edrossma@us.ibm.com) * * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com) * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com> + * MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by diff --git a/drivers/s390/kvm/Makefile b/drivers/s390/kvm/Makefile index 0815690ac1e..241891a57ca 100644 --- a/drivers/s390/kvm/Makefile +++ b/drivers/s390/kvm/Makefile @@ -6,4 +6,4 @@ # it under the terms of the GNU General Public License (version 2 only) # as published by the Free Software Foundation. -obj-$(CONFIG_S390_GUEST) += kvm_virtio.o +obj-$(CONFIG_S390_GUEST) += kvm_virtio.o virtio_ccw.o diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/kvm/kvm_virtio.c index b2fc4fd63f7..a1349653c6d 100644 --- a/drivers/s390/kvm/kvm_virtio.c +++ b/drivers/s390/kvm/kvm_virtio.c @@ -1,5 +1,5 @@ /* - * kvm_virtio.c - virtio for kvm on s390 + * virtio for kvm on s390 * * Copyright IBM Corp. 2008 * @@ -10,20 +10,24 @@ * Author(s): Christian Borntraeger <borntraeger@de.ibm.com> */ +#include <linux/kernel_stat.h> #include <linux/init.h> #include <linux/bootmem.h> #include <linux/err.h> #include <linux/virtio.h> #include <linux/virtio_config.h> +#include <linux/slab.h> #include <linux/virtio_console.h> #include <linux/interrupt.h> #include <linux/virtio_ring.h> +#include <linux/export.h> #include <linux/pfn.h> #include <asm/io.h> #include <asm/kvm_para.h> #include <asm/kvm_virtio.h> +#include <asm/sclp.h> #include <asm/setup.h> -#include <asm/s390_ext.h> +#include <asm/irq.h> #define VIRTIO_SUBCODE_64 0x0D00 @@ -31,6 +35,7 @@ * The pointer to our (page) of device descriptions. */ static void *kvm_devices; +static struct work_struct hotplug_work; struct kvm_device { struct virtio_device vdev; @@ -161,11 +166,15 @@ static void kvm_reset(struct virtio_device *vdev) * make a hypercall. We hand the address of the virtqueue so the Host * knows which virtqueue we're talking about. 
*/ -static void kvm_notify(struct virtqueue *vq) +static bool kvm_notify(struct virtqueue *vq) { + long rc; struct kvm_vqconfig *config = vq->priv; - kvm_hypercall1(KVM_S390_VIRTIO_NOTIFY, config->address); + rc = kvm_hypercall1(KVM_S390_VIRTIO_NOTIFY, config->address); + if (rc < 0) + return false; + return true; } /* @@ -185,6 +194,9 @@ static struct virtqueue *kvm_find_vq(struct virtio_device *vdev, if (index >= kdev->desc->num_vq) return ERR_PTR(-ENOENT); + if (!name) + return NULL; + config = kvm_vq_config(kdev->desc)+index; err = vmem_add_mapping(config->address, @@ -193,8 +205,8 @@ static struct virtqueue *kvm_find_vq(struct virtio_device *vdev, if (err) goto out; - vq = vring_new_virtqueue(config->num, KVM_S390_VIRTIO_RING_ALIGN, - vdev, (void *) config->address, + vq = vring_new_virtqueue(index, config->num, KVM_S390_VIRTIO_RING_ALIGN, + vdev, true, (void *) config->address, kvm_notify, callback, name); if (!vq) { err = -ENOMEM; @@ -259,10 +271,15 @@ error: return PTR_ERR(vqs[i]); } +static const char *kvm_bus_name(struct virtio_device *vdev) +{ + return ""; +} + /* * The config ops structure as defined by virtio config */ -static struct virtio_config_ops kvm_vq_configspace_ops = { +static const struct virtio_config_ops kvm_vq_configspace_ops = { .get_features = kvm_get_features, .finalize_features = kvm_finalize_features, .get = kvm_get, @@ -272,6 +289,7 @@ static struct virtio_config_ops kvm_vq_configspace_ops = { .reset = kvm_reset, .find_vqs = kvm_find_vqs, .del_vqs = kvm_del_vqs, + .bus_name = kvm_bus_name, }; /* @@ -327,63 +345,139 @@ static void scan_devices(void) } /* + * match for a kvm device with a specific desc pointer + */ +static int match_desc(struct device *dev, void *data) +{ + struct virtio_device *vdev = dev_to_virtio(dev); + struct kvm_device *kdev = to_kvmdev(vdev); + + return kdev->desc == data; +} + +/* + * hotplug_device tries to find changes in the device page. 
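
kvm_notify() changing from void to bool follows a virtio-core interface change: a transport's notify callback now reports whether the host could actually be kicked, and the core surfaces that result through virtqueue_kick(). A hypothetical consumer-side check, assuming a virtio core of this vintage where virtqueue_kick() returns bool:

    /* in any virtio driver, after virtqueue_add_*(): */
    if (!virtqueue_kick(vq))
            dev_warn(&vq->vdev->dev, "host notification failed\n");
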
+ */ +static void hotplug_devices(struct work_struct *dummy) +{ + unsigned int i; + struct kvm_device_desc *d; + struct device *dev; + + for (i = 0; i < PAGE_SIZE; i += desc_size(d)) { + d = kvm_devices + i; + + /* end of list */ + if (d->type == 0) + break; + + /* device already exists */ + dev = device_find_child(kvm_root, d, match_desc); + if (dev) { + /* XXX check for hotplug remove */ + put_device(dev); + continue; + } + + /* new device */ + printk(KERN_INFO "Adding new virtio device %p\n", d); + add_kvm_device(d, i); + } +} + +/* * we emulate the request_irq behaviour on top of s390 extints */ -static void kvm_extint_handler(u16 code) +static void kvm_extint_handler(struct ext_code ext_code, + unsigned int param32, unsigned long param64) { struct virtqueue *vq; - u16 subcode; - int config_changed; + u32 param; - subcode = S390_lowcore.cpu_addr; - if ((subcode & 0xff00) != VIRTIO_SUBCODE_64) + if ((ext_code.subcode & 0xff00) != VIRTIO_SUBCODE_64) return; + inc_irq_stat(IRQEXT_VRT); /* The LSB might be overloaded, we have to mask it */ - vq = (struct virtqueue *)(S390_lowcore.ext_params2 & ~1UL); + vq = (struct virtqueue *)(param64 & ~1UL); - /* We use the LSB of extparam, to decide, if this interrupt is a config - * change or a "standard" interrupt */ - config_changed = S390_lowcore.ext_params & 1; + /* We use ext_params to decide what this interrupt means */ + param = param32 & VIRTIO_PARAM_MASK; - if (config_changed) { + switch (param) { + case VIRTIO_PARAM_CONFIG_CHANGED: + { struct virtio_driver *drv; drv = container_of(vq->vdev->dev.driver, struct virtio_driver, driver); if (drv->config_changed) drv->config_changed(vq->vdev); - } else + + break; + } + case VIRTIO_PARAM_DEV_ADD: + schedule_work(&hotplug_work); + break; + case VIRTIO_PARAM_VRING_INTERRUPT: + default: vring_interrupt(0, vq); + break; + } } /* + * For s390-virtio, we expect a page above main storage containing + * the virtio configuration. Try to actually load from this area + * in order to figure out if the host provides this page. 
+ */ +static int __init test_devices_support(unsigned long addr) +{ + int ret = -EIO; + + asm volatile( + "0: lura 0,%1\n" + "1: xgr %0,%0\n" + "2:\n" + EX_TABLE(0b,2b) + EX_TABLE(1b,2b) + : "+d" (ret) + : "a" (addr) + : "0", "cc"); + return ret; +} +/* * Init function for virtio - * devices are in a single page above top of "normal" mem + * devices are in a single page above top of "normal" + standby mem */ static int __init kvm_devices_init(void) { int rc; + unsigned long total_memory_size = sclp_get_rzm() * sclp_get_rnmax(); if (!MACHINE_IS_KVM) return -ENODEV; + if (test_devices_support(total_memory_size) < 0) + return -ENODEV; + + rc = vmem_add_mapping(total_memory_size, PAGE_SIZE); + if (rc) + return rc; + + kvm_devices = (void *) total_memory_size; + kvm_root = root_device_register("kvm_s390"); if (IS_ERR(kvm_root)) { rc = PTR_ERR(kvm_root); printk(KERN_ERR "Could not register kvm_s390 root device"); + vmem_remove_mapping(total_memory_size, PAGE_SIZE); return rc; } - rc = vmem_add_mapping(real_memory_size, PAGE_SIZE); - if (rc) { - root_device_unregister(kvm_root); - return rc; - } + INIT_WORK(&hotplug_work, hotplug_devices); - kvm_devices = (void *) real_memory_size; - - ctl_set_bit(0, 9); - register_external_interrupt(0x2603, kvm_extint_handler); + irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL); + register_external_irq(EXT_IRQ_CP_SERVICE, kvm_extint_handler); scan_devices(); return 0; @@ -405,7 +499,7 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count) static int __init s390_virtio_console_init(void) { - if (!MACHINE_IS_KVM) + if (sclp_has_vt220() || sclp_has_linemode()) return -ENODEV; return virtio_cons_early_init(early_put_chars); } diff --git a/drivers/s390/kvm/virtio_ccw.c b/drivers/s390/kvm/virtio_ccw.c new file mode 100644 index 00000000000..d2c0b442bce --- /dev/null +++ b/drivers/s390/kvm/virtio_ccw.c @@ -0,0 +1,1254 @@ +/* + * ccw based virtio transport + * + * Copyright IBM Corp. 2012, 2014 + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License (version 2 only) + * as published by the Free Software Foundation. 
+ * + * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com> + */ + +#include <linux/kernel_stat.h> +#include <linux/init.h> +#include <linux/bootmem.h> +#include <linux/err.h> +#include <linux/virtio.h> +#include <linux/virtio_config.h> +#include <linux/slab.h> +#include <linux/interrupt.h> +#include <linux/virtio_ring.h> +#include <linux/pfn.h> +#include <linux/async.h> +#include <linux/wait.h> +#include <linux/list.h> +#include <linux/bitops.h> +#include <linux/module.h> +#include <linux/io.h> +#include <linux/kvm_para.h> +#include <linux/notifier.h> +#include <asm/setup.h> +#include <asm/irq.h> +#include <asm/cio.h> +#include <asm/ccwdev.h> +#include <asm/virtio-ccw.h> +#include <asm/isc.h> +#include <asm/airq.h> + +/* + * virtio related functions + */ + +struct vq_config_block { + __u16 index; + __u16 num; +} __packed; + +#define VIRTIO_CCW_CONFIG_SIZE 0x100 +/* same as PCI config space size, should be enough for all drivers */ + +struct virtio_ccw_device { + struct virtio_device vdev; + __u8 *status; + __u8 config[VIRTIO_CCW_CONFIG_SIZE]; + struct ccw_device *cdev; + __u32 curr_io; + int err; + wait_queue_head_t wait_q; + spinlock_t lock; + struct list_head virtqueues; + unsigned long indicators; + unsigned long indicators2; + struct vq_config_block *config_block; + bool is_thinint; + bool going_away; + bool device_lost; + void *airq_info; +}; + +struct vq_info_block { + __u64 queue; + __u32 align; + __u16 index; + __u16 num; +} __packed; + +struct virtio_feature_desc { + __u32 features; + __u8 index; +} __packed; + +struct virtio_thinint_area { + unsigned long summary_indicator; + unsigned long indicator; + u64 bit_nr; + u8 isc; +} __packed; + +struct virtio_ccw_vq_info { + struct virtqueue *vq; + int num; + void *queue; + struct vq_info_block *info_block; + int bit_nr; + struct list_head node; + long cookie; +}; + +#define VIRTIO_AIRQ_ISC IO_SCH_ISC /* inherit from subchannel */ + +#define VIRTIO_IV_BITS (L1_CACHE_BYTES * 8) +#define MAX_AIRQ_AREAS 20 + +static int virtio_ccw_use_airq = 1; + +struct airq_info { + rwlock_t lock; + u8 summary_indicator; + struct airq_struct airq; + struct airq_iv *aiv; +}; +static struct airq_info *airq_areas[MAX_AIRQ_AREAS]; + +#define CCW_CMD_SET_VQ 0x13 +#define CCW_CMD_VDEV_RESET 0x33 +#define CCW_CMD_SET_IND 0x43 +#define CCW_CMD_SET_CONF_IND 0x53 +#define CCW_CMD_READ_FEAT 0x12 +#define CCW_CMD_WRITE_FEAT 0x11 +#define CCW_CMD_READ_CONF 0x22 +#define CCW_CMD_WRITE_CONF 0x21 +#define CCW_CMD_WRITE_STATUS 0x31 +#define CCW_CMD_READ_VQ_CONF 0x32 +#define CCW_CMD_SET_IND_ADAPTER 0x73 + +#define VIRTIO_CCW_DOING_SET_VQ 0x00010000 +#define VIRTIO_CCW_DOING_RESET 0x00040000 +#define VIRTIO_CCW_DOING_READ_FEAT 0x00080000 +#define VIRTIO_CCW_DOING_WRITE_FEAT 0x00100000 +#define VIRTIO_CCW_DOING_READ_CONFIG 0x00200000 +#define VIRTIO_CCW_DOING_WRITE_CONFIG 0x00400000 +#define VIRTIO_CCW_DOING_WRITE_STATUS 0x00800000 +#define VIRTIO_CCW_DOING_SET_IND 0x01000000 +#define VIRTIO_CCW_DOING_READ_VQ_CONF 0x02000000 +#define VIRTIO_CCW_DOING_SET_CONF_IND 0x04000000 +#define VIRTIO_CCW_DOING_SET_IND_ADAPTER 0x08000000 +#define VIRTIO_CCW_INTPARM_MASK 0xffff0000 + +static struct virtio_ccw_device *to_vc_device(struct virtio_device *vdev) +{ + return container_of(vdev, struct virtio_ccw_device, vdev); +} + +static void drop_airq_indicator(struct virtqueue *vq, struct airq_info *info) +{ + unsigned long i, flags; + + write_lock_irqsave(&info->lock, flags); + for (i = 0; i < airq_iv_end(info->aiv); i++) { + if (vq == (void *)airq_iv_get_ptr(info->aiv, i)) { + 
airq_iv_free_bit(info->aiv, i); + airq_iv_set_ptr(info->aiv, i, 0); + break; + } + } + write_unlock_irqrestore(&info->lock, flags); +} + +static void virtio_airq_handler(struct airq_struct *airq) +{ + struct airq_info *info = container_of(airq, struct airq_info, airq); + unsigned long ai; + + inc_irq_stat(IRQIO_VAI); + read_lock(&info->lock); + /* Walk through indicators field, summary indicator active. */ + for (ai = 0;;) { + ai = airq_iv_scan(info->aiv, ai, airq_iv_end(info->aiv)); + if (ai == -1UL) + break; + vring_interrupt(0, (void *)airq_iv_get_ptr(info->aiv, ai)); + } + info->summary_indicator = 0; + smp_wmb(); + /* Walk through indicators field, summary indicator not active. */ + for (ai = 0;;) { + ai = airq_iv_scan(info->aiv, ai, airq_iv_end(info->aiv)); + if (ai == -1UL) + break; + vring_interrupt(0, (void *)airq_iv_get_ptr(info->aiv, ai)); + } + read_unlock(&info->lock); +} + +static struct airq_info *new_airq_info(void) +{ + struct airq_info *info; + int rc; + + info = kzalloc(sizeof(*info), GFP_KERNEL); + if (!info) + return NULL; + rwlock_init(&info->lock); + info->aiv = airq_iv_create(VIRTIO_IV_BITS, AIRQ_IV_ALLOC | AIRQ_IV_PTR); + if (!info->aiv) { + kfree(info); + return NULL; + } + info->airq.handler = virtio_airq_handler; + info->airq.lsi_ptr = &info->summary_indicator; + info->airq.lsi_mask = 0xff; + info->airq.isc = VIRTIO_AIRQ_ISC; + rc = register_adapter_interrupt(&info->airq); + if (rc) { + airq_iv_release(info->aiv); + kfree(info); + return NULL; + } + return info; +} + +static void destroy_airq_info(struct airq_info *info) +{ + if (!info) + return; + + unregister_adapter_interrupt(&info->airq); + airq_iv_release(info->aiv); + kfree(info); +} + +static unsigned long get_airq_indicator(struct virtqueue *vqs[], int nvqs, + u64 *first, void **airq_info) +{ + int i, j; + struct airq_info *info; + unsigned long indicator_addr = 0; + unsigned long bit, flags; + + for (i = 0; i < MAX_AIRQ_AREAS && !indicator_addr; i++) { + if (!airq_areas[i]) + airq_areas[i] = new_airq_info(); + info = airq_areas[i]; + if (!info) + return 0; + write_lock_irqsave(&info->lock, flags); + bit = airq_iv_alloc(info->aiv, nvqs); + if (bit == -1UL) { + /* Not enough vacancies. 
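
virtio_airq_handler() above scans the indicator bits twice on purpose. While the summary indicator is still set, the host will not raise another adapter interrupt, so a queue bit set after the first scan finished would otherwise be lost; clearing the summary and then rescanning closes that window. Conceptual ordering only, not drop-in code:

    /*
     *   host:  sets the airq_iv bit for the vq, then the summary
     *          indicator, then signals the ISC
     *  guest:  scan + handle vq bits     (summary still set, no new IRQ)
     *          summary_indicator = 0;
     *          smp_wmb();                (publish before the rescan)
     *          scan + handle vq bits     (pick up late arrivals)
     */
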
*/ + write_unlock_irqrestore(&info->lock, flags); + continue; + } + *first = bit; + *airq_info = info; + indicator_addr = (unsigned long)info->aiv->vector; + for (j = 0; j < nvqs; j++) { + airq_iv_set_ptr(info->aiv, bit + j, + (unsigned long)vqs[j]); + } + write_unlock_irqrestore(&info->lock, flags); + } + return indicator_addr; +} + +static void virtio_ccw_drop_indicators(struct virtio_ccw_device *vcdev) +{ + struct virtio_ccw_vq_info *info; + + list_for_each_entry(info, &vcdev->virtqueues, node) + drop_airq_indicator(info->vq, vcdev->airq_info); +} + +static int doing_io(struct virtio_ccw_device *vcdev, __u32 flag) +{ + unsigned long flags; + __u32 ret; + + spin_lock_irqsave(get_ccwdev_lock(vcdev->cdev), flags); + if (vcdev->err) + ret = 0; + else + ret = vcdev->curr_io & flag; + spin_unlock_irqrestore(get_ccwdev_lock(vcdev->cdev), flags); + return ret; +} + +static int ccw_io_helper(struct virtio_ccw_device *vcdev, + struct ccw1 *ccw, __u32 intparm) +{ + int ret; + unsigned long flags; + int flag = intparm & VIRTIO_CCW_INTPARM_MASK; + + do { + spin_lock_irqsave(get_ccwdev_lock(vcdev->cdev), flags); + ret = ccw_device_start(vcdev->cdev, ccw, intparm, 0, 0); + if (!ret) { + if (!vcdev->curr_io) + vcdev->err = 0; + vcdev->curr_io |= flag; + } + spin_unlock_irqrestore(get_ccwdev_lock(vcdev->cdev), flags); + cpu_relax(); + } while (ret == -EBUSY); + wait_event(vcdev->wait_q, doing_io(vcdev, flag) == 0); + return ret ? ret : vcdev->err; +} + +static void virtio_ccw_drop_indicator(struct virtio_ccw_device *vcdev, + struct ccw1 *ccw) +{ + int ret; + unsigned long *indicatorp = NULL; + struct virtio_thinint_area *thinint_area = NULL; + struct airq_info *airq_info = vcdev->airq_info; + + if (vcdev->is_thinint) { + thinint_area = kzalloc(sizeof(*thinint_area), + GFP_DMA | GFP_KERNEL); + if (!thinint_area) + return; + thinint_area->summary_indicator = + (unsigned long) &airq_info->summary_indicator; + thinint_area->isc = VIRTIO_AIRQ_ISC; + ccw->cmd_code = CCW_CMD_SET_IND_ADAPTER; + ccw->count = sizeof(*thinint_area); + ccw->cda = (__u32)(unsigned long) thinint_area; + } else { + indicatorp = kmalloc(sizeof(&vcdev->indicators), + GFP_DMA | GFP_KERNEL); + if (!indicatorp) + return; + *indicatorp = 0; + ccw->cmd_code = CCW_CMD_SET_IND; + ccw->count = sizeof(vcdev->indicators); + ccw->cda = (__u32)(unsigned long) indicatorp; + } + /* Deregister indicators from host. */ + vcdev->indicators = 0; + ccw->flags = 0; + ret = ccw_io_helper(vcdev, ccw, + vcdev->is_thinint ? 
+ VIRTIO_CCW_DOING_SET_IND_ADAPTER : + VIRTIO_CCW_DOING_SET_IND); + if (ret && (ret != -ENODEV)) + dev_info(&vcdev->cdev->dev, + "Failed to deregister indicators (%d)\n", ret); + else if (vcdev->is_thinint) + virtio_ccw_drop_indicators(vcdev); + kfree(indicatorp); + kfree(thinint_area); +} + +static inline long do_kvm_notify(struct subchannel_id schid, + unsigned long queue_index, + long cookie) +{ + register unsigned long __nr asm("1") = KVM_S390_VIRTIO_CCW_NOTIFY; + register struct subchannel_id __schid asm("2") = schid; + register unsigned long __index asm("3") = queue_index; + register long __rc asm("2"); + register long __cookie asm("4") = cookie; + + asm volatile ("diag 2,4,0x500\n" + : "=d" (__rc) : "d" (__nr), "d" (__schid), "d" (__index), + "d"(__cookie) + : "memory", "cc"); + return __rc; +} + +static bool virtio_ccw_kvm_notify(struct virtqueue *vq) +{ + struct virtio_ccw_vq_info *info = vq->priv; + struct virtio_ccw_device *vcdev; + struct subchannel_id schid; + + vcdev = to_vc_device(info->vq->vdev); + ccw_device_get_schid(vcdev->cdev, &schid); + info->cookie = do_kvm_notify(schid, vq->index, info->cookie); + if (info->cookie < 0) + return false; + return true; +} + +static int virtio_ccw_read_vq_conf(struct virtio_ccw_device *vcdev, + struct ccw1 *ccw, int index) +{ + vcdev->config_block->index = index; + ccw->cmd_code = CCW_CMD_READ_VQ_CONF; + ccw->flags = 0; + ccw->count = sizeof(struct vq_config_block); + ccw->cda = (__u32)(unsigned long)(vcdev->config_block); + ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_VQ_CONF); + return vcdev->config_block->num; +} + +static void virtio_ccw_del_vq(struct virtqueue *vq, struct ccw1 *ccw) +{ + struct virtio_ccw_device *vcdev = to_vc_device(vq->vdev); + struct virtio_ccw_vq_info *info = vq->priv; + unsigned long flags; + unsigned long size; + int ret; + unsigned int index = vq->index; + + /* Remove from our list. */ + spin_lock_irqsave(&vcdev->lock, flags); + list_del(&info->node); + spin_unlock_irqrestore(&vcdev->lock, flags); + + /* Release from host. */ + info->info_block->queue = 0; + info->info_block->align = 0; + info->info_block->index = index; + info->info_block->num = 0; + ccw->cmd_code = CCW_CMD_SET_VQ; + ccw->flags = 0; + ccw->count = sizeof(*info->info_block); + ccw->cda = (__u32)(unsigned long)(info->info_block); + ret = ccw_io_helper(vcdev, ccw, + VIRTIO_CCW_DOING_SET_VQ | index); + /* + * -ENODEV isn't considered an error: The device is gone anyway. + * This may happen on device detach. 
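
do_kvm_notify() above is the actual hypercall: diag 0x500 with function code KVM_S390_VIRTIO_CCW_NOTIFY in %r1, the subchannel id in %r2 and the queue index in %r3; the result comes back in %r2. The cookie in %r4 appears to be an optimization: the host may hand back a token that lets it short-cut the subchannel-to-virtqueue lookup on the next notify for the same queue (our reading of the KVM side; the guest merely stores and replays it). A negative value doubles as the error indication:

    info->cookie = do_kvm_notify(schid, vq->index, info->cookie);
    if (info->cookie < 0)
            return false;   /* propagated up through virtqueue_kick() */
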
+ */ + if (ret && (ret != -ENODEV)) + dev_warn(&vq->vdev->dev, "Error %d while deleting queue %d", + ret, index); + + vring_del_virtqueue(vq); + size = PAGE_ALIGN(vring_size(info->num, KVM_VIRTIO_CCW_RING_ALIGN)); + free_pages_exact(info->queue, size); + kfree(info->info_block); + kfree(info); +} + +static void virtio_ccw_del_vqs(struct virtio_device *vdev) +{ + struct virtqueue *vq, *n; + struct ccw1 *ccw; + struct virtio_ccw_device *vcdev = to_vc_device(vdev); + + ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL); + if (!ccw) + return; + + virtio_ccw_drop_indicator(vcdev, ccw); + + list_for_each_entry_safe(vq, n, &vdev->vqs, list) + virtio_ccw_del_vq(vq, ccw); + + kfree(ccw); +} + +static struct virtqueue *virtio_ccw_setup_vq(struct virtio_device *vdev, + int i, vq_callback_t *callback, + const char *name, + struct ccw1 *ccw) +{ + struct virtio_ccw_device *vcdev = to_vc_device(vdev); + int err; + struct virtqueue *vq = NULL; + struct virtio_ccw_vq_info *info; + unsigned long size = 0; /* silence the compiler */ + unsigned long flags; + + /* Allocate queue. */ + info = kzalloc(sizeof(struct virtio_ccw_vq_info), GFP_KERNEL); + if (!info) { + dev_warn(&vcdev->cdev->dev, "no info\n"); + err = -ENOMEM; + goto out_err; + } + info->info_block = kzalloc(sizeof(*info->info_block), + GFP_DMA | GFP_KERNEL); + if (!info->info_block) { + dev_warn(&vcdev->cdev->dev, "no info block\n"); + err = -ENOMEM; + goto out_err; + } + info->num = virtio_ccw_read_vq_conf(vcdev, ccw, i); + size = PAGE_ALIGN(vring_size(info->num, KVM_VIRTIO_CCW_RING_ALIGN)); + info->queue = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO); + if (info->queue == NULL) { + dev_warn(&vcdev->cdev->dev, "no queue\n"); + err = -ENOMEM; + goto out_err; + } + + vq = vring_new_virtqueue(i, info->num, KVM_VIRTIO_CCW_RING_ALIGN, vdev, + true, info->queue, virtio_ccw_kvm_notify, + callback, name); + if (!vq) { + /* For now, we fail if we can't get the requested size. */ + dev_warn(&vcdev->cdev->dev, "no vq\n"); + err = -ENOMEM; + goto out_err; + } + + /* Register it with the host. */ + info->info_block->queue = (__u64)info->queue; + info->info_block->align = KVM_VIRTIO_CCW_RING_ALIGN; + info->info_block->index = i; + info->info_block->num = info->num; + ccw->cmd_code = CCW_CMD_SET_VQ; + ccw->flags = 0; + ccw->count = sizeof(*info->info_block); + ccw->cda = (__u32)(unsigned long)(info->info_block); + err = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_VQ | i); + if (err) { + dev_warn(&vcdev->cdev->dev, "SET_VQ failed\n"); + goto out_err; + } + + info->vq = vq; + vq->priv = info; + + /* Save it to our list. */ + spin_lock_irqsave(&vcdev->lock, flags); + list_add(&info->node, &vcdev->virtqueues); + spin_unlock_irqrestore(&vcdev->lock, flags); + + return vq; + +out_err: + if (vq) + vring_del_virtqueue(vq); + if (info) { + if (info->queue) + free_pages_exact(info->queue, size); + kfree(info->info_block); + } + kfree(info); + return ERR_PTR(err); +} + +static int virtio_ccw_register_adapter_ind(struct virtio_ccw_device *vcdev, + struct virtqueue *vqs[], int nvqs, + struct ccw1 *ccw) +{ + int ret; + struct virtio_thinint_area *thinint_area = NULL; + struct airq_info *info; + + thinint_area = kzalloc(sizeof(*thinint_area), GFP_DMA | GFP_KERNEL); + if (!thinint_area) { + ret = -ENOMEM; + goto out; + } + /* Try to get an indicator. 
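
The adapter-indicator registration below is best-effort: if CCW_CMD_SET_IND_ADAPTER fails, find_vqs() falls back to classic per-device indicators. Note where -EOPNOTSUPP comes from: the interrupt handler later in this file maps a unit check with command reject to -EOPNOTSUPP, so a host that simply does not know the command disables adapter interrupts for all further devices (virtio_ccw_use_airq = 0). Condensed from the hunks around this point:

    vcdev->is_thinint = virtio_ccw_use_airq;    /* at least try */
    /* ... in virtio_ccw_find_vqs(), after the queues are set up ... */
    if (vcdev->is_thinint) {
            ret = virtio_ccw_register_adapter_ind(vcdev, vqs, nvqs, ccw);
            if (ret)
                    vcdev->is_thinint = 0;  /* not fatal, fall back */
    }
    if (!vcdev->is_thinint) {
            /* classic mode: one bit per virtqueue via CCW_CMD_SET_IND */
    }
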
*/ + thinint_area->indicator = get_airq_indicator(vqs, nvqs, + &thinint_area->bit_nr, + &vcdev->airq_info); + if (!thinint_area->indicator) { + ret = -ENOSPC; + goto out; + } + info = vcdev->airq_info; + thinint_area->summary_indicator = + (unsigned long) &info->summary_indicator; + thinint_area->isc = VIRTIO_AIRQ_ISC; + ccw->cmd_code = CCW_CMD_SET_IND_ADAPTER; + ccw->flags = CCW_FLAG_SLI; + ccw->count = sizeof(*thinint_area); + ccw->cda = (__u32)(unsigned long)thinint_area; + ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_IND_ADAPTER); + if (ret) { + if (ret == -EOPNOTSUPP) { + /* + * The host does not support adapter interrupts + * for virtio-ccw, stop trying. + */ + virtio_ccw_use_airq = 0; + pr_info("Adapter interrupts unsupported on host\n"); + } else + dev_warn(&vcdev->cdev->dev, + "enabling adapter interrupts = %d\n", ret); + virtio_ccw_drop_indicators(vcdev); + } +out: + kfree(thinint_area); + return ret; +} + +static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs, + struct virtqueue *vqs[], + vq_callback_t *callbacks[], + const char *names[]) +{ + struct virtio_ccw_device *vcdev = to_vc_device(vdev); + unsigned long *indicatorp = NULL; + int ret, i; + struct ccw1 *ccw; + + ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL); + if (!ccw) + return -ENOMEM; + + for (i = 0; i < nvqs; ++i) { + vqs[i] = virtio_ccw_setup_vq(vdev, i, callbacks[i], names[i], + ccw); + if (IS_ERR(vqs[i])) { + ret = PTR_ERR(vqs[i]); + vqs[i] = NULL; + goto out; + } + } + ret = -ENOMEM; + /* We need a data area under 2G to communicate. */ + indicatorp = kmalloc(sizeof(&vcdev->indicators), GFP_DMA | GFP_KERNEL); + if (!indicatorp) + goto out; + *indicatorp = (unsigned long) &vcdev->indicators; + if (vcdev->is_thinint) { + ret = virtio_ccw_register_adapter_ind(vcdev, vqs, nvqs, ccw); + if (ret) + /* no error, just fall back to legacy interrupts */ + vcdev->is_thinint = 0; + } + if (!vcdev->is_thinint) { + /* Register queue indicators with host. */ + vcdev->indicators = 0; + ccw->cmd_code = CCW_CMD_SET_IND; + ccw->flags = 0; + ccw->count = sizeof(vcdev->indicators); + ccw->cda = (__u32)(unsigned long) indicatorp; + ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_IND); + if (ret) + goto out; + } + /* Register indicators2 with host for config changes */ + *indicatorp = (unsigned long) &vcdev->indicators2; + vcdev->indicators2 = 0; + ccw->cmd_code = CCW_CMD_SET_CONF_IND; + ccw->flags = 0; + ccw->count = sizeof(vcdev->indicators2); + ccw->cda = (__u32)(unsigned long) indicatorp; + ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_CONF_IND); + if (ret) + goto out; + + kfree(indicatorp); + kfree(ccw); + return 0; +out: + kfree(indicatorp); + kfree(ccw); + virtio_ccw_del_vqs(vdev); + return ret; +} + +static void virtio_ccw_reset(struct virtio_device *vdev) +{ + struct virtio_ccw_device *vcdev = to_vc_device(vdev); + struct ccw1 *ccw; + + ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL); + if (!ccw) + return; + + /* Zero status bits. */ + *vcdev->status = 0; + + /* Send a reset ccw on device. 
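
A pattern worth calling out across all of these helpers: every buffer whose address ends up in ccw->cda is allocated with GFP_DMA. The channel-command word's data address is a 31-bit field, so control blocks must live below 2G, which is what ZONE_DMA provides on s390. The idiom, as a sketch:

    struct vq_config_block *blk;

    blk = kzalloc(sizeof(*blk), GFP_DMA | GFP_KERNEL);  /* below 2G */
    if (!blk)
            return -ENOMEM;
    ccw->cda = (__u32)(unsigned long) blk;  /* cda holds a 31-bit address */
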
*/ + ccw->cmd_code = CCW_CMD_VDEV_RESET; + ccw->flags = 0; + ccw->count = 0; + ccw->cda = 0; + ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_RESET); + kfree(ccw); +} + +static u32 virtio_ccw_get_features(struct virtio_device *vdev) +{ + struct virtio_ccw_device *vcdev = to_vc_device(vdev); + struct virtio_feature_desc *features; + int ret, rc; + struct ccw1 *ccw; + + ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL); + if (!ccw) + return 0; + + features = kzalloc(sizeof(*features), GFP_DMA | GFP_KERNEL); + if (!features) { + rc = 0; + goto out_free; + } + /* Read the feature bits from the host. */ + /* TODO: Features > 32 bits */ + features->index = 0; + ccw->cmd_code = CCW_CMD_READ_FEAT; + ccw->flags = 0; + ccw->count = sizeof(*features); + ccw->cda = (__u32)(unsigned long)features; + ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_FEAT); + if (ret) { + rc = 0; + goto out_free; + } + + rc = le32_to_cpu(features->features); + +out_free: + kfree(features); + kfree(ccw); + return rc; +} + +static void virtio_ccw_finalize_features(struct virtio_device *vdev) +{ + struct virtio_ccw_device *vcdev = to_vc_device(vdev); + struct virtio_feature_desc *features; + int i; + struct ccw1 *ccw; + + ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL); + if (!ccw) + return; + + features = kzalloc(sizeof(*features), GFP_DMA | GFP_KERNEL); + if (!features) + goto out_free; + + /* Give virtio_ring a chance to accept features. */ + vring_transport_features(vdev); + + for (i = 0; i < sizeof(*vdev->features) / sizeof(features->features); + i++) { + int highbits = i % 2 ? 32 : 0; + features->index = i; + features->features = cpu_to_le32(vdev->features[i / 2] + >> highbits); + /* Write the feature bits to the host. */ + ccw->cmd_code = CCW_CMD_WRITE_FEAT; + ccw->flags = 0; + ccw->count = sizeof(*features); + ccw->cda = (__u32)(unsigned long)features; + ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_FEAT); + } +out_free: + kfree(features); + kfree(ccw); +} + +static void virtio_ccw_get_config(struct virtio_device *vdev, + unsigned int offset, void *buf, unsigned len) +{ + struct virtio_ccw_device *vcdev = to_vc_device(vdev); + int ret; + struct ccw1 *ccw; + void *config_area; + + ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL); + if (!ccw) + return; + + config_area = kzalloc(VIRTIO_CCW_CONFIG_SIZE, GFP_DMA | GFP_KERNEL); + if (!config_area) + goto out_free; + + /* Read the config area from the host. */ + ccw->cmd_code = CCW_CMD_READ_CONF; + ccw->flags = 0; + ccw->count = offset + len; + ccw->cda = (__u32)(unsigned long)config_area; + ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_CONFIG); + if (ret) + goto out_free; + + memcpy(vcdev->config, config_area, sizeof(vcdev->config)); + memcpy(buf, &vcdev->config[offset], len); + +out_free: + kfree(config_area); + kfree(ccw); +} + +static void virtio_ccw_set_config(struct virtio_device *vdev, + unsigned int offset, const void *buf, + unsigned len) +{ + struct virtio_ccw_device *vcdev = to_vc_device(vdev); + struct ccw1 *ccw; + void *config_area; + + ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL); + if (!ccw) + return; + + config_area = kzalloc(VIRTIO_CCW_CONFIG_SIZE, GFP_DMA | GFP_KERNEL); + if (!config_area) + goto out_free; + + memcpy(&vcdev->config[offset], buf, len); + /* Write the config area to the host. 
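
The config-space accessors in this hunk keep a full shadow copy of the device's config area in vcdev->config. A read always transfers offset + len bytes from the start of the area and refreshes the shadow; a write merges the caller's bytes into the shadow first and then writes the leading offset + len bytes back, so the prefix in front of the changed field is sent from the cache rather than clobbered. Condensed from virtio_ccw_set_config():

    memcpy(&vcdev->config[offset], buf, len);   /* update the shadow */
    memcpy(config_area, vcdev->config, sizeof(vcdev->config));
    ccw->count = offset + len;                  /* prefix incl. the change */
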
*/ + memcpy(config_area, vcdev->config, sizeof(vcdev->config)); + ccw->cmd_code = CCW_CMD_WRITE_CONF; + ccw->flags = 0; + ccw->count = offset + len; + ccw->cda = (__u32)(unsigned long)config_area; + ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_CONFIG); + +out_free: + kfree(config_area); + kfree(ccw); +} + +static u8 virtio_ccw_get_status(struct virtio_device *vdev) +{ + struct virtio_ccw_device *vcdev = to_vc_device(vdev); + + return *vcdev->status; +} + +static void virtio_ccw_set_status(struct virtio_device *vdev, u8 status) +{ + struct virtio_ccw_device *vcdev = to_vc_device(vdev); + struct ccw1 *ccw; + + ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL); + if (!ccw) + return; + + /* Write the status to the host. */ + *vcdev->status = status; + ccw->cmd_code = CCW_CMD_WRITE_STATUS; + ccw->flags = 0; + ccw->count = sizeof(status); + ccw->cda = (__u32)(unsigned long)vcdev->status; + ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_STATUS); + kfree(ccw); +} + +static struct virtio_config_ops virtio_ccw_config_ops = { + .get_features = virtio_ccw_get_features, + .finalize_features = virtio_ccw_finalize_features, + .get = virtio_ccw_get_config, + .set = virtio_ccw_set_config, + .get_status = virtio_ccw_get_status, + .set_status = virtio_ccw_set_status, + .reset = virtio_ccw_reset, + .find_vqs = virtio_ccw_find_vqs, + .del_vqs = virtio_ccw_del_vqs, +}; + + +/* + * ccw bus driver related functions + */ + +static void virtio_ccw_release_dev(struct device *_d) +{ + struct virtio_device *dev = container_of(_d, struct virtio_device, + dev); + struct virtio_ccw_device *vcdev = to_vc_device(dev); + + kfree(vcdev->status); + kfree(vcdev->config_block); + kfree(vcdev); +} + +static int irb_is_error(struct irb *irb) +{ + if (scsw_cstat(&irb->scsw) != 0) + return 1; + if (scsw_dstat(&irb->scsw) & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) + return 1; + if (scsw_cc(&irb->scsw) != 0) + return 1; + return 0; +} + +static struct virtqueue *virtio_ccw_vq_by_ind(struct virtio_ccw_device *vcdev, + int index) +{ + struct virtio_ccw_vq_info *info; + unsigned long flags; + struct virtqueue *vq; + + vq = NULL; + spin_lock_irqsave(&vcdev->lock, flags); + list_for_each_entry(info, &vcdev->virtqueues, node) { + if (info->vq->index == index) { + vq = info->vq; + break; + } + } + spin_unlock_irqrestore(&vcdev->lock, flags); + return vq; +} + +static void virtio_ccw_int_handler(struct ccw_device *cdev, + unsigned long intparm, + struct irb *irb) +{ + __u32 activity = intparm & VIRTIO_CCW_INTPARM_MASK; + struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev); + int i; + struct virtqueue *vq; + struct virtio_driver *drv; + + if (!vcdev) + return; + /* Check if it's a notification from the host. */ + if ((intparm == 0) && + (scsw_stctl(&irb->scsw) == + (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND))) { + /* OK */ + } + if (irb_is_error(irb)) { + /* Command reject? */ + if ((scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK) && + (irb->ecw[0] & SNS0_CMD_REJECT)) + vcdev->err = -EOPNOTSUPP; + else + /* Map everything else to -EIO. 
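
In the interrupt handler below, the order of operations in the virtqueue loop matters: the indicator bit is cleared before vring_interrupt() runs, with a compiler barrier in between, so an indicator the host sets while the ring is being drained is not lost; it simply triggers another pass over the ring. Condensed from the handler:

    for_each_set_bit(i, &vcdev->indicators,
                     sizeof(vcdev->indicators) * BITS_PER_BYTE) {
            clear_bit(i, &vcdev->indicators);   /* before draining the ring */
            barrier();
            vring_interrupt(0, virtio_ccw_vq_by_ind(vcdev, i));
    }
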
*/ + vcdev->err = -EIO; + } + if (vcdev->curr_io & activity) { + switch (activity) { + case VIRTIO_CCW_DOING_READ_FEAT: + case VIRTIO_CCW_DOING_WRITE_FEAT: + case VIRTIO_CCW_DOING_READ_CONFIG: + case VIRTIO_CCW_DOING_WRITE_CONFIG: + case VIRTIO_CCW_DOING_WRITE_STATUS: + case VIRTIO_CCW_DOING_SET_VQ: + case VIRTIO_CCW_DOING_SET_IND: + case VIRTIO_CCW_DOING_SET_CONF_IND: + case VIRTIO_CCW_DOING_RESET: + case VIRTIO_CCW_DOING_READ_VQ_CONF: + case VIRTIO_CCW_DOING_SET_IND_ADAPTER: + vcdev->curr_io &= ~activity; + wake_up(&vcdev->wait_q); + break; + default: + /* don't know what to do... */ + dev_warn(&cdev->dev, "Suspicious activity '%08x'\n", + activity); + WARN_ON(1); + break; + } + } + for_each_set_bit(i, &vcdev->indicators, + sizeof(vcdev->indicators) * BITS_PER_BYTE) { + /* The bit clear must happen before the vring kick. */ + clear_bit(i, &vcdev->indicators); + barrier(); + vq = virtio_ccw_vq_by_ind(vcdev, i); + vring_interrupt(0, vq); + } + if (test_bit(0, &vcdev->indicators2)) { + drv = container_of(vcdev->vdev.dev.driver, + struct virtio_driver, driver); + + if (drv && drv->config_changed) + drv->config_changed(&vcdev->vdev); + clear_bit(0, &vcdev->indicators2); + } +} + +/* + * We usually want to autoonline all devices, but give the admin + * a way to exempt devices from this. + */ +#define __DEV_WORDS ((__MAX_SUBCHANNEL + (8*sizeof(long) - 1)) / \ + (8*sizeof(long))) +static unsigned long devs_no_auto[__MAX_SSID + 1][__DEV_WORDS]; + +static char *no_auto = ""; + +module_param(no_auto, charp, 0444); +MODULE_PARM_DESC(no_auto, "list of ccw bus id ranges not to be auto-onlined"); + +static int virtio_ccw_check_autoonline(struct ccw_device *cdev) +{ + struct ccw_dev_id id; + + ccw_device_get_id(cdev, &id); + if (test_bit(id.devno, devs_no_auto[id.ssid])) + return 0; + return 1; +} + +static void virtio_ccw_auto_online(void *data, async_cookie_t cookie) +{ + struct ccw_device *cdev = data; + int ret; + + ret = ccw_device_set_online(cdev); + if (ret) + dev_warn(&cdev->dev, "Failed to set online: %d\n", ret); +} + +static int virtio_ccw_probe(struct ccw_device *cdev) +{ + cdev->handler = virtio_ccw_int_handler; + + if (virtio_ccw_check_autoonline(cdev)) + async_schedule(virtio_ccw_auto_online, cdev); + return 0; +} + +static struct virtio_ccw_device *virtio_grab_drvdata(struct ccw_device *cdev) +{ + unsigned long flags; + struct virtio_ccw_device *vcdev; + + spin_lock_irqsave(get_ccwdev_lock(cdev), flags); + vcdev = dev_get_drvdata(&cdev->dev); + if (!vcdev || vcdev->going_away) { + spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); + return NULL; + } + vcdev->going_away = true; + spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); + return vcdev; +} + +static void virtio_ccw_remove(struct ccw_device *cdev) +{ + unsigned long flags; + struct virtio_ccw_device *vcdev = virtio_grab_drvdata(cdev); + + if (vcdev && cdev->online) { + if (vcdev->device_lost) + virtio_break_device(&vcdev->vdev); + unregister_virtio_device(&vcdev->vdev); + spin_lock_irqsave(get_ccwdev_lock(cdev), flags); + dev_set_drvdata(&cdev->dev, NULL); + spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); + } + cdev->handler = NULL; +} + +static int virtio_ccw_offline(struct ccw_device *cdev) +{ + unsigned long flags; + struct virtio_ccw_device *vcdev = virtio_grab_drvdata(cdev); + + if (!vcdev) + return 0; + if (vcdev->device_lost) + virtio_break_device(&vcdev->vdev); + unregister_virtio_device(&vcdev->vdev); + spin_lock_irqsave(get_ccwdev_lock(cdev), flags); + dev_set_drvdata(&cdev->dev, NULL); + 
spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); + return 0; +} + + +static int virtio_ccw_online(struct ccw_device *cdev) +{ + int ret; + struct virtio_ccw_device *vcdev; + unsigned long flags; + + vcdev = kzalloc(sizeof(*vcdev), GFP_KERNEL); + if (!vcdev) { + dev_warn(&cdev->dev, "Could not get memory for virtio\n"); + ret = -ENOMEM; + goto out_free; + } + vcdev->config_block = kzalloc(sizeof(*vcdev->config_block), + GFP_DMA | GFP_KERNEL); + if (!vcdev->config_block) { + ret = -ENOMEM; + goto out_free; + } + vcdev->status = kzalloc(sizeof(*vcdev->status), GFP_DMA | GFP_KERNEL); + if (!vcdev->status) { + ret = -ENOMEM; + goto out_free; + } + + vcdev->is_thinint = virtio_ccw_use_airq; /* at least try */ + + vcdev->vdev.dev.parent = &cdev->dev; + vcdev->vdev.dev.release = virtio_ccw_release_dev; + vcdev->vdev.config = &virtio_ccw_config_ops; + vcdev->cdev = cdev; + init_waitqueue_head(&vcdev->wait_q); + INIT_LIST_HEAD(&vcdev->virtqueues); + spin_lock_init(&vcdev->lock); + + spin_lock_irqsave(get_ccwdev_lock(cdev), flags); + dev_set_drvdata(&cdev->dev, vcdev); + spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); + vcdev->vdev.id.vendor = cdev->id.cu_type; + vcdev->vdev.id.device = cdev->id.cu_model; + ret = register_virtio_device(&vcdev->vdev); + if (ret) { + dev_warn(&cdev->dev, "Failed to register virtio device: %d\n", + ret); + goto out_put; + } + return 0; +out_put: + spin_lock_irqsave(get_ccwdev_lock(cdev), flags); + dev_set_drvdata(&cdev->dev, NULL); + spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); + put_device(&vcdev->vdev.dev); + return ret; +out_free: + if (vcdev) { + kfree(vcdev->status); + kfree(vcdev->config_block); + } + kfree(vcdev); + return ret; +} + +static int virtio_ccw_cio_notify(struct ccw_device *cdev, int event) +{ + int rc; + struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev); + + /* + * Make sure vcdev is set + * i.e. 
set_offline/remove callback not already running + */ + if (!vcdev) + return NOTIFY_DONE; + + switch (event) { + case CIO_GONE: + vcdev->device_lost = true; + rc = NOTIFY_DONE; + break; + default: + rc = NOTIFY_DONE; + break; + } + return rc; +} + +static struct ccw_device_id virtio_ids[] = { + { CCW_DEVICE(0x3832, 0) }, + {}, +}; +MODULE_DEVICE_TABLE(ccw, virtio_ids); + +static struct ccw_driver virtio_ccw_driver = { + .driver = { + .owner = THIS_MODULE, + .name = "virtio_ccw", + }, + .ids = virtio_ids, + .probe = virtio_ccw_probe, + .remove = virtio_ccw_remove, + .set_offline = virtio_ccw_offline, + .set_online = virtio_ccw_online, + .notify = virtio_ccw_cio_notify, + .int_class = IRQIO_VIR, +}; + +static int __init pure_hex(char **cp, unsigned int *val, int min_digit, + int max_digit, int max_val) +{ + int diff; + + diff = 0; + *val = 0; + + while (diff <= max_digit) { + int value = hex_to_bin(**cp); + + if (value < 0) + break; + *val = *val * 16 + value; + (*cp)++; + diff++; + } + + if ((diff < min_digit) || (diff > max_digit) || (*val > max_val)) + return 1; + + return 0; +} + +static int __init parse_busid(char *str, unsigned int *cssid, + unsigned int *ssid, unsigned int *devno) +{ + char *str_work; + int rc, ret; + + rc = 1; + + if (*str == '\0') + goto out; + + str_work = str; + ret = pure_hex(&str_work, cssid, 1, 2, __MAX_CSSID); + if (ret || (str_work[0] != '.')) + goto out; + str_work++; + ret = pure_hex(&str_work, ssid, 1, 1, __MAX_SSID); + if (ret || (str_work[0] != '.')) + goto out; + str_work++; + ret = pure_hex(&str_work, devno, 4, 4, __MAX_SUBCHANNEL); + if (ret || (str_work[0] != '\0')) + goto out; + + rc = 0; +out: + return rc; +} + +static void __init no_auto_parse(void) +{ + unsigned int from_cssid, to_cssid, from_ssid, to_ssid, from, to; + char *parm, *str; + int rc; + + str = no_auto; + while ((parm = strsep(&str, ","))) { + rc = parse_busid(strsep(&parm, "-"), &from_cssid, + &from_ssid, &from); + if (rc) + continue; + if (parm != NULL) { + rc = parse_busid(parm, &to_cssid, + &to_ssid, &to); + if ((from_ssid > to_ssid) || + ((from_ssid == to_ssid) && (from > to))) + rc = -EINVAL; + } else { + to_cssid = from_cssid; + to_ssid = from_ssid; + to = from; + } + if (rc) + continue; + while ((from_ssid < to_ssid) || + ((from_ssid == to_ssid) && (from <= to))) { + set_bit(from, devs_no_auto[from_ssid]); + from++; + if (from > __MAX_SUBCHANNEL) { + from_ssid++; + from = 0; + } + } + } +} + +static int __init virtio_ccw_init(void) +{ + /* parse no_auto string before we do anything further */ + no_auto_parse(); + return ccw_driver_register(&virtio_ccw_driver); +} +module_init(virtio_ccw_init); + +static void __exit virtio_ccw_exit(void) +{ + int i; + + ccw_driver_unregister(&virtio_ccw_driver); + for (i = 0; i < MAX_AIRQ_AREAS; i++) + destroy_airq_info(airq_areas[i]); +} +module_exit(virtio_ccw_exit); diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig index 977bb4d4ed1..8b3f5599180 100644 --- a/drivers/s390/net/Kconfig +++ b/drivers/s390/net/Kconfig @@ -2,17 +2,18 @@ menu "S/390 network device drivers" depends on NETDEVICES && S390 config LCS - tristate "Lan Channel Station Interface" - depends on CCW && NETDEVICES && (NET_ETHERNET || TR || FDDI) + def_tristate m + prompt "Lan Channel Station Interface" + depends on CCW && NETDEVICES && (ETHERNET || FDDI) help Select this option if you want to use LCS networking on IBM System z. - This device driver supports Token Ring (IEEE 802.5), - FDDI (IEEE 802.7) and Ethernet. 
+ This device driver supports FDDI (IEEE 802.7) and Ethernet. To compile as a module, choose M. The module name is lcs. If you do not know what it is, it's safe to choose Y. config CTCM - tristate "CTC and MPC SNA device support" + def_tristate m + prompt "CTC and MPC SNA device support" depends on CCW && NETDEVICES help Select this option if you want to use channel-to-channel @@ -26,7 +27,8 @@ config CTCM If you do not need any channel-to-channel connection, choose N. config NETIUCV - tristate "IUCV network device support (VM only)" + def_tristate m + prompt "IUCV network device support (VM only)" depends on IUCV && NETDEVICES help Select this option if you want to use inter-user communication @@ -37,14 +39,16 @@ config NETIUCV The module name is netiucv. If unsure, choose Y. config SMSGIUCV - tristate "IUCV special message support (VM only)" + def_tristate m + prompt "IUCV special message support (VM only)" depends on IUCV help Select this option if you want to be able to receive SMSG messages from other VM guest systems. config SMSGIUCV_EVENT - tristate "Deliver IUCV special messages as uevents (VM only)" + def_tristate m + prompt "Deliver IUCV special messages as uevents (VM only)" depends on SMSGIUCV help Select this option to deliver CP special messages (SMSGs) as @@ -54,7 +58,8 @@ config SMSGIUCV_EVENT To compile as a module, choose M. The module name is "smsgiucv_app". config CLAW - tristate "CLAW device support" + def_tristate m + prompt "CLAW device support" depends on CCW && NETDEVICES help This driver supports channel attached CLAW devices. @@ -64,12 +69,13 @@ config CLAW To compile into the kernel, choose Y. config QETH - tristate "Gigabit Ethernet device support" + def_tristate y + prompt "Gigabit Ethernet device support" depends on CCW && NETDEVICES && IP_MULTICAST && QDIO help This driver supports the IBM System z OSA Express adapters - in QDIO mode (all media types), HiperSockets interfaces and VM GuestLAN - interfaces in QDIO and HIPER mode. + in QDIO mode (all media types), HiperSockets interfaces and z/VM + virtual NICs for Guest LAN and VSWITCH. For details please refer to the documentation provided by IBM at <http://www.ibm.com/developerworks/linux/linux390> @@ -78,28 +84,28 @@ config QETH The module name is qeth. config QETH_L2 - tristate "qeth layer 2 device support" - depends on QETH - help - Select this option to be able to run qeth devices in layer 2 mode. - To compile as a module, choose M. The module name is qeth_l2. - If unsure, choose y. + def_tristate y + prompt "qeth layer 2 device support" + depends on QETH + help + Select this option to be able to run qeth devices in layer 2 mode. + To compile as a module, choose M. The module name is qeth_l2. + If unsure, choose y. config QETH_L3 - tristate "qeth layer 3 device support" - depends on QETH - help - Select this option to be able to run qeth devices in layer 3 mode. - To compile as a module choose M. The module name is qeth_l3. - If unsure, choose Y. + def_tristate y + prompt "qeth layer 3 device support" + depends on QETH + help + Select this option to be able to run qeth devices in layer 3 mode. + To compile as a module choose M. The module name is qeth_l3. + If unsure, choose Y. 
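The Kconfig hunks above and below all make the same mechanical change: the one-line tristate "..." (or bool "...") shorthand is split into a def_tristate/def_bool line, which states the wanted default explicitly, plus a separate prompt line. A minimal sketch of the resulting idiom, using a hypothetical option name that is not part of this patch:

config EXAMPLE_DRV
	def_tristate m
	prompt "Example driver support"
	depends on CCW && NETDEVICES
	help
	  Hypothetical entry shown only to illustrate the pattern; it is
	  equivalent to tristate "Example driver support" combined with
	  default m.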
config QETH_IPV6 - bool - depends on (QETH_L3 = IPV6) || (QETH_L3 && IPV6 = 'y') - default y + def_bool y if (QETH_L3 = IPV6) || (QETH_L3 && IPV6 = 'y') config CCWGROUP tristate - default (LCS || CTCM || QETH) + default (LCS || CTCM || QETH || CLAW) endmenu diff --git a/drivers/s390/net/Makefile b/drivers/s390/net/Makefile index 4dfe8c1092d..d28f05d0c75 100644 --- a/drivers/s390/net/Makefile +++ b/drivers/s390/net/Makefile @@ -11,7 +11,7 @@ obj-$(CONFIG_LCS) += lcs.o obj-$(CONFIG_CLAW) += claw.o qeth-y += qeth_core_sys.o qeth_core_main.o qeth_core_mpc.o obj-$(CONFIG_QETH) += qeth.o -qeth_l2-y += qeth_l2_main.o +qeth_l2-y += qeth_l2_main.o qeth_l2_sys.o obj-$(CONFIG_QETH_L2) += qeth_l2.o qeth_l3-y += qeth_l3_main.o qeth_l3_sys.o obj-$(CONFIG_QETH_L3) += qeth_l3.o diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c index 147bb1a69ab..d837c3c5330 100644 --- a/drivers/s390/net/claw.c +++ b/drivers/s390/net/claw.c @@ -1,5 +1,4 @@ /* - * drivers/s390/net/claw.c * ESCON CLAW network driver * * Linux for zSeries version @@ -136,7 +135,6 @@ static inline void claw_set_busy(struct net_device *dev) { ((struct claw_privbk *)dev->ml_priv)->tbusy = 1; - eieio(); } static inline void @@ -144,13 +142,11 @@ claw_clear_busy(struct net_device *dev) { clear_bit(0, &(((struct claw_privbk *) dev->ml_priv)->tbusy)); netif_wake_queue(dev); - eieio(); } static inline int claw_check_busy(struct net_device *dev) { - eieio(); return ((struct claw_privbk *) dev->ml_priv)->tbusy; } @@ -233,8 +229,6 @@ static ssize_t claw_rbuff_show(struct device *dev, static ssize_t claw_rbuff_write(struct device *dev, struct device_attribute *attr, const char *buf, size_t count); -static int claw_add_files(struct device *dev); -static void claw_remove_files(struct device *dev); /* Functions for System Validate */ static int claw_process_control( struct net_device *dev, struct ccwbk * p_ccw); @@ -263,14 +257,14 @@ static struct device *claw_root_dev; /* ccwgroup table */ static struct ccwgroup_driver claw_group_driver = { - .owner = THIS_MODULE, - .name = "claw", - .max_slaves = 2, - .driver_id = 0xC3D3C1E6, - .probe = claw_probe, - .remove = claw_remove_device, - .set_online = claw_new_device, - .set_offline = claw_shutdown_device, + .driver = { + .owner = THIS_MODULE, + .name = "claw", + }, + .setup = claw_probe, + .remove = claw_remove_device, + .set_online = claw_new_device, + .set_offline = claw_shutdown_device, .prepare = claw_pm_prepare, }; @@ -281,37 +275,34 @@ static struct ccw_device_id claw_ids[] = { MODULE_DEVICE_TABLE(ccw, claw_ids); static struct ccw_driver claw_ccw_driver = { - .owner = THIS_MODULE, - .name = "claw", + .driver = { + .owner = THIS_MODULE, + .name = "claw", + }, .ids = claw_ids, .probe = ccwgroup_probe_ccwdev, .remove = ccwgroup_remove_ccwdev, + .int_class = IRQIO_CLW, }; -static ssize_t -claw_driver_group_store(struct device_driver *ddrv, const char *buf, - size_t count) +static ssize_t claw_driver_group_store(struct device_driver *ddrv, + const char *buf, size_t count) { int err; - err = ccwgroup_create_from_string(claw_root_dev, - claw_group_driver.driver_id, - &claw_ccw_driver, 3, buf); + err = ccwgroup_create_dev(claw_root_dev, &claw_group_driver, 2, buf); return err ? 
err : count; } - static DRIVER_ATTR(group, 0200, NULL, claw_driver_group_store); -static struct attribute *claw_group_attrs[] = { +static struct attribute *claw_drv_attrs[] = { &driver_attr_group.attr, NULL, }; - -static struct attribute_group claw_group_attr_group = { - .attrs = claw_group_attrs, +static struct attribute_group claw_drv_attr_group = { + .attrs = claw_drv_attrs, }; - -static const struct attribute_group *claw_group_attr_groups[] = { - &claw_group_attr_group, +static const struct attribute_group *claw_drv_attr_groups[] = { + &claw_drv_attr_group, NULL, }; @@ -319,60 +310,6 @@ static const struct attribute_group *claw_group_attr_groups[] = { * Key functions */ -/*----------------------------------------------------------------* - * claw_probe * - * this function is called for each CLAW device. * - *----------------------------------------------------------------*/ -static int -claw_probe(struct ccwgroup_device *cgdev) -{ - int rc; - struct claw_privbk *privptr=NULL; - - CLAW_DBF_TEXT(2, setup, "probe"); - if (!get_device(&cgdev->dev)) - return -ENODEV; - privptr = kzalloc(sizeof(struct claw_privbk), GFP_KERNEL); - dev_set_drvdata(&cgdev->dev, privptr); - if (privptr == NULL) { - probe_error(cgdev); - put_device(&cgdev->dev); - CLAW_DBF_TEXT_(2, setup, "probex%d", -ENOMEM); - return -ENOMEM; - } - privptr->p_mtc_envelope= kzalloc( MAX_ENVELOPE_SIZE, GFP_KERNEL); - privptr->p_env = kzalloc(sizeof(struct claw_env), GFP_KERNEL); - if ((privptr->p_mtc_envelope==NULL) || (privptr->p_env==NULL)) { - probe_error(cgdev); - put_device(&cgdev->dev); - CLAW_DBF_TEXT_(2, setup, "probex%d", -ENOMEM); - return -ENOMEM; - } - memcpy(privptr->p_env->adapter_name,WS_NAME_NOT_DEF,8); - memcpy(privptr->p_env->host_name,WS_NAME_NOT_DEF,8); - memcpy(privptr->p_env->api_type,WS_NAME_NOT_DEF,8); - privptr->p_env->packing = 0; - privptr->p_env->write_buffers = 5; - privptr->p_env->read_buffers = 5; - privptr->p_env->read_size = CLAW_FRAME_SIZE; - privptr->p_env->write_size = CLAW_FRAME_SIZE; - rc = claw_add_files(&cgdev->dev); - if (rc) { - probe_error(cgdev); - put_device(&cgdev->dev); - dev_err(&cgdev->dev, "Creating the /proc files for a new" - " CLAW device failed\n"); - CLAW_DBF_TEXT_(2, setup, "probex%d", rc); - return rc; - } - privptr->p_env->p_priv = privptr; - cgdev->cdev[0]->handler = claw_irq_handler; - cgdev->cdev[1]->handler = claw_irq_handler; - CLAW_DBF_TEXT(2, setup, "prbext 0"); - - return 0; -} /* end of claw_probe */ - /*-------------------------------------------------------------------* * claw_tx * *-------------------------------------------------------------------*/ @@ -386,7 +323,7 @@ claw_tx(struct sk_buff *skb, struct net_device *dev) struct chbk *p_ch; CLAW_DBF_TEXT(4, trace, "claw_tx"); - p_ch=&privptr->channel[WRITE]; + p_ch = &privptr->channel[WRITE_CHANNEL]; spin_lock_irqsave(get_ccwdev_lock(p_ch->cdev), saveflags); rc=claw_hw_tx( skb, dev, 1 ); spin_unlock_irqrestore(get_ccwdev_lock(p_ch->cdev), saveflags); @@ -407,7 +344,7 @@ static struct sk_buff * claw_pack_skb(struct claw_privbk *privptr) { struct sk_buff *new_skb,*held_skb; - struct chbk *p_ch = &privptr->channel[WRITE]; + struct chbk *p_ch = &privptr->channel[WRITE_CHANNEL]; struct claw_env *p_env = privptr->p_env; int pkt_cnt,pk_ind,so_far; @@ -515,15 +452,15 @@ claw_open(struct net_device *dev) privptr->p_env->write_size=CLAW_FRAME_SIZE; } claw_set_busy(dev); - tasklet_init(&privptr->channel[READ].tasklet, claw_irq_tasklet, - (unsigned long) &privptr->channel[READ]); + 
tasklet_init(&privptr->channel[READ_CHANNEL].tasklet, claw_irq_tasklet, + (unsigned long) &privptr->channel[READ_CHANNEL]); for ( i = 0; i < 2; i++) { CLAW_DBF_TEXT_(2, trace, "opn_ch%d", i); init_waitqueue_head(&privptr->channel[i].wait); /* skb_queue_head_init(&p_ch->io_queue); */ - if (i == WRITE) + if (i == WRITE_CHANNEL) skb_queue_head_init( - &privptr->channel[WRITE].collect_queue); + &privptr->channel[WRITE_CHANNEL].collect_queue); privptr->channel[i].flag_a = 0; privptr->channel[i].IO_active = 0; privptr->channel[i].flag &= ~CLAW_TIMER; @@ -551,12 +488,12 @@ claw_open(struct net_device *dev) if((privptr->channel[i].flag & CLAW_TIMER) == 0x00) del_timer(&timer); } - if ((((privptr->channel[READ].last_dstat | - privptr->channel[WRITE].last_dstat) & + if ((((privptr->channel[READ_CHANNEL].last_dstat | + privptr->channel[WRITE_CHANNEL].last_dstat) & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) != 0x00) || - (((privptr->channel[READ].flag | - privptr->channel[WRITE].flag) & CLAW_TIMER) != 0x00)) { - dev_info(&privptr->channel[READ].cdev->dev, + (((privptr->channel[READ_CHANNEL].flag | + privptr->channel[WRITE_CHANNEL].flag) & CLAW_TIMER) != 0x00)) { + dev_info(&privptr->channel[READ_CHANNEL].cdev->dev, "%s: remote side is not ready\n", dev->name); CLAW_DBF_TEXT(2, trace, "notrdy"); @@ -608,8 +545,8 @@ claw_open(struct net_device *dev) } } privptr->buffs_alloc = 0; - privptr->channel[READ].flag= 0x00; - privptr->channel[WRITE].flag = 0x00; + privptr->channel[READ_CHANNEL].flag = 0x00; + privptr->channel[WRITE_CHANNEL].flag = 0x00; privptr->p_buff_ccw=NULL; privptr->p_buff_read=NULL; privptr->p_buff_write=NULL; @@ -652,10 +589,10 @@ claw_irq_handler(struct ccw_device *cdev, } /* Try to extract channel from driver data. */ - if (privptr->channel[READ].cdev == cdev) - p_ch = &privptr->channel[READ]; - else if (privptr->channel[WRITE].cdev == cdev) - p_ch = &privptr->channel[WRITE]; + if (privptr->channel[READ_CHANNEL].cdev == cdev) + p_ch = &privptr->channel[READ_CHANNEL]; + else if (privptr->channel[WRITE_CHANNEL].cdev == cdev) + p_ch = &privptr->channel[WRITE_CHANNEL]; else { dev_warn(&cdev->dev, "The device is not a CLAW device\n"); CLAW_DBF_TEXT(2, trace, "badchan"); @@ -773,7 +710,7 @@ claw_irq_handler(struct ccw_device *cdev, case CLAW_START_WRITE: if (p_ch->irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) { dev_info(&cdev->dev, - "%s: Unit Check Occured in " + "%s: Unit Check Occurred in " "write channel\n", dev->name); clear_bit(0, (void *)&p_ch->IO_active); if (p_ch->irb->ecw[0] & 0x80) { @@ -813,7 +750,7 @@ claw_irq_handler(struct ccw_device *cdev, claw_clearbit_busy(TB_TX, dev); claw_clear_busy(dev); } - p_ch_r = (struct chbk *)&privptr->channel[READ]; + p_ch_r = (struct chbk *)&privptr->channel[READ_CHANNEL]; if (test_and_set_bit(CLAW_BH_ACTIVE, (void *)&p_ch_r->flag_a) == 0) tasklet_schedule(&p_ch_r->tasklet); @@ -839,12 +776,10 @@ claw_irq_tasklet ( unsigned long data ) { struct chbk * p_ch; struct net_device *dev; - struct claw_privbk * privptr; p_ch = (struct chbk *) data; dev = (struct net_device *)p_ch->ndev; CLAW_DBF_TEXT(4, trace, "IRQtask"); - privptr = (struct claw_privbk *)dev->ml_priv; unpack_read(dev); clear_bit(CLAW_BH_ACTIVE, (void *)&p_ch->flag_a); CLAW_DBF_TEXT(4, trace, "TskletXt"); @@ -878,13 +813,13 @@ claw_release(struct net_device *dev) for ( i = 1; i >=0 ; i--) { spin_lock_irqsave( get_ccwdev_lock(privptr->channel[i].cdev), saveflags); - /* del_timer(&privptr->channel[READ].timer); */ + /* del_timer(&privptr->channel[READ_CHANNEL].timer); */ 
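The tasklet_init() hunk above only renames the channel index; the deferred-receive scheme itself is untouched: claw_open() binds a tasklet to the read channel, claw_irq_handler() merely schedules it, and claw_irq_tasklet() then runs unpack_read() outside interrupt context. A stripped-down sketch of that idiom with the classic tasklet API (the demo_* names are hypothetical, not taken from claw.c):

#include <linux/interrupt.h>

/* softirq context: the heavy per-buffer work lives here, cf. unpack_read() */
static void demo_rx_bh(unsigned long data)
{
}

static DECLARE_TASKLET(demo_rx_tasklet, demo_rx_bh, 0);

/* interrupt context: keep it short, defer the rest */
static void demo_irq_path(void)
{
	tasklet_schedule(&demo_rx_tasklet);
}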
privptr->channel[i].claw_state = CLAW_STOP; privptr->channel[i].IO_active = 0; parm = (unsigned long) &privptr->channel[i]; - if (i == WRITE) + if (i == WRITE_CHANNEL) claw_purge_skb_queue( - &privptr->channel[WRITE].collect_queue); + &privptr->channel[WRITE_CHANNEL].collect_queue); rc = ccw_device_halt (privptr->channel[i].cdev, parm); if (privptr->system_validate_comp==0x00) /* never opened? */ init_waitqueue_head(&privptr->channel[i].wait); @@ -971,16 +906,16 @@ claw_release(struct net_device *dev) privptr->mtc_skipping = 1; privptr->mtc_offset=0; - if (((privptr->channel[READ].last_dstat | - privptr->channel[WRITE].last_dstat) & + if (((privptr->channel[READ_CHANNEL].last_dstat | + privptr->channel[WRITE_CHANNEL].last_dstat) & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) != 0x00) { - dev_warn(&privptr->channel[READ].cdev->dev, + dev_warn(&privptr->channel[READ_CHANNEL].cdev->dev, "Deactivating %s completed with incorrect" " subchannel status " "(read %02x, write %02x)\n", dev->name, - privptr->channel[READ].last_dstat, - privptr->channel[WRITE].last_dstat); + privptr->channel[READ_CHANNEL].last_dstat, + privptr->channel[WRITE_CHANNEL].last_dstat); CLAW_DBF_TEXT(2, trace, "badclose"); } CLAW_DBF_TEXT(4, trace, "rlsexit"); @@ -1020,7 +955,6 @@ claw_write_next ( struct chbk * p_ch ) struct net_device *dev; struct claw_privbk *privptr=NULL; struct sk_buff *pk_skb; - int rc; CLAW_DBF_TEXT(4, trace, "claw_wrt"); if (p_ch->claw_state == CLAW_STOP) @@ -1032,7 +966,7 @@ claw_write_next ( struct chbk * p_ch ) !skb_queue_empty(&p_ch->collect_queue)) { pk_skb = claw_pack_skb(privptr); while (pk_skb != NULL) { - rc = claw_hw_tx( pk_skb, dev,1); + claw_hw_tx(pk_skb, dev, 1); if (privptr->write_free_count > 0) { pk_skb = claw_pack_skb(privptr); } else @@ -1316,15 +1250,12 @@ claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid) unsigned char *pDataAddress; struct endccw *pEnd; struct ccw1 tempCCW; - struct chbk *p_ch; struct claw_env *p_env; - int lock; struct clawph *pk_head; struct chbk *ch; CLAW_DBF_TEXT(4, trace, "hw_tx"); privptr = (struct claw_privbk *)(dev->ml_priv); - p_ch=(struct chbk *)&privptr->channel[WRITE]; p_env =privptr->p_env; claw_free_wrt_buf(dev); /* Clean up free chain if posible */ /* scan the write queue to free any completed write packets */ @@ -1357,7 +1288,7 @@ claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid) claw_strt_out_IO(dev ); claw_free_wrt_buf( dev ); if (privptr->write_free_count==0) { - ch = &privptr->channel[WRITE]; + ch = &privptr->channel[WRITE_CHANNEL]; atomic_inc(&skb->users); skb_queue_tail(&ch->collect_queue, skb); goto Done; @@ -1369,7 +1300,7 @@ claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid) } /* tx lock */ if (claw_test_and_setbit_busy(TB_TX,dev)) { /* set to busy */ - ch = &privptr->channel[WRITE]; + ch = &privptr->channel[WRITE_CHANNEL]; atomic_inc(&skb->users); skb_queue_tail(&ch->collect_queue, skb); claw_strt_out_IO(dev ); @@ -1385,7 +1316,7 @@ claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid) privptr->p_write_free_chain == NULL ) { claw_setbit_busy(TB_NOBUFFER,dev); - ch = &privptr->channel[WRITE]; + ch = &privptr->channel[WRITE_CHANNEL]; atomic_inc(&skb->users); skb_queue_tail(&ch->collect_queue, skb); CLAW_DBF_TEXT(2, trace, "clawbusy"); @@ -1397,7 +1328,7 @@ claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid) while (len_of_data > 0) { p_this_ccw=privptr->p_write_free_chain; /* get a block */ if (p_this_ccw == NULL) { /* lost the race */ - ch = 
&privptr->channel[WRITE]; + ch = &privptr->channel[WRITE_CHANNEL]; atomic_inc(&skb->users); skb_queue_tail(&ch->collect_queue, skb); goto Done2; @@ -1505,12 +1436,6 @@ claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid) } /* endif (p_first_ccw!=NULL) */ dev_kfree_skb_any(skb); - if (linkid==0) { - lock=LOCK_NO; - } - else { - lock=LOCK_YES; - } claw_strt_out_IO(dev ); /* if write free count is zero , set NOBUFFER */ if (privptr->write_free_count==0) { @@ -2067,7 +1992,7 @@ claw_process_control( struct net_device *dev, struct ccwbk * p_ccw) *catch up to each other */ privptr = dev->ml_priv; p_env=privptr->p_env; - tdev = &privptr->channel[READ].cdev->dev; + tdev = &privptr->channel[READ_CHANNEL].cdev->dev; memcpy( &temp_host_name, p_env->host_name, 8); memcpy( &temp_ws_name, p_env->adapter_name , 8); dev_info(tdev, "%s: CLAW device %.8s: " @@ -2245,7 +2170,7 @@ claw_process_control( struct net_device *dev, struct ccwbk * p_ccw) dev->name, temp_ws_name, p_ctlbk->linkid); privptr->active_link_ID = p_ctlbk->linkid; - p_ch = &privptr->channel[WRITE]; + p_ch = &privptr->channel[WRITE_CHANNEL]; wake_up(&p_ch->wait); /* wake up claw_open ( WRITE) */ break; case CONNECTION_RESPONSE: @@ -2296,7 +2221,7 @@ claw_process_control( struct net_device *dev, struct ccwbk * p_ccw) "%s: Confirmed Now packing\n", dev->name); p_env->packing = DO_PACKED; } - p_ch = &privptr->channel[WRITE]; + p_ch = &privptr->channel[WRITE_CHANNEL]; wake_up(&p_ch->wait); } else { dev_warn(tdev, "Activating %s failed because of" @@ -2556,7 +2481,7 @@ unpack_read(struct net_device *dev ) p_packd=NULL; privptr = dev->ml_priv; - p_dev = &privptr->channel[READ].cdev->dev; + p_dev = &privptr->channel[READ_CHANNEL].cdev->dev; p_env = privptr->p_env; p_this_ccw=privptr->p_read_active_first; while (p_this_ccw!=NULL && p_this_ccw->header.flag!=CLAW_PENDING) { @@ -2728,7 +2653,7 @@ claw_strt_read (struct net_device *dev, int lock ) struct ccwbk*p_ccwbk; struct chbk *p_ch; struct clawh *p_clawh; - p_ch=&privptr->channel[READ]; + p_ch = &privptr->channel[READ_CHANNEL]; CLAW_DBF_TEXT(4, trace, "StRdNter"); p_clawh=(struct clawh *)privptr->p_claw_signal_blk; @@ -2782,7 +2707,7 @@ claw_strt_out_IO( struct net_device *dev ) return; } privptr = (struct claw_privbk *)dev->ml_priv; - p_ch=&privptr->channel[WRITE]; + p_ch = &privptr->channel[WRITE_CHANNEL]; CLAW_DBF_TEXT(4, trace, "strt_io"); p_first_ccw=privptr->p_write_active_first; @@ -2815,15 +2740,11 @@ claw_free_wrt_buf( struct net_device *dev ) { struct claw_privbk *privptr = (struct claw_privbk *)dev->ml_priv; - struct ccwbk*p_first_ccw; - struct ccwbk*p_last_ccw; struct ccwbk*p_this_ccw; struct ccwbk*p_next_ccw; CLAW_DBF_TEXT(4, trace, "freewrtb"); /* scan the write queue to free any completed write packets */ - p_first_ccw=NULL; - p_last_ccw=NULL; p_this_ccw=privptr->p_write_active_first; while ( (p_this_ccw!=NULL) && (p_this_ccw->header.flag!=CLAW_PENDING)) { @@ -2875,7 +2796,7 @@ claw_free_netdevice(struct net_device * dev, int free_dev) if (dev->flags & IFF_RUNNING) claw_release(dev); if (privptr) { - privptr->channel[READ].ndev = NULL; /* say it's free */ + privptr->channel[READ_CHANNEL].ndev = NULL; /* say it's free */ } dev->ml_priv = NULL; #ifdef MODULE @@ -2960,18 +2881,18 @@ claw_new_device(struct ccwgroup_device *cgdev) struct ccw_dev_id dev_id; dev_info(&cgdev->dev, "add for %s\n", - dev_name(&cgdev->cdev[READ]->dev)); + dev_name(&cgdev->cdev[READ_CHANNEL]->dev)); CLAW_DBF_TEXT(2, setup, "new_dev"); privptr = dev_get_drvdata(&cgdev->dev); - 
dev_set_drvdata(&cgdev->cdev[READ]->dev, privptr); - dev_set_drvdata(&cgdev->cdev[WRITE]->dev, privptr); + dev_set_drvdata(&cgdev->cdev[READ_CHANNEL]->dev, privptr); + dev_set_drvdata(&cgdev->cdev[WRITE_CHANNEL]->dev, privptr); if (!privptr) return -ENODEV; p_env = privptr->p_env; - ccw_device_get_id(cgdev->cdev[READ], &dev_id); - p_env->devno[READ] = dev_id.devno; - ccw_device_get_id(cgdev->cdev[WRITE], &dev_id); - p_env->devno[WRITE] = dev_id.devno; + ccw_device_get_id(cgdev->cdev[READ_CHANNEL], &dev_id); + p_env->devno[READ_CHANNEL] = dev_id.devno; + ccw_device_get_id(cgdev->cdev[WRITE_CHANNEL], &dev_id); + p_env->devno[WRITE_CHANNEL] = dev_id.devno; ret = add_channel(cgdev->cdev[0],0,privptr); if (ret == 0) ret = add_channel(cgdev->cdev[1],1,privptr); @@ -2980,14 +2901,14 @@ claw_new_device(struct ccwgroup_device *cgdev) " failed with error code %d\n", ret); goto out; } - ret = ccw_device_set_online(cgdev->cdev[READ]); + ret = ccw_device_set_online(cgdev->cdev[READ_CHANNEL]); if (ret != 0) { dev_warn(&cgdev->dev, "Setting the read subchannel online" " failed with error code %d\n", ret); goto out; } - ret = ccw_device_set_online(cgdev->cdev[WRITE]); + ret = ccw_device_set_online(cgdev->cdev[WRITE_CHANNEL]); if (ret != 0) { dev_warn(&cgdev->dev, "Setting the write subchannel online " @@ -3002,8 +2923,8 @@ claw_new_device(struct ccwgroup_device *cgdev) } dev->ml_priv = privptr; dev_set_drvdata(&cgdev->dev, privptr); - dev_set_drvdata(&cgdev->cdev[READ]->dev, privptr); - dev_set_drvdata(&cgdev->cdev[WRITE]->dev, privptr); + dev_set_drvdata(&cgdev->cdev[READ_CHANNEL]->dev, privptr); + dev_set_drvdata(&cgdev->cdev[WRITE_CHANNEL]->dev, privptr); /* sysfs magic */ SET_NETDEV_DEV(dev, &cgdev->dev); if (register_netdev(dev) != 0) { @@ -3021,16 +2942,16 @@ claw_new_device(struct ccwgroup_device *cgdev) goto out; } } - privptr->channel[READ].ndev = dev; - privptr->channel[WRITE].ndev = dev; + privptr->channel[READ_CHANNEL].ndev = dev; + privptr->channel[WRITE_CHANNEL].ndev = dev; privptr->p_env->ndev = dev; dev_info(&cgdev->dev, "%s:readsize=%d writesize=%d " "readbuffer=%d writebuffer=%d read=0x%04x write=0x%04x\n", dev->name, p_env->read_size, p_env->write_size, p_env->read_buffers, - p_env->write_buffers, p_env->devno[READ], - p_env->devno[WRITE]); + p_env->write_buffers, p_env->devno[READ_CHANNEL], + p_env->devno[WRITE_CHANNEL]); dev_info(&cgdev->dev, "%s:host_name:%.8s, adapter_name " ":%.8s api_type: %.8s\n", dev->name, p_env->host_name, @@ -3066,16 +2987,16 @@ claw_shutdown_device(struct ccwgroup_device *cgdev) { struct claw_privbk *priv; struct net_device *ndev; - int ret; + int ret = 0; CLAW_DBF_TEXT_(2, setup, "%s", dev_name(&cgdev->dev)); priv = dev_get_drvdata(&cgdev->dev); if (!priv) return -ENODEV; - ndev = priv->channel[READ].ndev; + ndev = priv->channel[READ_CHANNEL].ndev; if (ndev) { /* Close the device */ - dev_info(&cgdev->dev, "%s: shutting down \n", + dev_info(&cgdev->dev, "%s: shutting down\n", ndev->name); if (ndev->flags & IFF_RUNNING) ret = claw_release(ndev); @@ -3083,13 +3004,13 @@ claw_shutdown_device(struct ccwgroup_device *cgdev) unregister_netdev(ndev); ndev->ml_priv = NULL; /* cgdev data, not ndev's to free */ claw_free_netdevice(ndev, 1); - priv->channel[READ].ndev = NULL; - priv->channel[WRITE].ndev = NULL; + priv->channel[READ_CHANNEL].ndev = NULL; + priv->channel[WRITE_CHANNEL].ndev = NULL; priv->p_env->ndev = NULL; } ccw_device_set_offline(cgdev->cdev[1]); ccw_device_set_offline(cgdev->cdev[0]); - return 0; + return ret; } static void @@ -3097,14 +3018,11 @@ 
claw_remove_device(struct ccwgroup_device *cgdev) { struct claw_privbk *priv; - BUG_ON(!cgdev); CLAW_DBF_TEXT_(2, setup, "%s", dev_name(&cgdev->dev)); priv = dev_get_drvdata(&cgdev->dev); - BUG_ON(!priv); dev_info(&cgdev->dev, " will be removed.\n"); if (cgdev->state == CCWGROUP_ONLINE) claw_shutdown_device(cgdev); - claw_remove_files(&cgdev->dev); kfree(priv->p_mtc_envelope); priv->p_mtc_envelope=NULL; kfree(priv->p_env); @@ -3115,8 +3033,8 @@ claw_remove_device(struct ccwgroup_device *cgdev) priv->channel[1].irb=NULL; kfree(priv); dev_set_drvdata(&cgdev->dev, NULL); - dev_set_drvdata(&cgdev->cdev[READ]->dev, NULL); - dev_set_drvdata(&cgdev->cdev[WRITE]->dev, NULL); + dev_set_drvdata(&cgdev->cdev[READ_CHANNEL]->dev, NULL); + dev_set_drvdata(&cgdev->cdev[WRITE_CHANNEL]->dev, NULL); put_device(&cgdev->dev); return; @@ -3332,7 +3250,6 @@ claw_rbuff_write(struct device *dev, struct device_attribute *attr, CLAW_DBF_TEXT_(2, setup, "RB=%d", p_env->read_buffers); return count; } - static DEVICE_ATTR(read_buffer, 0644, claw_rbuff_show, claw_rbuff_write); static struct attribute *claw_attr[] = { @@ -3343,40 +3260,73 @@ static struct attribute *claw_attr[] = { &dev_attr_host_name.attr, NULL, }; - static struct attribute_group claw_attr_group = { .attrs = claw_attr, }; +static const struct attribute_group *claw_attr_groups[] = { + &claw_attr_group, + NULL, +}; +static const struct device_type claw_devtype = { + .name = "claw", + .groups = claw_attr_groups, +}; -static int -claw_add_files(struct device *dev) +/*----------------------------------------------------------------* + * claw_probe * + * this function is called for each CLAW device. * + *----------------------------------------------------------------*/ +static int claw_probe(struct ccwgroup_device *cgdev) { - CLAW_DBF_TEXT(2, setup, "add_file"); - return sysfs_create_group(&dev->kobj, &claw_attr_group); -} + struct claw_privbk *privptr = NULL; -static void -claw_remove_files(struct device *dev) -{ - CLAW_DBF_TEXT(2, setup, "rem_file"); - sysfs_remove_group(&dev->kobj, &claw_attr_group); -} + CLAW_DBF_TEXT(2, setup, "probe"); + if (!get_device(&cgdev->dev)) + return -ENODEV; + privptr = kzalloc(sizeof(struct claw_privbk), GFP_KERNEL); + dev_set_drvdata(&cgdev->dev, privptr); + if (privptr == NULL) { + probe_error(cgdev); + put_device(&cgdev->dev); + CLAW_DBF_TEXT_(2, setup, "probex%d", -ENOMEM); + return -ENOMEM; + } + privptr->p_mtc_envelope = kzalloc(MAX_ENVELOPE_SIZE, GFP_KERNEL); + privptr->p_env = kzalloc(sizeof(struct claw_env), GFP_KERNEL); + if ((privptr->p_mtc_envelope == NULL) || (privptr->p_env == NULL)) { + probe_error(cgdev); + put_device(&cgdev->dev); + CLAW_DBF_TEXT_(2, setup, "probex%d", -ENOMEM); + return -ENOMEM; + } + memcpy(privptr->p_env->adapter_name, WS_NAME_NOT_DEF, 8); + memcpy(privptr->p_env->host_name, WS_NAME_NOT_DEF, 8); + memcpy(privptr->p_env->api_type, WS_NAME_NOT_DEF, 8); + privptr->p_env->packing = 0; + privptr->p_env->write_buffers = 5; + privptr->p_env->read_buffers = 5; + privptr->p_env->read_size = CLAW_FRAME_SIZE; + privptr->p_env->write_size = CLAW_FRAME_SIZE; + privptr->p_env->p_priv = privptr; + cgdev->cdev[0]->handler = claw_irq_handler; + cgdev->cdev[1]->handler = claw_irq_handler; + cgdev->dev.type = &claw_devtype; + CLAW_DBF_TEXT(2, setup, "prbext 0"); + + return 0; +} /* end of claw_probe */ /*--------------------------------------------------------------------* * claw_init and cleanup * *---------------------------------------------------------------------*/ -static void __exit 
-claw_cleanup(void) +static void __exit claw_cleanup(void) { - driver_remove_file(&claw_group_driver.driver, - &driver_attr_group); ccwgroup_driver_unregister(&claw_group_driver); ccw_driver_unregister(&claw_ccw_driver); root_device_unregister(claw_root_dev); claw_unregister_debug_facility(); pr_info("Driver unloaded\n"); - } /** @@ -3385,8 +3335,7 @@ claw_cleanup(void) * * @return 0 on success, !0 on error. */ -static int __init -claw_init(void) +static int __init claw_init(void) { int ret = 0; @@ -3399,13 +3348,13 @@ claw_init(void) } CLAW_DBF_TEXT(2, setup, "init_mod"); claw_root_dev = root_device_register("claw"); - ret = IS_ERR(claw_root_dev) ? PTR_ERR(claw_root_dev) : 0; + ret = PTR_ERR_OR_ZERO(claw_root_dev); if (ret) goto register_err; ret = ccw_driver_register(&claw_ccw_driver); if (ret) goto ccw_err; - claw_group_driver.driver.groups = claw_group_attr_groups; + claw_group_driver.driver.groups = claw_drv_attr_groups; ret = ccwgroup_driver_register(&claw_group_driver); if (ret) goto ccwgroup_err; @@ -3428,5 +3377,5 @@ module_exit(claw_cleanup); MODULE_AUTHOR("Andy Richter <richtera@us.ibm.com>"); MODULE_DESCRIPTION("Linux for System z CLAW Driver\n" \ - "Copyright 2000,2008 IBM Corporation\n"); + "Copyright IBM Corp. 2000, 2008\n"); MODULE_LICENSE("GPL"); diff --git a/drivers/s390/net/claw.h b/drivers/s390/net/claw.h index 46d59a13db1..3339b9b607b 100644 --- a/drivers/s390/net/claw.h +++ b/drivers/s390/net/claw.h @@ -74,8 +74,8 @@ #define MAX_ENVELOPE_SIZE 65536 #define CLAW_DEFAULT_MTU_SIZE 4096 #define DEF_PACK_BUFSIZE 32768 -#define READ 0 -#define WRITE 1 +#define READ_CHANNEL 0 +#define WRITE_CHANNEL 1 #define TB_TX 0 /* sk buffer handling in process */ #define TB_STOP 1 /* network device stop in process */ @@ -114,15 +114,9 @@ do { \ debug_event(claw_dbf_##name,level,(void*)(addr),len); \ } while (0) -/* Allow to sort out low debug levels early to avoid wasted sprints */ -static inline int claw_dbf_passes(debug_info_t *dbf_grp, int level) -{ - return (level <= dbf_grp->level); -} - #define CLAW_DBF_TEXT_(level,name,text...) \ do { \ - if (claw_dbf_passes(claw_dbf_##name, level)) { \ + if (debug_level_enabled(claw_dbf_##name, level)) { \ sprintf(debug_buffer, text); \ debug_text_event(claw_dbf_##name, level, \ debug_buffer); \ diff --git a/drivers/s390/net/ctcm_dbug.c b/drivers/s390/net/ctcm_dbug.c index 1ca58f15347..8363f1c966e 100644 --- a/drivers/s390/net/ctcm_dbug.c +++ b/drivers/s390/net/ctcm_dbug.c @@ -1,6 +1,4 @@ /* - * drivers/s390/net/ctcm_dbug.c - * * Copyright IBM Corp. 2001, 2007 * Authors: Peter Tiedemann (ptiedem@de.ibm.com) * @@ -10,7 +8,6 @@ #include <linux/string.h> #include <linux/kernel.h> #include <linux/errno.h> -#include <linux/slab.h> #include <linux/ctype.h> #include <linux/sysctl.h> #include <linux/module.h> @@ -69,7 +66,7 @@ void ctcm_dbf_longtext(enum ctcm_dbf_names dbf_nix, int level, char *fmt, ...) char dbf_txt_buf[64]; va_list args; - if (level > (ctcm_dbf[dbf_nix].id)->level) + if (!debug_level_enabled(ctcm_dbf[dbf_nix].id, level)) return; va_start(args, fmt); vsnprintf(dbf_txt_buf, sizeof(dbf_txt_buf), fmt, args); diff --git a/drivers/s390/net/ctcm_dbug.h b/drivers/s390/net/ctcm_dbug.h index 26966d0b9ab..47bf0501995 100644 --- a/drivers/s390/net/ctcm_dbug.h +++ b/drivers/s390/net/ctcm_dbug.h @@ -1,6 +1,4 @@ /* - * drivers/s390/net/ctcm_dbug.h - * * Copyright IBM Corp. 
2001, 2007 * Authors: Peter Tiedemann (ptiedem@de.ibm.com) * diff --git a/drivers/s390/net/ctcm_fsms.c b/drivers/s390/net/ctcm_fsms.c index 70eb7f13841..fb92524d24e 100644 --- a/drivers/s390/net/ctcm_fsms.c +++ b/drivers/s390/net/ctcm_fsms.c @@ -1,6 +1,4 @@ /* - * drivers/s390/net/ctcm_fsms.c - * * Copyright IBM Corp. 2001, 2007 * Authors: Fritz Elfert (felfert@millenux.com) * Peter Tiedemann (ptiedem@de.ibm.com) @@ -184,7 +182,7 @@ static void ctcmpc_chx_resend(fsm_instance *, int, void *); static void ctcmpc_chx_send_sweep(fsm_instance *fsm, int event, void *arg); /** - * Check return code of a preceeding ccw_device call, halt_IO etc... + * Check return code of a preceding ccw_device call, halt_IO etc... * * ch : The channel, the error belongs to. * Returns the error code (!= 0) to inspect. @@ -454,7 +452,7 @@ static void chx_firstio(fsm_instance *fi, int event, void *arg) if ((fsmstate == CTC_STATE_SETUPWAIT) && (ch->protocol == CTCM_PROTO_OS390)) { /* OS/390 resp. z/OS */ - if (CHANNEL_DIRECTION(ch->flags) == READ) { + if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) { *((__u16 *)ch->trans_skb->data) = CTCM_INITIAL_BLOCKLEN; fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch); @@ -472,14 +470,14 @@ static void chx_firstio(fsm_instance *fi, int event, void *arg) * if in compatibility mode, since VM TCP delays the initial * frame until it has some data to send. */ - if ((CHANNEL_DIRECTION(ch->flags) == WRITE) || + if ((CHANNEL_DIRECTION(ch->flags) == CTCM_WRITE) || (ch->protocol != CTCM_PROTO_S390)) fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch); *((__u16 *)ch->trans_skb->data) = CTCM_INITIAL_BLOCKLEN; ch->ccw[1].count = 2; /* Transfer only length */ - fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == READ) + fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ? CTC_STATE_RXINIT : CTC_STATE_TXINIT); rc = ccw_device_start(ch->cdev, &ch->ccw[0], (unsigned long)ch, 0xff, 0); @@ -495,7 +493,7 @@ static void chx_firstio(fsm_instance *fi, int event, void *arg) * reply from VM TCP which brings up the RX channel to it's * final state. */ - if ((CHANNEL_DIRECTION(ch->flags) == READ) && + if ((CHANNEL_DIRECTION(ch->flags) == CTCM_READ) && (ch->protocol == CTCM_PROTO_S390)) { struct net_device *dev = ch->netdev; struct ctcm_priv *priv = dev->ml_priv; @@ -600,15 +598,15 @@ static void ctcm_chx_start(fsm_instance *fi, int event, void *arg) int rc; CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO, "%s(%s): %s", - CTCM_FUNTAIL, ch->id, - (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX"); + CTCM_FUNTAIL, ch->id, + (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ? "RX" : "TX"); if (ch->trans_skb != NULL) { clear_normalized_cda(&ch->ccw[1]); dev_kfree_skb(ch->trans_skb); ch->trans_skb = NULL; } - if (CHANNEL_DIRECTION(ch->flags) == READ) { + if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) { ch->ccw[1].cmd_code = CCW_CMD_READ; ch->ccw[1].flags = CCW_FLAG_SLI; ch->ccw[1].count = 0; @@ -622,7 +620,8 @@ static void ctcm_chx_start(fsm_instance *fi, int event, void *arg) "%s(%s): %s trans_skb alloc delayed " "until first transfer", CTCM_FUNTAIL, ch->id, - (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX"); + (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ? 
+ "RX" : "TX"); } ch->ccw[0].cmd_code = CCW_CMD_PREPARE; ch->ccw[0].flags = CCW_FLAG_SLI | CCW_FLAG_CC; @@ -720,7 +719,7 @@ static void ctcm_chx_cleanup(fsm_instance *fi, int state, ch->th_seg = 0x00; ch->th_seq_num = 0x00; - if (CHANNEL_DIRECTION(ch->flags) == READ) { + if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) { skb_queue_purge(&ch->io_queue); fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev); } else { @@ -799,7 +798,8 @@ static void ctcm_chx_setuperr(fsm_instance *fi, int event, void *arg) fsm_newstate(fi, CTC_STATE_STARTRETRY); fsm_deltimer(&ch->timer); fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch); - if (!IS_MPC(ch) && (CHANNEL_DIRECTION(ch->flags) == READ)) { + if (!IS_MPC(ch) && + (CHANNEL_DIRECTION(ch->flags) == CTCM_READ)) { int rc = ccw_device_halt(ch->cdev, (unsigned long)ch); if (rc != 0) ctcm_ccw_check_rc(ch, rc, @@ -811,10 +811,10 @@ static void ctcm_chx_setuperr(fsm_instance *fi, int event, void *arg) CTCM_DBF_TEXT_(ERROR, CTC_DBF_CRIT, "%s(%s) : %s error during %s channel setup state=%s\n", CTCM_FUNTAIL, dev->name, ctc_ch_event_names[event], - (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX", + (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ? "RX" : "TX", fsm_getstate_str(fi)); - if (CHANNEL_DIRECTION(ch->flags) == READ) { + if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) { fsm_newstate(fi, CTC_STATE_RXERR); fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev); } else { @@ -945,7 +945,7 @@ static void ctcm_chx_rxdisc(fsm_instance *fi, int event, void *arg) fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev); fsm_newstate(fi, CTC_STATE_DTERM); - ch2 = priv->channel[WRITE]; + ch2 = priv->channel[CTCM_WRITE]; fsm_newstate(ch2->fsm, CTC_STATE_DTERM); ccw_device_halt(ch->cdev, (unsigned long)ch); @@ -1074,13 +1074,13 @@ static void ctcm_chx_iofatal(fsm_instance *fi, int event, void *arg) fsm_deltimer(&ch->timer); CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR, "%s: %s: %s unrecoverable channel error", - CTCM_FUNTAIL, ch->id, rd == READ ? "RX" : "TX"); + CTCM_FUNTAIL, ch->id, rd == CTCM_READ ? 
"RX" : "TX"); if (IS_MPC(ch)) { priv->stats.tx_dropped++; priv->stats.tx_errors++; } - if (rd == READ) { + if (rd == CTCM_READ) { fsm_newstate(fi, CTC_STATE_RXERR); fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev); } else { @@ -1339,6 +1339,12 @@ static void ctcmpc_chx_txdone(fsm_instance *fi, int event, void *arg) spin_unlock(&ch->collect_lock); clear_normalized_cda(&ch->ccw[1]); + + CTCM_PR_DBGDATA("ccwcda=0x%p data=0x%p\n", + (void *)(unsigned long)ch->ccw[1].cda, + ch->trans_skb->data); + ch->ccw[1].count = ch->max_bufsize; + if (set_normalized_cda(&ch->ccw[1], ch->trans_skb->data)) { dev_kfree_skb_any(ch->trans_skb); ch->trans_skb = NULL; @@ -1348,6 +1354,11 @@ static void ctcmpc_chx_txdone(fsm_instance *fi, int event, void *arg) fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev); return; } + + CTCM_PR_DBGDATA("ccwcda=0x%p data=0x%p\n", + (void *)(unsigned long)ch->ccw[1].cda, + ch->trans_skb->data); + ch->ccw[1].count = ch->trans_skb->len; fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch); ch->prof.send_stamp = current_kernel_time(); /* xtime */ @@ -1503,7 +1514,7 @@ static void ctcmpc_chx_firstio(fsm_instance *fi, int event, void *arg) switch (fsm_getstate(fi)) { case CTC_STATE_STARTRETRY: case CTC_STATE_SETUPWAIT: - if (CHANNEL_DIRECTION(ch->flags) == READ) { + if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) { ctcmpc_chx_rxidle(fi, event, arg); } else { fsm_newstate(fi, CTC_STATE_TXIDLE); @@ -1512,9 +1523,9 @@ static void ctcmpc_chx_firstio(fsm_instance *fi, int event, void *arg) goto done; default: break; - }; + } - fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == READ) + fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ? CTC_STATE_RXINIT : CTC_STATE_TXINIT); done: @@ -1753,8 +1764,8 @@ static void ctcmpc_chx_send_sweep(fsm_instance *fsm, int event, void *arg) struct net_device *dev = ach->netdev; struct ctcm_priv *priv = dev->ml_priv; struct mpc_group *grp = priv->mpcg; - struct channel *wch = priv->channel[WRITE]; - struct channel *rch = priv->channel[READ]; + struct channel *wch = priv->channel[CTCM_WRITE]; + struct channel *rch = priv->channel[CTCM_READ]; struct sk_buff *skb; struct th_sweep *header; int rc = 0; @@ -2070,7 +2081,7 @@ static void dev_action_start(fsm_instance *fi, int event, void *arg) fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX); if (IS_MPC(priv)) priv->mpcg->channels_terminating = 0; - for (direction = READ; direction <= WRITE; direction++) { + for (direction = CTCM_READ; direction <= CTCM_WRITE; direction++) { struct channel *ch = priv->channel[direction]; fsm_event(ch->fsm, CTC_EVENT_START, ch); } @@ -2092,7 +2103,7 @@ static void dev_action_stop(fsm_instance *fi, int event, void *arg) CTCMY_DBF_DEV_NAME(SETUP, dev, ""); fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX); - for (direction = READ; direction <= WRITE; direction++) { + for (direction = CTCM_READ; direction <= CTCM_WRITE; direction++) { struct channel *ch = priv->channel[direction]; fsm_event(ch->fsm, CTC_EVENT_STOP, ch); ch->th_seq_num = 0x00; @@ -2183,11 +2194,11 @@ static void dev_action_chup(fsm_instance *fi, int event, void *arg) if (IS_MPC(priv)) { if (event == DEV_EVENT_RXUP) - mpc_channel_action(priv->channel[READ], - READ, MPC_CHANNEL_ADD); + mpc_channel_action(priv->channel[CTCM_READ], + CTCM_READ, MPC_CHANNEL_ADD); else - mpc_channel_action(priv->channel[WRITE], - WRITE, MPC_CHANNEL_ADD); + mpc_channel_action(priv->channel[CTCM_WRITE], + CTCM_WRITE, MPC_CHANNEL_ADD); } } @@ -2239,11 +2250,11 @@ static void dev_action_chdown(fsm_instance *fi, int event, void *arg) } if 
(IS_MPC(priv)) { if (event == DEV_EVENT_RXDOWN) - mpc_channel_action(priv->channel[READ], - READ, MPC_CHANNEL_REMOVE); + mpc_channel_action(priv->channel[CTCM_READ], + CTCM_READ, MPC_CHANNEL_REMOVE); else - mpc_channel_action(priv->channel[WRITE], - WRITE, MPC_CHANNEL_REMOVE); + mpc_channel_action(priv->channel[CTCM_WRITE], + CTCM_WRITE, MPC_CHANNEL_REMOVE); } } diff --git a/drivers/s390/net/ctcm_fsms.h b/drivers/s390/net/ctcm_fsms.h index 046d077fabb..c963d04799c 100644 --- a/drivers/s390/net/ctcm_fsms.h +++ b/drivers/s390/net/ctcm_fsms.h @@ -1,6 +1,4 @@ /* - * drivers/s390/net/ctcm_fsms.h - * * Copyright IBM Corp. 2001, 2007 * Authors: Fritz Elfert (felfert@millenux.com) * Peter Tiedemann (ptiedem@de.ibm.com) diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c index e35713dd050..03b6ad03557 100644 --- a/drivers/s390/net/ctcm_main.c +++ b/drivers/s390/net/ctcm_main.c @@ -1,6 +1,4 @@ /* - * drivers/s390/net/ctcm_main.c - * * Copyright IBM Corp. 2001, 2009 * Author(s): * Original CTC driver(s): @@ -267,7 +265,7 @@ static struct channel *channel_get(enum ctcm_channel_types type, else { ch->flags |= CHANNEL_FLAGS_INUSE; ch->flags &= ~CHANNEL_FLAGS_RWMASK; - ch->flags |= (direction == WRITE) + ch->flags |= (direction == CTCM_WRITE) ? CHANNEL_FLAGS_WRITE : CHANNEL_FLAGS_READ; fsm_newstate(ch->fsm, CTC_STATE_STOPPED); } @@ -388,7 +386,8 @@ int ctcm_ch_alloc_buffer(struct channel *ch) CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR, "%s(%s): %s trans_skb allocation error", CTCM_FUNTAIL, ch->id, - (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX"); + (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ? + "RX" : "TX"); return -ENOMEM; } @@ -399,7 +398,8 @@ int ctcm_ch_alloc_buffer(struct channel *ch) CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR, "%s(%s): %s set norm_cda failed", CTCM_FUNTAIL, ch->id, - (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX"); + (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ? + "RX" : "TX"); return -ENOMEM; } @@ -560,6 +560,9 @@ static int ctcm_transmit_skb(struct channel *ch, struct sk_buff *skb) skb_queue_tail(&ch->io_queue, skb); ccw_idx = 3; } + if (do_debug_ccw) + ctcmpc_dumpit((char *)&ch->ccw[ccw_idx], + sizeof(struct ccw1) * 3); ch->retry = 0; fsm_newstate(ch->fsm, CTC_STATE_TX); fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch); @@ -603,14 +606,14 @@ static void ctcmpc_send_sweep_req(struct channel *rch) priv = dev->ml_priv; grp = priv->mpcg; - ch = priv->channel[WRITE]; + ch = priv->channel[CTCM_WRITE]; /* sweep processing is not complete until response and request */ /* has completed for all read channels in group */ if (grp->in_sweep == 0) { grp->in_sweep = 1; - grp->sweep_rsp_pend_num = grp->active_channels[READ]; - grp->sweep_req_pend_num = grp->active_channels[READ]; + grp->sweep_rsp_pend_num = grp->active_channels[CTCM_READ]; + grp->sweep_req_pend_num = grp->active_channels[CTCM_READ]; } sweep_skb = __dev_alloc_skb(MPC_BUFSIZE_DEFAULT, GFP_ATOMIC|GFP_DMA); @@ -669,7 +672,6 @@ static int ctcmpc_transmit_skb(struct channel *ch, struct sk_buff *skb) int ccw_idx; unsigned long hi; unsigned long saveflags = 0; /* avoids compiler warning */ - __u16 block_len; CTCM_PR_DEBUG("Enter %s: %s, cp=%i ch=0x%p id=%s state=%s\n", __func__, dev->name, smp_processor_id(), ch, @@ -716,7 +718,6 @@ static int ctcmpc_transmit_skb(struct channel *ch, struct sk_buff *skb) */ atomic_inc(&skb->users); - block_len = skb->len + TH_HEADER_LENGTH + PDU_HEADER_LENGTH; /* * IDAL support in CTCM is broken, so we have to * care about skb's above 2G ourselves. 
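The READ/WRITE churn in this file has a single purpose: the driver-private READ/WRITE index defines shadowed the kernel-wide READ/WRITE macros, so they are renamed to CTCM_READ/CTCM_WRITE (see the ctcm_main.h hunk further down; claw.c receives the analogous READ_CHANNEL/WRITE_CHANNEL treatment). The values stay 0 and 1 and still index the per-direction channel array. A minimal, self-contained sketch of that indexing idiom, with illustrative demo_* names:

#define CTCM_READ	0
#define CTCM_WRITE	1

struct demo_channel {
	int id;
};

struct demo_priv {
	struct demo_channel *channel[2];	/* [CTCM_READ], [CTCM_WRITE] */
};

/* stands in for fsm_event()/channel_get() on one direction */
static void demo_kick(struct demo_channel *ch)
{
	(void)ch;
}

/* same loop shape as dev_action_start()/dev_action_stop() above */
static void demo_both_directions(struct demo_priv *priv)
{
	int dir;

	for (dir = CTCM_READ; dir <= CTCM_WRITE; dir++)
		demo_kick(priv->channel[dir]);
}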
@@ -911,7 +912,7 @@ static int ctcm_tx(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_BUSY; dev->trans_start = jiffies; - if (ctcm_transmit_skb(priv->channel[WRITE], skb) != 0) + if (ctcm_transmit_skb(priv->channel[CTCM_WRITE], skb) != 0) return NETDEV_TX_BUSY; return NETDEV_TX_OK; } @@ -994,7 +995,7 @@ static int ctcmpc_tx(struct sk_buff *skb, struct net_device *dev) } dev->trans_start = jiffies; - if (ctcmpc_transmit_skb(priv->channel[WRITE], skb) != 0) { + if (ctcmpc_transmit_skb(priv->channel[CTCM_WRITE], skb) != 0) { CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, "%s(%s): device error - dropped", CTCM_FUNTAIL, dev->name); @@ -1035,7 +1036,7 @@ static int ctcm_change_mtu(struct net_device *dev, int new_mtu) return -EINVAL; priv = dev->ml_priv; - max_bufsize = priv->channel[READ]->max_bufsize; + max_bufsize = priv->channel[CTCM_READ]->max_bufsize; if (IS_MPC(priv)) { if (new_mtu > max_bufsize - TH_HEADER_LENGTH) @@ -1152,7 +1153,7 @@ static struct net_device *ctcm_init_netdevice(struct ctcm_priv *priv) dev_fsm, dev_fsm_len, GFP_KERNEL); if (priv->fsm == NULL) { CTCMY_DBF_DEV(SETUP, dev, "init_fsm error"); - kfree(dev); + free_netdev(dev); return NULL; } fsm_newstate(priv->fsm, DEV_STATE_STOPPED); @@ -1163,7 +1164,7 @@ static struct net_device *ctcm_init_netdevice(struct ctcm_priv *priv) grp = ctcmpc_init_mpc_group(priv); if (grp == NULL) { MPC_DBF_DEV(SETUP, dev, "init_mpc_group error"); - kfree(dev); + free_netdev(dev); return NULL; } tasklet_init(&grp->mpc_tasklet2, @@ -1226,10 +1227,10 @@ static void ctcm_irq_handler(struct ccw_device *cdev, priv = dev_get_drvdata(&cgdev->dev); /* Try to extract channel from driver data. */ - if (priv->channel[READ]->cdev == cdev) - ch = priv->channel[READ]; - else if (priv->channel[WRITE]->cdev == cdev) - ch = priv->channel[WRITE]; + if (priv->channel[CTCM_READ]->cdev == cdev) + ch = priv->channel[CTCM_READ]; + else if (priv->channel[CTCM_WRITE]->cdev == cdev) + ch = priv->channel[CTCM_WRITE]; else { dev_err(&cdev->dev, "%s: Internal error: Can't determine channel for " @@ -1293,6 +1294,11 @@ static void ctcm_irq_handler(struct ccw_device *cdev, } +static const struct device_type ctcm_devtype = { + .name = "ctcm", + .groups = ctcm_attr_groups, +}; + /** * Add ctcm specific attributes. * Add ctcm private data. 
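The ctcm_devtype added just above, together with the ctcm_probe_device() hunk below that drops the manual ctcm_add_files() call, hands sysfs attribute handling to the driver core: once the probe/setup callback sets cgdev->dev.type = &ctcm_devtype, the core creates and removes every group listed in .groups by itself, so the attribute files are in place before the device is announced to userspace and no explicit cleanup call is needed. A self-contained sketch of the pattern, with hypothetical demo_* names:

#include <linux/device.h>

static ssize_t demo_buffer_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "demo\n");
}
static DEVICE_ATTR(buffer, 0444, demo_buffer_show, NULL);

static struct attribute *demo_attrs[] = {
	&dev_attr_buffer.attr,
	NULL,
};
static struct attribute_group demo_attr_group = {
	.attrs = demo_attrs,
};
static const struct attribute_group *demo_attr_groups[] = {
	&demo_attr_group,
	NULL,
};
static const struct device_type demo_devtype = {
	.name	= "demo",
	.groups	= demo_attr_groups,
};

/* in the probe/setup callback: dev->type = &demo_devtype;
 * nothing to undo on remove, the core tears the files down
 * with the device. */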
@@ -1304,7 +1310,6 @@ static void ctcm_irq_handler(struct ccw_device *cdev, static int ctcm_probe_device(struct ccwgroup_device *cgdev) { struct ctcm_priv *priv; - int rc; CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO, "%s %p", @@ -1321,17 +1326,11 @@ static int ctcm_probe_device(struct ccwgroup_device *cgdev) put_device(&cgdev->dev); return -ENOMEM; } - - rc = ctcm_add_files(&cgdev->dev); - if (rc) { - kfree(priv); - put_device(&cgdev->dev); - return rc; - } priv->buffer_size = CTCM_BUFSIZE_DEFAULT; cgdev->cdev[0]->handler = ctcm_irq_handler; cgdev->cdev[1]->handler = ctcm_irq_handler; dev_set_drvdata(&cgdev->dev, priv); + cgdev->dev.type = &ctcm_devtype; return 0; } @@ -1364,8 +1363,7 @@ static int add_channel(struct ccw_device *cdev, enum ctcm_channel_types type, ch->protocol = priv->protocol; if (IS_MPC(priv)) { - ch->discontact_th = (struct th_header *) - kzalloc(TH_HEADER_LENGTH, gfp_type()); + ch->discontact_th = kzalloc(TH_HEADER_LENGTH, gfp_type()); if (ch->discontact_th == NULL) goto nomem_return; @@ -1379,8 +1377,7 @@ static int add_channel(struct ccw_device *cdev, enum ctcm_channel_types type, } else ccw_num = 8; - ch->ccw = (struct ccw1 *) - kzalloc(ccw_num * sizeof(struct ccw1), GFP_KERNEL | GFP_DMA); + ch->ccw = kzalloc(ccw_num * sizeof(struct ccw1), GFP_KERNEL | GFP_DMA); if (ch->ccw == NULL) goto nomem_return; @@ -1457,7 +1454,7 @@ static int add_channel(struct ccw_device *cdev, enum ctcm_channel_types type, ch_fsm_len, GFP_KERNEL); } if (ch->fsm == NULL) - goto free_return; + goto nomem_return; fsm_newstate(ch->fsm, CTC_STATE_IDLE); @@ -1589,13 +1586,13 @@ static int ctcm_new_device(struct ccwgroup_device *cgdev) goto out_ccw2; } - for (direction = READ; direction <= WRITE; direction++) { + for (direction = CTCM_READ; direction <= CTCM_WRITE; direction++) { priv->channel[direction] = - channel_get(type, direction == READ ? read_id : write_id, - direction); + channel_get(type, direction == CTCM_READ ? 
+ read_id : write_id, direction); if (priv->channel[direction] == NULL) { - if (direction == WRITE) - channel_free(priv->channel[READ]); + if (direction == CTCM_WRITE) + channel_free(priv->channel[CTCM_READ]); goto out_dev; } priv->channel[direction]->netdev = dev; @@ -1610,26 +1607,19 @@ static int ctcm_new_device(struct ccwgroup_device *cgdev) goto out_dev; } - if (ctcm_add_attributes(&cgdev->dev)) { - result = -ENODEV; - goto out_unregister; - } - strlcpy(priv->fsm->name, dev->name, sizeof(priv->fsm->name)); dev_info(&dev->dev, "setup OK : r/w = %s/%s, protocol : %d\n", - priv->channel[READ]->id, - priv->channel[WRITE]->id, priv->protocol); + priv->channel[CTCM_READ]->id, + priv->channel[CTCM_WRITE]->id, priv->protocol); CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO, "setup(%s) OK : r/w = %s/%s, protocol : %d", dev->name, - priv->channel[READ]->id, - priv->channel[WRITE]->id, priv->protocol); + priv->channel[CTCM_READ]->id, + priv->channel[CTCM_WRITE]->id, priv->protocol); return 0; -out_unregister: - unregister_netdev(dev); out_dev: ctcm_free_netdevice(dev); out_ccw2: @@ -1637,10 +1627,10 @@ out_ccw2: out_ccw1: ccw_device_set_offline(cgdev->cdev[0]); out_remove_channel2: - readc = channel_get(type, read_id, READ); + readc = channel_get(type, read_id, CTCM_READ); channel_remove(readc); out_remove_channel1: - writec = channel_get(type, write_id, WRITE); + writec = channel_get(type, write_id, CTCM_WRITE); channel_remove(writec); out_err_result: return result; @@ -1662,19 +1652,18 @@ static int ctcm_shutdown_device(struct ccwgroup_device *cgdev) if (!priv) return -ENODEV; - if (priv->channel[READ]) { - dev = priv->channel[READ]->netdev; + if (priv->channel[CTCM_READ]) { + dev = priv->channel[CTCM_READ]->netdev; CTCM_DBF_DEV(SETUP, dev, ""); /* Close the device */ ctcm_close(dev); dev->flags &= ~IFF_RUNNING; - ctcm_remove_attributes(&cgdev->dev); - channel_free(priv->channel[READ]); + channel_free(priv->channel[CTCM_READ]); } else dev = NULL; - if (priv->channel[WRITE]) - channel_free(priv->channel[WRITE]); + if (priv->channel[CTCM_WRITE]) + channel_free(priv->channel[CTCM_WRITE]); if (dev) { unregister_netdev(dev); @@ -1687,11 +1676,11 @@ static int ctcm_shutdown_device(struct ccwgroup_device *cgdev) ccw_device_set_offline(cgdev->cdev[1]); ccw_device_set_offline(cgdev->cdev[0]); - if (priv->channel[READ]) - channel_remove(priv->channel[READ]); - if (priv->channel[WRITE]) - channel_remove(priv->channel[WRITE]); - priv->channel[READ] = priv->channel[WRITE] = NULL; + if (priv->channel[CTCM_READ]) + channel_remove(priv->channel[CTCM_READ]); + if (priv->channel[CTCM_WRITE]) + channel_remove(priv->channel[CTCM_WRITE]); + priv->channel[CTCM_READ] = priv->channel[CTCM_WRITE] = NULL; return 0; @@ -1702,15 +1691,12 @@ static void ctcm_remove_device(struct ccwgroup_device *cgdev) { struct ctcm_priv *priv = dev_get_drvdata(&cgdev->dev); - BUG_ON(priv == NULL); - CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO, "removing device %p, proto : %d", cgdev, priv->protocol); if (cgdev->state == CCWGROUP_ONLINE) ctcm_shutdown_device(cgdev); - ctcm_remove_files(&cgdev->dev); dev_set_drvdata(&cgdev->dev, NULL); kfree(priv); put_device(&cgdev->dev); @@ -1722,11 +1708,11 @@ static int ctcm_pm_suspend(struct ccwgroup_device *gdev) if (gdev->state == CCWGROUP_OFFLINE) return 0; - netif_device_detach(priv->channel[READ]->netdev); - ctcm_close(priv->channel[READ]->netdev); + netif_device_detach(priv->channel[CTCM_READ]->netdev); + ctcm_close(priv->channel[CTCM_READ]->netdev); if (!wait_event_timeout(priv->fsm->wait_q, fsm_getstate(priv->fsm) 
== DEV_STATE_STOPPED, CTCM_TIME_5_SEC)) { - netif_device_attach(priv->channel[READ]->netdev); + netif_device_attach(priv->channel[CTCM_READ]->netdev); return -EBUSY; } ccw_device_set_offline(gdev->cdev[1]); @@ -1747,9 +1733,9 @@ static int ctcm_pm_resume(struct ccwgroup_device *gdev) rc = ccw_device_set_online(gdev->cdev[0]); if (rc) goto err_out; - ctcm_open(priv->channel[READ]->netdev); + ctcm_open(priv->channel[CTCM_READ]->netdev); err_out: - netif_device_attach(priv->channel[READ]->netdev); + netif_device_attach(priv->channel[CTCM_READ]->netdev); return rc; } @@ -1762,19 +1748,22 @@ static struct ccw_device_id ctcm_ids[] = { MODULE_DEVICE_TABLE(ccw, ctcm_ids); static struct ccw_driver ctcm_ccw_driver = { - .owner = THIS_MODULE, - .name = "ctcm", + .driver = { + .owner = THIS_MODULE, + .name = "ctcm", + }, .ids = ctcm_ids, .probe = ccwgroup_probe_ccwdev, .remove = ccwgroup_remove_ccwdev, + .int_class = IRQIO_CTC, }; static struct ccwgroup_driver ctcm_group_driver = { - .owner = THIS_MODULE, - .name = CTC_DRIVER_NAME, - .max_slaves = 2, - .driver_id = 0xC3E3C3D4, /* CTCM */ - .probe = ctcm_probe_device, + .driver = { + .owner = THIS_MODULE, + .name = CTC_DRIVER_NAME, + }, + .setup = ctcm_probe_device, .remove = ctcm_remove_device, .set_online = ctcm_new_device, .set_offline = ctcm_shutdown_device, @@ -1783,31 +1772,25 @@ static struct ccwgroup_driver ctcm_group_driver = { .restore = ctcm_pm_resume, }; -static ssize_t -ctcm_driver_group_store(struct device_driver *ddrv, const char *buf, - size_t count) +static ssize_t ctcm_driver_group_store(struct device_driver *ddrv, + const char *buf, size_t count) { int err; - err = ccwgroup_create_from_string(ctcm_root_dev, - ctcm_group_driver.driver_id, - &ctcm_ccw_driver, 2, buf); + err = ccwgroup_create_dev(ctcm_root_dev, &ctcm_group_driver, 2, buf); return err ? err : count; } - static DRIVER_ATTR(group, 0200, NULL, ctcm_driver_group_store); -static struct attribute *ctcm_group_attrs[] = { +static struct attribute *ctcm_drv_attrs[] = { &driver_attr_group.attr, NULL, }; - -static struct attribute_group ctcm_group_attr_group = { - .attrs = ctcm_group_attrs, +static struct attribute_group ctcm_drv_attr_group = { + .attrs = ctcm_drv_attrs, }; - -static const struct attribute_group *ctcm_group_attr_groups[] = { - &ctcm_group_attr_group, +static const struct attribute_group *ctcm_drv_attr_groups[] = { + &ctcm_drv_attr_group, NULL, }; @@ -1823,7 +1806,6 @@ static const struct attribute_group *ctcm_group_attr_groups[] = { */ static void __exit ctcm_exit(void) { - driver_remove_file(&ctcm_group_driver.driver, &driver_attr_group); ccwgroup_driver_unregister(&ctcm_group_driver); ccw_driver_unregister(&ctcm_ccw_driver); root_device_unregister(ctcm_root_dev); @@ -1855,13 +1837,13 @@ static int __init ctcm_init(void) if (ret) goto out_err; ctcm_root_dev = root_device_register("ctcm"); - ret = IS_ERR(ctcm_root_dev) ? PTR_ERR(ctcm_root_dev) : 0; + ret = PTR_ERR_OR_ZERO(ctcm_root_dev); if (ret) goto register_err; ret = ccw_driver_register(&ctcm_ccw_driver); if (ret) goto ccw_err; - ctcm_group_driver.driver.groups = ctcm_group_attr_groups; + ctcm_group_driver.driver.groups = ctcm_drv_attr_groups; ret = ccwgroup_driver_register(&ctcm_group_driver); if (ret) goto ccwgroup_err; diff --git a/drivers/s390/net/ctcm_main.h b/drivers/s390/net/ctcm_main.h index d34fa14f44e..477c933685f 100644 --- a/drivers/s390/net/ctcm_main.h +++ b/drivers/s390/net/ctcm_main.h @@ -1,6 +1,4 @@ /* - * drivers/s390/net/ctcm_main.h - * * Copyright IBM Corp. 
2001, 2007 * Authors: Fritz Elfert (felfert@millenux.com) * Peter Tiedemann (ptiedem@de.ibm.com) @@ -111,8 +109,8 @@ enum ctcm_channel_types { #define CTCM_INITIAL_BLOCKLEN 2 -#define READ 0 -#define WRITE 1 +#define CTCM_READ 0 +#define CTCM_WRITE 1 #define CTCM_ID_SIZE 20+3 @@ -225,13 +223,7 @@ struct ctcm_priv { int ctcm_open(struct net_device *dev); int ctcm_close(struct net_device *dev); -/* - * prototypes for non-static sysfs functions - */ -int ctcm_add_attributes(struct device *dev); -void ctcm_remove_attributes(struct device *dev); -int ctcm_add_files(struct device *dev); -void ctcm_remove_files(struct device *dev); +extern const struct attribute_group *ctcm_attr_groups[]; /* * Compatibility macros for busy handling diff --git a/drivers/s390/net/ctcm_mpc.c b/drivers/s390/net/ctcm_mpc.c index 5978b390153..2dbc77b5137 100644 --- a/drivers/s390/net/ctcm_mpc.c +++ b/drivers/s390/net/ctcm_mpc.c @@ -1,6 +1,4 @@ /* - * drivers/s390/net/ctcm_mpc.c - * * Copyright IBM Corp. 2004, 2007 * Authors: Belinda Thompson (belindat@us.ibm.com) * Andy Richter (richtera@us.ibm.com) @@ -53,8 +51,8 @@ #include <linux/moduleparam.h> #include <asm/idals.h> -#include "ctcm_mpc.h" #include "ctcm_main.h" +#include "ctcm_mpc.h" #include "ctcm_fsms.h" static const struct xid2 init_xid = { @@ -132,7 +130,7 @@ void ctcmpc_dumpit(char *buf, int len) __u32 ct, sw, rm, dup; char *ptr, *rptr; char tbuf[82], tdup[82]; - #if (UTS_MACHINE == s390x) + #ifdef CONFIG_64BIT char addr[22]; #else char addr[12]; #endif @@ -149,8 +147,8 @@ void ctcmpc_dumpit(char *buf, int len) for (ct = 0; ct < len; ct++, ptr++, rptr++) { if (sw == 0) { - #if (UTS_MACHINE == s390x) - sprintf(addr, "%16.16lx", (__u64)rptr); + #ifdef CONFIG_64BIT + sprintf(addr, "%16.16llx", (__u64)rptr); #else sprintf(addr, "%8.8X", (__u32)rptr); #endif @@ -164,8 +162,8 @@ void ctcmpc_dumpit(char *buf, int len) if (sw == 8) strcat(bhex, " "); - #if (UTS_MACHINE == s390x) - sprintf(tbuf, "%2.2lX", (__u64)*ptr); + #ifdef CONFIG_64BIT + sprintf(tbuf, "%2.2llX", (__u64)*ptr); #else sprintf(tbuf, "%2.2X", (__u32)*ptr); #endif @@ -419,8 +417,8 @@ void ctc_mpc_establish_connectivity(int port_num, return; priv = dev->ml_priv; grp = priv->mpcg; - rch = priv->channel[READ]; - wch = priv->channel[WRITE]; + rch = priv->channel[CTCM_READ]; + wch = priv->channel[CTCM_WRITE]; CTCM_DBF_TEXT_(MPC_SETUP, CTC_DBF_INFO, "%s(%s): state=%s", @@ -540,7 +538,7 @@ void ctc_mpc_dealloc_ch(int port_num) CTCM_DBF_TEXT_(MPC_SETUP, CTC_DBF_DEBUG, "%s: %s: refcount = %d\n", - CTCM_FUNTAIL, dev->name, atomic_read(&dev->refcnt)); + CTCM_FUNTAIL, dev->name, netdev_refcnt_read(dev)); fsm_deltimer(&priv->restart_timer); grp->channels_terminating = 0; @@ -578,7 +576,7 @@ void ctc_mpc_flow_control(int port_num, int flowc) "%s: %s: flowc = %d", CTCM_FUNTAIL, dev->name, flowc); - rch = priv->channel[READ]; + rch = priv->channel[CTCM_READ]; mpcg_state = fsm_getstate(grp->fsm); switch (flowc) { @@ -622,7 +620,7 @@ static void mpc_rcvd_sweep_resp(struct mpcg_info *mpcginfo) struct net_device *dev = rch->netdev; struct ctcm_priv *priv = dev->ml_priv; struct mpc_group *grp = priv->mpcg; - struct channel *ch = priv->channel[WRITE]; + struct channel *ch = priv->channel[CTCM_WRITE]; CTCM_PR_DEBUG("%s: ch=0x%p id=%s\n", __func__, ch, ch->id); CTCM_D3_DUMP((char *)mpcginfo->sweep, TH_SWEEP_LENGTH); @@ -653,10 +651,9 @@ static void ctcmpc_send_sweep_resp(struct channel *rch) struct net_device *dev = rch->netdev; struct ctcm_priv *priv = dev->ml_priv; struct mpc_group *grp = priv->mpcg; - int rc = 0; struct th_sweep
*header; struct sk_buff *sweep_skb; - struct channel *ch = priv->channel[WRITE]; + struct channel *ch = priv->channel[CTCM_WRITE]; CTCM_PR_DEBUG("%s: ch=0x%p id=%s\n", __func__, rch, rch->id); @@ -665,17 +662,14 @@ static void ctcmpc_send_sweep_resp(struct channel *rch) CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, "%s(%s): sweep_skb allocation ERROR\n", CTCM_FUNTAIL, rch->id); - rc = -ENOMEM; - goto done; + goto done; } - header = (struct th_sweep *) - kmalloc(sizeof(struct th_sweep), gfp_type()); + header = kmalloc(sizeof(struct th_sweep), gfp_type()); if (!header) { dev_kfree_skb_any(sweep_skb); - rc = -ENOMEM; - goto done; + goto done; } header->th.th_seg = 0x00 ; @@ -713,7 +707,7 @@ static void mpc_rcvd_sweep_req(struct mpcg_info *mpcginfo) struct net_device *dev = rch->netdev; struct ctcm_priv *priv = dev->ml_priv; struct mpc_group *grp = priv->mpcg; - struct channel *ch = priv->channel[WRITE]; + struct channel *ch = priv->channel[CTCM_WRITE]; if (do_debug) CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_DEBUG, @@ -722,8 +716,8 @@ static void mpc_rcvd_sweep_req(struct mpcg_info *mpcginfo) if (grp->in_sweep == 0) { grp->in_sweep = 1; ctcm_test_and_set_busy(dev); - grp->sweep_req_pend_num = grp->active_channels[READ]; - grp->sweep_rsp_pend_num = grp->active_channels[READ]; + grp->sweep_req_pend_num = grp->active_channels[CTCM_READ]; + grp->sweep_rsp_pend_num = grp->active_channels[CTCM_READ]; } CTCM_D3_DUMP((char *)mpcginfo->sweep, TH_SWEEP_LENGTH); @@ -907,14 +901,14 @@ void mpc_group_ready(unsigned long adev) fsm_newstate(grp->fsm, MPCG_STATE_READY); /* Put up a read on the channel */ - ch = priv->channel[READ]; + ch = priv->channel[CTCM_READ]; ch->pdu_seq = 0; CTCM_PR_DBGDATA("ctcmpc: %s() ToDCM_pdu_seq= %08x\n" , __func__, ch->pdu_seq); ctcmpc_chx_rxidle(ch->fsm, CTC_EVENT_START, ch); /* Put the write channel in idle state */ - ch = priv->channel[WRITE]; + ch = priv->channel[CTCM_WRITE]; if (ch->collect_len > 0) { spin_lock(&ch->collect_lock); ctcm_purge_skb_queue(&ch->collect_queue); @@ -961,7 +955,8 @@ void mpc_channel_action(struct channel *ch, int direction, int action) "%s: %i / Grp:%s total_channels=%i, active_channels: " "read=%i, write=%i\n", __func__, action, fsm_getstate_str(grp->fsm), grp->num_channel_paths, - grp->active_channels[READ], grp->active_channels[WRITE]); + grp->active_channels[CTCM_READ], + grp->active_channels[CTCM_WRITE]); if ((action == MPC_CHANNEL_ADD) && (ch->in_mpcgroup == 0)) { grp->num_channel_paths++; @@ -995,10 +990,11 @@ void mpc_channel_action(struct channel *ch, int direction, int action) grp->xid_skb->data, grp->xid_skb->len); - ch->xid->xid2_dlc_type = ((CHANNEL_DIRECTION(ch->flags) == READ) + ch->xid->xid2_dlc_type = + ((CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ? 
XID2_READ_SIDE : XID2_WRITE_SIDE); - if (CHANNEL_DIRECTION(ch->flags) == WRITE) + if (CHANNEL_DIRECTION(ch->flags) == CTCM_WRITE) ch->xid->xid2_buf_len = 0x00; ch->xid_skb->data = ch->xid_skb_data; @@ -1007,8 +1003,8 @@ void mpc_channel_action(struct channel *ch, int direction, int action) fsm_newstate(ch->fsm, CH_XID0_PENDING); - if ((grp->active_channels[READ] > 0) && - (grp->active_channels[WRITE] > 0) && + if ((grp->active_channels[CTCM_READ] > 0) && + (grp->active_channels[CTCM_WRITE] > 0) && (fsm_getstate(grp->fsm) < MPCG_STATE_XID2INITW)) { fsm_newstate(grp->fsm, MPCG_STATE_XID2INITW); CTCM_DBF_TEXT_(MPC_SETUP, CTC_DBF_NOTICE, @@ -1028,10 +1024,10 @@ void mpc_channel_action(struct channel *ch, int direction, int action) if (grp->channels_terminating) goto done; - if (((grp->active_channels[READ] == 0) && - (grp->active_channels[WRITE] > 0)) - || ((grp->active_channels[WRITE] == 0) && - (grp->active_channels[READ] > 0))) + if (((grp->active_channels[CTCM_READ] == 0) && + (grp->active_channels[CTCM_WRITE] > 0)) + || ((grp->active_channels[CTCM_WRITE] == 0) && + (grp->active_channels[CTCM_READ] > 0))) fsm_event(grp->fsm, MPCG_EVENT_INOP, dev); } done: @@ -1039,7 +1035,8 @@ done: "exit %s: %i / Grp:%s total_channels=%i, active_channels: " "read=%i, write=%i\n", __func__, action, fsm_getstate_str(grp->fsm), grp->num_channel_paths, - grp->active_channels[READ], grp->active_channels[WRITE]); + grp->active_channels[CTCM_READ], + grp->active_channels[CTCM_WRITE]); CTCM_PR_DEBUG("exit %s: ch=0x%p id=%s\n", __func__, ch, ch->id); } @@ -1191,8 +1188,7 @@ static void ctcmpc_unpack_skb(struct channel *ch, struct sk_buff *pskb) skb_pull(pskb, new_len); /* point to next PDU */ } } else { - mpcginfo = (struct mpcg_info *) - kmalloc(sizeof(struct mpcg_info), gfp_type()); + mpcginfo = kmalloc(sizeof(struct mpcg_info), gfp_type()); if (mpcginfo == NULL) goto done; @@ -1369,10 +1365,8 @@ static void mpc_action_go_inop(fsm_instance *fi, int event, void *arg) struct net_device *dev = arg; struct ctcm_priv *priv; struct mpc_group *grp; - int rc = 0; - struct channel *wch, *rch; + struct channel *wch; - BUG_ON(dev == NULL); CTCM_PR_DEBUG("Enter %s: %s\n", __func__, dev->name); priv = dev->ml_priv; @@ -1394,8 +1388,7 @@ static void mpc_action_go_inop(fsm_instance *fi, int event, void *arg) (grp->port_persist == 0)) fsm_deltimer(&priv->restart_timer); - wch = priv->channel[WRITE]; - rch = priv->channel[READ]; + wch = priv->channel[CTCM_WRITE]; switch (grp->saved_state) { case MPCG_STATE_RESET: @@ -1434,7 +1427,7 @@ static void mpc_action_go_inop(fsm_instance *fi, int event, void *arg) if (grp->send_qllc_disc == 1) { grp->send_qllc_disc = 0; - rc = mpc_send_qllc_discontact(dev); + mpc_send_qllc_discontact(dev); } /* DO NOT issue DEV_EVENT_STOP directly out of this code */ @@ -1478,12 +1471,10 @@ static void mpc_action_timeout(fsm_instance *fi, int event, void *arg) struct channel *wch; struct channel *rch; - BUG_ON(dev == NULL); - priv = dev->ml_priv; grp = priv->mpcg; - wch = priv->channel[WRITE]; - rch = priv->channel[READ]; + wch = priv->channel[CTCM_WRITE]; + rch = priv->channel[CTCM_READ]; switch (fsm_getstate(grp->fsm)) { case MPCG_STATE_XID2INITW: @@ -1588,7 +1579,7 @@ static int mpc_validate_xid(struct mpcg_info *mpcginfo) CTCM_D3_DUMP((char *)xid, XID2_LENGTH); /*the received direction should be the opposite of ours */ - if (((CHANNEL_DIRECTION(ch->flags) == READ) ? XID2_WRITE_SIDE : + if (((CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ? 
XID2_WRITE_SIDE : XID2_READ_SIDE) != xid->xid2_dlc_type) { rc = 2; /* XID REJECTED: r/w channel pairing mismatch */ @@ -1914,7 +1905,7 @@ static void mpc_action_doxid7(fsm_instance *fsm, int event, void *arg) if (grp == NULL) return; - for (direction = READ; direction <= WRITE; direction++) { + for (direction = CTCM_READ; direction <= CTCM_WRITE; direction++) { struct channel *ch = priv->channel[direction]; struct xid2 *thisxid = ch->xid; ch->xid_skb->data = ch->xid_skb_data; @@ -2154,14 +2145,15 @@ static int mpc_send_qllc_discontact(struct net_device *dev) return -ENOMEM; } - *((__u32 *)skb_push(skb, 4)) = priv->channel[READ]->pdu_seq; - priv->channel[READ]->pdu_seq++; + *((__u32 *)skb_push(skb, 4)) = + priv->channel[CTCM_READ]->pdu_seq; + priv->channel[CTCM_READ]->pdu_seq++; CTCM_PR_DBGDATA("ctcmpc: %s ToDCM_pdu_seq= %08x\n", - __func__, priv->channel[READ]->pdu_seq); + __func__, priv->channel[CTCM_READ]->pdu_seq); /* receipt of CC03 resets anticipated sequence number on receiving side */ - priv->channel[READ]->pdu_seq = 0x00; + priv->channel[CTCM_READ]->pdu_seq = 0x00; skb_reset_mac_header(skb); skb->dev = dev; skb->protocol = htons(ETH_P_SNAP); diff --git a/drivers/s390/net/ctcm_mpc.h b/drivers/s390/net/ctcm_mpc.h index 5336120cddf..bd1b1cc54ff 100644 --- a/drivers/s390/net/ctcm_mpc.h +++ b/drivers/s390/net/ctcm_mpc.h @@ -1,6 +1,4 @@ /* - * drivers/s390/net/ctcm_mpc.h - * * Copyright IBM Corp. 2007 * Authors: Peter Tiedemann (ptiedem@de.ibm.com) * @@ -12,6 +10,7 @@ #ifndef _CTC_MPC_H_ #define _CTC_MPC_H_ +#include <linux/interrupt.h> #include <linux/skbuff.h> #include "fsm.h" diff --git a/drivers/s390/net/ctcm_sysfs.c b/drivers/s390/net/ctcm_sysfs.c index 738ad26c74a..6bcfbbb20f0 100644 --- a/drivers/s390/net/ctcm_sysfs.c +++ b/drivers/s390/net/ctcm_sysfs.c @@ -1,6 +1,4 @@ /* - * drivers/s390/net/ctcm_sysfs.c - * * Copyright IBM Corp. 
2007, 2007 * Authors: Peter Tiedemann (ptiedem@de.ibm.com) * @@ -13,7 +11,9 @@ #define KMSG_COMPONENT "ctcm" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt +#include <linux/device.h> #include <linux/sysfs.h> +#include <linux/slab.h> #include "ctcm_main.h" /* @@ -34,16 +34,19 @@ static ssize_t ctcm_buffer_write(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct net_device *ndev; - int bs1; + unsigned int bs1; struct ctcm_priv *priv = dev_get_drvdata(dev); + int rc; - if (!(priv && priv->channel[READ] && - (ndev = priv->channel[READ]->netdev))) { + if (!(priv && priv->channel[CTCM_READ] && + (ndev = priv->channel[CTCM_READ]->netdev))) { CTCM_DBF_TEXT(SETUP, CTC_DBF_ERROR, "bfnondev"); return -ENODEV; } - sscanf(buf, "%u", &bs1); + rc = sscanf(buf, "%u", &bs1); + if (rc != 1) + goto einval; if (bs1 > CTCM_BUFSIZE_LIMIT) goto einval; if (bs1 < (576 + LL_HEADER_LENGTH + 2)) @@ -54,12 +57,12 @@ static ssize_t ctcm_buffer_write(struct device *dev, (bs1 < (ndev->mtu + LL_HEADER_LENGTH + 2))) goto einval; - priv->channel[READ]->max_bufsize = bs1; - priv->channel[WRITE]->max_bufsize = bs1; + priv->channel[CTCM_READ]->max_bufsize = bs1; + priv->channel[CTCM_WRITE]->max_bufsize = bs1; if (!(ndev->flags & IFF_RUNNING)) ndev->mtu = bs1 - LL_HEADER_LENGTH - 2; - priv->channel[READ]->flags |= CHANNEL_FLAGS_BUFSIZE_CHANGED; - priv->channel[WRITE]->flags |= CHANNEL_FLAGS_BUFSIZE_CHANGED; + priv->channel[CTCM_READ]->flags |= CHANNEL_FLAGS_BUFSIZE_CHANGED; + priv->channel[CTCM_WRITE]->flags |= CHANNEL_FLAGS_BUFSIZE_CHANGED; CTCM_DBF_DEV(SETUP, ndev, buf); return count; @@ -84,9 +87,9 @@ static void ctcm_print_statistics(struct ctcm_priv *priv) p += sprintf(p, " Device FSM state: %s\n", fsm_getstate_str(priv->fsm)); p += sprintf(p, " RX channel FSM state: %s\n", - fsm_getstate_str(priv->channel[READ]->fsm)); + fsm_getstate_str(priv->channel[CTCM_READ]->fsm)); p += sprintf(p, " TX channel FSM state: %s\n", - fsm_getstate_str(priv->channel[WRITE]->fsm)); + fsm_getstate_str(priv->channel[CTCM_WRITE]->fsm)); p += sprintf(p, " Max. TX buffer used: %ld\n", priv->channel[WRITE]->prof.maxmulti); p += sprintf(p, " Max.
chained SKBs: %ld\n", @@ -101,16 +104,18 @@ static void ctcm_print_statistics(struct ctcm_priv *priv) priv->channel[WRITE]->prof.tx_time); printk(KERN_INFO "Statistics for %s:\n%s", - priv->channel[WRITE]->netdev->name, sbuf); + priv->channel[CTCM_WRITE]->netdev->name, sbuf); kfree(sbuf); return; } static ssize_t stats_show(struct device *dev, - struct device_attribute *attr, char *buf) + struct device_attribute *attr, char *buf) { + struct ccwgroup_device *gdev = to_ccwgroupdev(dev); struct ctcm_priv *priv = dev_get_drvdata(dev); - if (!priv) + + if (!priv || gdev->state != CCWGROUP_ONLINE) return -ENODEV; ctcm_print_statistics(priv); return sprintf(buf, "0\n"); @@ -124,7 +129,7 @@ static ssize_t stats_write(struct device *dev, struct device_attribute *attr, return -ENODEV; /* Reset statistics */ memset(&priv->channel[WRITE]->prof, 0, - sizeof(priv->channel[WRITE]->prof)); + sizeof(priv->channel[CTCM_WRITE]->prof)); return count; } @@ -141,13 +146,14 @@ static ssize_t ctcm_proto_show(struct device *dev, static ssize_t ctcm_proto_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - int value; + int value, rc; struct ctcm_priv *priv = dev_get_drvdata(dev); if (!priv) return -ENODEV; - sscanf(buf, "%u", &value); - if (!((value == CTCM_PROTO_S390) || + rc = sscanf(buf, "%d", &value); + if ((rc != 1) || + !((value == CTCM_PROTO_S390) || (value == CTCM_PROTO_LINUX) || (value == CTCM_PROTO_MPC) || (value == CTCM_PROTO_OS390))) @@ -158,7 +164,7 @@ static ssize_t ctcm_proto_store(struct device *dev, return count; } -const char *ctcm_type[] = { +static const char *ctcm_type[] = { "not a channel", "CTC/A", "FICON channel", @@ -189,34 +195,14 @@ static struct attribute *ctcm_attr[] = { &dev_attr_protocol.attr, &dev_attr_type.attr, &dev_attr_buffer.attr, + &dev_attr_stats.attr, NULL, }; static struct attribute_group ctcm_attr_group = { .attrs = ctcm_attr, }; - -int ctcm_add_attributes(struct device *dev) -{ - int rc; - - rc = device_create_file(dev, &dev_attr_stats); - - return rc; -} - -void ctcm_remove_attributes(struct device *dev) -{ - device_remove_file(dev, &dev_attr_stats); -} - -int ctcm_add_files(struct device *dev) -{ - return sysfs_create_group(&dev->kobj, &ctcm_attr_group); -} - -void ctcm_remove_files(struct device *dev) -{ - sysfs_remove_group(&dev->kobj, &ctcm_attr_group); -} - +const struct attribute_group *ctcm_attr_groups[] = { + &ctcm_attr_group, + NULL, +}; diff --git a/drivers/s390/net/fsm.c b/drivers/s390/net/fsm.c index cae48cbc5e9..e5dea67f902 100644 --- a/drivers/s390/net/fsm.c +++ b/drivers/s390/net/fsm.c @@ -5,6 +5,7 @@ #include "fsm.h" #include <linux/module.h> +#include <linux/slab.h> #include <linux/timer.h> MODULE_AUTHOR("(C) 2000 IBM Corp. by Fritz Elfert (felfert@millenux.com)"); diff --git a/drivers/s390/net/fsm.h b/drivers/s390/net/fsm.h index 1e8b235d95b..a4510cf5903 100644 --- a/drivers/s390/net/fsm.h +++ b/drivers/s390/net/fsm.h @@ -8,7 +8,7 @@ #include <linux/slab.h> #include <linux/sched.h> #include <linux/string.h> -#include <asm/atomic.h> +#include <linux/atomic.h> /** * Define this to get debugging messages. 
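The ctcm sysfs rework above, and the matching lcs and netiucv hunks below, all converge on the same driver-core idiom: instead of bolting files onto an already-registered device with device_create_file()/sysfs_create_group() and tearing them down by hand in every error and remove path, the driver publishes a NULL-terminated array of attribute groups and lets the core create and remove the files together with the device. A minimal sketch of the idiom, assuming a hypothetical "foo" attribute; the netiucv hunks further down assign dev->groups in exactly this way before device_register():

	#include <linux/device.h>
	#include <linux/sysfs.h>

	static ssize_t foo_show(struct device *dev,
				struct device_attribute *attr, char *buf)
	{
		return sprintf(buf, "0\n");	/* report some per-device state */
	}
	static DEVICE_ATTR(foo, 0444, foo_show, NULL);

	static struct attribute *foo_attrs[] = {
		&dev_attr_foo.attr,
		NULL,			/* attribute list must be NULL-terminated */
	};
	static struct attribute_group foo_attr_group = {
		.attrs = foo_attrs,
	};
	static const struct attribute_group *foo_attr_groups[] = {
		&foo_attr_group,
		NULL,			/* group list must be NULL-terminated */
	};

	static int foo_register(struct device *dev)	/* hypothetical helper */
	{
		/* The core creates the files during device_add() and removes
		 * them in device_del(), so no explicit sysfs cleanup and no
		 * separate error path for the files is needed. */
		dev->groups = foo_attr_groups;
		return device_register(dev);
	}

Because the files now appear and vanish atomically with the device, the out_unregister error path and the ctcm_add_attributes()/ctcm_remove_attributes() and ctcm_add_files()/ctcm_remove_files() helpers could simply be deleted above: the attributes can no longer outlive, or be created behind the back of, the device they describe.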
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c index f6cc46dc050..0a7d87c372b 100644 --- a/drivers/s390/net/lcs.c +++ b/drivers/s390/net/lcs.c @@ -30,13 +30,13 @@ #include <linux/if.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> -#include <linux/trdevice.h> #include <linux/fddidevice.h> #include <linux/inetdevice.h> #include <linux/in.h> #include <linux/igmp.h> #include <linux/delay.h> #include <linux/kthread.h> +#include <linux/slab.h> #include <net/arp.h> #include <net/ip.h> @@ -49,8 +49,7 @@ #include "lcs.h" -#if !defined(CONFIG_NET_ETHERNET) && \ - !defined(CONFIG_TR) && !defined(CONFIG_FDDI) +#if !defined(CONFIG_ETHERNET) && !defined(CONFIG_FDDI) #error Cannot compile lcs.c without some net devices switched on. #endif @@ -283,7 +282,7 @@ lcs_setup_write_ccws(struct lcs_card *card) LCS_DBF_TEXT(3, setup, "iwritccw"); /* Setup write ccws. */ - memset(card->write.ccws, 0, sizeof(struct ccw1) * LCS_NUM_BUFFS + 1); + memset(card->write.ccws, 0, sizeof(struct ccw1) * (LCS_NUM_BUFFS + 1)); for (cnt = 0; cnt < LCS_NUM_BUFFS; cnt++) { card->write.ccws[cnt].cmd_code = LCS_CCW_WRITE; card->write.ccws[cnt].count = 0; @@ -839,7 +838,7 @@ lcs_notify_lancmd_waiters(struct lcs_card *card, struct lcs_cmd *cmd) } /** - * Emit buffer of a lan comand. + * Emit buffer of a lan command. */ static void lcs_lancmd_timeout(unsigned long data) @@ -900,6 +899,7 @@ lcs_send_lancmd(struct lcs_card *card, struct lcs_buffer *buffer, add_timer(&timer); wait_event(reply->wait_q, reply->received); del_timer_sync(&timer); + destroy_timer_on_stack(&timer); LCS_DBF_TEXT_(4, trace, "rc:%d",reply->rc); rc = reply->rc; lcs_put_reply(reply); @@ -1121,7 +1121,7 @@ list_modified: list_for_each_entry_safe(ipm, tmp, &card->ipm_list, list){ switch (ipm->ipm_state) { case LCS_IPM_STATE_SET_REQUIRED: - /* del from ipm_list so noone else can tamper with + /* del from ipm_list so no one else can tamper with * this entry */ list_del_init(&ipm->list); spin_unlock_irqrestore(&card->ipm_lock, flags); @@ -1165,10 +1165,7 @@ static void lcs_get_mac_for_ipm(__be32 ipm, char *mac, struct net_device *dev) { LCS_DBF_TEXT(4,trace, "getmac"); - if (dev->type == ARPHRD_IEEE802_TR) - ip_tr_mc_map(ipm, mac); - else - ip_eth_mc_map(ipm, mac); + ip_eth_mc_map(ipm, mac); } /** @@ -1187,7 +1184,8 @@ lcs_remove_mc_addresses(struct lcs_card *card, struct in_device *in4_dev) spin_lock_irqsave(&card->ipm_lock, flags); list_for_each(l, &card->ipm_list) { ipm = list_entry(l, struct lcs_ipm_list, list); - for (im4 = in4_dev->mc_list; im4 != NULL; im4 = im4->next) { + for (im4 = rcu_dereference(in4_dev->mc_list); + im4 != NULL; im4 = rcu_dereference(im4->next_rcu)) { lcs_get_mac_for_ipm(im4->multiaddr, buf, card->dev); if ( (ipm->ipm.ip_addr == im4->multiaddr) && (memcmp(buf, &ipm->ipm.mac_addr, @@ -1232,13 +1230,13 @@ lcs_set_mc_addresses(struct lcs_card *card, struct in_device *in4_dev) unsigned long flags; LCS_DBF_TEXT(4, trace, "setmclst"); - for (im4 = in4_dev->mc_list; im4; im4 = im4->next) { + for (im4 = rcu_dereference(in4_dev->mc_list); im4 != NULL; + im4 = rcu_dereference(im4->next_rcu)) { lcs_get_mac_for_ipm(im4->multiaddr, buf, card->dev); ipm = lcs_check_addr_entry(card, im4, buf); if (ipm != NULL) continue; /* Address already in list. 
*/ - ipm = (struct lcs_ipm_list *) - kzalloc(sizeof(struct lcs_ipm_list), GFP_ATOMIC); + ipm = kzalloc(sizeof(struct lcs_ipm_list), GFP_ATOMIC); if (ipm == NULL) { pr_info("Not enough memory to add" " new multicast entry!\n"); @@ -1269,10 +1267,10 @@ lcs_register_mc_addresses(void *data) in4_dev = in_dev_get(card->dev); if (in4_dev == NULL) goto out; - read_lock(&in4_dev->mc_list_lock); + rcu_read_lock(); lcs_remove_mc_addresses(card,in4_dev); lcs_set_mc_addresses(card, in4_dev); - read_unlock(&in4_dev->mc_list_lock); + rcu_read_unlock(); in_dev_put(in4_dev); netif_carrier_off(card->dev); @@ -1479,7 +1477,6 @@ lcs_tasklet(unsigned long data) struct lcs_channel *channel; struct lcs_buffer *iob; int buf_idx; - int rc; channel = (struct lcs_channel *) data; LCS_DBF_TEXT_(5, trace, "tlet%s", dev_name(&channel->ccwdev->dev)); @@ -1496,14 +1493,11 @@ lcs_tasklet(unsigned long data) channel->buf_idx = buf_idx; if (channel->state == LCS_CH_STATE_STOPPED) - // FIXME: what if rc != 0 ?? - rc = lcs_start_channel(channel); + lcs_start_channel(channel); spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); if (channel->state == LCS_CH_STATE_SUSPENDED && - channel->iob[channel->io_idx].state == LCS_BUF_STATE_READY) { - // FIXME: what if rc != 0 ?? - rc = __lcs_resume_channel(channel); - } + channel->iob[channel->io_idx].state == LCS_BUF_STATE_READY) + __lcs_resume_channel(channel); spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); /* Something happened on the channel. Wake up waiters. */ @@ -1636,19 +1630,13 @@ lcs_startlan_auto(struct lcs_card *card) int rc; LCS_DBF_TEXT(2, trace, "strtauto"); -#ifdef CONFIG_NET_ETHERNET +#ifdef CONFIG_ETHERNET card->lan_type = LCS_FRAME_TYPE_ENET; rc = lcs_send_startlan(card, LCS_INITIATOR_TCPIP); if (rc == 0) return 0; #endif -#ifdef CONFIG_TR - card->lan_type = LCS_FRAME_TYPE_TR; - rc = lcs_send_startlan(card, LCS_INITIATOR_TCPIP); - if (rc == 0) - return 0; -#endif #ifdef CONFIG_FDDI card->lan_type = LCS_FRAME_TYPE_FDDI; rc = lcs_send_startlan(card, LCS_INITIATOR_TCPIP); @@ -1955,14 +1943,16 @@ static ssize_t lcs_portno_store (struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct lcs_card *card; - int value; + int value, rc; card = dev_get_drvdata(dev); if (!card) return 0; - sscanf(buf, "%u", &value); + rc = sscanf(buf, "%d", &value); + if (rc != 1) + return -EINVAL; /* TODO: sanity checks */ card->portno = value; @@ -1972,7 +1962,7 @@ lcs_portno_store (struct device *dev, struct device_attribute *attr, const char static DEVICE_ATTR(portno, 0644, lcs_portno_show, lcs_portno_store); -const char *lcs_type[] = { +static const char *lcs_type[] = { "not a channel", "2216 parallel", "2216 channel", @@ -2009,14 +1999,17 @@ static ssize_t lcs_timeout_store (struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct lcs_card *card; - int value; + unsigned int value; + int rc; card = dev_get_drvdata(dev); if (!card) return 0; - sscanf(buf, "%u", &value); + rc = sscanf(buf, "%u", &value); + if (rc != 1) + return -EINVAL; /* TODO: sanity checks */ card->lancmd_timeout = value; @@ -2053,10 +2046,17 @@ static struct attribute * lcs_attrs[] = { &dev_attr_recover.attr, NULL, }; - static struct attribute_group lcs_attr_group = { .attrs = lcs_attrs, }; +static const struct attribute_group *lcs_attr_groups[] = { + &lcs_attr_group, + NULL, +}; +static const struct device_type lcs_devtype = { + .name = "lcs", + .groups = lcs_attr_groups, +}; /** * lcs_probe_device is called on establishing a new 
ccwgroup_device. @@ -2065,7 +2065,6 @@ static int lcs_probe_device(struct ccwgroup_device *ccwgdev) { struct lcs_card *card; - int ret; if (!get_device(&ccwgdev->dev)) return -ENODEV; @@ -2077,12 +2076,6 @@ lcs_probe_device(struct ccwgroup_device *ccwgdev) put_device(&ccwgdev->dev); return -ENOMEM; } - ret = sysfs_create_group(&ccwgdev->dev.kobj, &lcs_attr_group); - if (ret) { - lcs_free_card(card); - put_device(&ccwgdev->dev); - return ret; - } dev_set_drvdata(&ccwgdev->dev, card); ccwgdev->cdev[0]->handler = lcs_irq; ccwgdev->cdev[1]->handler = lcs_irq; @@ -2091,7 +2084,9 @@ lcs_probe_device(struct ccwgroup_device *ccwgdev) card->thread_start_mask = 0; card->thread_allowed_mask = 0; card->thread_running_mask = 0; - return 0; + ccwgdev->dev.type = &lcs_devtype; + + return 0; } static int @@ -2122,7 +2117,7 @@ static const struct net_device_ops lcs_mc_netdev_ops = { .ndo_stop = lcs_stop_device, .ndo_get_stats = lcs_getstats, .ndo_start_xmit = lcs_start_xmit, - .ndo_set_multicast_list = lcs_set_multicast_list, + .ndo_set_rx_mode = lcs_set_multicast_list, }; static int @@ -2168,18 +2163,12 @@ lcs_new_device(struct ccwgroup_device *ccwgdev) goto netdev_out; } switch (card->lan_type) { -#ifdef CONFIG_NET_ETHERNET +#ifdef CONFIG_ETHERNET case LCS_FRAME_TYPE_ENET: card->lan_type_trans = eth_type_trans; dev = alloc_etherdev(0); break; #endif -#ifdef CONFIG_TR - case LCS_FRAME_TYPE_TR: - card->lan_type_trans = tr_type_trans; - dev = alloc_trdev(0); - break; -#endif #ifdef CONFIG_FDDI case LCS_FRAME_TYPE_FDDI: card->lan_type_trans = fddi_type_trans; @@ -2242,7 +2231,7 @@ __lcs_shutdown_device(struct ccwgroup_device *ccwgdev, int recovery_mode) { struct lcs_card *card; enum lcs_dev_states recover_state; - int ret; + int ret = 0, ret2 = 0, ret3 = 0; LCS_DBF_TEXT(3, setup, "shtdndev"); card = dev_get_drvdata(&ccwgdev->dev); @@ -2257,13 +2246,15 @@ __lcs_shutdown_device(struct ccwgroup_device *ccwgdev, int recovery_mode) recover_state = card->state; ret = lcs_stop_device(card->dev); - ret = ccw_device_set_offline(card->read.ccwdev); - ret = ccw_device_set_offline(card->write.ccwdev); + ret2 = ccw_device_set_offline(card->read.ccwdev); + ret3 = ccw_device_set_offline(card->write.ccwdev); + if (!ret) + ret = (ret2) ? 
ret2 : ret3; + if (ret) + LCS_DBF_TEXT_(3, setup, "1err:%d", ret); if (recover_state == DEV_STATE_UP) { card->state = DEV_STATE_RECOVER; } - if (ret) - return ret; return 0; } @@ -2323,9 +2314,9 @@ lcs_remove_device(struct ccwgroup_device *ccwgdev) } if (card->dev) unregister_netdev(card->dev); - sysfs_remove_group(&ccwgdev->dev.kobj, &lcs_attr_group); lcs_cleanup_card(card); lcs_free_card(card); + dev_set_drvdata(&ccwgdev->dev, NULL); put_device(&ccwgdev->dev); } @@ -2392,22 +2383,25 @@ static struct ccw_device_id lcs_ids[] = { MODULE_DEVICE_TABLE(ccw, lcs_ids); static struct ccw_driver lcs_ccw_driver = { - .owner = THIS_MODULE, - .name = "lcs", + .driver = { + .owner = THIS_MODULE, + .name = "lcs", + }, .ids = lcs_ids, .probe = ccwgroup_probe_ccwdev, .remove = ccwgroup_remove_ccwdev, + .int_class = IRQIO_LCS, }; /** * LCS ccwgroup driver registration */ static struct ccwgroup_driver lcs_group_driver = { - .owner = THIS_MODULE, - .name = "lcs", - .max_slaves = 2, - .driver_id = 0xD3C3E2, - .probe = lcs_probe_device, + .driver = { + .owner = THIS_MODULE, + .name = "lcs", + }, + .setup = lcs_probe_device, .remove = lcs_remove_device, .set_online = lcs_new_device, .set_offline = lcs_shutdown_device, @@ -2418,30 +2412,24 @@ static struct ccwgroup_driver lcs_group_driver = { .restore = lcs_restore, }; -static ssize_t -lcs_driver_group_store(struct device_driver *ddrv, const char *buf, - size_t count) +static ssize_t lcs_driver_group_store(struct device_driver *ddrv, + const char *buf, size_t count) { int err; - err = ccwgroup_create_from_string(lcs_root_dev, - lcs_group_driver.driver_id, - &lcs_ccw_driver, 2, buf); + err = ccwgroup_create_dev(lcs_root_dev, &lcs_group_driver, 2, buf); return err ? err : count; } - static DRIVER_ATTR(group, 0200, NULL, lcs_driver_group_store); -static struct attribute *lcs_group_attrs[] = { +static struct attribute *lcs_drv_attrs[] = { &driver_attr_group.attr, NULL, }; - -static struct attribute_group lcs_group_attr_group = { - .attrs = lcs_group_attrs, +static struct attribute_group lcs_drv_attr_group = { + .attrs = lcs_drv_attrs, }; - -static const struct attribute_group *lcs_group_attr_groups[] = { - &lcs_group_attr_group, +static const struct attribute_group *lcs_drv_attr_groups[] = { + &lcs_drv_attr_group, NULL, }; @@ -2459,13 +2447,13 @@ __init lcs_init_module(void) if (rc) goto out_err; lcs_root_dev = root_device_register("lcs"); - rc = IS_ERR(lcs_root_dev) ? 
PTR_ERR(lcs_root_dev) : 0; + rc = PTR_ERR_OR_ZERO(lcs_root_dev); if (rc) goto register_err; rc = ccw_driver_register(&lcs_ccw_driver); if (rc) goto ccw_err; - lcs_group_driver.driver.groups = lcs_group_attr_groups; + lcs_group_driver.driver.groups = lcs_drv_attr_groups; rc = ccwgroup_driver_register(&lcs_group_driver); if (rc) goto ccwgroup_err; @@ -2491,8 +2479,6 @@ __exit lcs_cleanup_module(void) { pr_info("Terminating lcs module.\n"); LCS_DBF_TEXT(0, trace, "cleanup"); - driver_remove_file(&lcs_group_driver.driver, - &driver_attr_group); ccwgroup_driver_unregister(&lcs_group_driver); ccw_driver_unregister(&lcs_ccw_driver); root_device_unregister(lcs_root_dev); diff --git a/drivers/s390/net/lcs.h b/drivers/s390/net/lcs.h index 8c03392ac83..150fcb4cebc 100644 --- a/drivers/s390/net/lcs.h +++ b/drivers/s390/net/lcs.h @@ -16,15 +16,9 @@ do { \ debug_event(lcs_dbf_##name,level,(void*)(addr),len); \ } while (0) -/* Allow to sort out low debug levels early to avoid wasted sprints */ -static inline int lcs_dbf_passes(debug_info_t *dbf_grp, int level) -{ - return (level <= dbf_grp->level); -} - #define LCS_DBF_TEXT_(level,name,text...) \ do { \ - if (lcs_dbf_passes(lcs_dbf_##name, level)) { \ + if (debug_level_enabled(lcs_dbf_##name, level)) { \ sprintf(debug_buffer, text); \ debug_text_event(lcs_dbf_##name, level, debug_buffer); \ } \ diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c index 65ebee0a326..ce16d1bdb20 100644 --- a/drivers/s390/net/netiucv.c +++ b/drivers/s390/net/netiucv.c @@ -63,6 +63,7 @@ #include <asm/io.h> #include <asm/uaccess.h> +#include <asm/ebcdic.h> #include <net/iucv/iucv.h> #include "fsm.h" @@ -75,7 +76,7 @@ MODULE_DESCRIPTION ("Linux for S/390 IUCV network driver"); * Debug Facility stuff */ #define IUCV_DBF_SETUP_NAME "iucv_setup" -#define IUCV_DBF_SETUP_LEN 32 +#define IUCV_DBF_SETUP_LEN 64 #define IUCV_DBF_SETUP_PAGES 2 #define IUCV_DBF_SETUP_NR_AREAS 1 #define IUCV_DBF_SETUP_LEVEL 3 @@ -104,15 +105,9 @@ MODULE_DESCRIPTION ("Linux for S/390 IUCV network driver"); DECLARE_PER_CPU(char[256], iucv_dbf_txt_buf); -/* Allow to sort out low debug levels early to avoid wasted sprints */ -static inline int iucv_dbf_passes(debug_info_t *dbf_grp, int level) -{ - return (level <= dbf_grp->level); -} - #define IUCV_DBF_TEXT_(name, level, text...) 
\ do { \ - if (iucv_dbf_passes(iucv_dbf_##name, level)) { \ + if (debug_level_enabled(iucv_dbf_##name, level)) { \ char* __buf = get_cpu_var(iucv_dbf_txt_buf); \ sprintf(__buf, text); \ debug_text_event(iucv_dbf_##name, level, __buf); \ @@ -129,26 +124,6 @@ static inline int iucv_dbf_passes(debug_info_t *dbf_grp, int level) /** * some more debug stuff */ -#define IUCV_HEXDUMP16(importance,header,ptr) \ -PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \ - "%02x %02x %02x %02x %02x %02x %02x %02x\n", \ - *(((char*)ptr)),*(((char*)ptr)+1),*(((char*)ptr)+2), \ - *(((char*)ptr)+3),*(((char*)ptr)+4),*(((char*)ptr)+5), \ - *(((char*)ptr)+6),*(((char*)ptr)+7),*(((char*)ptr)+8), \ - *(((char*)ptr)+9),*(((char*)ptr)+10),*(((char*)ptr)+11), \ - *(((char*)ptr)+12),*(((char*)ptr)+13), \ - *(((char*)ptr)+14),*(((char*)ptr)+15)); \ -PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \ - "%02x %02x %02x %02x %02x %02x %02x %02x\n", \ - *(((char*)ptr)+16),*(((char*)ptr)+17), \ - *(((char*)ptr)+18),*(((char*)ptr)+19), \ - *(((char*)ptr)+20),*(((char*)ptr)+21), \ - *(((char*)ptr)+22),*(((char*)ptr)+23), \ - *(((char*)ptr)+24),*(((char*)ptr)+25), \ - *(((char*)ptr)+26),*(((char*)ptr)+27), \ - *(((char*)ptr)+28),*(((char*)ptr)+29), \ - *(((char*)ptr)+30),*(((char*)ptr)+31)); - #define PRINTK_HEADER " iucv: " /* for debugging */ /* dummy device to make sure netiucv_pm functions are called */ @@ -226,6 +201,7 @@ struct iucv_connection { struct net_device *netdev; struct connection_profile prof; char userid[9]; + char userdata[17]; }; /** @@ -263,7 +239,7 @@ struct ll_header { }; #define NETIUCV_HDRLEN (sizeof(struct ll_header)) -#define NETIUCV_BUFSIZE_MAX 32768 +#define NETIUCV_BUFSIZE_MAX 65537 #define NETIUCV_BUFSIZE_DEFAULT NETIUCV_BUFSIZE_MAX #define NETIUCV_MTU_MAX (NETIUCV_BUFSIZE_MAX - NETIUCV_HDRLEN) #define NETIUCV_MTU_DEFAULT 9216 @@ -288,7 +264,12 @@ static inline int netiucv_test_and_set_busy(struct net_device *dev) return test_and_set_bit(0, &priv->tbusy); } -static u8 iucvMagic[16] = { +static u8 iucvMagic_ascii[16] = { + 0x30, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x30, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20 +}; + +static u8 iucvMagic_ebcdic[16] = { 0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40 }; @@ -301,18 +282,38 @@ static u8 iucvMagic[16] = { * * @returns The printable string (static data!!) */ -static char *netiucv_printname(char *name) +static char *netiucv_printname(char *name, int len) { - static char tmp[9]; + static char tmp[17]; char *p = tmp; - memcpy(tmp, name, 8); - tmp[8] = '\0'; - while (*p && (!isspace(*p))) + memcpy(tmp, name, len); + tmp[len] = '\0'; + while (*p && ((p - tmp) < len) && (!isspace(*p))) p++; *p = '\0'; return tmp; } +static char *netiucv_printuser(struct iucv_connection *conn) +{ + static char tmp_uid[9]; + static char tmp_udat[17]; + static char buf[100]; + + if (memcmp(conn->userdata, iucvMagic_ebcdic, 16)) { + tmp_uid[8] = '\0'; + tmp_udat[16] = '\0'; + memcpy(tmp_uid, conn->userid, 8); + memcpy(tmp_uid, netiucv_printname(tmp_uid, 8), 8); + memcpy(tmp_udat, conn->userdata, 16); + EBCASC(tmp_udat, 16); + memcpy(tmp_udat, netiucv_printname(tmp_udat, 16), 16); + sprintf(buf, "%s.%s", tmp_uid, tmp_udat); + return buf; + } else + return netiucv_printname(conn->userid, 8); +} + /** * States of the interface statemachine. 
*/ @@ -563,15 +564,18 @@ static int netiucv_callback_connreq(struct iucv_path *path, { struct iucv_connection *conn = path->private; struct iucv_event ev; + static char tmp_user[9]; + static char tmp_udat[17]; int rc; - if (memcmp(iucvMagic, ipuser, sizeof(ipuser))) - /* ipuser must match iucvMagic. */ - return -EINVAL; rc = -EINVAL; + memcpy(tmp_user, netiucv_printname(ipvmid, 8), 8); + memcpy(tmp_udat, ipuser, 16); + EBCASC(tmp_udat, 16); read_lock_bh(&iucv_connection_rwlock); list_for_each_entry(conn, &iucv_connection_list, list) { - if (strncmp(ipvmid, conn->userid, 8)) + if (strncmp(ipvmid, conn->userid, 8) || + strncmp(ipuser, conn->userdata, 16)) continue; /* Found a matching connection for this path. */ conn->path = path; @@ -580,6 +584,8 @@ static int netiucv_callback_connreq(struct iucv_path *path, fsm_event(conn->fsm, CONN_EVENT_CONN_REQ, &ev); rc = 0; } + IUCV_DBF_TEXT_(setup, 2, "Connection requested for %s.%s\n", + tmp_user, netiucv_printname(tmp_udat, 16)); read_unlock_bh(&iucv_connection_rwlock); return rc; } @@ -733,8 +739,12 @@ static void conn_action_txdone(fsm_instance *fi, int event, void *arg) IUCV_DBF_TEXT(trace, 4, __func__); - if (conn && conn->netdev) - privptr = netdev_priv(conn->netdev); + if (!conn || !conn->netdev) { + IUCV_DBF_TEXT(data, 2, + "Send confirmation for unlinked connection\n"); + return; + } + privptr = netdev_priv(conn->netdev); conn->prof.tx_pending--; if (single_flag) { if ((skb = skb_dequeue(&conn->commit_queue))) { @@ -816,7 +826,7 @@ static void conn_action_connaccept(fsm_instance *fi, int event, void *arg) conn->path = path; path->msglim = NETIUCV_QUEUELEN_DEFAULT; path->flags = 0; - rc = iucv_path_accept(path, &netiucv_handler, NULL, conn); + rc = iucv_path_accept(path, &netiucv_handler, conn->userdata , conn); if (rc) { IUCV_DBF_TEXT_(setup, 2, "rc %d from iucv_accept", rc); return; @@ -854,7 +864,7 @@ static void conn_action_conntimsev(fsm_instance *fi, int event, void *arg) IUCV_DBF_TEXT(trace, 3, __func__); fsm_deltimer(&conn->timer); - iucv_path_sever(conn->path, NULL); + iucv_path_sever(conn->path, conn->userdata); fsm_newstate(fi, CONN_STATE_STARTWAIT); } @@ -867,9 +877,9 @@ static void conn_action_connsever(fsm_instance *fi, int event, void *arg) IUCV_DBF_TEXT(trace, 3, __func__); fsm_deltimer(&conn->timer); - iucv_path_sever(conn->path, NULL); - dev_info(privptr->dev, "The peer interface of the IUCV device" - " has closed the connection\n"); + iucv_path_sever(conn->path, conn->userdata); + dev_info(privptr->dev, "The peer z/VM guest %s has closed the " + "connection\n", netiucv_printuser(conn)); IUCV_DBF_TEXT(data, 2, "conn_action_connsever: Remote dropped connection\n"); fsm_newstate(fi, CONN_STATE_STARTWAIT); @@ -886,8 +896,6 @@ static void conn_action_start(fsm_instance *fi, int event, void *arg) IUCV_DBF_TEXT(trace, 3, __func__); fsm_newstate(fi, CONN_STATE_STARTWAIT); - IUCV_DBF_TEXT_(setup, 2, "%s('%s'): connecting ...\n", - netdev->name, conn->userid); /* * We must set the state before calling iucv_connect because the @@ -897,8 +905,11 @@ static void conn_action_start(fsm_instance *fi, int event, void *arg) fsm_newstate(fi, CONN_STATE_SETUPWAIT); conn->path = iucv_path_alloc(NETIUCV_QUEUELEN_DEFAULT, 0, GFP_KERNEL); + IUCV_DBF_TEXT_(setup, 2, "%s: connecting to %s ...\n", + netdev->name, netiucv_printuser(conn)); + rc = iucv_path_connect(conn->path, &netiucv_handler, conn->userid, - NULL, iucvMagic, conn); + NULL, conn->userdata, conn); switch (rc) { case 0: netdev->tx_queue_len = conn->path->msglim; @@ -908,13 +919,13 @@ 
static void conn_action_start(fsm_instance *fi, int event, void *arg) case 11: dev_warn(privptr->dev, "The IUCV device failed to connect to z/VM guest %s\n", - netiucv_printname(conn->userid)); + netiucv_printname(conn->userid, 8)); fsm_newstate(fi, CONN_STATE_STARTWAIT); break; case 12: dev_warn(privptr->dev, "The IUCV device failed to connect to the peer on z/VM" - " guest %s\n", netiucv_printname(conn->userid)); + " guest %s\n", netiucv_printname(conn->userid, 8)); fsm_newstate(fi, CONN_STATE_STARTWAIT); break; case 13: @@ -927,7 +938,7 @@ static void conn_action_start(fsm_instance *fi, int event, void *arg) dev_err(privptr->dev, "z/VM guest %s has too many IUCV connections" " to connect with the IUCV device\n", - netiucv_printname(conn->userid)); + netiucv_printname(conn->userid, 8)); fsm_newstate(fi, CONN_STATE_CONNERR); break; case 15: @@ -972,7 +983,7 @@ static void conn_action_stop(fsm_instance *fi, int event, void *arg) netiucv_purge_skb_queue(&conn->collect_queue); if (conn->path) { IUCV_DBF_TEXT(trace, 5, "calling iucv_path_sever\n"); - iucv_path_sever(conn->path, iucvMagic); + iucv_path_sever(conn->path, conn->userdata); kfree(conn->path); conn->path = NULL; } @@ -1090,7 +1101,8 @@ dev_action_connup(fsm_instance *fi, int event, void *arg) fsm_newstate(fi, DEV_STATE_RUNNING); dev_info(privptr->dev, "The IUCV device has been connected" - " successfully to %s\n", privptr->conn->userid); + " successfully to %s\n", + netiucv_printuser(privptr->conn)); IUCV_DBF_TEXT(setup, 3, "connection is up and running\n"); break; @@ -1452,45 +1464,72 @@ static ssize_t user_show(struct device *dev, struct device_attribute *attr, struct netiucv_priv *priv = dev_get_drvdata(dev); IUCV_DBF_TEXT(trace, 5, __func__); - return sprintf(buf, "%s\n", netiucv_printname(priv->conn->userid)); + return sprintf(buf, "%s\n", netiucv_printuser(priv->conn)); } -static ssize_t user_write(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) +static int netiucv_check_user(const char *buf, size_t count, char *username, + char *userdata) { - struct netiucv_priv *priv = dev_get_drvdata(dev); - struct net_device *ndev = priv->conn->netdev; - char *p; - char *tmp; - char username[9]; - int i; - struct iucv_connection *cp; + const char *p; + int i; - IUCV_DBF_TEXT(trace, 3, __func__); - if (count > 9) { - IUCV_DBF_TEXT_(setup, 2, - "%d is length of username\n", (int) count); + p = strchr(buf, '.'); + if ((p && ((count > 26) || + ((p - buf) > 8) || + (buf + count - p > 18))) || + (!p && (count > 9))) { + IUCV_DBF_TEXT(setup, 2, "conn_write: too long\n"); return -EINVAL; } - tmp = strsep((char **) &buf, "\n"); - for (i = 0, p = tmp; i < 8 && *p; i++, p++) { - if (isalnum(*p) || (*p == '$')) { - username[i]= toupper(*p); + for (i = 0, p = buf; i < 8 && *p && *p != '.'; i++, p++) { + if (isalnum(*p) || *p == '$') { + username[i] = toupper(*p); continue; } - if (*p == '\n') { + if (*p == '\n') /* trailing lf, grr */ break; - } IUCV_DBF_TEXT_(setup, 2, - "username: invalid character %c\n", *p); + "conn_write: invalid character %02x\n", *p); return -EINVAL; } while (i < 8) username[i++] = ' '; username[8] = '\0'; + if (*p == '.') { + p++; + for (i = 0; i < 16 && *p; i++, p++) { + if (*p == '\n') + break; + userdata[i] = toupper(*p); + } + while (i > 0 && i < 16) + userdata[i++] = ' '; + } else + memcpy(userdata, iucvMagic_ascii, 16); + userdata[16] = '\0'; + ASCEBC(userdata, 16); + + return 0; +} + +static ssize_t user_write(struct device *dev, struct device_attribute *attr, + const char *buf, 
size_t count) +{ + struct netiucv_priv *priv = dev_get_drvdata(dev); + struct net_device *ndev = priv->conn->netdev; + char username[9]; + char userdata[17]; + int rc; + struct iucv_connection *cp; + + IUCV_DBF_TEXT(trace, 3, __func__); + rc = netiucv_check_user(buf, count, username, userdata); + if (rc) + return rc; + if (memcmp(username, priv->conn->userid, 9) && (ndev->flags & (IFF_UP | IFF_RUNNING))) { /* username changed while the interface is active. */ @@ -1499,15 +1538,17 @@ static ssize_t user_write(struct device *dev, struct device_attribute *attr, } read_lock_bh(&iucv_connection_rwlock); list_for_each_entry(cp, &iucv_connection_list, list) { - if (!strncmp(username, cp->userid, 9) && cp->netdev != ndev) { + if (!strncmp(username, cp->userid, 9) && + !strncmp(userdata, cp->userdata, 17) && cp->netdev != ndev) { read_unlock_bh(&iucv_connection_rwlock); - IUCV_DBF_TEXT_(setup, 2, "user_write: Connection " - "to %s already exists\n", username); + IUCV_DBF_TEXT_(setup, 2, "user_write: Connection to %s " + "already exists\n", netiucv_printuser(cp)); return -EEXIST; } } read_unlock_bh(&iucv_connection_rwlock); memcpy(priv->conn->userid, username, 9); + memcpy(priv->conn->userdata, userdata, 17); return count; } @@ -1537,7 +1578,8 @@ static ssize_t buffer_write (struct device *dev, struct device_attribute *attr, bs1 = simple_strtoul(buf, &e, 0); if (e && (!isspace(*e))) { - IUCV_DBF_TEXT_(setup, 2, "buffer_write: invalid char %c\n", *e); + IUCV_DBF_TEXT_(setup, 2, "buffer_write: invalid char %02x\n", + *e); return -EINVAL; } if (bs1 > NETIUCV_BUFSIZE_MAX) { @@ -1790,26 +1832,11 @@ static struct attribute_group netiucv_stat_attr_group = { .attrs = netiucv_stat_attrs, }; -static int netiucv_add_files(struct device *dev) -{ - int ret; - - IUCV_DBF_TEXT(trace, 3, __func__); - ret = sysfs_create_group(&dev->kobj, &netiucv_attr_group); - if (ret) - return ret; - ret = sysfs_create_group(&dev->kobj, &netiucv_stat_attr_group); - if (ret) - sysfs_remove_group(&dev->kobj, &netiucv_attr_group); - return ret; -} - -static void netiucv_remove_files(struct device *dev) -{ - IUCV_DBF_TEXT(trace, 3, __func__); - sysfs_remove_group(&dev->kobj, &netiucv_stat_attr_group); - sysfs_remove_group(&dev->kobj, &netiucv_attr_group); -} +static const struct attribute_group *netiucv_attr_groups[] = { + &netiucv_stat_attr_group, + &netiucv_attr_group, + NULL, +}; static int netiucv_register_device(struct net_device *ndev) { @@ -1823,6 +1850,7 @@ static int netiucv_register_device(struct net_device *ndev) dev_set_name(dev, "net%s", ndev->name); dev->bus = &iucv_bus; dev->parent = iucv_root; + dev->groups = netiucv_attr_groups; /* * The release function could be called after the * module has been unloaded. 
It's _only_ task is to @@ -1840,22 +1868,14 @@ static int netiucv_register_device(struct net_device *ndev) put_device(dev); return ret; } - ret = netiucv_add_files(dev); - if (ret) - goto out_unreg; priv->dev = dev; dev_set_drvdata(dev, priv); return 0; - -out_unreg: - device_unregister(dev); - return ret; } static void netiucv_unregister_device(struct device *dev) { IUCV_DBF_TEXT(trace, 3, __func__); - netiucv_remove_files(dev); device_unregister(dev); } @@ -1864,7 +1884,8 @@ static void netiucv_unregister_device(struct device *dev) * Add it to the list of netiucv connections; */ static struct iucv_connection *netiucv_new_connection(struct net_device *dev, - char *username) + char *username, + char *userdata) { struct iucv_connection *conn; @@ -1893,6 +1914,8 @@ static struct iucv_connection *netiucv_new_connection(struct net_device *dev, fsm_settimer(conn->fsm, &conn->timer); fsm_newstate(conn->fsm, CONN_STATE_INVALID); + if (userdata) + memcpy(conn->userdata, userdata, 17); if (username) { memcpy(conn->userid, username, 9); fsm_newstate(conn->fsm, CONN_STATE_STOPPED); @@ -1919,6 +1942,7 @@ out: */ static void netiucv_remove_connection(struct iucv_connection *conn) { + IUCV_DBF_TEXT(trace, 3, __func__); write_lock_bh(&iucv_connection_rwlock); list_del_init(&conn->list); @@ -1926,7 +1950,7 @@ static void netiucv_remove_connection(struct iucv_connection *conn) fsm_deltimer(&conn->timer); netiucv_purge_skb_queue(&conn->collect_queue); if (conn->path) { - iucv_path_sever(conn->path, iucvMagic); + iucv_path_sever(conn->path, conn->userdata); kfree(conn->path); conn->path = NULL; } @@ -1985,7 +2009,7 @@ static void netiucv_setup_netdevice(struct net_device *dev) /** * Allocate and initialize everything of a net device. */ -static struct net_device *netiucv_init_netdevice(char *username) +static struct net_device *netiucv_init_netdevice(char *username, char *userdata) { struct netiucv_priv *privptr; struct net_device *dev; @@ -1994,6 +2018,7 @@ static struct net_device *netiucv_init_netdevice(char *username) netiucv_setup_netdevice); if (!dev) return NULL; + rtnl_lock(); if (dev_alloc_name(dev, dev->name) < 0) goto out_netdev; @@ -2004,7 +2029,7 @@ static struct net_device *netiucv_init_netdevice(char *username) if (!privptr->fsm) goto out_netdev; - privptr->conn = netiucv_new_connection(dev, username); + privptr->conn = netiucv_new_connection(dev, username, userdata); if (!privptr->conn) { IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_new_connection\n"); goto out_fsm; @@ -2015,6 +2040,7 @@ static struct net_device *netiucv_init_netdevice(char *username) out_fsm: kfree_fsm(privptr->fsm); out_netdev: + rtnl_unlock(); free_netdev(dev); return NULL; } @@ -2022,47 +2048,31 @@ out_netdev: static ssize_t conn_write(struct device_driver *drv, const char *buf, size_t count) { - const char *p; char username[9]; - int i, rc; + char userdata[17]; + int rc; struct net_device *dev; struct netiucv_priv *priv; struct iucv_connection *cp; IUCV_DBF_TEXT(trace, 3, __func__); - if (count>9) { - IUCV_DBF_TEXT(setup, 2, "conn_write: too long\n"); - return -EINVAL; - } - - for (i = 0, p = buf; i < 8 && *p; i++, p++) { - if (isalnum(*p) || *p == '$') { - username[i] = toupper(*p); - continue; - } - if (*p == '\n') - /* trailing lf, grr */ - break; - IUCV_DBF_TEXT_(setup, 2, - "conn_write: invalid character %c\n", *p); - return -EINVAL; - } - while (i < 8) - username[i++] = ' '; - username[8] = '\0'; + rc = netiucv_check_user(buf, count, username, userdata); + if (rc) + return rc; read_lock_bh(&iucv_connection_rwlock); 
list_for_each_entry(cp, &iucv_connection_list, list) { - if (!strncmp(username, cp->userid, 9)) { + if (!strncmp(username, cp->userid, 9) && + !strncmp(userdata, cp->userdata, 17)) { read_unlock_bh(&iucv_connection_rwlock); - IUCV_DBF_TEXT_(setup, 2, "conn_write: Connection " - "to %s already exists\n", username); + IUCV_DBF_TEXT_(setup, 2, "conn_write: Connection to %s " + "already exists\n", netiucv_printuser(cp)); return -EEXIST; } } read_unlock_bh(&iucv_connection_rwlock); - dev = netiucv_init_netdevice(username); + dev = netiucv_init_netdevice(username, userdata); if (!dev) { IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_init_netdevice\n"); return -ENODEV; @@ -2070,6 +2080,7 @@ static ssize_t conn_write(struct device_driver *drv, rc = netiucv_register_device(dev); if (rc) { + rtnl_unlock(); IUCV_DBF_TEXT_(setup, 2, "ret %d from netiucv_register_device\n", rc); goto out_free_ndev; @@ -2079,12 +2090,14 @@ static ssize_t conn_write(struct device_driver *drv, priv = netdev_priv(dev); SET_NETDEV_DEV(dev, priv->dev); - rc = register_netdev(dev); + rc = register_netdevice(dev); + rtnl_unlock(); if (rc) goto out_unreg; - dev_info(priv->dev, "The IUCV interface to %s has been" - " established successfully\n", netiucv_printname(username)); + dev_info(priv->dev, "The IUCV interface to %s has been established " + "successfully\n", + netiucv_printuser(priv->conn)); return count; diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index fcd005aad98..a2088af51cc 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -1,6 +1,4 @@ /* - * drivers/s390/net/qeth_core.h - * * Copyright IBM Corp. 2007 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>, * Frank Pavlic <fpavlic@de.ibm.com>, @@ -13,8 +11,6 @@ #include <linux/if.h> #include <linux/if_arp.h> -#include <linux/if_tr.h> -#include <linux/trdevice.h> #include <linux/etherdevice.h> #include <linux/if_vlan.h> #include <linux/ctype.h> @@ -40,11 +36,7 @@ */ enum qeth_dbf_names { QETH_DBF_SETUP, - QETH_DBF_QERR, - QETH_DBF_TRACE, QETH_DBF_MSG, - QETH_DBF_SENSE, - QETH_DBF_MISC, QETH_DBF_CTRL, QETH_DBF_INFOS /* must be last element */ }; @@ -71,7 +63,19 @@ struct qeth_dbf_info { debug_sprintf_event(qeth_dbf[QETH_DBF_MSG].id, level, text) #define QETH_DBF_TEXT_(name, level, text...) \ - qeth_dbf_longtext(QETH_DBF_##name, level, text) + qeth_dbf_longtext(qeth_dbf[QETH_DBF_##name].id, level, text) + +#define QETH_CARD_TEXT(card, level, text) \ + debug_text_event(card->debug, level, text) + +#define QETH_CARD_HEX(card, level, addr, len) \ + debug_event(card->debug, level, (void *)(addr), len) + +#define QETH_CARD_MESSAGE(card, text...) \ + debug_sprintf_event(card->debug, level, text) + +#define QETH_CARD_TEXT_(card, level, text...) 
\ + qeth_dbf_longtext(card->debug, level, text) #define SENSE_COMMAND_REJECT_BYTE 0 #define SENSE_COMMAND_REJECT_FLAG 0x80 @@ -102,6 +106,10 @@ struct qeth_perf_stats { unsigned int sc_dp_p; unsigned int sc_p_dp; + /* qdio_cq_handler: number of times called, time spent in */ + __u64 cq_start_time; + unsigned int cq_cnt; + unsigned int cq_time; /* qdio_input_handler: number of times called, time spent in */ __u64 inbound_start_time; unsigned int inbound_cnt; @@ -148,6 +156,27 @@ struct qeth_ipa_info { __u32 enabled_funcs; }; +/* SETBRIDGEPORT stuff */ +enum qeth_sbp_roles { + QETH_SBP_ROLE_NONE = 0, + QETH_SBP_ROLE_PRIMARY = 1, + QETH_SBP_ROLE_SECONDARY = 2, +}; + +enum qeth_sbp_states { + QETH_SBP_STATE_INACTIVE = 0, + QETH_SBP_STATE_STANDBY = 1, + QETH_SBP_STATE_ACTIVE = 2, +}; + +#define QETH_SBP_HOST_NOTIFICATION 1 + +struct qeth_sbp_info { + __u32 supported_funcs; + enum qeth_sbp_roles role; + __u32 hostnotification:1; +}; + static inline int qeth_is_ipa_supported(struct qeth_ipa_info *ipa, enum qeth_ipa_funcs func) { @@ -179,25 +208,22 @@ static inline int qeth_is_ipa_enabled(struct qeth_ipa_info *ipa, ((prot == QETH_PROT_IPV6) ? \ qeth_is_enabled6(c, f) : qeth_is_enabled(c, f)) -#define QETH_IDX_FUNC_LEVEL_OSAE_ENA_IPAT 0x0101 -#define QETH_IDX_FUNC_LEVEL_OSAE_DIS_IPAT 0x0101 -#define QETH_IDX_FUNC_LEVEL_IQD_ENA_IPAT 0x4108 -#define QETH_IDX_FUNC_LEVEL_IQD_DIS_IPAT 0x5108 +#define QETH_IDX_FUNC_LEVEL_OSD 0x0101 +#define QETH_IDX_FUNC_LEVEL_IQD 0x4108 #define QETH_MODELLIST_ARRAY \ - {{0x1731, 0x01, 0x1732, 0x01, QETH_CARD_TYPE_OSAE, 1, \ - QETH_IDX_FUNC_LEVEL_OSAE_ENA_IPAT, \ - QETH_IDX_FUNC_LEVEL_OSAE_DIS_IPAT, \ - QETH_MAX_QUEUES, 0}, \ - {0x1731, 0x05, 0x1732, 0x05, QETH_CARD_TYPE_IQD, 0, \ - QETH_IDX_FUNC_LEVEL_IQD_ENA_IPAT, \ - QETH_IDX_FUNC_LEVEL_IQD_DIS_IPAT, \ - QETH_MAX_QUEUES, 0x103}, \ - {0x1731, 0x06, 0x1732, 0x06, QETH_CARD_TYPE_OSN, 0, \ - QETH_IDX_FUNC_LEVEL_OSAE_ENA_IPAT, \ - QETH_IDX_FUNC_LEVEL_OSAE_DIS_IPAT, \ - QETH_MAX_QUEUES, 0}, \ - {0, 0, 0, 0, 0, 0, 0, 0, 0} } + {{0x1731, 0x01, 0x1732, QETH_CARD_TYPE_OSD, QETH_MAX_QUEUES, 0}, \ + {0x1731, 0x05, 0x1732, QETH_CARD_TYPE_IQD, QETH_MAX_QUEUES, 0x103}, \ + {0x1731, 0x06, 0x1732, QETH_CARD_TYPE_OSN, QETH_MAX_QUEUES, 0}, \ + {0x1731, 0x02, 0x1732, QETH_CARD_TYPE_OSM, QETH_MAX_QUEUES, 0}, \ + {0x1731, 0x02, 0x1732, QETH_CARD_TYPE_OSX, QETH_MAX_QUEUES, 0}, \ + {0, 0, 0, 0, 0, 0} } +#define QETH_CU_TYPE_IND 0 +#define QETH_CU_MODEL_IND 1 +#define QETH_DEV_TYPE_IND 2 +#define QETH_DEV_MODEL_IND 3 +#define QETH_QUEUE_NO_IND 4 +#define QETH_MULTICAST_IND 5 #define QETH_REAL_CARD 1 #define QETH_VLAN_CARD 2 @@ -208,6 +234,7 @@ static inline int qeth_is_ipa_enabled(struct qeth_ipa_info *ipa, */ #define QETH_TX_TIMEOUT 100 * HZ #define QETH_RCD_TIMEOUT 60 * HZ +#define QETH_RECLAIM_WORK_TIME HZ #define QETH_HEADER_SIZE 32 #define QETH_MAX_PORTNO 15 @@ -220,12 +247,13 @@ static inline int qeth_is_ipa_enabled(struct qeth_ipa_info *ipa, /*****************************************************************************/ #define QETH_MAX_QUEUES 4 #define QETH_IN_BUF_SIZE_DEFAULT 65536 -#define QETH_IN_BUF_COUNT_DEFAULT 16 +#define QETH_IN_BUF_COUNT_DEFAULT 64 +#define QETH_IN_BUF_COUNT_HSDEFAULT 128 #define QETH_IN_BUF_COUNT_MIN 8 #define QETH_IN_BUF_COUNT_MAX 128 #define QETH_MAX_BUFFER_ELEMENTS(card) ((card)->qdio.in_buf_size >> 12) #define QETH_IN_BUF_REQUEUE_THRESHOLD(card) \ - ((card)->qdio.in_buf_pool.buf_count / 2) + ((card)->qdio.in_buf_pool.buf_count / 2) /* buffers we have to be behind before we get a PCI */ #define 
QETH_PCI_THRESHOLD_A(card) ((card)->qdio.in_buf_pool.buf_count+1) @@ -240,10 +268,8 @@ static inline int qeth_is_ipa_enabled(struct qeth_ipa_info *ipa, #define QETH_NO_PRIO_QUEUEING 0 #define QETH_PRIO_Q_ING_PREC 1 #define QETH_PRIO_Q_ING_TOS 2 -#define IP_TOS_LOWDELAY 0x10 -#define IP_TOS_HIGHTHROUGHPUT 0x08 -#define IP_TOS_HIGHRELIABILITY 0x04 -#define IP_TOS_NOTIMPORTANT 0x02 +#define QETH_PRIO_Q_ING_SKB 3 +#define QETH_PRIO_Q_ING_VLAN 4 /* Packing */ #define QETH_LOW_WATERMARK_PACK 2 @@ -254,6 +280,7 @@ static inline int qeth_is_ipa_enabled(struct qeth_ipa_info *ipa, /* large receive scatter gather copy break */ #define QETH_RX_SG_CB (PAGE_SIZE >> 1) +#define QETH_RX_PULL_LEN 256 struct qeth_hdr_layer3 { __u8 id; @@ -351,11 +378,11 @@ enum qeth_header_ids { #define QETH_HDR_EXT_SRC_MAC_ADDR 0x08 #define QETH_HDR_EXT_CSUM_HDR_REQ 0x10 #define QETH_HDR_EXT_CSUM_TRANSP_REQ 0x20 -#define QETH_HDR_EXT_UDP_TSO 0x40 /*bit off for TCP*/ +#define QETH_HDR_EXT_UDP 0x40 /*bit off for TCP*/ static inline int qeth_is_last_sbale(struct qdio_buffer_element *sbale) { - return (sbale->flags & SBAL_FLAGS_LAST_ENTRY); + return (sbale->eflags & SBAL_EFLAGS_LAST_ENTRY); } enum qeth_qdio_buffer_states { @@ -369,6 +396,21 @@ enum qeth_qdio_buffer_states { * outbound: filled by driver; owned by hardware in order to be sent */ QETH_QDIO_BUF_PRIMED, + /* + * inbound: not applicable + * outbound: identified to be pending in TPQ + */ + QETH_QDIO_BUF_PENDING, + /* + * inbound: not applicable + * outbound: found in completion queue + */ + QETH_QDIO_BUF_IN_CQ, + /* + * inbound: not applicable + * outbound: handled via transfer pending / completion queue + */ + QETH_QDIO_BUF_HANDLED_DELAYED, }; enum qeth_qdio_info_states { @@ -393,6 +435,7 @@ struct qeth_qdio_buffer { struct qdio_buffer *buffer; /* the buffer pool entry currently associated to this buffer */ struct qeth_buffer_pool_entry *pool_entry; + struct sk_buff *rx_skb; }; struct qeth_qdio_q { @@ -401,19 +444,16 @@ struct qeth_qdio_q { int next_buf_to_init; } __attribute__ ((aligned(256))); -/* possible types of qeth large_send support */ -enum qeth_large_send_types { - QETH_LARGE_SEND_NO, - QETH_LARGE_SEND_TSO, -}; - struct qeth_qdio_out_buffer { struct qdio_buffer *buffer; atomic_t state; int next_element_to_fill; struct sk_buff_head skb_list; - struct list_head ctx_list; int is_header[16]; + + struct qaob *aob; + struct qeth_qdio_out_q *q; + struct qeth_qdio_out_buffer *next_pending; }; struct qeth_card; @@ -426,7 +466,8 @@ enum qeth_out_q_states { struct qeth_qdio_out_q { struct qdio_buffer qdio_bufs[QDIO_MAX_BUFFERS_PER_Q]; - struct qeth_qdio_out_buffer bufs[QDIO_MAX_BUFFERS_PER_Q]; + struct qeth_qdio_out_buffer *bufs[QDIO_MAX_BUFFERS_PER_Q]; + struct qdio_outbuf_state *bufstates; /* convenience pointer */ int queue_no; struct qeth_card *card; atomic_t state; @@ -435,7 +476,6 @@ struct qeth_qdio_out_q { * index of buffer to be filled by driver; state EMPTY or PACKING */ int next_buf_to_fill; - int sync_iqdio_error; /* * number of buffers that are currently filled (PRIMED) * -> these buffers are hardware-owned @@ -448,7 +488,9 @@ struct qeth_qdio_out_q { struct qeth_qdio_info { atomic_t state; /* input */ + int no_in_queues; struct qeth_qdio_q *in_q; + struct qeth_qdio_q *c_q; struct qeth_qdio_buffer_pool in_buf_pool; struct qeth_qdio_buffer_pool init_pool; int in_buf_size; @@ -456,6 +498,7 @@ struct qeth_qdio_info { /* output */ int no_out_queues; struct qeth_qdio_out_q **out_qs; + struct qdio_outbuf_state *out_bufstates; /* priority queueing */ int 
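/*
 * Sketch only: with the expanded struct qeth_discipline above, the core
 * can forward every ccwgroup callback through one indirection instead of
 * keeping a ccwgroup_driver per discipline.  Types are stubbed and the
 * helper is hypothetical; only the dispatch shape matters.
 */
#include <errno.h>

struct ccwgroup_device;
struct discipline_stub {
	int (*set_online)(struct ccwgroup_device *);
	int (*set_offline)(struct ccwgroup_device *);
};
struct card_stub {
	struct discipline_stub *discipline;
	struct ccwgroup_device *gdev;
};

static int card_set_online(struct card_stub *card)
{
	if (!card->discipline)
		return -EINVAL;	/* no layer discipline bound yet */
	return card->discipline->set_online(card->gdev);
}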
do_prio_queueing; @@ -527,6 +570,12 @@ enum qeth_cmd_buffer_state { BUF_STATE_PROCESSED, }; +enum qeth_cq { + QETH_CQ_DISABLED = 0, + QETH_CQ_ENABLED = 1, + QETH_CQ_NOTAVAILABLE = 2, +}; + struct qeth_ipato { int enabled; int invert4; @@ -630,7 +679,10 @@ struct qeth_card_info { int unique_id; struct qeth_card_blkt blkt; __u32 csum_mask; + __u32 tx_csum_mask; enum qeth_ipa_promisc_modes promisc_mode; + __u32 diagass_support; + __u32 hwtrap; }; struct qeth_card_options { @@ -639,17 +691,17 @@ struct qeth_card_options { struct qeth_ipa_info adp; /*Adapter parameters*/ struct qeth_routing_info route6; struct qeth_ipa_info ipa6; - enum qeth_checksum_types checksum_type; - int broadcast_mode; - int macaddr_mode; + struct qeth_sbp_info sbp; /* SETBRIDGEPORT options */ int fake_broadcast; int add_hhlen; int layer2; - enum qeth_large_send_types large_send; int performance_stats; int rx_sg_cb; enum qeth_ipa_isolation_modes isolation; + enum qeth_ipa_isolation_modes prev_isolation; int sniffer; + enum qeth_cq cq; + char hsuid[9]; }; /* @@ -670,10 +722,22 @@ enum qeth_discipline_id { }; struct qeth_discipline { + void (*start_poll)(struct ccw_device *, int, unsigned long); qdio_handler_t *input_handler; qdio_handler_t *output_handler; int (*recover)(void *ptr); - struct ccwgroup_driver *ccwgdriver; + int (*setup) (struct ccwgroup_device *); + void (*remove) (struct ccwgroup_device *); + int (*set_online) (struct ccwgroup_device *); + int (*set_offline) (struct ccwgroup_device *); + void (*shutdown)(struct ccwgroup_device *); + int (*prepare) (struct ccwgroup_device *); + void (*complete) (struct ccwgroup_device *); + int (*freeze)(struct ccwgroup_device *); + int (*thaw) (struct ccwgroup_device *); + int (*restore)(struct ccwgroup_device *); + int (*control_event_handler)(struct qeth_card *card, + struct qeth_ipa_cmd *cmd); }; struct qeth_vlan_vid { @@ -688,13 +752,21 @@ struct qeth_mc_mac { int is_vmac; }; -struct qeth_skb_data { - __u32 magic; - int count; +struct qeth_rx { + int b_count; + int b_index; + struct qdio_buffer_element *b_element; + int e_offset; + int qdio_err; }; -#define QETH_SKB_MAGIC 0x71657468 -#define QETH_SIGA_CC2_RETRIES 3 +struct carrier_info { + __u8 card_type; + __u16 port_mode; + __u32 port_speed; +}; + +#define QETH_NAPI_WEIGHT NAPI_POLL_WEIGHT struct qeth_card { struct list_head list; @@ -717,7 +789,7 @@ struct qeth_card { wait_queue_head_t wait_q; spinlock_t vlanlock; spinlock_t mclock; - struct vlan_group *vlangrp; + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; struct list_head vid_list; struct list_head mc_list; struct work_struct kernel_thread_starter; @@ -725,6 +797,7 @@ struct qeth_card { unsigned long thread_start_mask; unsigned long thread_allowed_mask; unsigned long thread_running_mask; + struct task_struct *recovery_task; spinlock_t ip_lock; struct list_head ip_list; struct list_head *ip_tbd_list; @@ -733,12 +806,20 @@ struct qeth_card { /* QDIO buffer handling */ struct qeth_qdio_info qdio; struct qeth_perf_stats perf_stats; - int use_hard_stop; + int read_or_write_problem; struct qeth_osn_info osn_info; - struct qeth_discipline discipline; + struct qeth_discipline *discipline; atomic_t force_alloc_skb; struct service_level qeth_service_level; struct qdio_ssqd_desc ssqd; + debug_info_t *debug; + struct mutex conf_mutex; + struct mutex discipline_mutex; + struct napi_struct napi; + struct qeth_rx rx; + struct delayed_work buffer_reclaim_work; + int reclaim_index; + struct work_struct close_dev_work; }; struct qeth_card_list_struct { @@ -746,6 +827,14 
@@ struct qeth_card_list_struct { rwlock_t rwlock; }; +struct qeth_trap_id { + __u16 lparnr; + char vmname[8]; + __u8 chpid; + __u8 ssid; + __u16 devno; +} __packed; + /*some helper functions*/ #define QETH_CARD_IFNAME(card) (((card)->dev)? (card)->dev->name : "") @@ -758,13 +847,16 @@ static inline struct qeth_card *CARD_FROM_CDEV(struct ccw_device *cdev) static inline int qeth_get_micros(void) { - return (int) (get_clock() >> 12); + return (int) (get_tod_clock() >> 12); } static inline int qeth_get_ip_version(struct sk_buff *skb) { - struct ethhdr *ehdr = (struct ethhdr *)skb->data; - switch (ehdr->h_proto) { + __be16 *p = &((struct ethhdr *)skb->data)->h_proto; + + if (*p == ETH_P_8021Q) + p += 2; + switch (*p) { case ETH_P_IPV6: return 6; case ETH_P_IP: @@ -780,22 +872,31 @@ static inline void qeth_put_buffer_pool_entry(struct qeth_card *card, list_add_tail(&entry->list, &card->qdio.in_buf_pool.entry_list); } -extern struct ccwgroup_driver qeth_l2_ccwgroup_driver; -extern struct ccwgroup_driver qeth_l3_ccwgroup_driver; +static inline int qeth_is_diagass_supported(struct qeth_card *card, + enum qeth_diags_cmds cmd) +{ + return card->info.diagass_support & (__u32)cmd; +} + +extern struct qeth_discipline qeth_l2_discipline; +extern struct qeth_discipline qeth_l3_discipline; +extern const struct attribute_group *qeth_generic_attr_groups[]; +extern const struct attribute_group *qeth_osn_attr_groups[]; +extern struct workqueue_struct *qeth_wq; + const char *qeth_get_cardname_short(struct qeth_card *); int qeth_realloc_buffer_pool(struct qeth_card *, int); int qeth_core_load_discipline(struct qeth_card *, enum qeth_discipline_id); void qeth_core_free_discipline(struct qeth_card *); -int qeth_core_create_device_attributes(struct device *); -void qeth_core_remove_device_attributes(struct device *); -int qeth_core_create_osn_attributes(struct device *); -void qeth_core_remove_osn_attributes(struct device *); +void qeth_buffer_reclaim_work(struct work_struct *); /* exports for qeth discipline device drivers */ extern struct qeth_card_list_struct qeth_core_card_list; extern struct kmem_cache *qeth_core_header_cache; extern struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS]; +void qeth_set_recovery_task(struct qeth_card *); +void qeth_clear_recovery_task(struct qeth_card *); void qeth_set_allowed_threads(struct qeth_card *, unsigned long , int); int qeth_threads_running(struct qeth_card *, unsigned long); int qeth_wait_for_threads(struct qeth_card *, unsigned long); @@ -818,9 +919,13 @@ int qeth_check_qdio_errors(struct qeth_card *, struct qdio_buffer *, unsigned int, const char *); void qeth_queue_input_buffer(struct qeth_card *, int); struct sk_buff *qeth_core_get_next_skb(struct qeth_card *, - struct qdio_buffer *, struct qdio_buffer_element **, int *, + struct qeth_qdio_buffer *, struct qdio_buffer_element **, int *, struct qeth_hdr **); void qeth_schedule_recovery(struct qeth_card *); +void qeth_qdio_start_poll(struct ccw_device *, int, unsigned long); +void qeth_qdio_input_handler(struct ccw_device *, + unsigned int, unsigned int, int, + int, unsigned long); void qeth_qdio_output_handler(struct ccw_device *, unsigned int, int, int, int, unsigned long); void qeth_clear_ipacmd_list(struct qeth_card *); @@ -840,14 +945,19 @@ void qeth_prepare_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *, char); struct qeth_cmd_buffer *qeth_wait_for_buffer(struct qeth_channel *); int qeth_mdio_read(struct net_device *, int, int); int qeth_snmp_command(struct qeth_card *, char __user *); -struct qeth_cmd_buffer 
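/*
 * Worked example of the VLAN skip in the reworked qeth_get_ip_version()
 * above: when h_proto holds the 802.1Q TPID, the real ethertype sits two
 * __be16 slots (four bytes: TCI plus inner type) further on.  Standalone
 * rendering with byte order glossed over for brevity:
 */
#include <stdint.h>

#define ETH_P_8021Q 0x8100
#define ETH_P_IP    0x0800
#define ETH_P_IPV6  0x86DD

static int ip_version_of(const uint16_t *h_proto)
{
	const uint16_t *p = h_proto;

	if (*p == ETH_P_8021Q)
		p += 2;		/* step over TCI, land on inner ethertype */
	if (*p == ETH_P_IPV6)
		return 6;
	if (*p == ETH_P_IP)
		return 4;
	return 0;
}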
*qeth_get_adapter_cmd(struct qeth_card *, __u32, __u32); -int qeth_default_setadapterparms_cb(struct qeth_card *, struct qeth_reply *, - unsigned long); +int qeth_query_oat_command(struct qeth_card *, char __user *); +int qeth_query_card_info(struct qeth_card *card, + struct carrier_info *carrier_info); int qeth_send_control_data(struct qeth_card *, int, struct qeth_cmd_buffer *, int (*reply_cb)(struct qeth_card *, struct qeth_reply*, unsigned long), void *reply_param); +int qeth_bridgeport_query_ports(struct qeth_card *card, + enum qeth_sbp_roles *role, enum qeth_sbp_states *state); +int qeth_bridgeport_setrole(struct qeth_card *card, enum qeth_sbp_roles role); +int qeth_bridgeport_an_set(struct qeth_card *card, int enable); int qeth_get_priority_queue(struct qeth_card *, struct sk_buff *, int, int); -int qeth_get_elements_no(struct qeth_card *, void *, struct sk_buff *, int); +int qeth_get_elements_no(struct qeth_card *, struct sk_buff *, int); +int qeth_get_elements_for_frags(struct sk_buff *); int qeth_do_send_packet_fast(struct qeth_card *, struct qeth_qdio_out_q *, struct sk_buff *, struct qeth_hdr *, int, int, int); int qeth_do_send_packet(struct qeth_card *, struct qeth_qdio_out_q *, @@ -857,9 +967,15 @@ void qeth_core_get_ethtool_stats(struct net_device *, struct ethtool_stats *, u64 *); void qeth_core_get_strings(struct net_device *, u32, u8 *); void qeth_core_get_drvinfo(struct net_device *, struct ethtool_drvinfo *); -void qeth_dbf_longtext(enum qeth_dbf_names dbf_nix, int level, char *text, ...); +void qeth_dbf_longtext(debug_info_t *id, int level, char *text, ...); int qeth_core_ethtool_get_settings(struct net_device *, struct ethtool_cmd *); -int qeth_set_access_ctrl_online(struct qeth_card *card); +int qeth_set_access_ctrl_online(struct qeth_card *card, int fallback); +int qeth_hdr_chk_and_bounce(struct sk_buff *, struct qeth_hdr **, int); +int qeth_configure_cq(struct qeth_card *, enum qeth_cq); +int qeth_hw_trap(struct qeth_card *, enum qeth_diags_trap_action); +int qeth_query_ipassists(struct qeth_card *, enum qeth_prot_versions prot); +void qeth_trace_features(struct qeth_card *); +void qeth_close_dev(struct qeth_card *); /* exports for OSN */ int qeth_osn_assist(struct net_device *, void *, int); diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 3bd4206f347..f54bec54d67 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -1,6 +1,4 @@ /* - * drivers/s390/net/qeth_core_main.c - * * Copyright IBM Corp. 
2007, 2009 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>, * Frank Pavlic <fpavlic@de.ibm.com>, @@ -20,9 +18,15 @@ #include <linux/tcp.h> #include <linux/mii.h> #include <linux/kthread.h> +#include <linux/slab.h> +#include <net/iucv/af_iucv.h> +#include <net/dsfield.h> #include <asm/ebcdic.h> +#include <asm/chpid.h> #include <asm/io.h> +#include <asm/sysinfo.h> +#include <asm/compat.h> #include "qeth_core.h" @@ -31,16 +35,8 @@ struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS] = { /* N P A M L V H */ [QETH_DBF_SETUP] = {"qeth_setup", 8, 1, 8, 5, &debug_hex_ascii_view, NULL}, - [QETH_DBF_QERR] = {"qeth_qerr", - 2, 1, 8, 2, &debug_hex_ascii_view, NULL}, - [QETH_DBF_TRACE] = {"qeth_trace", - 4, 1, 8, 3, &debug_hex_ascii_view, NULL}, - [QETH_DBF_MSG] = {"qeth_msg", - 8, 1, 128, 3, &debug_sprintf_view, NULL}, - [QETH_DBF_SENSE] = {"qeth_sense", - 2, 1, 64, 2, &debug_hex_ascii_view, NULL}, - [QETH_DBF_MISC] = {"qeth_misc", - 2, 1, 256, 2, &debug_hex_ascii_view, NULL}, + [QETH_DBF_MSG] = {"qeth_msg", 8, 1, 11 * sizeof(long), 3, + &debug_sprintf_view, NULL}, [QETH_DBF_CTRL] = {"qeth_control", 8, 1, QETH_DBF_CTRL_LEN, 5, &debug_hex_ascii_view, NULL}, }; @@ -50,10 +46,12 @@ struct qeth_card_list_struct qeth_core_card_list; EXPORT_SYMBOL_GPL(qeth_core_card_list); struct kmem_cache *qeth_core_header_cache; EXPORT_SYMBOL_GPL(qeth_core_header_cache); +static struct kmem_cache *qeth_qdio_outbuf_cache; static struct device *qeth_core_root_dev; -static unsigned int known_devices[][10] = QETH_MODELLIST_ARRAY; +static unsigned int known_devices[][6] = QETH_MODELLIST_ARRAY; static struct lock_class_key qdio_out_skb_queue_key; +static struct mutex qeth_mod_mutex; static void qeth_send_control_data_cb(struct qeth_channel *, struct qeth_cmd_buffer *); @@ -62,69 +60,65 @@ static struct qeth_cmd_buffer *qeth_get_buffer(struct qeth_channel *); static void qeth_setup_ccw(struct qeth_channel *, unsigned char *, __u32); static void qeth_free_buffer_pool(struct qeth_card *); static int qeth_qdio_establish(struct qeth_card *); +static void qeth_free_qdio_buffers(struct qeth_card *); +static void qeth_notify_skbs(struct qeth_qdio_out_q *queue, + struct qeth_qdio_out_buffer *buf, + enum iucv_tx_notify notification); +static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf); +static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue, + struct qeth_qdio_out_buffer *buf, + enum qeth_qdio_buffer_states newbufstate); +static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int); +struct workqueue_struct *qeth_wq; +EXPORT_SYMBOL_GPL(qeth_wq); -static inline void __qeth_fill_buffer_frag(struct sk_buff *skb, - struct qdio_buffer *buffer, int is_tso, - int *next_element_to_fill) +static void qeth_close_dev_handler(struct work_struct *work) { - struct skb_frag_struct *frag; - int fragno; - unsigned long addr; - int element, cnt, dlen; + struct qeth_card *card; - fragno = skb_shinfo(skb)->nr_frags; - element = *next_element_to_fill; - dlen = 0; + card = container_of(work, struct qeth_card, close_dev_work); + QETH_CARD_TEXT(card, 2, "cldevhdl"); + rtnl_lock(); + dev_close(card->dev); + rtnl_unlock(); + ccwgroup_set_offline(card->gdev); +} - if (is_tso) - buffer->element[element].flags = - SBAL_FLAGS_MIDDLE_FRAG; - else - buffer->element[element].flags = - SBAL_FLAGS_FIRST_FRAG; - dlen = skb->len - skb->data_len; - if (dlen) { - buffer->element[element].addr = skb->data; - buffer->element[element].length = dlen; - element++; - } - for (cnt = 0; cnt < fragno; cnt++) { - frag = &skb_shinfo(skb)->frags[cnt]; - addr = 
(page_to_pfn(frag->page) << PAGE_SHIFT) + - frag->page_offset; - buffer->element[element].addr = (char *)addr; - buffer->element[element].length = frag->size; - if (cnt < (fragno - 1)) - buffer->element[element].flags = - SBAL_FLAGS_MIDDLE_FRAG; - else - buffer->element[element].flags = - SBAL_FLAGS_LAST_FRAG; - element++; - } - *next_element_to_fill = element; +void qeth_close_dev(struct qeth_card *card) +{ + QETH_CARD_TEXT(card, 2, "cldevsubm"); + queue_work(qeth_wq, &card->close_dev_work); } +EXPORT_SYMBOL_GPL(qeth_close_dev); static inline const char *qeth_get_cardname(struct qeth_card *card) { if (card->info.guestlan) { switch (card->info.type) { - case QETH_CARD_TYPE_OSAE: - return " Guest LAN QDIO"; + case QETH_CARD_TYPE_OSD: + return " Virtual NIC QDIO"; case QETH_CARD_TYPE_IQD: - return " Guest LAN Hiper"; + return " Virtual NIC Hiper"; + case QETH_CARD_TYPE_OSM: + return " Virtual NIC QDIO - OSM"; + case QETH_CARD_TYPE_OSX: + return " Virtual NIC QDIO - OSX"; default: return " unknown"; } } else { switch (card->info.type) { - case QETH_CARD_TYPE_OSAE: + case QETH_CARD_TYPE_OSD: return " OSD Express"; case QETH_CARD_TYPE_IQD: return " HiperSockets"; case QETH_CARD_TYPE_OSN: return " OSN QDIO"; + case QETH_CARD_TYPE_OSM: + return " OSM QDIO"; + case QETH_CARD_TYPE_OSX: + return " OSX QDIO"; default: return " unknown"; } @@ -137,16 +131,20 @@ const char *qeth_get_cardname_short(struct qeth_card *card) { if (card->info.guestlan) { switch (card->info.type) { - case QETH_CARD_TYPE_OSAE: - return "GuestLAN QDIO"; + case QETH_CARD_TYPE_OSD: + return "Virt.NIC QDIO"; case QETH_CARD_TYPE_IQD: - return "GuestLAN Hiper"; + return "Virt.NIC Hiper"; + case QETH_CARD_TYPE_OSM: + return "Virt.NIC OSM"; + case QETH_CARD_TYPE_OSX: + return "Virt.NIC OSX"; default: return "unknown"; } } else { switch (card->info.type) { - case QETH_CARD_TYPE_OSAE: + case QETH_CARD_TYPE_OSD: switch (card->info.link_type) { case QETH_LINK_TYPE_FAST_ETH: return "OSD_100"; @@ -171,6 +169,10 @@ const char *qeth_get_cardname_short(struct qeth_card *card) return "HiperSockets"; case QETH_CARD_TYPE_OSN: return "OSN"; + case QETH_CARD_TYPE_OSM: + return "OSM_1000"; + case QETH_CARD_TYPE_OSX: + return "OSX_10GIG"; default: return "unknown"; } @@ -178,6 +180,23 @@ const char *qeth_get_cardname_short(struct qeth_card *card) return "n/a"; } +void qeth_set_recovery_task(struct qeth_card *card) +{ + card->recovery_task = current; +} +EXPORT_SYMBOL_GPL(qeth_set_recovery_task); + +void qeth_clear_recovery_task(struct qeth_card *card) +{ + card->recovery_task = NULL; +} +EXPORT_SYMBOL_GPL(qeth_clear_recovery_task); + +static bool qeth_is_recovery_task(const struct qeth_card *card) +{ + return card->recovery_task == current; +} + void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads, int clear_start_mask) { @@ -206,6 +225,8 @@ EXPORT_SYMBOL_GPL(qeth_threads_running); int qeth_wait_for_threads(struct qeth_card *card, unsigned long threads) { + if (qeth_is_recovery_task(card)) + return 0; return wait_event_interruptible(card->wait_q, qeth_threads_running(card, threads) == 0); } @@ -215,7 +236,7 @@ void qeth_clear_working_pool_list(struct qeth_card *card) { struct qeth_buffer_pool_entry *pool_entry, *tmp; - QETH_DBF_TEXT(TRACE, 5, "clwrklst"); + QETH_CARD_TEXT(card, 5, "clwrklst"); list_for_each_entry_safe(pool_entry, tmp, &card->qdio.in_buf_pool.entry_list, list){ list_del(&pool_entry->list); @@ -229,9 +250,9 @@ static int qeth_alloc_buffer_pool(struct qeth_card *card) void *ptr; int i, j; - QETH_DBF_TEXT(TRACE, 
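/*
 * Minimal model of the recovery-task bookkeeping added above: the
 * recovery thread records itself in card->recovery_task, and
 * qeth_wait_for_threads() returns immediately when the caller *is* that
 * thread, so recovery can never block waiting on itself.  Stub types,
 * illustration only.
 */
struct task_stub { int id; };
struct rcard_stub { struct task_stub *recovery_task; };

static int is_recovery_task(const struct rcard_stub *c,
			    const struct task_stub *cur)
{
	return c->recovery_task == cur;
}

static int wait_for_threads(struct rcard_stub *c, const struct task_stub *cur)
{
	if (is_recovery_task(c, cur))
		return 0;	/* never wait on ourselves */
	/* ... otherwise sleep until the requested thread bits clear ... */
	return 0;
}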
5, "alocpool"); + QETH_CARD_TEXT(card, 5, "alocpool"); for (i = 0; i < card->qdio.init_pool.buf_count; ++i) { - pool_entry = kmalloc(sizeof(*pool_entry), GFP_KERNEL); + pool_entry = kzalloc(sizeof(*pool_entry), GFP_KERNEL); if (!pool_entry) { qeth_free_buffer_pool(card); return -ENOMEM; @@ -256,7 +277,7 @@ static int qeth_alloc_buffer_pool(struct qeth_card *card) int qeth_realloc_buffer_pool(struct qeth_card *card, int bufcnt) { - QETH_DBF_TEXT(TRACE, 2, "realcbp"); + QETH_CARD_TEXT(card, 2, "realcbp"); if ((card->state != CARD_STATE_DOWN) && (card->state != CARD_STATE_RECOVER)) @@ -271,12 +292,211 @@ int qeth_realloc_buffer_pool(struct qeth_card *card, int bufcnt) } EXPORT_SYMBOL_GPL(qeth_realloc_buffer_pool); +static inline int qeth_cq_init(struct qeth_card *card) +{ + int rc; + + if (card->options.cq == QETH_CQ_ENABLED) { + QETH_DBF_TEXT(SETUP, 2, "cqinit"); + memset(card->qdio.c_q->qdio_bufs, 0, + QDIO_MAX_BUFFERS_PER_Q * sizeof(struct qdio_buffer)); + card->qdio.c_q->next_buf_to_init = 127; + rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, + card->qdio.no_in_queues - 1, 0, + 127); + if (rc) { + QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); + goto out; + } + } + rc = 0; +out: + return rc; +} + +static inline int qeth_alloc_cq(struct qeth_card *card) +{ + int rc; + + if (card->options.cq == QETH_CQ_ENABLED) { + int i; + struct qdio_outbuf_state *outbuf_states; + + QETH_DBF_TEXT(SETUP, 2, "cqon"); + card->qdio.c_q = kzalloc(sizeof(struct qeth_qdio_q), + GFP_KERNEL); + if (!card->qdio.c_q) { + rc = -1; + goto kmsg_out; + } + QETH_DBF_HEX(SETUP, 2, &card->qdio.c_q, sizeof(void *)); + + for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i) { + card->qdio.c_q->bufs[i].buffer = + &card->qdio.c_q->qdio_bufs[i]; + } + + card->qdio.no_in_queues = 2; + + card->qdio.out_bufstates = + kzalloc(card->qdio.no_out_queues * + QDIO_MAX_BUFFERS_PER_Q * + sizeof(struct qdio_outbuf_state), GFP_KERNEL); + outbuf_states = card->qdio.out_bufstates; + if (outbuf_states == NULL) { + rc = -1; + goto free_cq_out; + } + for (i = 0; i < card->qdio.no_out_queues; ++i) { + card->qdio.out_qs[i]->bufstates = outbuf_states; + outbuf_states += QDIO_MAX_BUFFERS_PER_Q; + } + } else { + QETH_DBF_TEXT(SETUP, 2, "nocq"); + card->qdio.c_q = NULL; + card->qdio.no_in_queues = 1; + } + QETH_DBF_TEXT_(SETUP, 2, "iqc%d", card->qdio.no_in_queues); + rc = 0; +out: + return rc; +free_cq_out: + kfree(card->qdio.c_q); + card->qdio.c_q = NULL; +kmsg_out: + dev_err(&card->gdev->dev, "Failed to create completion queue\n"); + goto out; +} + +static inline void qeth_free_cq(struct qeth_card *card) +{ + if (card->qdio.c_q) { + --card->qdio.no_in_queues; + kfree(card->qdio.c_q); + card->qdio.c_q = NULL; + } + kfree(card->qdio.out_bufstates); + card->qdio.out_bufstates = NULL; +} + +static inline enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15, + int delayed) { + enum iucv_tx_notify n; + + switch (sbalf15) { + case 0: + n = delayed ? TX_NOTIFY_DELAYED_OK : TX_NOTIFY_OK; + break; + case 4: + case 16: + case 17: + case 18: + n = delayed ? TX_NOTIFY_DELAYED_UNREACHABLE : + TX_NOTIFY_UNREACHABLE; + break; + default: + n = delayed ? 
TX_NOTIFY_DELAYED_GENERALERROR : + TX_NOTIFY_GENERALERROR; + break; + } + + return n; +} + +static inline void qeth_cleanup_handled_pending(struct qeth_qdio_out_q *q, + int bidx, int forced_cleanup) +{ + if (q->card->options.cq != QETH_CQ_ENABLED) + return; + + if (q->bufs[bidx]->next_pending != NULL) { + struct qeth_qdio_out_buffer *head = q->bufs[bidx]; + struct qeth_qdio_out_buffer *c = q->bufs[bidx]->next_pending; + + while (c) { + if (forced_cleanup || + atomic_read(&c->state) == + QETH_QDIO_BUF_HANDLED_DELAYED) { + struct qeth_qdio_out_buffer *f = c; + QETH_CARD_TEXT(f->q->card, 5, "fp"); + QETH_CARD_TEXT_(f->q->card, 5, "%lx", (long) f); + /* release here to avoid interleaving between + outbound tasklet and inbound tasklet + regarding notifications and lifecycle */ + qeth_release_skbs(c); + + c = f->next_pending; + WARN_ON_ONCE(head->next_pending != f); + head->next_pending = c; + kmem_cache_free(qeth_qdio_outbuf_cache, f); + } else { + head = c; + c = c->next_pending; + } + + } + } + if (forced_cleanup && (atomic_read(&(q->bufs[bidx]->state)) == + QETH_QDIO_BUF_HANDLED_DELAYED)) { + /* for recovery situations */ + q->bufs[bidx]->aob = q->bufstates[bidx].aob; + qeth_init_qdio_out_buf(q, bidx); + QETH_CARD_TEXT(q->card, 2, "clprecov"); + } +} + + +static inline void qeth_qdio_handle_aob(struct qeth_card *card, + unsigned long phys_aob_addr) { + struct qaob *aob; + struct qeth_qdio_out_buffer *buffer; + enum iucv_tx_notify notification; + + aob = (struct qaob *) phys_to_virt(phys_aob_addr); + QETH_CARD_TEXT(card, 5, "haob"); + QETH_CARD_TEXT_(card, 5, "%lx", phys_aob_addr); + buffer = (struct qeth_qdio_out_buffer *) aob->user1; + QETH_CARD_TEXT_(card, 5, "%lx", aob->user1); + + if (atomic_cmpxchg(&buffer->state, QETH_QDIO_BUF_PRIMED, + QETH_QDIO_BUF_IN_CQ) == QETH_QDIO_BUF_PRIMED) { + notification = TX_NOTIFY_OK; + } else { + WARN_ON_ONCE(atomic_read(&buffer->state) != + QETH_QDIO_BUF_PENDING); + atomic_set(&buffer->state, QETH_QDIO_BUF_IN_CQ); + notification = TX_NOTIFY_DELAYED_OK; + } + + if (aob->aorc != 0) { + QETH_CARD_TEXT_(card, 2, "aorc%02X", aob->aorc); + notification = qeth_compute_cq_notification(aob->aorc, 1); + } + qeth_notify_skbs(buffer->q, buffer, notification); + + buffer->aob = NULL; + qeth_clear_output_buffer(buffer->q, buffer, + QETH_QDIO_BUF_HANDLED_DELAYED); + + /* from here on: do not touch buffer anymore */ + qdio_release_aob(aob); +} + +static inline int qeth_is_cq(struct qeth_card *card, unsigned int queue) +{ + return card->options.cq == QETH_CQ_ENABLED && + card->qdio.c_q != NULL && + queue != 0 && + queue == card->qdio.no_in_queues - 1; +} + + static int qeth_issue_next_read(struct qeth_card *card) { int rc; struct qeth_cmd_buffer *iob; - QETH_DBF_TEXT(TRACE, 5, "issnxrd"); + QETH_CARD_TEXT(card, 5, "issnxrd"); if (card->read.state != CH_STATE_UP) return -EIO; iob = qeth_get_buffer(&card->read); @@ -288,13 +508,14 @@ static int qeth_issue_next_read(struct qeth_card *card) return -ENOMEM; } qeth_setup_ccw(&card->read, iob->data, QETH_BUFSIZE); - QETH_DBF_TEXT(TRACE, 6, "noirqpnd"); + QETH_CARD_TEXT(card, 6, "noirqpnd"); rc = ccw_device_start(card->read.ccwdev, &card->read.ccw, (addr_t) iob, 0, 0); if (rc) { QETH_DBF_MESSAGE(2, "%s error in starting next read ccw! 
" "rc=%i\n", dev_name(&card->gdev->dev), rc); atomic_set(&card->read.irq_pending, 0); + card->read_or_write_problem = 1; qeth_schedule_recovery(card); wake_up(&card->wait_q); } @@ -310,7 +531,7 @@ static struct qeth_reply *qeth_alloc_reply(struct qeth_card *card) atomic_set(&reply->refcnt, 1); atomic_set(&reply->received, 0); reply->card = card; - }; + } return reply; } @@ -334,12 +555,15 @@ static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc, int com = cmd->hdr.command; ipa_name = qeth_get_ipa_cmd_name(com); if (rc) - QETH_DBF_MESSAGE(2, "IPA: %s(x%X) for %s returned x%X \"%s\"\n", - ipa_name, com, QETH_CARD_IFNAME(card), - rc, qeth_get_ipa_msg(rc)); + QETH_DBF_MESSAGE(2, "IPA: %s(x%X) for %s/%s returned " + "x%X \"%s\"\n", + ipa_name, com, dev_name(&card->gdev->dev), + QETH_CARD_IFNAME(card), rc, + qeth_get_ipa_msg(rc)); else - QETH_DBF_MESSAGE(5, "IPA: %s(x%X) for %s succeeded\n", - ipa_name, com, QETH_CARD_IFNAME(card)); + QETH_DBF_MESSAGE(5, "IPA: %s(x%X) for %s/%s succeeded\n", + ipa_name, com, dev_name(&card->gdev->dev), + QETH_CARD_IFNAME(card)); } static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card, @@ -347,7 +571,7 @@ static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card, { struct qeth_ipa_cmd *cmd = NULL; - QETH_DBF_TEXT(TRACE, 5, "chkipad"); + QETH_CARD_TEXT(card, 5, "chkipad"); if (IS_IPA(iob->data)) { cmd = (struct qeth_ipa_cmd *) PDU_ENCAPSULATION(iob->data); if (IS_IPA_REPLY(cmd)) { @@ -361,11 +585,23 @@ static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card, } else { switch (cmd->hdr.command) { case IPA_CMD_STOPLAN: - dev_warn(&card->gdev->dev, + if (cmd->hdr.return_code == + IPA_RC_VEPA_TO_VEB_TRANSITION) { + dev_err(&card->gdev->dev, + "Interface %s is down because the " + "adjacent port is no longer in " + "reflective relay mode\n", + QETH_CARD_IFNAME(card)); + qeth_close_dev(card); + } else { + dev_warn(&card->gdev->dev, "The link for interface %s on CHPID" " 0x%X failed\n", QETH_CARD_IFNAME(card), card->info.chpid); + qeth_issue_ipa_msg(cmd, + cmd->hdr.return_code, card); + } card->lan_online = 0; if (card->dev && netif_carrier_ok(card->dev)) netif_carrier_off(card->dev); @@ -378,15 +614,24 @@ static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card, card->info.chpid); netif_carrier_on(card->dev); card->lan_online = 1; + if (card->info.hwtrap) + card->info.hwtrap = 2; qeth_schedule_recovery(card); return NULL; + case IPA_CMD_SETBRIDGEPORT: + case IPA_CMD_ADDRESS_CHANGE_NOTIF: + if (card->discipline->control_event_handler + (card, cmd)) + return cmd; + else + return NULL; case IPA_CMD_MODCCID: return cmd; case IPA_CMD_REGISTER_LOCAL_ADDR: - QETH_DBF_TEXT(TRACE, 3, "irla"); + QETH_CARD_TEXT(card, 3, "irla"); break; case IPA_CMD_UNREGISTER_LOCAL_ADDR: - QETH_DBF_TEXT(TRACE, 3, "urla"); + QETH_CARD_TEXT(card, 3, "urla"); break; default: QETH_DBF_MESSAGE(2, "Received data is IPA " @@ -403,7 +648,7 @@ void qeth_clear_ipacmd_list(struct qeth_card *card) struct qeth_reply *reply, *r; unsigned long flags; - QETH_DBF_TEXT(TRACE, 4, "clipalst"); + QETH_CARD_TEXT(card, 4, "clipalst"); spin_lock_irqsave(&card->lock, flags); list_for_each_entry_safe(reply, r, &card->cmd_waiter_list, list) { @@ -415,10 +660,12 @@ void qeth_clear_ipacmd_list(struct qeth_card *card) qeth_put_reply(reply); } spin_unlock_irqrestore(&card->lock, flags); + atomic_set(&card->write.irq_pending, 0); } EXPORT_SYMBOL_GPL(qeth_clear_ipacmd_list); -static int qeth_check_idx_response(unsigned char *buffer) +static int 
qeth_check_idx_response(struct qeth_card *card, + unsigned char *buffer) { if (!buffer) return 0; @@ -430,9 +677,15 @@ static int qeth_check_idx_response(unsigned char *buffer) buffer[4], ((buffer[4] == 0x22) ? " -- try another portname" : "")); - QETH_DBF_TEXT(TRACE, 2, "ckidxres"); - QETH_DBF_TEXT(TRACE, 2, " idxterm"); - QETH_DBF_TEXT_(TRACE, 2, " rc%d", -EIO); + QETH_CARD_TEXT(card, 2, "ckidxres"); + QETH_CARD_TEXT(card, 2, " idxterm"); + QETH_CARD_TEXT_(card, 2, " rc%d", -EIO); + if (buffer[4] == 0xf6) { + dev_err(&card->gdev->dev, + "The qeth device is not configured " + "for the OSI layer required by z/VM\n"); + return -EPERM; + } return -EIO; } return 0; @@ -443,8 +696,8 @@ static void qeth_setup_ccw(struct qeth_channel *channel, unsigned char *iob, { struct qeth_card *card; - QETH_DBF_TEXT(TRACE, 4, "setupccw"); card = CARD_FROM_CDEV(channel->ccwdev); + QETH_CARD_TEXT(card, 4, "setupccw"); if (channel == &card->read) memcpy(&channel->ccw, READ_CCW, sizeof(struct ccw1)); else @@ -457,7 +710,7 @@ static struct qeth_cmd_buffer *__qeth_get_buffer(struct qeth_channel *channel) { __u8 index; - QETH_DBF_TEXT(TRACE, 6, "getbuff"); + QETH_CARD_TEXT(CARD_FROM_CDEV(channel->ccwdev), 6, "getbuff"); index = channel->io_buf_no; do { if (channel->iob[index].state == BUF_STATE_FREE) { @@ -478,13 +731,14 @@ void qeth_release_buffer(struct qeth_channel *channel, { unsigned long flags; - QETH_DBF_TEXT(TRACE, 6, "relbuff"); + QETH_CARD_TEXT(CARD_FROM_CDEV(channel->ccwdev), 6, "relbuff"); spin_lock_irqsave(&channel->iob_lock, flags); memset(iob->data, 0, QETH_BUFSIZE); iob->state = BUF_STATE_FREE; iob->callback = qeth_send_control_data_cb; iob->rc = 0; spin_unlock_irqrestore(&channel->iob_lock, flags); + wake_up(&channel->wait_q); } EXPORT_SYMBOL_GPL(qeth_release_buffer); @@ -527,18 +781,19 @@ static void qeth_send_control_data_cb(struct qeth_channel *channel, struct qeth_ipa_cmd *cmd; unsigned long flags; int keep_reply; - - QETH_DBF_TEXT(TRACE, 4, "sndctlcb"); + int rc = 0; card = CARD_FROM_CDEV(channel->ccwdev); - if (qeth_check_idx_response(iob->data)) { + QETH_CARD_TEXT(card, 4, "sndctlcb"); + rc = qeth_check_idx_response(card, iob->data); + switch (rc) { + case 0: + break; + case -EIO: qeth_clear_ipacmd_list(card); - if (((iob->data[2] & 0xc0) == 0xc0) && iob->data[4] == 0xf6) - dev_err(&card->gdev->dev, - "The qeth device is not configured " - "for the OSI layer required by z/VM\n"); - else - qeth_schedule_recovery(card); + qeth_schedule_recovery(card); + /* fall through */ + default: goto out; } @@ -605,8 +860,8 @@ static int qeth_setup_channel(struct qeth_channel *channel) QETH_DBF_TEXT(SETUP, 2, "setupch"); for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++) { - channel->iob[cnt].data = (char *) - kmalloc(QETH_BUFSIZE, GFP_DMA|GFP_KERNEL); + channel->iob[cnt].data = + kzalloc(QETH_BUFSIZE, GFP_DMA|GFP_KERNEL); if (channel->iob[cnt].data == NULL) break; channel->iob[cnt].state = BUF_STATE_FREE; @@ -697,7 +952,7 @@ EXPORT_SYMBOL_GPL(qeth_do_run_thread); void qeth_schedule_recovery(struct qeth_card *card) { - QETH_DBF_TEXT(TRACE, 2, "startrec"); + QETH_CARD_TEXT(card, 2, "startrec"); if (qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD) == 0) schedule_work(&card->kernel_thread_starter); } @@ -707,18 +962,20 @@ static int qeth_get_problem(struct ccw_device *cdev, struct irb *irb) { int dstat, cstat; char *sense; + struct qeth_card *card; sense = (char *) irb->ecw; cstat = irb->scsw.cmd.cstat; dstat = irb->scsw.cmd.dstat; + card = CARD_FROM_CDEV(cdev); if (cstat & (SCHN_STAT_CHN_CTRL_CHK | 
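/*
 * Decision table of the reworked IDX termination check above, shown in
 * isolation: byte 4 equal to 0xf6 now maps to -EPERM (device not
 * configured for the OSI layer required by z/VM), any other terminate
 * code stays -EIO.  Hypothetical helper, 0xc0 mask as in the old
 * inline check:
 */
#include <errno.h>

static int idx_terminate_rc(const unsigned char *buf)
{
	if ((buf[2] & 0xc0) != 0xc0)
		return 0;		/* not an IDX TERMINATE */
	return (buf[4] == 0xf6) ? -EPERM : -EIO;
}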
SCHN_STAT_INTF_CTRL_CHK | SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK | SCHN_STAT_PROT_CHECK | SCHN_STAT_PROG_CHECK)) { - QETH_DBF_TEXT(TRACE, 2, "CGENCHK"); + QETH_CARD_TEXT(card, 2, "CGENCHK"); dev_warn(&cdev->dev, "The qeth device driver " "failed to recover an error on the device\n"); - QETH_DBF_MESSAGE(2, "%s check on device dstat=x%x, cstat=x%x ", + QETH_DBF_MESSAGE(2, "%s check on device dstat=x%x, cstat=x%x\n", dev_name(&cdev->dev), dstat, cstat); print_hex_dump(KERN_WARNING, "qeth: irb ", DUMP_PREFIX_OFFSET, 16, 1, irb, 64, 1); @@ -728,23 +985,23 @@ static int qeth_get_problem(struct ccw_device *cdev, struct irb *irb) if (dstat & DEV_STAT_UNIT_CHECK) { if (sense[SENSE_RESETTING_EVENT_BYTE] & SENSE_RESETTING_EVENT_FLAG) { - QETH_DBF_TEXT(TRACE, 2, "REVIND"); + QETH_CARD_TEXT(card, 2, "REVIND"); return 1; } if (sense[SENSE_COMMAND_REJECT_BYTE] & SENSE_COMMAND_REJECT_FLAG) { - QETH_DBF_TEXT(TRACE, 2, "CMDREJi"); + QETH_CARD_TEXT(card, 2, "CMDREJi"); return 1; } if ((sense[2] == 0xaf) && (sense[3] == 0xfe)) { - QETH_DBF_TEXT(TRACE, 2, "AFFE"); + QETH_CARD_TEXT(card, 2, "AFFE"); return 1; } if ((!sense[0]) && (!sense[1]) && (!sense[2]) && (!sense[3])) { - QETH_DBF_TEXT(TRACE, 2, "ZEROSEN"); + QETH_CARD_TEXT(card, 2, "ZEROSEN"); return 0; } - QETH_DBF_TEXT(TRACE, 2, "DGENCHK"); + QETH_CARD_TEXT(card, 2, "DGENCHK"); return 1; } return 0; @@ -753,25 +1010,27 @@ static int qeth_get_problem(struct ccw_device *cdev, struct irb *irb) static long __qeth_check_irb_error(struct ccw_device *cdev, unsigned long intparm, struct irb *irb) { - if (!IS_ERR(irb)) + struct qeth_card *card; + + card = CARD_FROM_CDEV(cdev); + + if (!card || !IS_ERR(irb)) return 0; switch (PTR_ERR(irb)) { case -EIO: QETH_DBF_MESSAGE(2, "%s i/o-error on device\n", dev_name(&cdev->dev)); - QETH_DBF_TEXT(TRACE, 2, "ckirberr"); - QETH_DBF_TEXT_(TRACE, 2, " rc%d", -EIO); + QETH_CARD_TEXT(card, 2, "ckirberr"); + QETH_CARD_TEXT_(card, 2, " rc%d", -EIO); break; case -ETIMEDOUT: dev_warn(&cdev->dev, "A hardware operation timed out" " on the device\n"); - QETH_DBF_TEXT(TRACE, 2, "ckirberr"); - QETH_DBF_TEXT_(TRACE, 2, " rc%d", -ETIMEDOUT); + QETH_CARD_TEXT(card, 2, "ckirberr"); + QETH_CARD_TEXT_(card, 2, " rc%d", -ETIMEDOUT); if (intparm == QETH_RCD_PARM) { - struct qeth_card *card = CARD_FROM_CDEV(cdev); - - if (card && (card->data.ccwdev == cdev)) { + if (card->data.ccwdev == cdev) { card->data.state = CH_STATE_DOWN; wake_up(&card->wait_q); } @@ -780,8 +1039,8 @@ static long __qeth_check_irb_error(struct ccw_device *cdev, default: QETH_DBF_MESSAGE(2, "%s unknown error %ld on device\n", dev_name(&cdev->dev), PTR_ERR(irb)); - QETH_DBF_TEXT(TRACE, 2, "ckirberr"); - QETH_DBF_TEXT(TRACE, 2, " rc???"); + QETH_CARD_TEXT(card, 2, "ckirberr"); + QETH_CARD_TEXT(card, 2, " rc???"); } return PTR_ERR(irb); } @@ -797,8 +1056,6 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm, struct qeth_cmd_buffer *iob; __u8 index; - QETH_DBF_TEXT(TRACE, 5, "irq"); - if (__qeth_check_irb_error(cdev, intparm, irb)) return; cstat = irb->scsw.cmd.cstat; @@ -808,15 +1065,17 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm, if (!card) return; + QETH_CARD_TEXT(card, 5, "irq"); + if (card->read.ccwdev == cdev) { channel = &card->read; - QETH_DBF_TEXT(TRACE, 5, "read"); + QETH_CARD_TEXT(card, 5, "read"); } else if (card->write.ccwdev == cdev) { channel = &card->write; - QETH_DBF_TEXT(TRACE, 5, "write"); + QETH_CARD_TEXT(card, 5, "write"); } else { channel = &card->data; - QETH_DBF_TEXT(TRACE, 5, "data"); + 
QETH_CARD_TEXT(card, 5, "data"); } atomic_set(&channel->irq_pending, 0); @@ -832,12 +1091,12 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm, goto out; if (intparm == QETH_CLEAR_CHANNEL_PARM) { - QETH_DBF_TEXT(TRACE, 6, "clrchpar"); + QETH_CARD_TEXT(card, 6, "clrchpar"); /* we don't have to handle this further */ intparm = 0; } if (intparm == QETH_HALT_CHANNEL_PARM) { - QETH_DBF_TEXT(TRACE, 6, "hltchpar"); + QETH_CARD_TEXT(card, 6, "hltchpar"); /* we don't have to handle this further */ intparm = 0; } @@ -896,23 +1155,73 @@ out: return; } -static void __qeth_clear_output_buffer(struct qeth_qdio_out_q *queue, - struct qeth_qdio_out_buffer *buf, unsigned int qeth_skip_skb) +static void qeth_notify_skbs(struct qeth_qdio_out_q *q, + struct qeth_qdio_out_buffer *buf, + enum iucv_tx_notify notification) +{ + struct sk_buff *skb; + + if (skb_queue_empty(&buf->skb_list)) + goto out; + skb = skb_peek(&buf->skb_list); + while (skb) { + QETH_CARD_TEXT_(q->card, 5, "skbn%d", notification); + QETH_CARD_TEXT_(q->card, 5, "%lx", (long) skb); + if (skb->protocol == ETH_P_AF_IUCV) { + if (skb->sk) { + struct iucv_sock *iucv = iucv_sk(skb->sk); + iucv->sk_txnotify(skb, notification); + } + } + if (skb_queue_is_last(&buf->skb_list, skb)) + skb = NULL; + else + skb = skb_queue_next(&buf->skb_list, skb); + } +out: + return; +} + +static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf) { - int i; struct sk_buff *skb; + struct iucv_sock *iucv; + int notify_general_error = 0; + + if (atomic_read(&buf->state) == QETH_QDIO_BUF_PENDING) + notify_general_error = 1; + + /* release may never happen from within CQ tasklet scope */ + WARN_ON_ONCE(atomic_read(&buf->state) == QETH_QDIO_BUF_IN_CQ); + + skb = skb_dequeue(&buf->skb_list); + while (skb) { + QETH_CARD_TEXT(buf->q->card, 5, "skbr"); + QETH_CARD_TEXT_(buf->q->card, 5, "%lx", (long) skb); + if (notify_general_error && skb->protocol == ETH_P_AF_IUCV) { + if (skb->sk) { + iucv = iucv_sk(skb->sk); + iucv->sk_txnotify(skb, TX_NOTIFY_GENERALERROR); + } + } + atomic_dec(&skb->users); + dev_kfree_skb_any(skb); + skb = skb_dequeue(&buf->skb_list); + } +} + +static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue, + struct qeth_qdio_out_buffer *buf, + enum qeth_qdio_buffer_states newbufstate) +{ + int i; /* is PCI flag set on buffer? 
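/*
 * Sketch of the two traversal styles used above: qeth_notify_skbs()
 * peeks along the queue without unlinking, so the skbs stay attached for
 * a later release; qeth_release_skbs() dequeues and frees.  Generic list
 * shape, illustration only.
 */
#include <stddef.h>

struct node { struct node *next; };
struct slist { struct node *head; };

static void notify_all(struct slist *l, void (*notify)(struct node *))
{
	struct node *n;

	for (n = l->head; n; n = n->next)	/* peek: list stays intact */
		notify(n);
}

static void release_all(struct slist *l, void (*free_fn)(struct node *))
{
	struct node *n;

	while ((n = l->head) != NULL) {		/* dequeue: list is drained */
		l->head = n->next;
		free_fn(n);
	}
}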
*/ - if (buf->buffer->element[0].flags & 0x40) + if (buf->buffer->element[0].sflags & SBAL_SFLAGS0_PCI_REQ) atomic_dec(&queue->set_pci_flags_count); - if (!qeth_skip_skb) { - skb = skb_dequeue(&buf->skb_list); - while (skb) { - atomic_dec(&skb->users); - dev_kfree_skb_any(skb); - skb = skb_dequeue(&buf->skb_list); - } + if (newbufstate == QETH_QDIO_BUF_EMPTY) { + qeth_release_skbs(buf); } for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(queue->card); ++i) { if (buf->buffer->element[i].addr && buf->is_header[i]) @@ -921,31 +1230,42 @@ static void __qeth_clear_output_buffer(struct qeth_qdio_out_q *queue, buf->is_header[i] = 0; buf->buffer->element[i].length = 0; buf->buffer->element[i].addr = NULL; - buf->buffer->element[i].flags = 0; + buf->buffer->element[i].eflags = 0; + buf->buffer->element[i].sflags = 0; } - buf->buffer->element[15].flags = 0; + buf->buffer->element[15].eflags = 0; + buf->buffer->element[15].sflags = 0; buf->next_element_to_fill = 0; - atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY); + atomic_set(&buf->state, newbufstate); } -static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue, - struct qeth_qdio_out_buffer *buf) +static void qeth_clear_outq_buffers(struct qeth_qdio_out_q *q, int free) { - __qeth_clear_output_buffer(queue, buf, 0); + int j; + + for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) { + if (!q->bufs[j]) + continue; + qeth_cleanup_handled_pending(q, j, 1); + qeth_clear_output_buffer(q, q->bufs[j], QETH_QDIO_BUF_EMPTY); + if (free) { + kmem_cache_free(qeth_qdio_outbuf_cache, q->bufs[j]); + q->bufs[j] = NULL; + } + } } void qeth_clear_qdio_buffers(struct qeth_card *card) { - int i, j; + int i; - QETH_DBF_TEXT(TRACE, 2, "clearqdbf"); + QETH_CARD_TEXT(card, 2, "clearqdbf"); /* clear outbound buffers to free skbs */ - for (i = 0; i < card->qdio.no_out_queues; ++i) + for (i = 0; i < card->qdio.no_out_queues; ++i) { if (card->qdio.out_qs[i]) { - for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) - qeth_clear_output_buffer(card->qdio.out_qs[i], - &card->qdio.out_qs[i]->bufs[j]); + qeth_clear_outq_buffers(card->qdio.out_qs[i], 0); } + } } EXPORT_SYMBOL_GPL(qeth_clear_qdio_buffers); @@ -953,7 +1273,6 @@ static void qeth_free_buffer_pool(struct qeth_card *card) { struct qeth_buffer_pool_entry *pool_entry, *tmp; int i = 0; - QETH_DBF_TEXT(TRACE, 5, "freepool"); list_for_each_entry_safe(pool_entry, tmp, &card->qdio.init_pool.entry_list, init_list){ for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) @@ -967,10 +1286,16 @@ static void qeth_free_qdio_buffers(struct qeth_card *card) { int i, j; - QETH_DBF_TEXT(TRACE, 2, "freeqdbf"); if (atomic_xchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED) == QETH_QDIO_UNINITIALIZED) return; + + qeth_free_cq(card); + cancel_delayed_work_sync(&card->buffer_reclaim_work); + for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) { + if (card->qdio.in_q->bufs[j].rx_skb) + dev_kfree_skb_any(card->qdio.in_q->bufs[j].rx_skb); + } kfree(card->qdio.in_q); card->qdio.in_q = NULL; /* inbound buffer pool */ @@ -978,9 +1303,7 @@ static void qeth_free_qdio_buffers(struct qeth_card *card) /* free outbound qdio_qs */ if (card->qdio.out_qs) { for (i = 0; i < card->qdio.no_out_queues; ++i) { - for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) - qeth_clear_output_buffer(card->qdio.out_qs[i], - &card->qdio.out_qs[i]->bufs[j]); + qeth_clear_outq_buffers(card->qdio.out_qs[i], 1); kfree(card->qdio.out_qs[i]); } kfree(card->qdio.out_qs); @@ -997,32 +1320,54 @@ static void qeth_clean_channel(struct qeth_channel *channel) kfree(channel->iob[cnt].data); } -static int 
qeth_is_1920_device(struct qeth_card *card) +static void qeth_set_single_write_queues(struct qeth_card *card) +{ + if ((atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED) && + (card->qdio.no_out_queues == 4)) + qeth_free_qdio_buffers(card); + + card->qdio.no_out_queues = 1; + if (card->qdio.default_out_queue != 0) + dev_info(&card->gdev->dev, "Priority Queueing not supported\n"); + + card->qdio.default_out_queue = 0; +} + +static void qeth_set_multiple_write_queues(struct qeth_card *card) +{ + if ((atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED) && + (card->qdio.no_out_queues == 1)) { + qeth_free_qdio_buffers(card); + card->qdio.default_out_queue = 2; + } + card->qdio.no_out_queues = 4; +} + +static void qeth_update_from_chp_desc(struct qeth_card *card) { - int single_queue = 0; struct ccw_device *ccwdev; - struct channelPath_dsc { - u8 flags; - u8 lsn; - u8 desc; - u8 chpid; - u8 swla; - u8 zeroes; - u8 chla; - u8 chpp; - } *chp_dsc; - - QETH_DBF_TEXT(SETUP, 2, "chk_1920"); + struct channel_path_desc *chp_dsc; + + QETH_DBF_TEXT(SETUP, 2, "chp_desc"); ccwdev = card->data.ccwdev; - chp_dsc = (struct channelPath_dsc *)ccw_device_get_chp_desc(ccwdev, 0); - if (chp_dsc != NULL) { - /* CHPP field bit 6 == 1 -> single queue */ - single_queue = ((chp_dsc->chpp & 0x02) == 0x02); - kfree(chp_dsc); - } - QETH_DBF_TEXT_(SETUP, 2, "rc:%x", single_queue); - return single_queue; + chp_dsc = ccw_device_get_chp_desc(ccwdev, 0); + if (!chp_dsc) + goto out; + + card->info.func_level = 0x4100 + chp_dsc->desc; + if (card->info.type == QETH_CARD_TYPE_IQD) + goto out; + + /* CHPP field bit 6 == 1 -> single queue */ + if ((chp_dsc->chpp & 0x02) == 0x02) + qeth_set_single_write_queues(card); + else + qeth_set_multiple_write_queues(card); +out: + kfree(chp_dsc); + QETH_DBF_TEXT_(SETUP, 2, "nr:%x", card->qdio.no_out_queues); + QETH_DBF_TEXT_(SETUP, 2, "lvl:%02x", card->info.func_level); } static void qeth_init_qdio_info(struct qeth_card *card) @@ -1031,7 +1376,10 @@ static void qeth_init_qdio_info(struct qeth_card *card) atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED); /* inbound */ card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT; - card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_DEFAULT; + if (card->info.type == QETH_CARD_TYPE_IQD) + card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_HSDEFAULT; + else + card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_DEFAULT; card->qdio.in_buf_pool.buf_count = card->qdio.init_pool.buf_count; INIT_LIST_HEAD(&card->qdio.in_buf_pool.entry_list); INIT_LIST_HEAD(&card->qdio.init_pool.entry_list); @@ -1041,14 +1389,12 @@ static void qeth_set_intial_options(struct qeth_card *card) { card->options.route4.type = NO_ROUTER; card->options.route6.type = NO_ROUTER; - card->options.checksum_type = QETH_CHECKSUM_DEFAULT; - card->options.broadcast_mode = QETH_TR_BROADCAST_ALLRINGS; - card->options.macaddr_mode = QETH_TR_MACADDR_NONCANONICAL; card->options.fake_broadcast = 0; card->options.add_hhlen = DEFAULT_ADD_HHLEN; card->options.performance_stats = 0; card->options.rx_sg_cb = QETH_RX_SG_CB; card->options.isolation = ISOLATION_MODE_NONE; + card->options.cq = QETH_CQ_DISABLED; } static int qeth_do_start_thread(struct qeth_card *card, unsigned long thread) @@ -1057,7 +1403,7 @@ static int qeth_do_start_thread(struct qeth_card *card, unsigned long thread) int rc = 0; spin_lock_irqsave(&card->thread_mask_lock, flags); - QETH_DBF_TEXT_(TRACE, 4, " %02x%02x%02x", + QETH_CARD_TEXT_(card, 4, " %02x%02x%02x", (u8) card->thread_start_mask, (u8) 
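/*
 * The queue-count decision in qeth_update_from_chp_desc() above hinges
 * on one bit of the channel-path descriptor: CHPP bit 6 (mask 0x02) set
 * means the path supports only a single outbound queue; the function
 * level is likewise derived as 0x4100 + desc.  Restated for
 * illustration:
 */
static int wants_single_out_queue(unsigned char chpp)
{
	return (chpp & 0x02) == 0x02;	/* CHPP bit 6 -> one write queue */
}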
card->thread_allowed_mask, (u8) card->thread_running_mask); @@ -1068,16 +1414,23 @@ static int qeth_do_start_thread(struct qeth_card *card, unsigned long thread) static void qeth_start_kernel_thread(struct work_struct *work) { + struct task_struct *ts; struct qeth_card *card = container_of(work, struct qeth_card, kernel_thread_starter); - QETH_DBF_TEXT(TRACE , 2, "strthrd"); + QETH_CARD_TEXT(card , 2, "strthrd"); if (card->read.state != CH_STATE_UP && card->write.state != CH_STATE_UP) return; - if (qeth_do_start_thread(card, QETH_RECOVER_THREAD)) - kthread_run(card->discipline.recover, (void *) card, + if (qeth_do_start_thread(card, QETH_RECOVER_THREAD)) { + ts = kthread_run(card->discipline->recover, (void *)card, "qeth_recover"); + if (IS_ERR(ts)) { + qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD); + qeth_clear_thread_running_bit(card, + QETH_RECOVER_THREAD); + } + } } static int qeth_setup_card(struct qeth_card *card) @@ -1091,14 +1444,15 @@ static int qeth_setup_card(struct qeth_card *card) card->data.state = CH_STATE_DOWN; card->state = CARD_STATE_DOWN; card->lan_online = 0; - card->use_hard_stop = 0; + card->read_or_write_problem = 0; card->dev = NULL; spin_lock_init(&card->vlanlock); spin_lock_init(&card->mclock); - card->vlangrp = NULL; spin_lock_init(&card->lock); spin_lock_init(&card->ip_lock); spin_lock_init(&card->thread_mask_lock); + mutex_init(&card->conf_mutex); + mutex_init(&card->discipline_mutex); card->thread_start_mask = 0; card->thread_allowed_mask = 0; card->thread_running_mask = 0; @@ -1107,7 +1461,7 @@ static int qeth_setup_card(struct qeth_card *card) INIT_LIST_HEAD(card->ip_tbd_list); INIT_LIST_HEAD(&card->cmd_waiter_list); init_waitqueue_head(&card->wait_q); - /* intial options */ + /* initial options */ qeth_set_intial_options(card); /* IP address takeover */ INIT_LIST_HEAD(&card->ipato.entries); @@ -1116,6 +1470,8 @@ static int qeth_setup_card(struct qeth_card *card) card->ipato.invert6 = 0; /* init QDIO stuff */ qeth_init_qdio_info(card); + INIT_DELAYED_WORK(&card->buffer_reclaim_work, qeth_buffer_reclaim_work); + INIT_WORK(&card->close_dev_work, qeth_close_dev_handler); return 0; } @@ -1137,7 +1493,7 @@ static struct qeth_card *qeth_alloc_card(void) if (!card) goto out; QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *)); - card->ip_tbd_list = kmalloc(sizeof(struct list_head), GFP_KERNEL); + card->ip_tbd_list = kzalloc(sizeof(struct list_head), GFP_KERNEL); if (!card->ip_tbd_list) { QETH_DBF_TEXT(SETUP, 0, "iptbdnom"); goto out_card; @@ -1169,18 +1525,18 @@ static int qeth_determine_card_type(struct qeth_card *card) card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT; card->qdio.default_out_queue = QETH_DEFAULT_QUEUE; - while (known_devices[i][4]) { - if ((CARD_RDEV(card)->id.dev_type == known_devices[i][2]) && - (CARD_RDEV(card)->id.dev_model == known_devices[i][3])) { - card->info.type = known_devices[i][4]; - card->qdio.no_out_queues = known_devices[i][8]; - card->info.is_multicast_different = known_devices[i][9]; - if (qeth_is_1920_device(card)) { - dev_info(&card->gdev->dev, - "Priority Queueing not supported\n"); - card->qdio.no_out_queues = 1; - card->qdio.default_out_queue = 0; - } + while (known_devices[i][QETH_DEV_MODEL_IND]) { + if ((CARD_RDEV(card)->id.dev_type == + known_devices[i][QETH_DEV_TYPE_IND]) && + (CARD_RDEV(card)->id.dev_model == + known_devices[i][QETH_DEV_MODEL_IND])) { + card->info.type = known_devices[i][QETH_DEV_MODEL_IND]; + card->qdio.no_out_queues = + known_devices[i][QETH_QUEUE_NO_IND]; + card->qdio.no_in_queues = 1; + 
card->info.is_multicast_different = + known_devices[i][QETH_MULTICAST_IND]; + qeth_update_from_chp_desc(card); return 0; } i++; @@ -1197,8 +1553,8 @@ static int qeth_clear_channel(struct qeth_channel *channel) struct qeth_card *card; int rc; - QETH_DBF_TEXT(TRACE, 3, "clearch"); card = CARD_FROM_CDEV(channel->ccwdev); + QETH_CARD_TEXT(card, 3, "clearch"); spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); rc = ccw_device_clear(channel->ccwdev, QETH_CLEAR_CHANNEL_PARM); spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); @@ -1221,8 +1577,8 @@ static int qeth_halt_channel(struct qeth_channel *channel) struct qeth_card *card; int rc; - QETH_DBF_TEXT(TRACE, 3, "haltch"); card = CARD_FROM_CDEV(channel->ccwdev); + QETH_CARD_TEXT(card, 3, "haltch"); spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); rc = ccw_device_halt(channel->ccwdev, QETH_HALT_CHANNEL_PARM); spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); @@ -1242,7 +1598,7 @@ static int qeth_halt_channels(struct qeth_card *card) { int rc1 = 0, rc2 = 0, rc3 = 0; - QETH_DBF_TEXT(TRACE, 3, "haltchs"); + QETH_CARD_TEXT(card, 3, "haltchs"); rc1 = qeth_halt_channel(&card->read); rc2 = qeth_halt_channel(&card->write); rc3 = qeth_halt_channel(&card->data); @@ -1257,7 +1613,7 @@ static int qeth_clear_channels(struct qeth_card *card) { int rc1 = 0, rc2 = 0, rc3 = 0; - QETH_DBF_TEXT(TRACE, 3, "clearchs"); + QETH_CARD_TEXT(card, 3, "clearchs"); rc1 = qeth_clear_channel(&card->read); rc2 = qeth_clear_channel(&card->write); rc3 = qeth_clear_channel(&card->data); @@ -1272,8 +1628,7 @@ static int qeth_clear_halt_card(struct qeth_card *card, int halt) { int rc = 0; - QETH_DBF_TEXT(TRACE, 3, "clhacrd"); - QETH_DBF_HEX(TRACE, 3, &card, sizeof(void *)); + QETH_CARD_TEXT(card, 3, "clhacrd"); if (halt) rc = qeth_halt_channels(card); @@ -1286,18 +1641,18 @@ int qeth_qdio_clear_card(struct qeth_card *card, int use_halt) { int rc = 0; - QETH_DBF_TEXT(TRACE, 3, "qdioclr"); + QETH_CARD_TEXT(card, 3, "qdioclr"); switch (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ESTABLISHED, QETH_QDIO_CLEANING)) { case QETH_QDIO_ESTABLISHED: if (card->info.type == QETH_CARD_TYPE_IQD) - rc = qdio_cleanup(CARD_DDEV(card), + rc = qdio_shutdown(CARD_DDEV(card), QDIO_FLAG_CLEANUP_USING_HALT); else - rc = qdio_cleanup(CARD_DDEV(card), + rc = qdio_shutdown(CARD_DDEV(card), QDIO_FLAG_CLEANUP_USING_CLEAR); if (rc) - QETH_DBF_TEXT_(TRACE, 3, "1err%d", rc); + QETH_CARD_TEXT_(card, 3, "1err%d", rc); atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED); break; case QETH_QDIO_CLEANING: @@ -1307,7 +1662,7 @@ int qeth_qdio_clear_card(struct qeth_card *card, int use_halt) } rc = qeth_clear_halt_card(card, use_halt); if (rc) - QETH_DBF_TEXT_(TRACE, 3, "2err%d", rc); + QETH_CARD_TEXT_(card, 3, "2err%d", rc); card->state = CARD_STATE_DOWN; return rc; } @@ -1375,14 +1730,15 @@ static void qeth_configure_blkt_default(struct qeth_card *card, char *prcd) { QETH_DBF_TEXT(SETUP, 2, "cfgblkt"); - if (prcd[74] == 0xF0 && prcd[75] == 0xF0 && prcd[76] == 0xF5) { - card->info.blkt.time_total = 250; - card->info.blkt.inter_packet = 5; - card->info.blkt.inter_packet_jumbo = 15; - } else { + if (prcd[74] == 0xF0 && prcd[75] == 0xF0 && + prcd[76] >= 0xF1 && prcd[76] <= 0xF4) { card->info.blkt.time_total = 0; card->info.blkt.inter_packet = 0; card->info.blkt.inter_packet_jumbo = 0; + } else { + card->info.blkt.time_total = 250; + card->info.blkt.inter_packet = 5; + card->info.blkt.inter_packet_jumbo = 15; } } @@ -1397,22 +1753,16 @@ static void qeth_init_tokens(struct 
qeth_card *card) static void qeth_init_func_level(struct qeth_card *card) { - if (card->ipato.enabled) { - if (card->info.type == QETH_CARD_TYPE_IQD) - card->info.func_level = - QETH_IDX_FUNC_LEVEL_IQD_ENA_IPAT; - else - card->info.func_level = - QETH_IDX_FUNC_LEVEL_OSAE_ENA_IPAT; - } else { - if (card->info.type == QETH_CARD_TYPE_IQD) - /*FIXME:why do we have same values for dis and ena for - osae??? */ - card->info.func_level = - QETH_IDX_FUNC_LEVEL_IQD_DIS_IPAT; - else - card->info.func_level = - QETH_IDX_FUNC_LEVEL_OSAE_DIS_IPAT; + switch (card->info.type) { + case QETH_CARD_TYPE_IQD: + card->info.func_level = QETH_IDX_FUNC_LEVEL_IQD; + break; + case QETH_CARD_TYPE_OSD: + case QETH_CARD_TYPE_OSN: + card->info.func_level = QETH_IDX_FUNC_LEVEL_OSD; + break; + default: + break; } } @@ -1559,7 +1909,7 @@ static void qeth_idx_write_cb(struct qeth_channel *channel, card = CARD_FROM_CDEV(channel->ccwdev); if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) { - if (QETH_IDX_ACT_CAUSE_CODE(iob->data) == 0x19) + if (QETH_IDX_ACT_CAUSE_CODE(iob->data) == QETH_IDX_ACT_ERR_EXCL) dev_err(&card->write.ccwdev->dev, "The adapter is used exclusively by another " "host\n"); @@ -1595,27 +1945,38 @@ static void qeth_idx_read_cb(struct qeth_channel *channel, } card = CARD_FROM_CDEV(channel->ccwdev); - if (qeth_check_idx_response(iob->data)) + if (qeth_check_idx_response(card, iob->data)) goto out; if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) { - if (QETH_IDX_ACT_CAUSE_CODE(iob->data) == 0x19) + switch (QETH_IDX_ACT_CAUSE_CODE(iob->data)) { + case QETH_IDX_ACT_ERR_EXCL: dev_err(&card->write.ccwdev->dev, "The adapter is used exclusively by another " "host\n"); - else + break; + case QETH_IDX_ACT_ERR_AUTH: + case QETH_IDX_ACT_ERR_AUTH_USER: + dev_err(&card->read.ccwdev->dev, + "Setting the device online failed because of " + "insufficient authorization\n"); + break; + default: QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on read channel:" " negative reply\n", dev_name(&card->read.ccwdev->dev)); + } + QETH_CARD_TEXT_(card, 2, "idxread%c", + QETH_IDX_ACT_CAUSE_CODE(iob->data)); goto out; } /** - * temporary fix for microcode bug - * to revert it,replace OR by AND - */ + * * temporary fix for microcode bug + * * to revert it,replace OR by AND + * */ if ((!QETH_IDX_NO_PORTNAME_REQUIRED(iob->data)) || - (card->info.type == QETH_CARD_TYPE_OSAE)) + (card->info.type == QETH_CARD_TYPE_OSD)) card->info.portname_required = 1; memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2); @@ -1666,8 +2027,12 @@ int qeth_send_control_data(struct qeth_card *card, int len, unsigned long timeout, event_timeout; struct qeth_ipa_cmd *cmd; - QETH_DBF_TEXT(TRACE, 2, "sendctl"); + QETH_CARD_TEXT(card, 2, "sendctl"); + if (card->read_or_write_problem) { + qeth_release_buffer(iob->channel, iob); + return -EIO; + } reply = qeth_alloc_reply(card); if (!reply) { return -ENOMEM; @@ -1693,7 +2058,7 @@ int qeth_send_control_data(struct qeth_card *card, int len, event_timeout = QETH_TIMEOUT; timeout = jiffies + event_timeout; - QETH_DBF_TEXT(TRACE, 6, "noirqpnd"); + QETH_CARD_TEXT(card, 6, "noirqpnd"); spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags); rc = ccw_device_start(card->write.ccwdev, &card->write.ccw, (addr_t) iob, 0, 0); @@ -1702,7 +2067,7 @@ int qeth_send_control_data(struct qeth_card *card, int len, QETH_DBF_MESSAGE(2, "%s qeth_send_control_data: " "ccw_device_start rc = %i\n", dev_name(&card->write.ccwdev->dev), rc); - QETH_DBF_TEXT_(TRACE, 2, " err%d", rc); + QETH_CARD_TEXT_(card, 2, " err%d", rc); spin_lock_irqsave(&card->lock, flags); 
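/*
 * The polling wait in qeth_send_control_data() above spins with a
 * jiffies deadline instead of sleeping.  Generic shape of that deadline
 * loop (userspace time() standing in for jiffies), illustration only:
 */
#include <time.h>

static int poll_until(int (*done)(void *), void *arg, time_t deadline)
{
	while (!done(arg)) {
		if (time(NULL) > deadline)
			return -1;	/* timed out, as in the time_err path */
		/* cpu_relax() equivalent would go here */
	}
	return 0;
}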
list_del_init(&reply->list); qeth_put_reply(reply); @@ -1726,20 +2091,25 @@ int qeth_send_control_data(struct qeth_card *card, int len, if (time_after(jiffies, timeout)) goto time_err; cpu_relax(); - }; + } } + if (reply->rc == -EIO) + goto error; rc = reply->rc; qeth_put_reply(reply); return rc; time_err: + reply->rc = -ETIME; spin_lock_irqsave(&reply->card->lock, flags); list_del_init(&reply->list); spin_unlock_irqrestore(&reply->card->lock, flags); - reply->rc = -ETIME; atomic_inc(&reply->received); - wake_up(&reply->wait_q); +error: + atomic_set(&card->write.irq_pending, 0); + qeth_release_buffer(iob->channel, iob); + card->write.buf_no = (card->write.buf_no + 1) % QETH_CMD_BUFFER_NO; rc = reply->rc; qeth_put_reply(reply); return rc; @@ -1824,44 +2194,22 @@ static inline int qeth_get_initial_mtu_for_card(struct qeth_card *card) return 1500; case QETH_CARD_TYPE_IQD: return card->info.max_mtu; - case QETH_CARD_TYPE_OSAE: + case QETH_CARD_TYPE_OSD: switch (card->info.link_type) { case QETH_LINK_TYPE_HSTR: case QETH_LINK_TYPE_LANE_TR: return 2000; default: - return 1492; + return card->options.layer2 ? 1500 : 1492; } + case QETH_CARD_TYPE_OSM: + case QETH_CARD_TYPE_OSX: + return card->options.layer2 ? 1500 : 1492; default: return 1500; } } -static inline int qeth_get_max_mtu_for_card(int cardtype) -{ - switch (cardtype) { - - case QETH_CARD_TYPE_UNKNOWN: - case QETH_CARD_TYPE_OSAE: - case QETH_CARD_TYPE_OSN: - return 61440; - case QETH_CARD_TYPE_IQD: - return 57344; - default: - return 1500; - } -} - -static inline int qeth_get_mtu_out_of_mpc(int cardtype) -{ - switch (cardtype) { - case QETH_CARD_TYPE_IQD: - return 1; - default: - return 0; - } -} - static inline int qeth_get_mtu_outof_framesize(int framesize) { switch (framesize) { @@ -1881,11 +2229,12 @@ static inline int qeth_get_mtu_outof_framesize(int framesize) static inline int qeth_mtu_is_valid(struct qeth_card *card, int mtu) { switch (card->info.type) { - case QETH_CARD_TYPE_OSAE: - return ((mtu >= 576) && (mtu <= 61440)); + case QETH_CARD_TYPE_OSD: + case QETH_CARD_TYPE_OSM: + case QETH_CARD_TYPE_OSX: case QETH_CARD_TYPE_IQD: return ((mtu >= 576) && - (mtu <= card->info.max_mtu + 4096 - 32)); + (mtu <= card->info.max_mtu)); case QETH_CARD_TYPE_OSN: case QETH_CARD_TYPE_UNKNOWN: default: @@ -1908,7 +2257,7 @@ static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply, memcpy(&card->token.ulp_filter_r, QETH_ULP_ENABLE_RESP_FILTER_TOKEN(iob->data), QETH_MPC_TOKEN_LENGTH); - if (qeth_get_mtu_out_of_mpc(card->info.type)) { + if (card->info.type == QETH_CARD_TYPE_IQD) { memcpy(&framesize, QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data), 2); mtu = qeth_get_mtu_outof_framesize(framesize); if (!mtu) { @@ -1916,12 +2265,22 @@ static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply, QETH_DBF_TEXT_(SETUP, 2, " rc%d", iob->rc); return 0; } - card->info.max_mtu = mtu; + if (card->info.initial_mtu && (card->info.initial_mtu != mtu)) { + /* frame size has changed */ + if (card->dev && + ((card->dev->mtu == card->info.initial_mtu) || + (card->dev->mtu > mtu))) + card->dev->mtu = mtu; + qeth_free_qdio_buffers(card); + } card->info.initial_mtu = mtu; + card->info.max_mtu = mtu; card->qdio.in_buf_size = mtu + 2 * PAGE_SIZE; } else { - card->info.initial_mtu = qeth_get_initial_mtu_for_card(card); - card->info.max_mtu = qeth_get_max_mtu_for_card(card->info.type); + card->info.max_mtu = *(__u16 *)QETH_ULP_ENABLE_RESP_MAX_MTU( + iob->data); + card->info.initial_mtu = min(card->info.max_mtu, + 
qeth_get_initial_mtu_for_card(card)); card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT; } @@ -1932,6 +2291,7 @@ static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply, card->info.link_type = link_type; } else card->info.link_type = 0; + QETH_DBF_TEXT_(SETUP, 2, "link%d", card->info.link_type); QETH_DBF_TEXT_(SETUP, 2, " rc%d", iob->rc); return 0; } @@ -1982,6 +2342,13 @@ static int qeth_ulp_setup_cb(struct qeth_card *card, struct qeth_reply *reply, memcpy(&card->token.ulp_connection_r, QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data), QETH_MPC_TOKEN_LENGTH); + if (!strncmp("00S", QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data), + 3)) { + QETH_DBF_TEXT(SETUP, 2, "olmlimit"); + dev_err(&card->gdev->dev, "A connection could not be " + "established because of an OLM limit\n"); + iob->rc = -EMLINK; + } QETH_DBF_TEXT_(SETUP, 2, " rc%d", iob->rc); return 0; } @@ -2014,6 +2381,37 @@ static int qeth_ulp_setup(struct qeth_card *card) return rc; } +static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *q, int bidx) +{ + int rc; + struct qeth_qdio_out_buffer *newbuf; + + rc = 0; + newbuf = kmem_cache_zalloc(qeth_qdio_outbuf_cache, GFP_ATOMIC); + if (!newbuf) { + rc = -ENOMEM; + goto out; + } + newbuf->buffer = &q->qdio_bufs[bidx]; + skb_queue_head_init(&newbuf->skb_list); + lockdep_set_class(&newbuf->skb_list.lock, &qdio_out_skb_queue_key); + newbuf->q = q; + newbuf->aob = NULL; + newbuf->next_pending = q->bufs[bidx]; + atomic_set(&newbuf->state, QETH_QDIO_BUF_EMPTY); + q->bufs[bidx] = newbuf; + if (q->bufstates) { + q->bufstates[bidx].user = newbuf; + QETH_CARD_TEXT_(q->card, 2, "nbs%d", bidx); + QETH_CARD_TEXT_(q->card, 2, "%lx", (long) newbuf); + QETH_CARD_TEXT_(q->card, 2, "%lx", + (long) newbuf->next_pending); + } +out: + return rc; +} + + static int qeth_alloc_qdio_buffers(struct qeth_card *card) { int i, j; @@ -2024,52 +2422,63 @@ static int qeth_alloc_qdio_buffers(struct qeth_card *card) QETH_QDIO_ALLOCATED) != QETH_QDIO_UNINITIALIZED) return 0; - card->qdio.in_q = kmalloc(sizeof(struct qeth_qdio_q), - GFP_KERNEL); + card->qdio.in_q = kzalloc(sizeof(struct qeth_qdio_q), + GFP_KERNEL); if (!card->qdio.in_q) goto out_nomem; QETH_DBF_TEXT(SETUP, 2, "inq"); QETH_DBF_HEX(SETUP, 2, &card->qdio.in_q, sizeof(void *)); memset(card->qdio.in_q, 0, sizeof(struct qeth_qdio_q)); /* give inbound qeth_qdio_buffers their qdio_buffers */ - for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i) + for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i) { card->qdio.in_q->bufs[i].buffer = &card->qdio.in_q->qdio_bufs[i]; + card->qdio.in_q->bufs[i].rx_skb = NULL; + } /* inbound buffer pool */ if (qeth_alloc_buffer_pool(card)) goto out_freeinq; + /* outbound */ card->qdio.out_qs = - kmalloc(card->qdio.no_out_queues * + kzalloc(card->qdio.no_out_queues * sizeof(struct qeth_qdio_out_q *), GFP_KERNEL); if (!card->qdio.out_qs) goto out_freepool; for (i = 0; i < card->qdio.no_out_queues; ++i) { - card->qdio.out_qs[i] = kmalloc(sizeof(struct qeth_qdio_out_q), + card->qdio.out_qs[i] = kzalloc(sizeof(struct qeth_qdio_out_q), GFP_KERNEL); if (!card->qdio.out_qs[i]) goto out_freeoutq; QETH_DBF_TEXT_(SETUP, 2, "outq %i", i); QETH_DBF_HEX(SETUP, 2, &card->qdio.out_qs[i], sizeof(void *)); - memset(card->qdio.out_qs[i], 0, sizeof(struct qeth_qdio_out_q)); card->qdio.out_qs[i]->queue_no = i; /* give outbound qeth_qdio_buffers their qdio_buffers */ for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) { - card->qdio.out_qs[i]->bufs[j].buffer = - &card->qdio.out_qs[i]->qdio_bufs[j]; - skb_queue_head_init(&card->qdio.out_qs[i]->bufs[j]. 
- skb_list); - lockdep_set_class( - &card->qdio.out_qs[i]->bufs[j].skb_list.lock, - &qdio_out_skb_queue_key); - INIT_LIST_HEAD(&card->qdio.out_qs[i]->bufs[j].ctx_list); + WARN_ON(card->qdio.out_qs[i]->bufs[j] != NULL); + if (qeth_init_qdio_out_buf(card->qdio.out_qs[i], j)) + goto out_freeoutqbufs; } } + + /* completion */ + if (qeth_alloc_cq(card)) + goto out_freeoutq; + return 0; +out_freeoutqbufs: + while (j > 0) { + --j; + kmem_cache_free(qeth_qdio_outbuf_cache, + card->qdio.out_qs[i]->bufs[j]); + card->qdio.out_qs[i]->bufs[j] = NULL; + } out_freeoutq: - while (i > 0) - kfree(card->qdio.out_qs[--i]); + while (i > 0) { + qeth_clear_outq_buffers(card->qdio.out_qs[--i], 1); + kfree(card->qdio.out_qs[i]); + } kfree(card->qdio.out_qs); card->qdio.out_qs = NULL; out_freepool: @@ -2188,6 +2597,7 @@ static int qeth_mpc_initialize(struct qeth_card *card) return 0; out_qdio: qeth_qdio_clear_card(card, card->info.type != QETH_CARD_TYPE_IQD); + qdio_free(CARD_DDEV(card)); return rc; } @@ -2236,7 +2646,9 @@ static void qeth_print_status_no_portname(struct qeth_card *card) void qeth_print_status_message(struct qeth_card *card) { switch (card->info.type) { - case QETH_CARD_TYPE_OSAE: + case QETH_CARD_TYPE_OSD: + case QETH_CARD_TYPE_OSM: + case QETH_CARD_TYPE_OSX: /* VM will use a non-zero first character * to indicate a HiperSockets like reporting * of the level OSA sets the first character to zero @@ -2278,7 +2690,7 @@ static void qeth_initialize_working_pool_list(struct qeth_card *card) { struct qeth_buffer_pool_entry *entry; - QETH_DBF_TEXT(TRACE, 5, "inwrklst"); + QETH_CARD_TEXT(card, 5, "inwrklst"); list_for_each_entry(entry, &card->qdio.init_pool.entry_list, init_list) { @@ -2338,6 +2750,12 @@ static int qeth_init_input_buffer(struct qeth_card *card, struct qeth_buffer_pool_entry *pool_entry; int i; + if ((card->options.cq == QETH_CQ_ENABLED) && (!buf->rx_skb)) { + buf->rx_skb = dev_alloc_skb(QETH_RX_PULL_LEN + ETH_HLEN); + if (!buf->rx_skb) + return 1; + } + pool_entry = qeth_find_free_buffer_pool_entry(card); if (!pool_entry) return 1; @@ -2354,9 +2772,10 @@ static int qeth_init_input_buffer(struct qeth_card *card, buf->buffer->element[i].length = PAGE_SIZE; buf->buffer->element[i].addr = pool_entry->elements[i]; if (i == QETH_MAX_BUFFER_ELEMENTS(card) - 1) - buf->buffer->element[i].flags = SBAL_FLAGS_LAST_ENTRY; + buf->buffer->element[i].eflags = SBAL_EFLAGS_LAST_ENTRY; else - buf->buffer->element[i].flags = 0; + buf->buffer->element[i].eflags = 0; + buf->buffer->element[i].sflags = 0; } return 0; } @@ -2383,13 +2802,21 @@ int qeth_init_qdio_queues(struct qeth_card *card) QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); return rc; } + + /* completion */ + rc = qeth_cq_init(card); + if (rc) { + return rc; + } + /* outbound queue */ for (i = 0; i < card->qdio.no_out_queues; ++i) { memset(card->qdio.out_qs[i]->qdio_bufs, 0, QDIO_MAX_BUFFERS_PER_Q * sizeof(struct qdio_buffer)); for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) { qeth_clear_output_buffer(card->qdio.out_qs[i], - &card->qdio.out_qs[i]->bufs[j]); + card->qdio.out_qs[i]->bufs[j], + QETH_QDIO_BUF_EMPTY); } card->qdio.out_qs[i]->card = card; card->qdio.out_qs[i]->next_buf_to_fill = 0; @@ -2465,7 +2892,7 @@ int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob, int rc; char prot_type; - QETH_DBF_TEXT(TRACE, 4, "sendipa"); + QETH_CARD_TEXT(card, 4, "sendipa"); if (card->options.layer2) if (card->info.type == QETH_CARD_TYPE_OSN) @@ -2477,55 +2904,33 @@ int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob, qeth_prepare_ipa_cmd(card, iob,
prot_type); rc = qeth_send_control_data(card, IPA_CMD_LENGTH, iob, reply_cb, reply_param); + if (rc == -ETIME) { + qeth_clear_ipacmd_list(card); + qeth_schedule_recovery(card); + } return rc; } EXPORT_SYMBOL_GPL(qeth_send_ipa_cmd); -static int qeth_send_startstoplan(struct qeth_card *card, - enum qeth_ipa_cmds ipacmd, enum qeth_prot_versions prot) -{ - int rc; - struct qeth_cmd_buffer *iob; - - iob = qeth_get_ipacmd_buffer(card, ipacmd, prot); - rc = qeth_send_ipa_cmd(card, iob, NULL, NULL); - - return rc; -} - int qeth_send_startlan(struct qeth_card *card) { int rc; + struct qeth_cmd_buffer *iob; QETH_DBF_TEXT(SETUP, 2, "strtlan"); - rc = qeth_send_startstoplan(card, IPA_CMD_STARTLAN, 0); + iob = qeth_get_ipacmd_buffer(card, IPA_CMD_STARTLAN, 0); + rc = qeth_send_ipa_cmd(card, iob, NULL, NULL); return rc; } EXPORT_SYMBOL_GPL(qeth_send_startlan); -int qeth_send_stoplan(struct qeth_card *card) -{ - int rc = 0; - - /* - * TODO: according to the IPA format document page 14, - * TCP/IP (we!) never issue a STOPLAN - * is this right ?!? - */ - QETH_DBF_TEXT(SETUP, 2, "stoplan"); - - rc = qeth_send_startstoplan(card, IPA_CMD_STOPLAN, 0); - return rc; -} -EXPORT_SYMBOL_GPL(qeth_send_stoplan); - -int qeth_default_setadapterparms_cb(struct qeth_card *card, +static int qeth_default_setadapterparms_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) { struct qeth_ipa_cmd *cmd; - QETH_DBF_TEXT(TRACE, 4, "defadpcb"); + QETH_CARD_TEXT(card, 4, "defadpcb"); cmd = (struct qeth_ipa_cmd *) data; if (cmd->hdr.return_code == 0) @@ -2533,25 +2938,26 @@ int qeth_default_setadapterparms_cb(struct qeth_card *card, cmd->data.setadapterparms.hdr.return_code; return 0; } -EXPORT_SYMBOL_GPL(qeth_default_setadapterparms_cb); static int qeth_query_setadapterparms_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) { struct qeth_ipa_cmd *cmd; - QETH_DBF_TEXT(TRACE, 3, "quyadpcb"); + QETH_CARD_TEXT(card, 3, "quyadpcb"); cmd = (struct qeth_ipa_cmd *) data; - if (cmd->data.setadapterparms.data.query_cmds_supp.lan_type & 0x7f) + if (cmd->data.setadapterparms.data.query_cmds_supp.lan_type & 0x7f) { card->info.link_type = cmd->data.setadapterparms.data.query_cmds_supp.lan_type; + QETH_DBF_TEXT_(SETUP, 2, "lnk %d", card->info.link_type); + } card->options.adp.supported_funcs = cmd->data.setadapterparms.data.query_cmds_supp.supported_cmds; return qeth_default_setadapterparms_cb(card, reply, (unsigned long)cmd); } -struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card, +static struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card, __u32 command, __u32 cmdlen) { struct qeth_cmd_buffer *iob; @@ -2567,14 +2973,13 @@ struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card, return iob; } -EXPORT_SYMBOL_GPL(qeth_get_adapter_cmd); int qeth_query_setadapterparms(struct qeth_card *card) { int rc; struct qeth_cmd_buffer *iob; - QETH_DBF_TEXT(TRACE, 3, "queryadp"); + QETH_CARD_TEXT(card, 3, "queryadp"); iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_COMMANDS_SUPPORTED, sizeof(struct qeth_ipacmd_setadpparms)); rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL); @@ -2582,18 +2987,163 @@ int qeth_query_setadapterparms(struct qeth_card *card) } EXPORT_SYMBOL_GPL(qeth_query_setadapterparms); +static int qeth_query_ipassists_cb(struct qeth_card *card, + struct qeth_reply *reply, unsigned long data) +{ + struct qeth_ipa_cmd *cmd; + + QETH_DBF_TEXT(SETUP, 2, "qipasscb"); + + cmd = (struct qeth_ipa_cmd *) data; + + switch (cmd->hdr.return_code) { 
+ case IPA_RC_NOTSUPP: + case IPA_RC_L2_UNSUPPORTED_CMD: + QETH_DBF_TEXT(SETUP, 2, "ipaunsup"); + card->options.ipa4.supported_funcs |= IPA_SETADAPTERPARMS; + card->options.ipa6.supported_funcs |= IPA_SETADAPTERPARMS; + return 0; + default: + if (cmd->hdr.return_code) { + QETH_DBF_MESSAGE(1, "%s IPA_CMD_QIPASSIST: Unhandled " + "rc=%d\n", + dev_name(&card->gdev->dev), + cmd->hdr.return_code); + return 0; + } + } + + if (cmd->hdr.prot_version == QETH_PROT_IPV4) { + card->options.ipa4.supported_funcs = cmd->hdr.ipa_supported; + card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled; + } else if (cmd->hdr.prot_version == QETH_PROT_IPV6) { + card->options.ipa6.supported_funcs = cmd->hdr.ipa_supported; + card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled; + } else + QETH_DBF_MESSAGE(1, "%s IPA_CMD_QIPASSIST: Flawed LIC detected" + "\n", dev_name(&card->gdev->dev)); + return 0; +} + +int qeth_query_ipassists(struct qeth_card *card, enum qeth_prot_versions prot) +{ + int rc; + struct qeth_cmd_buffer *iob; + + QETH_DBF_TEXT_(SETUP, 2, "qipassi%i", prot); + iob = qeth_get_ipacmd_buffer(card, IPA_CMD_QIPASSIST, prot); + rc = qeth_send_ipa_cmd(card, iob, qeth_query_ipassists_cb, NULL); + return rc; +} +EXPORT_SYMBOL_GPL(qeth_query_ipassists); + +static int qeth_query_setdiagass_cb(struct qeth_card *card, + struct qeth_reply *reply, unsigned long data) +{ + struct qeth_ipa_cmd *cmd; + __u16 rc; + + cmd = (struct qeth_ipa_cmd *)data; + rc = cmd->hdr.return_code; + if (rc) + QETH_CARD_TEXT_(card, 2, "diagq:%x", rc); + else + card->info.diagass_support = cmd->data.diagass.ext; + return 0; +} + +static int qeth_query_setdiagass(struct qeth_card *card) +{ + struct qeth_cmd_buffer *iob; + struct qeth_ipa_cmd *cmd; + + QETH_DBF_TEXT(SETUP, 2, "qdiagass"); + iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0); + cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); + cmd->data.diagass.subcmd_len = 16; + cmd->data.diagass.subcmd = QETH_DIAGS_CMD_QUERY; + return qeth_send_ipa_cmd(card, iob, qeth_query_setdiagass_cb, NULL); +} + +static void qeth_get_trap_id(struct qeth_card *card, struct qeth_trap_id *tid) +{ + unsigned long info = get_zeroed_page(GFP_KERNEL); + struct sysinfo_2_2_2 *info222 = (struct sysinfo_2_2_2 *)info; + struct sysinfo_3_2_2 *info322 = (struct sysinfo_3_2_2 *)info; + struct ccw_dev_id ccwid; + int level; + + tid->chpid = card->info.chpid; + ccw_device_get_id(CARD_RDEV(card), &ccwid); + tid->ssid = ccwid.ssid; + tid->devno = ccwid.devno; + if (!info) + return; + level = stsi(NULL, 0, 0, 0); + if ((level >= 2) && (stsi(info222, 2, 2, 2) == 0)) + tid->lparnr = info222->lpar_number; + if ((level >= 3) && (stsi(info322, 3, 2, 2) == 0)) { + EBCASC(info322->vm[0].name, sizeof(info322->vm[0].name)); + memcpy(tid->vmname, info322->vm[0].name, sizeof(tid->vmname)); + } + free_page(info); + return; +} + +static int qeth_hw_trap_cb(struct qeth_card *card, + struct qeth_reply *reply, unsigned long data) +{ + struct qeth_ipa_cmd *cmd; + __u16 rc; + + cmd = (struct qeth_ipa_cmd *)data; + rc = cmd->hdr.return_code; + if (rc) + QETH_CARD_TEXT_(card, 2, "trapc:%x", rc); + return 0; +} + +int qeth_hw_trap(struct qeth_card *card, enum qeth_diags_trap_action action) +{ + struct qeth_cmd_buffer *iob; + struct qeth_ipa_cmd *cmd; + + QETH_DBF_TEXT(SETUP, 2, "diagtrap"); + iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0); + cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); + cmd->data.diagass.subcmd_len = 80; + cmd->data.diagass.subcmd = QETH_DIAGS_CMD_TRAP; +
cmd->data.diagass.type = 1; + cmd->data.diagass.action = action; + switch (action) { + case QETH_DIAGS_TRAP_ARM: + cmd->data.diagass.options = 0x0003; + cmd->data.diagass.ext = 0x00010000 + + sizeof(struct qeth_trap_id); + qeth_get_trap_id(card, + (struct qeth_trap_id *)cmd->data.diagass.cdata); + break; + case QETH_DIAGS_TRAP_DISARM: + cmd->data.diagass.options = 0x0001; + break; + case QETH_DIAGS_TRAP_CAPTURE: + break; + } + return qeth_send_ipa_cmd(card, iob, qeth_hw_trap_cb, NULL); +} +EXPORT_SYMBOL_GPL(qeth_hw_trap); + int qeth_check_qdio_errors(struct qeth_card *card, struct qdio_buffer *buf, unsigned int qdio_error, const char *dbftext) { if (qdio_error) { - QETH_DBF_TEXT(TRACE, 2, dbftext); - QETH_DBF_TEXT(QERR, 2, dbftext); - QETH_DBF_TEXT_(QERR, 2, " F15=%02X", - buf->element[15].flags & 0xff); - QETH_DBF_TEXT_(QERR, 2, " F14=%02X", - buf->element[14].flags & 0xff); - QETH_DBF_TEXT_(QERR, 2, " qerr=%X", qdio_error); - if ((buf->element[15].flags & 0xff) == 0x12) { + QETH_CARD_TEXT(card, 2, dbftext); + QETH_CARD_TEXT_(card, 2, " F15=%02X", + buf->element[15].sflags); + QETH_CARD_TEXT_(card, 2, " F14=%02X", + buf->element[14].sflags); + QETH_CARD_TEXT_(card, 2, " qerr=%X", qdio_error); + if ((buf->element[15].sflags) == 0x12) { card->stats.rx_dropped++; return 0; } else @@ -2603,9 +3153,19 @@ int qeth_check_qdio_errors(struct qeth_card *card, struct qdio_buffer *buf, } EXPORT_SYMBOL_GPL(qeth_check_qdio_errors); +void qeth_buffer_reclaim_work(struct work_struct *work) +{ + struct qeth_card *card = container_of(work, struct qeth_card, + buffer_reclaim_work.work); + + QETH_CARD_TEXT_(card, 2, "brw:%x", card->reclaim_index); + qeth_queue_input_buffer(card, card->reclaim_index); +} + void qeth_queue_input_buffer(struct qeth_card *card, int index) { struct qeth_qdio_q *queue = card->qdio.in_q; + struct list_head *lh; int count; int i; int rc; @@ -2637,6 +3197,20 @@ void qeth_queue_input_buffer(struct qeth_card *card, int index) atomic_add_unless(&card->force_alloc_skb, -1, 0); } + if (!count) { + i = 0; + list_for_each(lh, &card->qdio.in_buf_pool.entry_list) + i++; + if (i == card->qdio.in_buf_pool.buf_count) { + QETH_CARD_TEXT(card, 2, "qsarbw"); + card->reclaim_index = index; + schedule_delayed_work( + &card->buffer_reclaim_work, + QETH_RECLAIM_WORK_TIME); + } + return; + } + /* * according to old code it should be avoided to requeue all * 128 buffers in order to benefit from PCI avoidance. 
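The reclaim path added in this hunk no longer drops the requeue when the buffer pool runs dry: the current position is remembered in card->reclaim_index and qeth_buffer_reclaim_work retries qeth_queue_input_buffer from delayed-work context after QETH_RECLAIM_WORK_TIME. The batching that the trailing comment alludes to, handing buffers back to the device in chunks rather than all 128 at once, comes down to modular arithmetic on the ring. The standalone sketch below illustrates only that idea; RING_SIZE mirrors QDIO_MAX_BUFFERS_PER_Q, while REQUEUE_THRESHOLD and requeue_batch are invented names with a made-up threshold, not symbols from this driver.

    #include <stdio.h>

    #define RING_SIZE 128         /* mirrors QDIO_MAX_BUFFERS_PER_Q */
    #define REQUEUE_THRESHOLD 32  /* invented batch size for illustration */

    /* Buffers consumed since the last hand-back, on a RING_SIZE ring. */
    static int ring_distance(int next_to_init, int index)
    {
            return (index - next_to_init + RING_SIZE) % RING_SIZE;
    }

    /*
     * Hand buffers back in batches: the doorbell (the do_QDIO() call in
     * the real driver) is only rung once enough buffers have piled up,
     * which keeps the signaling and interrupt rate down.
     */
    static void requeue_batch(int *next_to_init, int index)
    {
            int count = ring_distance(*next_to_init, index);

            if (count < REQUEUE_THRESHOLD)
                    return; /* too few to justify a signal yet */

            printf("requeue %d buffers starting at %d\n", count, *next_to_init);
            *next_to_init = (*next_to_init + count) % RING_SIZE;
    }

    int main(void)
    {
            int next_to_init = 120; /* hand-back point wraps across the ring */
            int index;

            for (index = 121; index != 60; index = (index + 1) % RING_SIZE)
                    requeue_batch(&next_to_init, index);
            return 0;
    }

Run stand-alone, this prints two batched hand-backs while the index walks the ring once, which is the behaviour the requeue logic above is after: fewer, larger requeue operations instead of one per processed buffer.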
@@ -2656,10 +3230,7 @@ void qeth_queue_input_buffer(struct qeth_card *card, int index) qeth_get_micros() - card->perf_stats.inbound_do_qdio_start_time; if (rc) { - dev_warn(&card->gdev->dev, - "QDIO reported an error, rc=%i\n", rc); - QETH_DBF_TEXT(TRACE, 2, "qinberr"); - QETH_DBF_TEXT_(TRACE, 2, "%s", CARD_BUS_ID(card)); + QETH_CARD_TEXT(card, 2, "qinberr"); } queue->next_buf_to_init = (queue->next_buf_to_init + count) % QDIO_MAX_BUFFERS_PER_Q; @@ -2670,9 +3241,9 @@ EXPORT_SYMBOL_GPL(qeth_queue_input_buffer); static int qeth_handle_send_error(struct qeth_card *card, struct qeth_qdio_out_buffer *buffer, unsigned int qdio_err) { - int sbalf15 = buffer->buffer->element[15].flags & 0xff; + int sbalf15 = buffer->buffer->element[15].sflags; - QETH_DBF_TEXT(TRACE, 6, "hdsnderr"); + QETH_CARD_TEXT(card, 6, "hdsnderr"); if (card->info.type == QETH_CARD_TYPE_IQD) { if (sbalf15 == 0) { qdio_err = 0; @@ -2688,9 +3259,8 @@ static int qeth_handle_send_error(struct qeth_card *card, if ((sbalf15 >= 15) && (sbalf15 <= 31)) return QETH_SEND_ERROR_RETRY; - QETH_DBF_TEXT(TRACE, 1, "lnkfail"); - QETH_DBF_TEXT_(TRACE, 1, "%s", CARD_BUS_ID(card)); - QETH_DBF_TEXT_(TRACE, 1, "%04x %02x", + QETH_CARD_TEXT(card, 1, "lnkfail"); + QETH_CARD_TEXT_(card, 1, "%04x %02x", (u16)qdio_err, (u8)sbalf15); return QETH_SEND_ERROR_LINK_FAILURE; } @@ -2705,7 +3275,7 @@ static void qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue) if (atomic_read(&queue->used_buffers) >= QETH_HIGH_WATERMARK_PACK){ /* switch non-PACKING -> PACKING */ - QETH_DBF_TEXT(TRACE, 6, "np->pack"); + QETH_CARD_TEXT(queue->card, 6, "np->pack"); if (queue->card->options.performance_stats) queue->card->perf_stats.sc_dp_p++; queue->do_pack = 1; @@ -2728,17 +3298,17 @@ static int qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue) if (atomic_read(&queue->used_buffers) <= QETH_LOW_WATERMARK_PACK) { /* switch PACKING -> non-PACKING */ - QETH_DBF_TEXT(TRACE, 6, "pack->np"); + QETH_CARD_TEXT(queue->card, 6, "pack->np"); if (queue->card->options.performance_stats) queue->card->perf_stats.sc_p_dp++; queue->do_pack = 0; /* flush packing buffers */ - buffer = &queue->bufs[queue->next_buf_to_fill]; + buffer = queue->bufs[queue->next_buf_to_fill]; if ((atomic_read(&buffer->state) == QETH_QDIO_BUF_EMPTY) && (buffer->next_element_to_fill > 0)) { atomic_set(&buffer->state, - QETH_QDIO_BUF_PRIMED); + QETH_QDIO_BUF_PRIMED); flush_count++; queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) % @@ -2749,6 +3319,7 @@ static int qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue) return flush_count; } + /* * Called to flush a packing buffer if no more pci flags are on the queue. * Checks if there is a packing buffer and prepares it to be flushed. 
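Both mode switches in these hunks now trace through QETH_CARD_TEXT on the queue's own card and reach the packing buffer via the new queue->bufs[] pointer array; the policy itself is untouched. It is a classic hysteresis pair: packing starts once used_buffers climbs to QETH_HIGH_WATERMARK_PACK and stops only after the backlog has drained to QETH_LOW_WATERMARK_PACK, so a queue depth hovering near a single boundary cannot toggle the mode on every send. A minimal userspace sketch of the pattern, assuming made-up watermark values (HIGH_WM and LOW_WM stand in for the driver's constants, whose actual values are not shown here):

    #include <stdbool.h>
    #include <stdio.h>

    #define HIGH_WM 45 /* assumed: start packing at this backlog */
    #define LOW_WM  10 /* assumed: stop packing below this backlog */

    struct out_queue {
            int used_buffers; /* buffers given to the device, not yet completed */
            bool do_pack;     /* current mode */
    };

    /* Called on the send path: maybe switch non-PACKING -> PACKING. */
    static void switch_to_packing_if_needed(struct out_queue *q)
    {
            if (!q->do_pack && q->used_buffers >= HIGH_WM) {
                    q->do_pack = true;
                    printf("np->pack at backlog %d\n", q->used_buffers);
            }
    }

    /* Called as completions drain the queue: maybe switch back. */
    static void switch_to_nonpacking_if_needed(struct out_queue *q)
    {
            if (q->do_pack && q->used_buffers <= LOW_WM) {
                    q->do_pack = false;
                    printf("pack->np at backlog %d\n", q->used_buffers);
            }
    }

    int main(void)
    {
            struct out_queue q = { .used_buffers = 0, .do_pack = false };
            int backlog;

            /* backlog ramps up, then drains; the mode flips only at the edges */
            for (backlog = 0; backlog <= 50; backlog += 5) {
                    q.used_buffers = backlog;
                    switch_to_packing_if_needed(&q);
            }
            for (backlog = 50; backlog >= 0; backlog -= 5) {
                    q.used_buffers = backlog;
                    switch_to_nonpacking_if_needed(&q);
            }
            return 0;
    }

The gap between the two watermarks is the point of the design: the wider it is, the less often a bursty sender pays the cost of flushing half-filled packing buffers on a mode change.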
@@ -2758,7 +3329,7 @@ static int qeth_flush_buffers_on_no_pci(struct qeth_qdio_out_q *queue) { struct qeth_qdio_out_buffer *buffer; - buffer = &queue->bufs[queue->next_buf_to_fill]; + buffer = queue->bufs[queue->next_buf_to_fill]; if ((atomic_read(&buffer->state) == QETH_QDIO_BUF_EMPTY) && (buffer->next_element_to_fill > 0)) { /* it's a packing buffer */ @@ -2779,9 +3350,13 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index, unsigned int qdio_flags; for (i = index; i < index + count; ++i) { - buf = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q]; - buf->buffer->element[buf->next_element_to_fill - 1].flags |= - SBAL_FLAGS_LAST_ENTRY; + int bidx = i % QDIO_MAX_BUFFERS_PER_Q; + buf = queue->bufs[bidx]; + buf->buffer->element[buf->next_element_to_fill - 1].eflags |= + SBAL_EFLAGS_LAST_ENTRY; + + if (queue->bufstates) + queue->bufstates[bidx].user = buf; if (queue->card->info.type == QETH_CARD_TYPE_IQD) continue; @@ -2794,7 +3369,7 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index, /* it's likely that we'll go to packing * mode soon */ atomic_inc(&queue->set_pci_flags_count); - buf->buffer->element[0].flags |= 0x40; + buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ; } } else { if (!atomic_read(&queue->set_pci_flags_count)) { @@ -2807,12 +3382,11 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index, * further send was requested by the stack */ atomic_inc(&queue->set_pci_flags_count); - buf->buffer->element[0].flags |= 0x40; + buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ; } } } - queue->sync_iqdio_error = 0; queue->card->dev->trans_start = jiffies; if (queue->card->options.performance_stats) { queue->card->perf_stats.outbound_do_qdio_cnt++; @@ -2828,25 +3402,23 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index, queue->card->perf_stats.outbound_do_qdio_time += qeth_get_micros() - queue->card->perf_stats.outbound_do_qdio_start_time; - if (rc > 0) { - if (!(rc & QDIO_ERROR_SIGA_BUSY)) - queue->sync_iqdio_error = rc & 3; - } + atomic_add(count, &queue->used_buffers); if (rc) { queue->card->stats.tx_errors += count; /* ignore temporary SIGA errors without busy condition */ - if (rc == QDIO_ERROR_SIGA_TARGET) + if (rc == -ENOBUFS) return; - QETH_DBF_TEXT(TRACE, 2, "flushbuf"); - QETH_DBF_TEXT_(TRACE, 2, " err%d", rc); - QETH_DBF_TEXT_(TRACE, 2, "%s", CARD_DDEV_ID(queue->card)); + QETH_CARD_TEXT(queue->card, 2, "flushbuf"); + QETH_CARD_TEXT_(queue->card, 2, " q%d", queue->queue_no); + QETH_CARD_TEXT_(queue->card, 2, " idx%d", index); + QETH_CARD_TEXT_(queue->card, 2, " c%d", count); + QETH_CARD_TEXT_(queue->card, 2, " err%d", rc); /* this must not happen under normal circumstances. 
if it * happens something is really wrong -> recover */ qeth_schedule_recovery(queue->card); return; } - atomic_add(count, &queue->used_buffers); if (queue->card->options.performance_stats) queue->card->perf_stats.bufs_sent += count; } @@ -2891,6 +3463,133 @@ static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue) } } +void qeth_qdio_start_poll(struct ccw_device *ccwdev, int queue, + unsigned long card_ptr) +{ + struct qeth_card *card = (struct qeth_card *)card_ptr; + + if (card->dev && (card->dev->flags & IFF_UP)) + napi_schedule(&card->napi); +} +EXPORT_SYMBOL_GPL(qeth_qdio_start_poll); + +int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq) +{ + int rc; + + if (card->options.cq == QETH_CQ_NOTAVAILABLE) { + rc = -1; + goto out; + } else { + if (card->options.cq == cq) { + rc = 0; + goto out; + } + + if (card->state != CARD_STATE_DOWN && + card->state != CARD_STATE_RECOVER) { + rc = -1; + goto out; + } + + qeth_free_qdio_buffers(card); + card->options.cq = cq; + rc = 0; + } +out: + return rc; + +} +EXPORT_SYMBOL_GPL(qeth_configure_cq); + + +static void qeth_qdio_cq_handler(struct qeth_card *card, + unsigned int qdio_err, + unsigned int queue, int first_element, int count) { + struct qeth_qdio_q *cq = card->qdio.c_q; + int i; + int rc; + + if (!qeth_is_cq(card, queue)) + goto out; + + QETH_CARD_TEXT_(card, 5, "qcqhe%d", first_element); + QETH_CARD_TEXT_(card, 5, "qcqhc%d", count); + QETH_CARD_TEXT_(card, 5, "qcqherr%d", qdio_err); + + if (qdio_err) { + netif_stop_queue(card->dev); + qeth_schedule_recovery(card); + goto out; + } + + if (card->options.performance_stats) { + card->perf_stats.cq_cnt++; + card->perf_stats.cq_start_time = qeth_get_micros(); + } + + for (i = first_element; i < first_element + count; ++i) { + int bidx = i % QDIO_MAX_BUFFERS_PER_Q; + struct qdio_buffer *buffer = &cq->qdio_bufs[bidx]; + int e; + + e = 0; + while (buffer->element[e].addr) { + unsigned long phys_aob_addr; + + phys_aob_addr = (unsigned long) buffer->element[e].addr; + qeth_qdio_handle_aob(card, phys_aob_addr); + buffer->element[e].addr = NULL; + buffer->element[e].eflags = 0; + buffer->element[e].sflags = 0; + buffer->element[e].length = 0; + + ++e; + } + + buffer->element[15].eflags = 0; + buffer->element[15].sflags = 0; + } + rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, queue, + card->qdio.c_q->next_buf_to_init, + count); + if (rc) { + dev_warn(&card->gdev->dev, + "QDIO reported an error, rc=%i\n", rc); + QETH_CARD_TEXT(card, 2, "qcqherr"); + } + card->qdio.c_q->next_buf_to_init = (card->qdio.c_q->next_buf_to_init + + count) % QDIO_MAX_BUFFERS_PER_Q; + + netif_wake_queue(card->dev); + + if (card->options.performance_stats) { + int delta_t = qeth_get_micros(); + delta_t -= card->perf_stats.cq_start_time; + card->perf_stats.cq_time += delta_t; + } +out: + return; +} + +void qeth_qdio_input_handler(struct ccw_device *ccwdev, unsigned int qdio_err, + unsigned int queue, int first_elem, int count, + unsigned long card_ptr) +{ + struct qeth_card *card = (struct qeth_card *)card_ptr; + + QETH_CARD_TEXT_(card, 2, "qihq%d", queue); + QETH_CARD_TEXT_(card, 2, "qiec%d", qdio_err); + + if (qeth_is_cq(card, queue)) + qeth_qdio_cq_handler(card, qdio_err, queue, first_elem, count); + else if (qdio_err) + qeth_schedule_recovery(card); + + +} +EXPORT_SYMBOL_GPL(qeth_qdio_input_handler); + void qeth_qdio_output_handler(struct ccw_device *ccwdev, unsigned int qdio_error, int __queue, int first_element, int count, unsigned long card_ptr) @@ -2899,12 +3598,10 @@ void 
qeth_qdio_output_handler(struct ccw_device *ccwdev, struct qeth_qdio_out_q *queue = card->qdio.out_qs[__queue]; struct qeth_qdio_out_buffer *buffer; int i; - unsigned qeth_send_err; - QETH_DBF_TEXT(TRACE, 6, "qdouhdl"); - if (qdio_error & QDIO_ERROR_ACTIVATE_CHECK_CONDITION) { - QETH_DBF_TEXT(TRACE, 2, "achkcond"); - QETH_DBF_TEXT_(TRACE, 2, "%s", CARD_BUS_ID(card)); + QETH_CARD_TEXT(card, 6, "qdouhdl"); + if (qdio_error & QDIO_ERROR_FATAL) { + QETH_CARD_TEXT(card, 2, "achkcond"); netif_stop_queue(card->dev); qeth_schedule_recovery(card); return; @@ -2915,10 +3612,44 @@ void qeth_qdio_output_handler(struct ccw_device *ccwdev, qeth_get_micros(); } for (i = first_element; i < (first_element + count); ++i) { - buffer = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q]; - qeth_send_err = qeth_handle_send_error(card, buffer, qdio_error); - __qeth_clear_output_buffer(queue, buffer, - (qeth_send_err == QETH_SEND_ERROR_RETRY) ? 1 : 0); + int bidx = i % QDIO_MAX_BUFFERS_PER_Q; + buffer = queue->bufs[bidx]; + qeth_handle_send_error(card, buffer, qdio_error); + + if (queue->bufstates && + (queue->bufstates[bidx].flags & + QDIO_OUTBUF_STATE_FLAG_PENDING) != 0) { + WARN_ON_ONCE(card->options.cq != QETH_CQ_ENABLED); + + if (atomic_cmpxchg(&buffer->state, + QETH_QDIO_BUF_PRIMED, + QETH_QDIO_BUF_PENDING) == + QETH_QDIO_BUF_PRIMED) { + qeth_notify_skbs(queue, buffer, + TX_NOTIFY_PENDING); + } + buffer->aob = queue->bufstates[bidx].aob; + QETH_CARD_TEXT_(queue->card, 5, "pel%d", bidx); + QETH_CARD_TEXT(queue->card, 5, "aob"); + QETH_CARD_TEXT_(queue->card, 5, "%lx", + virt_to_phys(buffer->aob)); + if (qeth_init_qdio_out_buf(queue, bidx)) { + QETH_CARD_TEXT(card, 2, "outofbuf"); + qeth_schedule_recovery(card); + } + } else { + if (card->options.cq == QETH_CQ_ENABLED) { + enum iucv_tx_notify n; + + n = qeth_compute_cq_notification( + buffer->buffer->element[15].sflags, 0); + qeth_notify_skbs(queue, buffer, n); + } + + qeth_clear_output_buffer(queue, buffer, + QETH_QDIO_BUF_EMPTY); + } + qeth_cleanup_handled_pending(queue, bidx, 0); } atomic_sub(count, &queue->used_buffers); /* check if we need to do something on this outbound queue */ @@ -2932,54 +3663,87 @@ void qeth_qdio_output_handler(struct ccw_device *ccwdev, } EXPORT_SYMBOL_GPL(qeth_qdio_output_handler); +/** + * Note: Function assumes that we have 4 outbound queues. + */ int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb, int ipv, int cast_type) { - if (!ipv && (card->info.type == QETH_CARD_TYPE_OSAE)) - return card->qdio.default_out_queue; - switch (card->qdio.no_out_queues) { - case 4: - if (cast_type && card->info.is_multicast_different) - return card->info.is_multicast_different & - (card->qdio.no_out_queues - 1); - if (card->qdio.do_prio_queueing && (ipv == 4)) { - const u8 tos = ip_hdr(skb)->tos; - - if (card->qdio.do_prio_queueing == - QETH_PRIO_Q_ING_TOS) { - if (tos & IP_TOS_NOTIMPORTANT) - return 3; - if (tos & IP_TOS_HIGHRELIABILITY) - return 2; - if (tos & IP_TOS_HIGHTHROUGHPUT) - return 1; - if (tos & IP_TOS_LOWDELAY) - return 0; - } - if (card->qdio.do_prio_queueing == - QETH_PRIO_Q_ING_PREC) - return 3 - (tos >> 6); - } else if (card->qdio.do_prio_queueing && (ipv == 6)) { - /* TODO: IPv6!!! 
*/ + __be16 *tci; + u8 tos; + + if (cast_type && card->info.is_multicast_different) + return card->info.is_multicast_different & + (card->qdio.no_out_queues - 1); + + switch (card->qdio.do_prio_queueing) { + case QETH_PRIO_Q_ING_TOS: + case QETH_PRIO_Q_ING_PREC: + switch (ipv) { + case 4: + tos = ipv4_get_dsfield(ip_hdr(skb)); + break; + case 6: + tos = ipv6_get_dsfield(ipv6_hdr(skb)); + break; + default: + return card->qdio.default_out_queue; } - return card->qdio.default_out_queue; - case 1: /* fallthrough for single-out-queue 1920-device */ + if (card->qdio.do_prio_queueing == QETH_PRIO_Q_ING_PREC) + return ~tos >> 6 & 3; + if (tos & IPTOS_MINCOST) + return 3; + if (tos & IPTOS_RELIABILITY) + return 2; + if (tos & IPTOS_THROUGHPUT) + return 1; + if (tos & IPTOS_LOWDELAY) + return 0; + break; + case QETH_PRIO_Q_ING_SKB: + if (skb->priority > 5) + return 0; + return ~skb->priority >> 1 & 3; + case QETH_PRIO_Q_ING_VLAN: + tci = &((struct ethhdr *)skb->data)->h_proto; + if (*tci == ETH_P_8021Q) + return ~*(tci + 1) >> (VLAN_PRIO_SHIFT + 1) & 3; + break; default: - return card->qdio.default_out_queue; + break; } + return card->qdio.default_out_queue; } EXPORT_SYMBOL_GPL(qeth_get_priority_queue); -int qeth_get_elements_no(struct qeth_card *card, void *hdr, +int qeth_get_elements_for_frags(struct sk_buff *skb) +{ + int cnt, length, e, elements = 0; + struct skb_frag_struct *frag; + char *data; + + for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) { + frag = &skb_shinfo(skb)->frags[cnt]; + data = (char *)page_to_phys(skb_frag_page(frag)) + + frag->page_offset; + length = frag->size; + e = PFN_UP((unsigned long)data + length - 1) - + PFN_DOWN((unsigned long)data); + elements += e; + } + return elements; +} +EXPORT_SYMBOL_GPL(qeth_get_elements_for_frags); + +int qeth_get_elements_no(struct qeth_card *card, struct sk_buff *skb, int elems) { - int elements_needed = 0; + int dlen = skb->len - skb->data_len; + int elements_needed = PFN_UP((unsigned long)skb->data + dlen - 1) - + PFN_DOWN((unsigned long)skb->data); + + elements_needed += qeth_get_elements_for_frags(skb); - if (skb_shinfo(skb)->nr_frags > 0) - elements_needed = (skb_shinfo(skb)->nr_frags + 1); - if (elements_needed == 0) - elements_needed = 1 + (((((unsigned long) skb->data) % - PAGE_SIZE) + skb->len) >> PAGE_SHIFT); if ((elements_needed + elems) > QETH_MAX_BUFFER_ELEMENTS(card)) { QETH_DBF_MESSAGE(2, "Invalid size of IP packet " "(Number=%d / Length=%d). 
Discarded.\n", @@ -2990,15 +3754,37 @@ int qeth_get_elements_no(struct qeth_card *card, void *hdr, } EXPORT_SYMBOL_GPL(qeth_get_elements_no); +int qeth_hdr_chk_and_bounce(struct sk_buff *skb, struct qeth_hdr **hdr, int len) +{ + int hroom, inpage, rest; + + if (((unsigned long)skb->data & PAGE_MASK) != + (((unsigned long)skb->data + len - 1) & PAGE_MASK)) { + hroom = skb_headroom(skb); + inpage = PAGE_SIZE - ((unsigned long) skb->data % PAGE_SIZE); + rest = len - inpage; + if (rest > hroom) + return 1; + memmove(skb->data - rest, skb->data, skb->len - skb->data_len); + skb->data -= rest; + skb->tail -= rest; + *hdr = (struct qeth_hdr *)skb->data; + QETH_DBF_MESSAGE(2, "skb bounce len: %d rest: %d\n", len, rest); + } + return 0; +} +EXPORT_SYMBOL_GPL(qeth_hdr_chk_and_bounce); + static inline void __qeth_fill_buffer(struct sk_buff *skb, struct qdio_buffer *buffer, int is_tso, int *next_element_to_fill, int offset) { - int length = skb->len; + int length = skb->len - skb->data_len; int length_here; int element; char *data; - int first_lap ; + int first_lap, cnt; + struct skb_frag_struct *frag; element = *next_element_to_fill; data = skb->data; @@ -3021,22 +3807,50 @@ static inline void __qeth_fill_buffer(struct sk_buff *skb, length -= length_here; if (!length) { if (first_lap) - buffer->element[element].flags = 0; + if (skb_shinfo(skb)->nr_frags) + buffer->element[element].eflags = + SBAL_EFLAGS_FIRST_FRAG; + else + buffer->element[element].eflags = 0; else - buffer->element[element].flags = - SBAL_FLAGS_LAST_FRAG; + buffer->element[element].eflags = + SBAL_EFLAGS_MIDDLE_FRAG; } else { if (first_lap) - buffer->element[element].flags = - SBAL_FLAGS_FIRST_FRAG; + buffer->element[element].eflags = + SBAL_EFLAGS_FIRST_FRAG; else - buffer->element[element].flags = - SBAL_FLAGS_MIDDLE_FRAG; + buffer->element[element].eflags = + SBAL_EFLAGS_MIDDLE_FRAG; } data += length_here; element++; first_lap = 0; } + + for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) { + frag = &skb_shinfo(skb)->frags[cnt]; + data = (char *)page_to_phys(skb_frag_page(frag)) + + frag->page_offset; + length = frag->size; + while (length > 0) { + length_here = PAGE_SIZE - + ((unsigned long) data % PAGE_SIZE); + if (length < length_here) + length_here = length; + + buffer->element[element].addr = data; + buffer->element[element].length = length_here; + buffer->element[element].eflags = + SBAL_EFLAGS_MIDDLE_FRAG; + length -= length_here; + data += length_here; + element++; + } + } + + if (buffer->element[element - 1].eflags) + buffer->element[element - 1].eflags = SBAL_EFLAGS_LAST_FRAG; *next_element_to_fill = element; } @@ -3060,7 +3874,7 @@ static inline int qeth_fill_buffer(struct qeth_qdio_out_q *queue, /*fill first buffer entry only with header information */ buffer->element[element].addr = skb->data; buffer->element[element].length = hdr_len; - buffer->element[element].flags = SBAL_FLAGS_FIRST_FRAG; + buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG; buf->next_element_to_fill++; skb->data += hdr_len; skb->len -= hdr_len; @@ -3072,25 +3886,21 @@ static inline int qeth_fill_buffer(struct qeth_qdio_out_q *queue, buffer->element[element].addr = hdr; buffer->element[element].length = sizeof(struct qeth_hdr) + hd_len; - buffer->element[element].flags = SBAL_FLAGS_FIRST_FRAG; + buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG; buf->is_header[element] = 1; buf->next_element_to_fill++; } - if (skb_shinfo(skb)->nr_frags == 0) - __qeth_fill_buffer(skb, buffer, large_send, - (int *)&buf->next_element_to_fill, offset); 
- else - __qeth_fill_buffer_frag(skb, buffer, large_send, - (int *)&buf->next_element_to_fill); + __qeth_fill_buffer(skb, buffer, large_send, + (int *)&buf->next_element_to_fill, offset); if (!queue->do_pack) { - QETH_DBF_TEXT(TRACE, 6, "fillbfnp"); + QETH_CARD_TEXT(queue->card, 6, "fillbfnp"); /* set state to PRIMED -> will be flushed */ atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED); flush_cnt = 1; } else { - QETH_DBF_TEXT(TRACE, 6, "fillbfpa"); + QETH_CARD_TEXT(queue->card, 6, "fillbfpa"); if (queue->card->options.performance_stats) queue->card->perf_stats.skbs_sent_pack++; if (buf->next_element_to_fill >= @@ -3112,17 +3922,14 @@ int qeth_do_send_packet_fast(struct qeth_card *card, int offset, int hd_len) { struct qeth_qdio_out_buffer *buffer; - struct sk_buff *skb1; - struct qeth_skb_data *retry_ctrl; int index; - int rc; /* spin until we get the queue ... */ while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED, QETH_OUT_Q_LOCKED) != QETH_OUT_Q_UNLOCKED); /* ... now we've got the queue */ index = queue->next_buf_to_fill; - buffer = &queue->bufs[queue->next_buf_to_fill]; + buffer = queue->bufs[queue->next_buf_to_fill]; /* * check if buffer is empty to make sure that we do not 'overtake' * ourselves and try to fill a buffer that is already primed @@ -3134,25 +3941,6 @@ int qeth_do_send_packet_fast(struct qeth_card *card, atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED); qeth_fill_buffer(queue, buffer, skb, hdr, offset, hd_len); qeth_flush_buffers(queue, index, 1); - if (queue->sync_iqdio_error == 2) { - skb1 = skb_dequeue(&buffer->skb_list); - while (skb1) { - atomic_dec(&skb1->users); - skb1 = skb_dequeue(&buffer->skb_list); - } - retry_ctrl = (struct qeth_skb_data *) &skb->cb[16]; - if (retry_ctrl->magic != QETH_SKB_MAGIC) { - retry_ctrl->magic = QETH_SKB_MAGIC; - retry_ctrl->count = 0; - } - if (retry_ctrl->count < QETH_SIGA_CC2_RETRIES) { - retry_ctrl->count++; - rc = dev_queue_xmit(skb); - } else { - dev_kfree_skb_any(skb); - QETH_DBF_TEXT(QERR, 2, "qrdrop"); - } - } return 0; out: atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED); @@ -3175,7 +3963,7 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue, while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED, QETH_OUT_Q_LOCKED) != QETH_OUT_Q_UNLOCKED); start_index = queue->next_buf_to_fill; - buffer = &queue->bufs[queue->next_buf_to_fill]; + buffer = queue->bufs[queue->next_buf_to_fill]; /* * check if buffer is empty to make sure that we do not 'overtake' * ourselves and try to fill a buffer that is already primed @@ -3197,7 +3985,7 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue, queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) % QDIO_MAX_BUFFERS_PER_Q; - buffer = &queue->bufs[queue->next_buf_to_fill]; + buffer = queue->bufs[queue->next_buf_to_fill]; /* we did a step forward, so check buffer state * again */ if (atomic_read(&buffer->state) != @@ -3252,14 +4040,14 @@ static int qeth_setadp_promisc_mode_cb(struct qeth_card *card, struct qeth_ipa_cmd *cmd; struct qeth_ipacmd_setadpparms *setparms; - QETH_DBF_TEXT(TRACE, 4, "prmadpcb"); + QETH_CARD_TEXT(card, 4, "prmadpcb"); cmd = (struct qeth_ipa_cmd *) data; setparms = &(cmd->data.setadapterparms); qeth_default_setadapterparms_cb(card, reply, (unsigned long)cmd); if (cmd->hdr.return_code) { - QETH_DBF_TEXT_(TRACE, 4, "prmrc%2.2x", cmd->hdr.return_code); + QETH_CARD_TEXT_(card, 4, "prmrc%2.2x", cmd->hdr.return_code); setparms->data.mode = SET_PROMISC_MODE_OFF; } card->info.promisc_mode = setparms->data.mode; @@ -3273,7 
+4061,7 @@ void qeth_setadp_promisc_mode(struct qeth_card *card) struct qeth_cmd_buffer *iob; struct qeth_ipa_cmd *cmd; - QETH_DBF_TEXT(TRACE, 4, "setprom"); + QETH_CARD_TEXT(card, 4, "setprom"); if (((dev->flags & IFF_PROMISC) && (card->info.promisc_mode == SET_PROMISC_MODE_ON)) || @@ -3283,7 +4071,7 @@ void qeth_setadp_promisc_mode(struct qeth_card *card) mode = SET_PROMISC_MODE_OFF; if (dev->flags & IFF_PROMISC) mode = SET_PROMISC_MODE_ON; - QETH_DBF_TEXT_(TRACE, 4, "mode:%x", mode); + QETH_CARD_TEXT_(card, 4, "mode:%x", mode); iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_PROMISC_MODE, sizeof(struct qeth_ipacmd_setadpparms)); @@ -3300,9 +4088,9 @@ int qeth_change_mtu(struct net_device *dev, int new_mtu) card = dev->ml_priv; - QETH_DBF_TEXT(TRACE, 4, "chgmtu"); + QETH_CARD_TEXT(card, 4, "chgmtu"); sprintf(dbf_text, "%8x", new_mtu); - QETH_DBF_TEXT(TRACE, 4, dbf_text); + QETH_CARD_TEXT(card, 4, dbf_text); if (new_mtu < 64) return -EINVAL; @@ -3322,7 +4110,7 @@ struct net_device_stats *qeth_get_stats(struct net_device *dev) card = dev->ml_priv; - QETH_DBF_TEXT(TRACE, 5, "getstat"); + QETH_CARD_TEXT(card, 5, "getstat"); return &card->stats; } @@ -3333,7 +4121,7 @@ static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card, { struct qeth_ipa_cmd *cmd; - QETH_DBF_TEXT(TRACE, 4, "chgmaccb"); + QETH_CARD_TEXT(card, 4, "chgmaccb"); cmd = (struct qeth_ipa_cmd *) data; if (!card->options.layer2 || @@ -3353,7 +4141,7 @@ int qeth_setadpparms_change_macaddr(struct qeth_card *card) struct qeth_cmd_buffer *iob; struct qeth_ipa_cmd *cmd; - QETH_DBF_TEXT(TRACE, 4, "chgmac"); + QETH_CARD_TEXT(card, 4, "chgmac"); iob = qeth_get_adapter_cmd(card, IPA_SETADP_ALTER_MAC_ADDRESS, sizeof(struct qeth_ipacmd_setadpparms)); @@ -3373,9 +4161,9 @@ static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card, { struct qeth_ipa_cmd *cmd; struct qeth_set_access_ctrl *access_ctrl_req; - int rc; + int fallback = *(int *)reply->param; - QETH_DBF_TEXT(TRACE, 4, "setaccb"); + QETH_CARD_TEXT(card, 4, "setaccb"); cmd = (struct qeth_ipa_cmd *) data; access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl; @@ -3383,12 +4171,14 @@ static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card, QETH_DBF_TEXT_(SETUP, 2, "%s", card->gdev->dev.kobj.name); QETH_DBF_TEXT_(SETUP, 2, "rc=%d", cmd->data.setadapterparms.hdr.return_code); + if (cmd->data.setadapterparms.hdr.return_code != + SET_ACCESS_CTRL_RC_SUCCESS) + QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_CTRL(%s,%d)==%d\n", + card->gdev->dev.kobj.name, + access_ctrl_req->subcmd_code, + cmd->data.setadapterparms.hdr.return_code); switch (cmd->data.setadapterparms.hdr.return_code) { case SET_ACCESS_CTRL_RC_SUCCESS: - case SET_ACCESS_CTRL_RC_ALREADY_NOT_ISOLATED: - case SET_ACCESS_CTRL_RC_ALREADY_ISOLATED: - { - card->options.isolation = access_ctrl_req->subcmd_code; if (card->options.isolation == ISOLATION_MODE_NONE) { dev_info(&card->gdev->dev, "QDIO data connection isolation is deactivated\n"); @@ -3396,84 +4186,71 @@ static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card, dev_info(&card->gdev->dev, "QDIO data connection isolation is activated\n"); } - QETH_DBF_MESSAGE(3, "OK:SET_ACCESS_CTRL(%s, %d)==%d\n", - card->gdev->dev.kobj.name, - access_ctrl_req->subcmd_code, - cmd->data.setadapterparms.hdr.return_code); - rc = 0; break; - } + case SET_ACCESS_CTRL_RC_ALREADY_NOT_ISOLATED: + QETH_DBF_MESSAGE(2, "%s QDIO data connection isolation already " + "deactivated\n", dev_name(&card->gdev->dev)); + if (fallback) + card->options.isolation = 
card->options.prev_isolation; + break; + case SET_ACCESS_CTRL_RC_ALREADY_ISOLATED: + QETH_DBF_MESSAGE(2, "%s QDIO data connection isolation already" + " activated\n", dev_name(&card->gdev->dev)); + if (fallback) + card->options.isolation = card->options.prev_isolation; + break; case SET_ACCESS_CTRL_RC_NOT_SUPPORTED: - { - QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_CTRL(%s,%d)==%d\n", - card->gdev->dev.kobj.name, - access_ctrl_req->subcmd_code, - cmd->data.setadapterparms.hdr.return_code); dev_err(&card->gdev->dev, "Adapter does not " "support QDIO data connection isolation\n"); - - /* ensure isolation mode is "none" */ - card->options.isolation = ISOLATION_MODE_NONE; - rc = -EOPNOTSUPP; break; - } case SET_ACCESS_CTRL_RC_NONE_SHARED_ADAPTER: - { - QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_MODE(%s,%d)==%d\n", - card->gdev->dev.kobj.name, - access_ctrl_req->subcmd_code, - cmd->data.setadapterparms.hdr.return_code); dev_err(&card->gdev->dev, "Adapter is dedicated. " "QDIO data connection isolation not supported\n"); - - /* ensure isolation mode is "none" */ - card->options.isolation = ISOLATION_MODE_NONE; - rc = -EOPNOTSUPP; + if (fallback) + card->options.isolation = card->options.prev_isolation; break; - } case SET_ACCESS_CTRL_RC_ACTIVE_CHECKSUM_OFF: - { - QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_MODE(%s,%d)==%d\n", - card->gdev->dev.kobj.name, - access_ctrl_req->subcmd_code, - cmd->data.setadapterparms.hdr.return_code); dev_err(&card->gdev->dev, "TSO does not permit QDIO data connection isolation\n"); - - /* ensure isolation mode is "none" */ - card->options.isolation = ISOLATION_MODE_NONE; - rc = -EPERM; + if (fallback) + card->options.isolation = card->options.prev_isolation; + break; + case SET_ACCESS_CTRL_RC_REFLREL_UNSUPPORTED: + dev_err(&card->gdev->dev, "The adjacent switch port does not " + "support reflective relay mode\n"); + if (fallback) + card->options.isolation = card->options.prev_isolation; + break; + case SET_ACCESS_CTRL_RC_REFLREL_FAILED: + dev_err(&card->gdev->dev, "The reflective relay mode cannot be " + "enabled at the adjacent switch port"); + if (fallback) + card->options.isolation = card->options.prev_isolation; + break; + case SET_ACCESS_CTRL_RC_REFLREL_DEACT_FAILED: + dev_warn(&card->gdev->dev, "Turning off reflective relay mode " + "at the adjacent switch failed\n"); break; - } default: - { /* this should never happen */ - QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_MODE(%s,%d)==%d" - "==UNKNOWN\n", - card->gdev->dev.kobj.name, - access_ctrl_req->subcmd_code, - cmd->data.setadapterparms.hdr.return_code); - - /* ensure isolation mode is "none" */ - card->options.isolation = ISOLATION_MODE_NONE; - rc = 0; + if (fallback) + card->options.isolation = card->options.prev_isolation; break; } - } qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd); - return rc; + return 0; } static int qeth_setadpparms_set_access_ctrl(struct qeth_card *card, - enum qeth_ipa_isolation_modes isolation) + enum qeth_ipa_isolation_modes isolation, int fallback) { int rc; struct qeth_cmd_buffer *iob; struct qeth_ipa_cmd *cmd; struct qeth_set_access_ctrl *access_ctrl_req; - QETH_DBF_TEXT(TRACE, 4, "setacctl"); + QETH_CARD_TEXT(card, 4, "setacctl"); QETH_DBF_TEXT_(SETUP, 2, "setacctl"); QETH_DBF_TEXT_(SETUP, 2, "%s", card->gdev->dev.kobj.name); @@ -3486,26 +4263,28 @@ static int qeth_setadpparms_set_access_ctrl(struct qeth_card *card, access_ctrl_req->subcmd_code = isolation; rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_set_access_ctrl_cb, - NULL); + &fallback); QETH_DBF_TEXT_(SETUP, 2, "rc=%d", rc); 
return rc; } -int qeth_set_access_ctrl_online(struct qeth_card *card) +int qeth_set_access_ctrl_online(struct qeth_card *card, int fallback) { int rc = 0; - QETH_DBF_TEXT(TRACE, 4, "setactlo"); + QETH_CARD_TEXT(card, 4, "setactlo"); - if (card->info.type == QETH_CARD_TYPE_OSAE && - qeth_adp_supported(card, IPA_SETADP_SET_ACCESS_CONTROL)) { + if ((card->info.type == QETH_CARD_TYPE_OSD || + card->info.type == QETH_CARD_TYPE_OSX) && + qeth_adp_supported(card, IPA_SETADP_SET_ACCESS_CONTROL)) { rc = qeth_setadpparms_set_access_ctrl(card, - card->options.isolation); + card->options.isolation, fallback); if (rc) { QETH_DBF_MESSAGE(3, - "IPA(SET_ACCESS_CTRL,%s,%d) sent failed", + "IPA(SET_ACCESS_CTRL,%s,%d) sent failed\n", card->gdev->dev.kobj.name, rc); + rc = -EOPNOTSUPP; } } else if (card->options.isolation != ISOLATION_MODE_NONE) { card->options.isolation = ISOLATION_MODE_NONE; @@ -3522,8 +4301,8 @@ void qeth_tx_timeout(struct net_device *dev) { struct qeth_card *card; - QETH_DBF_TEXT(TRACE, 4, "txtimeo"); card = dev->ml_priv; + QETH_CARD_TEXT(card, 4, "txtimeo"); card->stats.tx_errors++; qeth_schedule_recovery(card); } @@ -3602,7 +4381,7 @@ static int qeth_send_ipa_snmp_cmd(struct qeth_card *card, { u16 s1, s2; - QETH_DBF_TEXT(TRACE, 4, "sendsnmp"); + QETH_CARD_TEXT(card, 4, "sendsnmp"); memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE); memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data), @@ -3627,7 +4406,7 @@ static int qeth_snmp_command_cb(struct qeth_card *card, unsigned char *data; __u16 data_len; - QETH_DBF_TEXT(TRACE, 3, "snpcmdcb"); + QETH_CARD_TEXT(card, 3, "snpcmdcb"); cmd = (struct qeth_ipa_cmd *) sdata; data = (unsigned char *)((char *)cmd - reply->offset); @@ -3635,13 +4414,13 @@ static int qeth_snmp_command_cb(struct qeth_card *card, snmp = &cmd->data.setadapterparms.data.snmp; if (cmd->hdr.return_code) { - QETH_DBF_TEXT_(TRACE, 4, "scer1%i", cmd->hdr.return_code); + QETH_CARD_TEXT_(card, 4, "scer1%i", cmd->hdr.return_code); return 0; } if (cmd->data.setadapterparms.hdr.return_code) { cmd->hdr.return_code = cmd->data.setadapterparms.hdr.return_code; - QETH_DBF_TEXT_(TRACE, 4, "scer2%i", cmd->hdr.return_code); + QETH_CARD_TEXT_(card, 4, "scer2%i", cmd->hdr.return_code); return 0; } data_len = *((__u16 *)QETH_IPA_PDU_LEN_PDU1(data)); @@ -3652,13 +4431,13 @@ static int qeth_snmp_command_cb(struct qeth_card *card, /* check if there is enough room in userspace */ if ((qinfo->udata_len - qinfo->udata_offset) < data_len) { - QETH_DBF_TEXT_(TRACE, 4, "scer3%i", -ENOMEM); - cmd->hdr.return_code = -ENOMEM; + QETH_CARD_TEXT_(card, 4, "scer3%i", -ENOMEM); + cmd->hdr.return_code = IPA_RC_ENOMEM; return 0; } - QETH_DBF_TEXT_(TRACE, 4, "snore%i", + QETH_CARD_TEXT_(card, 4, "snore%i", cmd->data.setadapterparms.hdr.used_total); - QETH_DBF_TEXT_(TRACE, 4, "sseqn%i", + QETH_CARD_TEXT_(card, 4, "sseqn%i", cmd->data.setadapterparms.hdr.seq_no); /*copy entries to user buffer*/ if (cmd->data.setadapterparms.hdr.seq_no == 1) { @@ -3672,9 +4451,9 @@ static int qeth_snmp_command_cb(struct qeth_card *card, } qinfo->udata_offset += data_len; /* check if all replies received ... 
*/ - QETH_DBF_TEXT_(TRACE, 4, "srtot%i", + QETH_CARD_TEXT_(card, 4, "srtot%i", cmd->data.setadapterparms.hdr.used_total); - QETH_DBF_TEXT_(TRACE, 4, "srseq%i", + QETH_CARD_TEXT_(card, 4, "srseq%i", cmd->data.setadapterparms.hdr.seq_no); if (cmd->data.setadapterparms.hdr.seq_no < cmd->data.setadapterparms.hdr.used_total) @@ -3687,11 +4466,11 @@ int qeth_snmp_command(struct qeth_card *card, char __user *udata) struct qeth_cmd_buffer *iob; struct qeth_ipa_cmd *cmd; struct qeth_snmp_ureq *ureq; - int req_len; + unsigned int req_len; struct qeth_arp_query_info qinfo = {0, }; int rc = 0; - QETH_DBF_TEXT(TRACE, 3, "snmpcmd"); + QETH_CARD_TEXT(card, 3, "snmpcmd"); if (card->info.guestlan) return -EOPNOTSUPP; @@ -3703,15 +4482,14 @@ int qeth_snmp_command(struct qeth_card *card, char __user *udata) /* skip 4 bytes (data_len struct member) to get req_len */ if (copy_from_user(&req_len, udata + sizeof(int), sizeof(int))) return -EFAULT; - ureq = kmalloc(req_len+sizeof(struct qeth_snmp_ureq_hdr), GFP_KERNEL); - if (!ureq) { - QETH_DBF_TEXT(TRACE, 2, "snmpnome"); - return -ENOMEM; - } - if (copy_from_user(ureq, udata, - req_len + sizeof(struct qeth_snmp_ureq_hdr))) { - kfree(ureq); - return -EFAULT; + if (req_len > (QETH_BUFSIZE - IPA_PDU_HEADER_SIZE - + sizeof(struct qeth_ipacmd_hdr) - + sizeof(struct qeth_ipacmd_setadpparms_hdr))) + return -EINVAL; + ureq = memdup_user(udata, req_len + sizeof(struct qeth_snmp_ureq_hdr)); + if (IS_ERR(ureq)) { + QETH_CARD_TEXT(card, 2, "snmpnome"); + return PTR_ERR(ureq); } qinfo.udata_len = ureq->hdr.data_len; qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL); @@ -3741,6 +4519,140 @@ int qeth_snmp_command(struct qeth_card *card, char __user *udata) } EXPORT_SYMBOL_GPL(qeth_snmp_command); +static int qeth_setadpparms_query_oat_cb(struct qeth_card *card, + struct qeth_reply *reply, unsigned long data) +{ + struct qeth_ipa_cmd *cmd; + struct qeth_qoat_priv *priv; + char *resdata; + int resdatalen; + + QETH_CARD_TEXT(card, 3, "qoatcb"); + + cmd = (struct qeth_ipa_cmd *)data; + priv = (struct qeth_qoat_priv *)reply->param; + resdatalen = cmd->data.setadapterparms.hdr.cmdlength; + resdata = (char *)data + 28; + + if (resdatalen > (priv->buffer_len - priv->response_len)) { + cmd->hdr.return_code = IPA_RC_FFFF; + return 0; + } + + memcpy((priv->buffer + priv->response_len), resdata, + resdatalen); + priv->response_len += resdatalen; + + if (cmd->data.setadapterparms.hdr.seq_no < + cmd->data.setadapterparms.hdr.used_total) + return 1; + return 0; +} + +int qeth_query_oat_command(struct qeth_card *card, char __user *udata) +{ + int rc = 0; + struct qeth_cmd_buffer *iob; + struct qeth_ipa_cmd *cmd; + struct qeth_query_oat *oat_req; + struct qeth_query_oat_data oat_data; + struct qeth_qoat_priv priv; + void __user *tmp; + + QETH_CARD_TEXT(card, 3, "qoatcmd"); + + if (!qeth_adp_supported(card, IPA_SETADP_QUERY_OAT)) { + rc = -EOPNOTSUPP; + goto out; + } + + if (copy_from_user(&oat_data, udata, + sizeof(struct qeth_query_oat_data))) { + rc = -EFAULT; + goto out; + } + + priv.buffer_len = oat_data.buffer_len; + priv.response_len = 0; + priv.buffer = kzalloc(oat_data.buffer_len, GFP_KERNEL); + if (!priv.buffer) { + rc = -ENOMEM; + goto out; + } + + iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT, + sizeof(struct qeth_ipacmd_setadpparms_hdr) + + sizeof(struct qeth_query_oat)); + cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); + oat_req = &cmd->data.setadapterparms.data.query_oat; + oat_req->subcmd_code = oat_data.command; + + rc = qeth_send_ipa_cmd(card, iob, 
qeth_setadpparms_query_oat_cb, + &priv); + if (!rc) { + if (is_compat_task()) + tmp = compat_ptr(oat_data.ptr); + else + tmp = (void __user *)(unsigned long)oat_data.ptr; + + if (copy_to_user(tmp, priv.buffer, + priv.response_len)) { + rc = -EFAULT; + goto out_free; + } + + oat_data.response_len = priv.response_len; + + if (copy_to_user(udata, &oat_data, + sizeof(struct qeth_query_oat_data))) + rc = -EFAULT; + } else + if (rc == IPA_RC_FFFF) + rc = -EFAULT; + +out_free: + kfree(priv.buffer); +out: + return rc; +} +EXPORT_SYMBOL_GPL(qeth_query_oat_command); + +static int qeth_query_card_info_cb(struct qeth_card *card, + struct qeth_reply *reply, unsigned long data) +{ + struct qeth_ipa_cmd *cmd; + struct qeth_query_card_info *card_info; + struct carrier_info *carrier_info; + + QETH_CARD_TEXT(card, 2, "qcrdincb"); + carrier_info = (struct carrier_info *)reply->param; + cmd = (struct qeth_ipa_cmd *)data; + card_info = &cmd->data.setadapterparms.data.card_info; + if (cmd->data.setadapterparms.hdr.return_code == 0) { + carrier_info->card_type = card_info->card_type; + carrier_info->port_mode = card_info->port_mode; + carrier_info->port_speed = card_info->port_speed; + } + + qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd); + return 0; +} + +int qeth_query_card_info(struct qeth_card *card, + struct carrier_info *carrier_info) +{ + struct qeth_cmd_buffer *iob; + + QETH_CARD_TEXT(card, 2, "qcrdinfo"); + if (!qeth_adp_supported(card, IPA_SETADP_QUERY_CARD_INFO)) + return -EOPNOTSUPP; + iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_CARD_INFO, + sizeof(struct qeth_ipacmd_setadpparms_hdr)); + return qeth_send_ipa_cmd(card, iob, qeth_query_card_info_cb, + (void *)carrier_info); +} +EXPORT_SYMBOL_GPL(qeth_query_card_info); + static inline int qeth_get_qdio_q_format(struct qeth_card *card) { switch (card->info.type) { @@ -3751,11 +4663,86 @@ static inline int qeth_get_qdio_q_format(struct qeth_card *card) } } +static void qeth_determine_capabilities(struct qeth_card *card) +{ + int rc; + int length; + char *prcd; + struct ccw_device *ddev; + int ddev_offline = 0; + + QETH_DBF_TEXT(SETUP, 2, "detcapab"); + ddev = CARD_DDEV(card); + if (!ddev->online) { + ddev_offline = 1; + rc = ccw_device_set_online(ddev); + if (rc) { + QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc); + goto out; + } + } + + rc = qeth_read_conf_data(card, (void **) &prcd, &length); + if (rc) { + QETH_DBF_MESSAGE(2, "%s qeth_read_conf_data returned %i\n", + dev_name(&card->gdev->dev), rc); + QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc); + goto out_offline; + } + qeth_configure_unitaddr(card, prcd); + if (ddev_offline) + qeth_configure_blkt_default(card, prcd); + kfree(prcd); + + rc = qdio_get_ssqd_desc(ddev, &card->ssqd); + if (rc) + QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc); + + QETH_DBF_TEXT_(SETUP, 2, "qfmt%d", card->ssqd.qfmt); + QETH_DBF_TEXT_(SETUP, 2, "%d", card->ssqd.qdioac1); + QETH_DBF_TEXT_(SETUP, 2, "%d", card->ssqd.qdioac3); + QETH_DBF_TEXT_(SETUP, 2, "icnt%d", card->ssqd.icnt); + if (!((card->ssqd.qfmt != QDIO_IQDIO_QFMT) || + ((card->ssqd.qdioac1 & CHSC_AC1_INITIATE_INPUTQ) == 0) || + ((card->ssqd.qdioac3 & CHSC_AC3_FORMAT2_CQ_AVAILABLE) == 0))) { + dev_info(&card->gdev->dev, + "Completion Queueing supported\n"); + } else { + card->options.cq = QETH_CQ_NOTAVAILABLE; + } + + +out_offline: + if (ddev_offline == 1) + ccw_device_set_offline(ddev); +out: + return; +} + +static inline void qeth_qdio_establish_cq(struct qeth_card *card, + struct qdio_buffer **in_sbal_ptrs, + void (**queue_start_poll) (struct ccw_device *, int, 
unsigned long)) { + int i; + + if (card->options.cq == QETH_CQ_ENABLED) { + int offset = QDIO_MAX_BUFFERS_PER_Q * + (card->qdio.no_in_queues - 1); + i = QDIO_MAX_BUFFERS_PER_Q * (card->qdio.no_in_queues - 1); + for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i) { + in_sbal_ptrs[offset + i] = (struct qdio_buffer *) + virt_to_phys(card->qdio.c_q->bufs[i].buffer); + } + + queue_start_poll[card->qdio.no_in_queues - 1] = NULL; + } +} + static int qeth_qdio_establish(struct qeth_card *card) { struct qdio_initialize init_data; char *qib_param_field; struct qdio_buffer **in_sbal_ptrs; + void (**queue_start_poll) (struct ccw_device *, int, unsigned long); struct qdio_buffer **out_sbal_ptrs; int i, j, k; int rc = 0; @@ -3764,34 +4751,48 @@ static int qeth_qdio_establish(struct qeth_card *card) qib_param_field = kzalloc(QDIO_MAX_BUFFERS_PER_Q * sizeof(char), GFP_KERNEL); - if (!qib_param_field) - return -ENOMEM; + if (!qib_param_field) { + rc = -ENOMEM; + goto out_free_nothing; + } qeth_create_qib_param_field(card, qib_param_field); qeth_create_qib_param_field_blkt(card, qib_param_field); - in_sbal_ptrs = kmalloc(QDIO_MAX_BUFFERS_PER_Q * sizeof(void *), + in_sbal_ptrs = kzalloc(card->qdio.no_in_queues * + QDIO_MAX_BUFFERS_PER_Q * sizeof(void *), GFP_KERNEL); if (!in_sbal_ptrs) { - kfree(qib_param_field); - return -ENOMEM; + rc = -ENOMEM; + goto out_free_qib_param; } - for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i) + for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i) { in_sbal_ptrs[i] = (struct qdio_buffer *) virt_to_phys(card->qdio.in_q->bufs[i].buffer); + } + + queue_start_poll = kzalloc(sizeof(void *) * card->qdio.no_in_queues, + GFP_KERNEL); + if (!queue_start_poll) { + rc = -ENOMEM; + goto out_free_in_sbals; + } + for (i = 0; i < card->qdio.no_in_queues; ++i) + queue_start_poll[i] = card->discipline->start_poll; + + qeth_qdio_establish_cq(card, in_sbal_ptrs, queue_start_poll); out_sbal_ptrs = - kmalloc(card->qdio.no_out_queues * QDIO_MAX_BUFFERS_PER_Q * + kzalloc(card->qdio.no_out_queues * QDIO_MAX_BUFFERS_PER_Q * sizeof(void *), GFP_KERNEL); if (!out_sbal_ptrs) { - kfree(in_sbal_ptrs); - kfree(qib_param_field); - return -ENOMEM; + rc = -ENOMEM; + goto out_free_queue_start_poll; } for (i = 0, k = 0; i < card->qdio.no_out_queues; ++i) for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j, ++k) { out_sbal_ptrs[k] = (struct qdio_buffer *)virt_to_phys( - card->qdio.out_qs[i]->bufs[j].buffer); + card->qdio.out_qs[i]->bufs[j]->buffer); } memset(&init_data, 0, sizeof(struct qdio_initialize)); @@ -3799,23 +4800,51 @@ static int qeth_qdio_establish(struct qeth_card *card) init_data.q_format = qeth_get_qdio_q_format(card); init_data.qib_param_field_format = 0; init_data.qib_param_field = qib_param_field; - init_data.no_input_qs = 1; + init_data.no_input_qs = card->qdio.no_in_queues; init_data.no_output_qs = card->qdio.no_out_queues; - init_data.input_handler = card->discipline.input_handler; - init_data.output_handler = card->discipline.output_handler; + init_data.input_handler = card->discipline->input_handler; + init_data.output_handler = card->discipline->output_handler; + init_data.queue_start_poll_array = queue_start_poll; init_data.int_parm = (unsigned long) card; init_data.input_sbal_addr_array = (void **) in_sbal_ptrs; init_data.output_sbal_addr_array = (void **) out_sbal_ptrs; + init_data.output_sbal_state_array = card->qdio.out_bufstates; + init_data.scan_threshold = + (card->info.type == QETH_CARD_TYPE_IQD) ? 
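/*
 * The qeth_qdio_establish() hunk above replaces its early returns (each of
 * which had to repeat the kfree() calls for everything allocated so far)
 * with a single unwind ladder: every allocation gets a label, and a
 * failure jumps to the label that frees exactly what already exists.
 * A minimal, self-contained sketch of the pattern -- hypothetical
 * function, not from the patch:
 */
#include <linux/slab.h>

static int two_stage_alloc(void **pa, void **pb)
{
	void *a, *b;
	int rc = 0;

	a = kzalloc(64, GFP_KERNEL);
	if (!a) {
		rc = -ENOMEM;
		goto out;		/* nothing allocated yet */
	}
	b = kzalloc(64, GFP_KERNEL);
	if (!b) {
		rc = -ENOMEM;
		goto out_free_a;	/* undo exactly one step */
	}
	*pa = a;
	*pb = b;
	return 0;
out_free_a:
	kfree(a);
out:
	return rc;
}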
1 : 32; if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ALLOCATED, QETH_QDIO_ESTABLISHED) == QETH_QDIO_ALLOCATED) { - rc = qdio_initialize(&init_data); - if (rc) + rc = qdio_allocate(&init_data); + if (rc) { atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED); + goto out; + } + rc = qdio_establish(&init_data); + if (rc) { + atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED); + qdio_free(CARD_DDEV(card)); + } + } + + switch (card->options.cq) { + case QETH_CQ_ENABLED: + dev_info(&card->gdev->dev, "Completion Queue support enabled"); + break; + case QETH_CQ_DISABLED: + dev_info(&card->gdev->dev, "Completion Queue support disabled"); + break; + default: + break; } +out: kfree(out_sbal_ptrs); +out_free_queue_start_poll: + kfree(queue_start_poll); +out_free_in_sbals: kfree(in_sbal_ptrs); +out_free_qib_param: kfree(qib_param_field); +out_free_nothing: return rc; } @@ -3834,42 +4863,61 @@ static void qeth_core_free_card(struct qeth_card *card) kfree(card); } +void qeth_trace_features(struct qeth_card *card) +{ + QETH_CARD_TEXT(card, 2, "features"); + QETH_CARD_TEXT_(card, 2, "%x", card->options.ipa4.supported_funcs); + QETH_CARD_TEXT_(card, 2, "%x", card->options.ipa4.enabled_funcs); + QETH_CARD_TEXT_(card, 2, "%x", card->options.ipa6.supported_funcs); + QETH_CARD_TEXT_(card, 2, "%x", card->options.ipa6.enabled_funcs); + QETH_CARD_TEXT_(card, 2, "%x", card->options.adp.supported_funcs); + QETH_CARD_TEXT_(card, 2, "%x", card->options.adp.enabled_funcs); + QETH_CARD_TEXT_(card, 2, "%x", card->info.diagass_support); +} +EXPORT_SYMBOL_GPL(qeth_trace_features); + static struct ccw_device_id qeth_ids[] = { - {CCW_DEVICE(0x1731, 0x01), .driver_info = QETH_CARD_TYPE_OSAE}, - {CCW_DEVICE(0x1731, 0x05), .driver_info = QETH_CARD_TYPE_IQD}, - {CCW_DEVICE(0x1731, 0x06), .driver_info = QETH_CARD_TYPE_OSN}, + {CCW_DEVICE_DEVTYPE(0x1731, 0x01, 0x1732, 0x01), + .driver_info = QETH_CARD_TYPE_OSD}, + {CCW_DEVICE_DEVTYPE(0x1731, 0x05, 0x1732, 0x05), + .driver_info = QETH_CARD_TYPE_IQD}, + {CCW_DEVICE_DEVTYPE(0x1731, 0x06, 0x1732, 0x06), + .driver_info = QETH_CARD_TYPE_OSN}, + {CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x03), + .driver_info = QETH_CARD_TYPE_OSM}, + {CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x02), + .driver_info = QETH_CARD_TYPE_OSX}, {}, }; MODULE_DEVICE_TABLE(ccw, qeth_ids); static struct ccw_driver qeth_ccw_driver = { - .name = "qeth", + .driver = { + .owner = THIS_MODULE, + .name = "qeth", + }, .ids = qeth_ids, .probe = ccwgroup_probe_ccwdev, .remove = ccwgroup_remove_ccwdev, }; -static int qeth_core_driver_group(const char *buf, struct device *root_dev, - unsigned long driver_id) -{ - return ccwgroup_create_from_string(root_dev, driver_id, - &qeth_ccw_driver, 3, buf); -} - int qeth_core_hardsetup_card(struct qeth_card *card) { - int retries = 0; + int retries = 3; int rc; QETH_DBF_TEXT(SETUP, 2, "hrdsetup"); atomic_set(&card->force_alloc_skb, 0); + qeth_update_from_chp_desc(card); retry: - if (retries) + if (retries < 3) QETH_DBF_MESSAGE(2, "%s Retrying to do IDX activates.\n", dev_name(&card->gdev->dev)); + rc = qeth_qdio_clear_card(card, card->info.type != QETH_CARD_TYPE_IQD); ccw_device_set_offline(CARD_DDEV(card)); ccw_device_set_offline(CARD_WDEV(card)); ccw_device_set_offline(CARD_RDEV(card)); + qdio_free(CARD_DDEV(card)); rc = ccw_device_set_online(CARD_RDEV(card)); if (rc) goto retriable; @@ -3879,18 +4927,18 @@ retry: rc = ccw_device_set_online(CARD_DDEV(card)); if (rc) goto retriable; - rc = qeth_qdio_clear_card(card, card->info.type != QETH_CARD_TYPE_IQD); retriable: if (rc == 
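/*
 * Two things to note in the hunk above. First, qdio_initialize() is split
 * into qdio_allocate() + qdio_establish(), so a failed establish can
 * qdio_free() the allocation and roll qdio.state back; the whole sequence
 * is guarded by a compare-and-swap, making the ALLOCATED -> ESTABLISHED
 * transition one-shot even if two paths race into this function. Second,
 * qeth_core_hardsetup_card() now counts retries down from 3, so the
 * "--retries < 0" test below replaces the old "++retries > 3". The gate,
 * condensed for illustration (same calls as in the hunk):
 */
if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ALLOCATED,
		   QETH_QDIO_ESTABLISHED) == QETH_QDIO_ALLOCATED) {
	rc = qdio_allocate(&init_data);
	if (rc) {
		atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
	} else {
		rc = qdio_establish(&init_data);
		if (rc) {
			/* undo the state transition and the allocation */
			atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
			qdio_free(CARD_DDEV(card));
		}
	}
}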
-ERESTARTSYS) { QETH_DBF_TEXT(SETUP, 2, "break1"); return rc; } else if (rc) { QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); - if (++retries > 3) + if (--retries < 0) goto out; else goto retry; } + qeth_determine_capabilities(card); qeth_init_tokens(card); qeth_init_func_level(card); rc = qeth_idx_activate_channel(&card->read, qeth_idx_read_cb); @@ -3915,11 +4963,22 @@ retriable: else goto retry; } + card->read_or_write_problem = 0; rc = qeth_mpc_initialize(card); if (rc) { QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc); goto out; } + + card->options.ipa4.supported_funcs = 0; + card->options.adp.supported_funcs = 0; + card->options.sbp.supported_funcs = 0; + card->info.diagass_support = 0; + qeth_query_ipassists(card, QETH_PROT_IPV4); + if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) + qeth_query_setadapterparms(card); + if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) + qeth_query_setdiagass(card); return 0; out: dev_warn(&card->gdev->dev, "The qeth device driver failed to recover " @@ -3930,29 +4989,36 @@ out: } EXPORT_SYMBOL_GPL(qeth_core_hardsetup_card); -static inline int qeth_create_skb_frag(struct qdio_buffer_element *element, +static inline int qeth_create_skb_frag(struct qeth_qdio_buffer *qethbuffer, + struct qdio_buffer_element *element, struct sk_buff **pskb, int offset, int *pfrag, int data_len) { struct page *page = virt_to_page(element->addr); if (*pskb == NULL) { - /* the upper protocol layers assume that there is data in the - * skb itself. Copy a small amount (64 bytes) to make them - * happy. */ - *pskb = dev_alloc_skb(64 + ETH_HLEN); - if (!(*pskb)) - return -ENOMEM; + if (qethbuffer->rx_skb) { + /* only if qeth_card.options.cq == QETH_CQ_ENABLED */ + *pskb = qethbuffer->rx_skb; + qethbuffer->rx_skb = NULL; + } else { + *pskb = dev_alloc_skb(QETH_RX_PULL_LEN + ETH_HLEN); + if (!(*pskb)) + return -ENOMEM; + } + skb_reserve(*pskb, ETH_HLEN); - if (data_len <= 64) { + if (data_len <= QETH_RX_PULL_LEN) { memcpy(skb_put(*pskb, data_len), element->addr + offset, data_len); } else { get_page(page); - memcpy(skb_put(*pskb, 64), element->addr + offset, 64); - skb_fill_page_desc(*pskb, *pfrag, page, offset + 64, - data_len - 64); - (*pskb)->data_len += data_len - 64; - (*pskb)->len += data_len - 64; - (*pskb)->truesize += data_len - 64; + memcpy(skb_put(*pskb, QETH_RX_PULL_LEN), + element->addr + offset, QETH_RX_PULL_LEN); + skb_fill_page_desc(*pskb, *pfrag, page, + offset + QETH_RX_PULL_LEN, + data_len - QETH_RX_PULL_LEN); + (*pskb)->data_len += data_len - QETH_RX_PULL_LEN; + (*pskb)->len += data_len - QETH_RX_PULL_LEN; + (*pskb)->truesize += data_len - QETH_RX_PULL_LEN; (*pfrag)++; } } else { @@ -3963,15 +5029,18 @@ static inline int qeth_create_skb_frag(struct qdio_buffer_element *element, (*pskb)->truesize += data_len; (*pfrag)++; } + + return 0; } struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card, - struct qdio_buffer *buffer, + struct qeth_qdio_buffer *qethbuffer, struct qdio_buffer_element **__element, int *__offset, struct qeth_hdr **hdr) { struct qdio_buffer_element *element = *__element; + struct qdio_buffer *buffer = qethbuffer->buffer; int offset = *__offset; struct sk_buff *skb = NULL; int skb_len = 0; @@ -3999,11 +5068,7 @@ struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card, break; case QETH_HEADER_TYPE_LAYER3: skb_len = (*hdr)->hdr.l3.length; - if ((card->info.link_type == QETH_LINK_TYPE_LANE_TR) || - (card->info.link_type == QETH_LINK_TYPE_HSTR)) - headroom = TR_HLEN; - else - headroom = ETH_HLEN; + headroom = ETH_HLEN; break; case 
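/*
 * qeth_create_skb_frag() above implements a copy-then-attach receive path:
 * only the first QETH_RX_PULL_LEN bytes are copied into the linear skb
 * head (upper layers expect some linear data there), and the remainder of
 * the buffer element is attached as a page fragment that holds its own
 * page reference. Condensed sketch of that shape, with local names (data,
 * page, frag, len) standing in for the real ones:
 */
memcpy(skb_put(skb, QETH_RX_PULL_LEN), data, QETH_RX_PULL_LEN);
get_page(page);			/* the fragment keeps its own reference */
skb_fill_page_desc(skb, frag, page,
		   offset + QETH_RX_PULL_LEN, len - QETH_RX_PULL_LEN);
skb->data_len += len - QETH_RX_PULL_LEN;
skb->len      += len - QETH_RX_PULL_LEN;
skb->truesize += len - QETH_RX_PULL_LEN;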
QETH_HEADER_TYPE_OSN: skb_len = (*hdr)->hdr.osn.pdu_length; @@ -4016,9 +5081,10 @@ struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card, if (!skb_len) return NULL; - if ((skb_len >= card->options.rx_sg_cb) && - (!(card->info.type == QETH_CARD_TYPE_OSN)) && - (!atomic_read(&card->force_alloc_skb))) { + if (((skb_len >= card->options.rx_sg_cb) && + (!(card->info.type == QETH_CARD_TYPE_OSN)) && + (!atomic_read(&card->force_alloc_skb))) || + (card->options.cq == QETH_CQ_ENABLED)) { use_rx_sg = 1; } else { skb = dev_alloc_skb(skb_len + headroom); @@ -4033,8 +5099,8 @@ struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card, data_len = min(skb_len, (int)(element->length - offset)); if (data_len) { if (use_rx_sg) { - if (qeth_create_skb_frag(element, &skb, offset, - &frag, data_len)) + if (qeth_create_skb_frag(qethbuffer, element, + &skb, offset, &frag, data_len)) goto no_mem; } else { memcpy(skb_put(skb, data_len), data_ptr, @@ -4044,13 +5110,8 @@ struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card, skb_len -= data_len; if (skb_len) { if (qeth_is_last_sbale(element)) { - QETH_DBF_TEXT(TRACE, 4, "unexeob"); - QETH_DBF_TEXT_(TRACE, 4, "%s", - CARD_BUS_ID(card)); - QETH_DBF_TEXT(QERR, 2, "unexeob"); - QETH_DBF_TEXT_(QERR, 2, "%s", - CARD_BUS_ID(card)); - QETH_DBF_HEX(MISC, 4, buffer, sizeof(*buffer)); + QETH_CARD_TEXT(card, 4, "unexeob"); + QETH_CARD_HEX(card, 2, buffer, sizeof(void *)); dev_kfree_skb_any(skb); card->stats.rx_errors++; return NULL; @@ -4071,8 +5132,7 @@ struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card, return skb; no_mem: if (net_ratelimit()) { - QETH_DBF_TEXT(TRACE, 2, "noskbmem"); - QETH_DBF_TEXT_(TRACE, 2, "%s", CARD_BUS_ID(card)); + QETH_CARD_TEXT(card, 2, "noskbmem"); } card->stats.rx_dropped++; return NULL; @@ -4088,17 +5148,17 @@ static void qeth_unregister_dbf_views(void) } } -void qeth_dbf_longtext(enum qeth_dbf_names dbf_nix, int level, char *fmt, ...) +void qeth_dbf_longtext(debug_info_t *id, int level, char *fmt, ...) 
{ char dbf_txt_buf[32]; va_list args; - if (level > (qeth_dbf[dbf_nix].id)->level) + if (!debug_level_enabled(id, level)) return; va_start(args, fmt); vsnprintf(dbf_txt_buf, sizeof(dbf_txt_buf), fmt, args); va_end(args); - debug_text_event(qeth_dbf[dbf_nix].id, level, dbf_txt_buf); + debug_text_event(id, level, dbf_txt_buf); } EXPORT_SYMBOL_GPL(qeth_dbf_longtext); @@ -4136,68 +5196,110 @@ int qeth_core_load_discipline(struct qeth_card *card, enum qeth_discipline_id discipline) { int rc = 0; + mutex_lock(&qeth_mod_mutex); switch (discipline) { case QETH_DISCIPLINE_LAYER3: - card->discipline.ccwgdriver = try_then_request_module( - symbol_get(qeth_l3_ccwgroup_driver), - "qeth_l3"); + card->discipline = try_then_request_module( + symbol_get(qeth_l3_discipline), "qeth_l3"); break; case QETH_DISCIPLINE_LAYER2: - card->discipline.ccwgdriver = try_then_request_module( - symbol_get(qeth_l2_ccwgroup_driver), - "qeth_l2"); + card->discipline = try_then_request_module( + symbol_get(qeth_l2_discipline), "qeth_l2"); break; } - if (!card->discipline.ccwgdriver) { + if (!card->discipline) { dev_err(&card->gdev->dev, "There is no kernel module to " "support discipline %d\n", discipline); rc = -EINVAL; } + mutex_unlock(&qeth_mod_mutex); return rc; } void qeth_core_free_discipline(struct qeth_card *card) { if (card->options.layer2) - symbol_put(qeth_l2_ccwgroup_driver); + symbol_put(qeth_l2_discipline); else - symbol_put(qeth_l3_ccwgroup_driver); - card->discipline.ccwgdriver = NULL; + symbol_put(qeth_l3_discipline); + card->discipline = NULL; } -static void qeth_determine_capabilities(struct qeth_card *card) +static const struct device_type qeth_generic_devtype = { + .name = "qeth_generic", + .groups = qeth_generic_attr_groups, +}; +static const struct device_type qeth_osn_devtype = { + .name = "qeth_osn", + .groups = qeth_osn_attr_groups, +}; + +#define DBF_NAME_LEN 20 + +struct qeth_dbf_entry { + char dbf_name[DBF_NAME_LEN]; + debug_info_t *dbf_info; + struct list_head dbf_list; +}; + +static LIST_HEAD(qeth_dbf_list); +static DEFINE_MUTEX(qeth_dbf_list_mutex); + +static debug_info_t *qeth_get_dbf_entry(char *name) { - int rc; - int length; - char *prcd; + struct qeth_dbf_entry *entry; + debug_info_t *rc = NULL; - QETH_DBF_TEXT(SETUP, 2, "detcapab"); - rc = ccw_device_set_online(CARD_DDEV(card)); - if (rc) { - QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc); - goto out; + mutex_lock(&qeth_dbf_list_mutex); + list_for_each_entry(entry, &qeth_dbf_list, dbf_list) { + if (strcmp(entry->dbf_name, name) == 0) { + rc = entry->dbf_info; + break; + } } + mutex_unlock(&qeth_dbf_list_mutex); + return rc; +} +static int qeth_add_dbf_entry(struct qeth_card *card, char *name) +{ + struct qeth_dbf_entry *new_entry; - rc = qeth_read_conf_data(card, (void **) &prcd, &length); - if (rc) { - QETH_DBF_MESSAGE(2, "%s qeth_read_conf_data returned %i\n", - dev_name(&card->gdev->dev), rc); - QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc); - goto out_offline; + card->debug = debug_register(name, 2, 1, 8); + if (!card->debug) { + QETH_DBF_TEXT_(SETUP, 2, "%s", "qcdbf"); + goto err; } - qeth_configure_unitaddr(card, prcd); - qeth_configure_blkt_default(card, prcd); - kfree(prcd); + if (debug_register_view(card->debug, &debug_hex_ascii_view)) + goto err_dbg; + new_entry = kzalloc(sizeof(struct qeth_dbf_entry), GFP_KERNEL); + if (!new_entry) + goto err_dbg; + strncpy(new_entry->dbf_name, name, DBF_NAME_LEN); + new_entry->dbf_info = card->debug; + mutex_lock(&qeth_dbf_list_mutex); + list_add(&new_entry->dbf_list, &qeth_dbf_list); + 
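/*
 * The qeth_dbf_list registry above exists because a card can be unbound
 * and probed again under the same bus-ID: instead of registering a second
 * s390 debug area under the same name, probe looks the old one up and
 * reuses it, trace history included. Areas are only torn down at module
 * exit via qeth_clear_dbf_list(). The probe-time usage shape (wired up in
 * qeth_core_probe_device() further below):
 */
card->debug = qeth_get_dbf_entry(dbf_name);	/* cached from a prior probe? */
if (!card->debug)
	rc = qeth_add_dbf_entry(card, dbf_name);	/* register and remember */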
mutex_unlock(&qeth_dbf_list_mutex); - rc = qdio_get_ssqd_desc(CARD_DDEV(card), &card->ssqd); - if (rc) - QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc); + return 0; -out_offline: - ccw_device_set_offline(CARD_DDEV(card)); -out: - return; +err_dbg: + debug_unregister(card->debug); +err: + return -ENOMEM; +} + +static void qeth_clear_dbf_list(void) +{ + struct qeth_dbf_entry *entry, *tmp; + + mutex_lock(&qeth_dbf_list_mutex); + list_for_each_entry_safe(entry, tmp, &qeth_dbf_list, dbf_list) { + list_del(&entry->dbf_list); + debug_unregister(entry->dbf_info); + kfree(entry); + } + mutex_unlock(&qeth_dbf_list_mutex); } static int qeth_core_probe_device(struct ccwgroup_device *gdev) @@ -4206,6 +5308,7 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev) struct device *dev; int rc; unsigned long flags; + char dbf_name[DBF_NAME_LEN]; QETH_DBF_TEXT(SETUP, 2, "probedev"); @@ -4221,6 +5324,16 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev) rc = -ENOMEM; goto err_dev; } + + snprintf(dbf_name, sizeof(dbf_name), "qeth_card_%s", + dev_name(&gdev->dev)); + card->debug = qeth_get_dbf_entry(dbf_name); + if (!card->debug) { + rc = qeth_add_dbf_entry(card, dbf_name); + if (rc) + goto err_card; + } + card->read.ccwdev = gdev->cdev[0]; card->write.ccwdev = gdev->cdev[1]; card->data.ccwdev = gdev->cdev[2]; @@ -4241,25 +5354,24 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev) goto err_card; } - if (card->info.type == QETH_CARD_TYPE_OSN) { - rc = qeth_core_create_osn_attributes(dev); - if (rc) - goto err_card; + if (card->info.type == QETH_CARD_TYPE_OSN) + gdev->dev.type = &qeth_osn_devtype; + else + gdev->dev.type = &qeth_generic_devtype; + + switch (card->info.type) { + case QETH_CARD_TYPE_OSN: + case QETH_CARD_TYPE_OSM: rc = qeth_core_load_discipline(card, QETH_DISCIPLINE_LAYER2); - if (rc) { - qeth_core_remove_osn_attributes(dev); - goto err_card; - } - rc = card->discipline.ccwgdriver->probe(card->gdev); - if (rc) { - qeth_core_free_discipline(card); - qeth_core_remove_osn_attributes(dev); - goto err_card; - } - } else { - rc = qeth_core_create_device_attributes(dev); if (rc) goto err_card; + rc = card->discipline->setup(card->gdev); + if (rc) + goto err_disc; + case QETH_CARD_TYPE_OSD: + case QETH_CARD_TYPE_OSX: + default: + break; } write_lock_irqsave(&qeth_core_card_list.rwlock, flags); @@ -4269,6 +5381,8 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev) qeth_determine_capabilities(card); return 0; +err_disc: + qeth_core_free_discipline(card); err_card: qeth_core_free_card(card); err_dev: @@ -4282,16 +5396,12 @@ static void qeth_core_remove_device(struct ccwgroup_device *gdev) struct qeth_card *card = dev_get_drvdata(&gdev->dev); QETH_DBF_TEXT(SETUP, 2, "removedv"); - if (card->discipline.ccwgdriver) { - card->discipline.ccwgdriver->remove(gdev); + + if (card->discipline) { + card->discipline->remove(gdev); qeth_core_free_discipline(card); } - if (card->info.type == QETH_CARD_TYPE_OSN) { - qeth_core_remove_osn_attributes(&gdev->dev); - } else { - qeth_core_remove_device_attributes(&gdev->dev); - } write_lock_irqsave(&qeth_core_card_list.rwlock, flags); list_del(&card->list); write_unlock_irqrestore(&qeth_core_card_list.rwlock, flags); @@ -4307,7 +5417,7 @@ static int qeth_core_set_online(struct ccwgroup_device *gdev) int rc = 0; int def_discipline; - if (!card->discipline.ccwgdriver) { + if (!card->discipline) { if (card->info.type == QETH_CARD_TYPE_IQD) def_discipline = QETH_DISCIPLINE_LAYER3; else @@ -4315,11 +5425,11 @@ static int 
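/*
 * Sysfs note for the probe hunk above: instead of calling
 * sysfs_create_group()/sysfs_remove_group() by hand and unwinding them on
 * every error path, the device is given a struct device_type whose
 * .groups array the driver core instantiates and tears down automatically
 * around device_add()/device_del(). Minimal shape of the pattern, with
 * hypothetical names:
 */
static const struct attribute_group *example_attr_groups[] = {
	&example_attr_group,		/* assumed defined elsewhere */
	NULL,
};
static const struct device_type example_devtype = {
	.name	= "example",
	.groups	= example_attr_groups,	/* created/removed by the core */
};
/* in probe, before the device is added: dev->type = &example_devtype; */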
qeth_core_set_online(struct ccwgroup_device *gdev) rc = qeth_core_load_discipline(card, def_discipline); if (rc) goto err; - rc = card->discipline.ccwgdriver->probe(card->gdev); + rc = card->discipline->setup(card->gdev); if (rc) goto err; } - rc = card->discipline.ccwgdriver->set_online(gdev); + rc = card->discipline->set_online(gdev); err: return rc; } @@ -4327,66 +5437,61 @@ err: static int qeth_core_set_offline(struct ccwgroup_device *gdev) { struct qeth_card *card = dev_get_drvdata(&gdev->dev); - return card->discipline.ccwgdriver->set_offline(gdev); + return card->discipline->set_offline(gdev); } static void qeth_core_shutdown(struct ccwgroup_device *gdev) { struct qeth_card *card = dev_get_drvdata(&gdev->dev); - if (card->discipline.ccwgdriver && - card->discipline.ccwgdriver->shutdown) - card->discipline.ccwgdriver->shutdown(gdev); + if (card->discipline && card->discipline->shutdown) + card->discipline->shutdown(gdev); } static int qeth_core_prepare(struct ccwgroup_device *gdev) { struct qeth_card *card = dev_get_drvdata(&gdev->dev); - if (card->discipline.ccwgdriver && - card->discipline.ccwgdriver->prepare) - return card->discipline.ccwgdriver->prepare(gdev); + if (card->discipline && card->discipline->prepare) + return card->discipline->prepare(gdev); return 0; } static void qeth_core_complete(struct ccwgroup_device *gdev) { struct qeth_card *card = dev_get_drvdata(&gdev->dev); - if (card->discipline.ccwgdriver && - card->discipline.ccwgdriver->complete) - card->discipline.ccwgdriver->complete(gdev); + if (card->discipline && card->discipline->complete) + card->discipline->complete(gdev); } static int qeth_core_freeze(struct ccwgroup_device *gdev) { struct qeth_card *card = dev_get_drvdata(&gdev->dev); - if (card->discipline.ccwgdriver && - card->discipline.ccwgdriver->freeze) - return card->discipline.ccwgdriver->freeze(gdev); + if (card->discipline && card->discipline->freeze) + return card->discipline->freeze(gdev); return 0; } static int qeth_core_thaw(struct ccwgroup_device *gdev) { struct qeth_card *card = dev_get_drvdata(&gdev->dev); - if (card->discipline.ccwgdriver && - card->discipline.ccwgdriver->thaw) - return card->discipline.ccwgdriver->thaw(gdev); + if (card->discipline && card->discipline->thaw) + return card->discipline->thaw(gdev); return 0; } static int qeth_core_restore(struct ccwgroup_device *gdev) { struct qeth_card *card = dev_get_drvdata(&gdev->dev); - if (card->discipline.ccwgdriver && - card->discipline.ccwgdriver->restore) - return card->discipline.ccwgdriver->restore(gdev); + if (card->discipline && card->discipline->restore) + return card->discipline->restore(gdev); return 0; } static struct ccwgroup_driver qeth_core_ccwgroup_driver = { - .owner = THIS_MODULE, - .name = "qeth", - .driver_id = 0xD8C5E3C8, - .probe = qeth_core_probe_device, + .driver = { + .owner = THIS_MODULE, + .name = "qeth", + }, + .setup = qeth_core_probe_device, .remove = qeth_core_remove_device, .set_online = qeth_core_set_online, .set_offline = qeth_core_set_offline, @@ -4398,21 +5503,30 @@ static struct ccwgroup_driver qeth_core_ccwgroup_driver = { .restore = qeth_core_restore, }; -static ssize_t -qeth_core_driver_group_store(struct device_driver *ddrv, const char *buf, - size_t count) +static ssize_t qeth_core_driver_group_store(struct device_driver *ddrv, + const char *buf, size_t count) { int err; - err = qeth_core_driver_group(buf, qeth_core_root_dev, - qeth_core_ccwgroup_driver.driver_id); - if (err) - return err; - else - return count; -} + err = 
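/*
 * The discipline rework above turns card->discipline into a table of
 * function pointers exported by qeth_l2/qeth_l3. symbol_get(), seen in
 * qeth_core_load_discipline() earlier, also pins the module that owns the
 * table, so the pointers cannot vanish while in use; symbol_put() releases
 * that reference on teardown. The power-management callbacks are optional
 * members of the table, hence the repeated NULL-checked delegation -- its
 * shape, reduced to one sketch (hypothetical name):
 */
static int optional_op_sketch(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);

	if (card->discipline && card->discipline->freeze)
		return card->discipline->freeze(gdev);
	return 0;	/* op not provided: treat as success */
}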
ccwgroup_create_dev(qeth_core_root_dev, + &qeth_core_ccwgroup_driver, 3, buf); + + return err ? err : count; +} static DRIVER_ATTR(group, 0200, NULL, qeth_core_driver_group_store); +static struct attribute *qeth_drv_attrs[] = { + &driver_attr_group.attr, + NULL, +}; +static struct attribute_group qeth_drv_attr_group = { + .attrs = qeth_drv_attrs, +}; +static const struct attribute_group *qeth_drv_attr_groups[] = { + &qeth_drv_attr_group, + NULL, +}; + static struct { const char str[ETH_GSTRING_LEN]; } qeth_ethtool_stats_keys[] = { @@ -4439,8 +5553,8 @@ static struct { /* 20 */{"queue 1 buffer usage"}, {"queue 2 buffer usage"}, {"queue 3 buffer usage"}, - {"rx handler time"}, - {"rx handler count"}, + {"rx poll time"}, + {"rx poll count"}, {"rx do_QDIO time"}, {"rx do_QDIO count"}, {"tx handler time"}, @@ -4451,6 +5565,8 @@ static struct { {"tx do_QDIO count"}, {"tx csum"}, {"tx lin"}, + {"cq handler count"}, + {"cq handler time"} }; int qeth_core_get_sset_count(struct net_device *dev, int stringset) @@ -4509,6 +5625,8 @@ void qeth_core_get_ethtool_stats(struct net_device *dev, data[32] = card->perf_stats.outbound_do_qdio_cnt; data[33] = card->perf_stats.tx_csum; data[34] = card->perf_stats.tx_lin; + data[35] = card->perf_stats.cq_cnt; + data[36] = card->perf_stats.cq_time; } EXPORT_SYMBOL_GPL(qeth_core_get_ethtool_stats); @@ -4530,25 +5648,77 @@ void qeth_core_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct qeth_card *card = dev->ml_priv; - if (card->options.layer2) - strcpy(info->driver, "qeth_l2"); - else - strcpy(info->driver, "qeth_l3"); - strcpy(info->version, "1.0"); - strcpy(info->fw_version, card->info.mcl_level); - sprintf(info->bus_info, "%s/%s/%s", - CARD_RDEV_ID(card), - CARD_WDEV_ID(card), - CARD_DDEV_ID(card)); + strlcpy(info->driver, card->options.layer2 ? "qeth_l2" : "qeth_l3", + sizeof(info->driver)); + strlcpy(info->version, "1.0", sizeof(info->version)); + strlcpy(info->fw_version, card->info.mcl_level, + sizeof(info->fw_version)); + snprintf(info->bus_info, sizeof(info->bus_info), "%s/%s/%s", + CARD_RDEV_ID(card), CARD_WDEV_ID(card), CARD_DDEV_ID(card)); } EXPORT_SYMBOL_GPL(qeth_core_get_drvinfo); +/* Helper function to fill 'advertizing' and 'supported' which are the same. */ +/* Autoneg and full-duplex are supported and advertized uncondionally. */ +/* Always advertize and support all speeds up to specified, and only one */ +/* specified port type. */ +static void qeth_set_ecmd_adv_sup(struct ethtool_cmd *ecmd, + int maxspeed, int porttype) +{ + int port_sup, port_adv, spd_sup, spd_adv; + + switch (porttype) { + case PORT_TP: + port_sup = SUPPORTED_TP; + port_adv = ADVERTISED_TP; + break; + case PORT_FIBRE: + port_sup = SUPPORTED_FIBRE; + port_adv = ADVERTISED_FIBRE; + break; + default: + port_sup = SUPPORTED_TP; + port_adv = ADVERTISED_TP; + WARN_ON_ONCE(1); + } + + /* "Fallthrough" case'es ordered from high to low result in setting */ + /* flags cumulatively, starting from the specified speed and down to */ + /* the lowest possible. 
*/ + spd_sup = 0; + spd_adv = 0; + switch (maxspeed) { + case SPEED_10000: + spd_sup |= SUPPORTED_10000baseT_Full; + spd_adv |= ADVERTISED_10000baseT_Full; + case SPEED_1000: + spd_sup |= SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full; + spd_adv |= ADVERTISED_1000baseT_Half | + ADVERTISED_1000baseT_Full; + case SPEED_100: + spd_sup |= SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full; + spd_adv |= ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full; + case SPEED_10: + spd_sup |= SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full; + spd_adv |= ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full; + break; + default: + spd_sup = SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full; + spd_adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full; + WARN_ON_ONCE(1); + } + ecmd->advertising = ADVERTISED_Autoneg | port_adv | spd_adv; + ecmd->supported = SUPPORTED_Autoneg | port_sup | spd_sup; +} + int qeth_core_ethtool_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) { struct qeth_card *card = netdev->ml_priv; enum qeth_link_types link_type; + struct carrier_info carrier_info; + u32 speed; if ((card->info.type == QETH_CARD_TYPE_IQD) || (card->info.guestlan)) link_type = QETH_LINK_TYPE_10GBIT_ETH; @@ -4556,79 +5726,93 @@ int qeth_core_ethtool_get_settings(struct net_device *netdev, link_type = card->info.link_type; ecmd->transceiver = XCVR_INTERNAL; - ecmd->supported = SUPPORTED_Autoneg; - ecmd->advertising = ADVERTISED_Autoneg; ecmd->duplex = DUPLEX_FULL; ecmd->autoneg = AUTONEG_ENABLE; switch (link_type) { case QETH_LINK_TYPE_FAST_ETH: case QETH_LINK_TYPE_LANE_ETH100: - ecmd->supported |= SUPPORTED_10baseT_Half | - SUPPORTED_10baseT_Full | - SUPPORTED_100baseT_Half | - SUPPORTED_100baseT_Full | - SUPPORTED_TP; - ecmd->advertising |= ADVERTISED_10baseT_Half | - ADVERTISED_10baseT_Full | - ADVERTISED_100baseT_Half | - ADVERTISED_100baseT_Full | - ADVERTISED_TP; - ecmd->speed = SPEED_100; + qeth_set_ecmd_adv_sup(ecmd, SPEED_100, PORT_TP); + speed = SPEED_100; ecmd->port = PORT_TP; break; case QETH_LINK_TYPE_GBIT_ETH: case QETH_LINK_TYPE_LANE_ETH1000: - ecmd->supported |= SUPPORTED_10baseT_Half | - SUPPORTED_10baseT_Full | - SUPPORTED_100baseT_Half | - SUPPORTED_100baseT_Full | - SUPPORTED_1000baseT_Half | - SUPPORTED_1000baseT_Full | - SUPPORTED_FIBRE; - ecmd->advertising |= ADVERTISED_10baseT_Half | - ADVERTISED_10baseT_Full | - ADVERTISED_100baseT_Half | - ADVERTISED_100baseT_Full | - ADVERTISED_1000baseT_Half | - ADVERTISED_1000baseT_Full | - ADVERTISED_FIBRE; - ecmd->speed = SPEED_1000; + qeth_set_ecmd_adv_sup(ecmd, SPEED_1000, PORT_FIBRE); + speed = SPEED_1000; ecmd->port = PORT_FIBRE; break; case QETH_LINK_TYPE_10GBIT_ETH: - ecmd->supported |= SUPPORTED_10baseT_Half | - SUPPORTED_10baseT_Full | - SUPPORTED_100baseT_Half | - SUPPORTED_100baseT_Full | - SUPPORTED_1000baseT_Half | - SUPPORTED_1000baseT_Full | - SUPPORTED_10000baseT_Full | - SUPPORTED_FIBRE; - ecmd->advertising |= ADVERTISED_10baseT_Half | - ADVERTISED_10baseT_Full | - ADVERTISED_100baseT_Half | - ADVERTISED_100baseT_Full | - ADVERTISED_1000baseT_Half | - ADVERTISED_1000baseT_Full | - ADVERTISED_10000baseT_Full | - ADVERTISED_FIBRE; - ecmd->speed = SPEED_10000; + qeth_set_ecmd_adv_sup(ecmd, SPEED_10000, PORT_FIBRE); + speed = SPEED_10000; ecmd->port = PORT_FIBRE; break; default: - ecmd->supported |= SUPPORTED_10baseT_Half | - SUPPORTED_10baseT_Full | - SUPPORTED_TP; - ecmd->advertising |= ADVERTISED_10baseT_Half | - ADVERTISED_10baseT_Full | - ADVERTISED_TP; - ecmd->speed = SPEED_10; + qeth_set_ecmd_adv_sup(ecmd, 
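/*
 * The switch in qeth_set_ecmd_adv_sup() above relies on deliberate
 * fallthrough: each case adds its own speed bits and falls into the next
 * lower one, so SPEED_10000 accumulates the 1000/100/10 bits as well.
 * Because the SPEED_* constants are plain Mb/s values, an equivalent
 * fallthrough-free sketch would be (advertising bits built the same way):
 */
spd_sup = SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full;
if (maxspeed >= SPEED_100)
	spd_sup |= SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full;
if (maxspeed >= SPEED_1000)
	spd_sup |= SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full;
if (maxspeed >= SPEED_10000)
	spd_sup |= SUPPORTED_10000baseT_Full;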
SPEED_10, PORT_TP); + speed = SPEED_10; ecmd->port = PORT_TP; } + ethtool_cmd_speed_set(ecmd, speed); + + /* Check if we can obtain more accurate information. */ + /* If QUERY_CARD_INFO command is not supported or fails, */ + /* just return the heuristics that was filled above. */ + if (qeth_query_card_info(card, &carrier_info) != 0) + return 0; + + netdev_dbg(netdev, + "card info: card_type=0x%02x, port_mode=0x%04x, port_speed=0x%08x\n", + carrier_info.card_type, + carrier_info.port_mode, + carrier_info.port_speed); + + /* Update attributes for which we've obtained more authoritative */ + /* information, leave the rest the way they where filled above. */ + switch (carrier_info.card_type) { + case CARD_INFO_TYPE_1G_COPPER_A: + case CARD_INFO_TYPE_1G_COPPER_B: + qeth_set_ecmd_adv_sup(ecmd, SPEED_1000, PORT_TP); + ecmd->port = PORT_TP; + break; + case CARD_INFO_TYPE_1G_FIBRE_A: + case CARD_INFO_TYPE_1G_FIBRE_B: + qeth_set_ecmd_adv_sup(ecmd, SPEED_1000, PORT_FIBRE); + ecmd->port = PORT_FIBRE; + break; + case CARD_INFO_TYPE_10G_FIBRE_A: + case CARD_INFO_TYPE_10G_FIBRE_B: + qeth_set_ecmd_adv_sup(ecmd, SPEED_10000, PORT_FIBRE); + ecmd->port = PORT_FIBRE; + break; + } + + switch (carrier_info.port_mode) { + case CARD_INFO_PORTM_FULLDUPLEX: + ecmd->duplex = DUPLEX_FULL; + break; + case CARD_INFO_PORTM_HALFDUPLEX: + ecmd->duplex = DUPLEX_HALF; + break; + } + + switch (carrier_info.port_speed) { + case CARD_INFO_PORTS_10M: + speed = SPEED_10; + break; + case CARD_INFO_PORTS_100M: + speed = SPEED_100; + break; + case CARD_INFO_PORTS_1G: + speed = SPEED_1000; + break; + case CARD_INFO_PORTS_10G: + speed = SPEED_10000; + break; + } + ethtool_cmd_speed_set(ecmd, speed); return 0; } @@ -4640,45 +5824,50 @@ static int __init qeth_core_init(void) pr_info("loading core functions\n"); INIT_LIST_HEAD(&qeth_core_card_list.list); + INIT_LIST_HEAD(&qeth_dbf_list); rwlock_init(&qeth_core_card_list.rwlock); + mutex_init(&qeth_mod_mutex); + + qeth_wq = create_singlethread_workqueue("qeth_wq"); rc = qeth_register_dbf_views(); if (rc) goto out_err; - rc = ccw_driver_register(&qeth_ccw_driver); - if (rc) - goto ccw_err; - rc = ccwgroup_driver_register(&qeth_core_ccwgroup_driver); - if (rc) - goto ccwgroup_err; - rc = driver_create_file(&qeth_core_ccwgroup_driver.driver, - &driver_attr_group); - if (rc) - goto driver_err; qeth_core_root_dev = root_device_register("qeth"); - rc = IS_ERR(qeth_core_root_dev) ? 
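/*
 * Two notes on the get_settings hunk above. The flow is: fill ecmd from
 * the link_type heuristic first, then let QUERY_CARD_INFO override port
 * type, duplex and speed where the adapter reports them. And the code
 * stops writing ecmd->speed directly: link speed outgrew the original
 * 16-bit field, so it is split across .speed and .speed_hi, which
 * ethtool_cmd_speed_set() hides. Approximately what the helper does
 * (hypothetical name; see linux/ethtool.h for the real one):
 */
static inline void speed_set_sketch(struct ethtool_cmd *ep, __u32 speed)
{
	ep->speed    = (__u16)speed;
	ep->speed_hi = (__u16)(speed >> 16);
}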
PTR_ERR(qeth_core_root_dev) : 0; + rc = PTR_ERR_OR_ZERO(qeth_core_root_dev); if (rc) goto register_err; - qeth_core_header_cache = kmem_cache_create("qeth_hdr", sizeof(struct qeth_hdr) + ETH_HLEN, 64, 0, NULL); if (!qeth_core_header_cache) { rc = -ENOMEM; goto slab_err; } + qeth_qdio_outbuf_cache = kmem_cache_create("qeth_buf", + sizeof(struct qeth_qdio_out_buffer), 0, 0, NULL); + if (!qeth_qdio_outbuf_cache) { + rc = -ENOMEM; + goto cqslab_err; + } + rc = ccw_driver_register(&qeth_ccw_driver); + if (rc) + goto ccw_err; + qeth_core_ccwgroup_driver.driver.groups = qeth_drv_attr_groups; + rc = ccwgroup_driver_register(&qeth_core_ccwgroup_driver); + if (rc) + goto ccwgroup_err; return 0; -slab_err: - root_device_unregister(qeth_core_root_dev); -register_err: - driver_remove_file(&qeth_core_ccwgroup_driver.driver, - &driver_attr_group); -driver_err: - ccwgroup_driver_unregister(&qeth_core_ccwgroup_driver); + ccwgroup_err: ccw_driver_unregister(&qeth_ccw_driver); ccw_err: - QETH_DBF_MESSAGE(2, "Initialization failed with code %d\n", rc); + kmem_cache_destroy(qeth_qdio_outbuf_cache); +cqslab_err: + kmem_cache_destroy(qeth_core_header_cache); +slab_err: + root_device_unregister(qeth_core_root_dev); +register_err: qeth_unregister_dbf_views(); out_err: pr_err("Initializing the qeth device driver failed\n"); @@ -4687,12 +5876,13 @@ out_err: static void __exit qeth_core_exit(void) { - root_device_unregister(qeth_core_root_dev); - driver_remove_file(&qeth_core_ccwgroup_driver.driver, - &driver_attr_group); + qeth_clear_dbf_list(); + destroy_workqueue(qeth_wq); ccwgroup_driver_unregister(&qeth_core_ccwgroup_driver); ccw_driver_unregister(&qeth_ccw_driver); + kmem_cache_destroy(qeth_qdio_outbuf_cache); kmem_cache_destroy(qeth_core_header_cache); + root_device_unregister(qeth_core_root_dev); qeth_unregister_dbf_views(); pr_info("core functions removed\n"); } diff --git a/drivers/s390/net/qeth_core_mpc.c b/drivers/s390/net/qeth_core_mpc.c index ec24901c802..7b55768a959 100644 --- a/drivers/s390/net/qeth_core_mpc.c +++ b/drivers/s390/net/qeth_core_mpc.c @@ -1,6 +1,4 @@ /* - * drivers/s390/net/qeth_core_mpc.c - * * Copyright IBM Corp. 2007 * Author(s): Frank Pavlic <fpavlic@de.ibm.com>, * Thomas Spatzier <tspat@de.ibm.com>, @@ -206,7 +204,9 @@ static struct ipa_rc_msg qeth_ipa_rc_msg[] = { {IPA_RC_INVALID_SETRTG_INDICATOR, "Invalid SETRTG indicator"}, {IPA_RC_MC_ADDR_ALREADY_DEFINED, "Multicast address already defined"}, {IPA_RC_LAN_OFFLINE, "STRTLAN_LAN_DISABLED - LAN offline"}, + {IPA_RC_VEPA_TO_VEB_TRANSITION, "Adj. switch disabled port mode RR"}, {IPA_RC_INVALID_IP_VERSION2, "Invalid IP version"}, + {IPA_RC_ENOMEM, "Memory problem"}, {IPA_RC_FFFF, "Unknown Error"} }; @@ -249,10 +249,12 @@ static struct ipa_cmd_names qeth_ipa_cmd_names[] = { {IPA_CMD_DELIP, "delip"}, {IPA_CMD_SETADAPTERPARMS, "setadapterparms"}, {IPA_CMD_SET_DIAG_ASS, "set_diag_ass"}, + {IPA_CMD_SETBRIDGEPORT, "set_bridge_port"}, {IPA_CMD_CREATE_ADDR, "create_addr"}, {IPA_CMD_DESTROY_ADDR, "destroy_addr"}, {IPA_CMD_REGISTER_LOCAL_ADDR, "register_local_addr"}, {IPA_CMD_UNREGISTER_LOCAL_ADDR, "unregister_local_addr"}, + {IPA_CMD_ADDRESS_CHANGE_NOTIF, "address_change_notification"}, {IPA_CMD_UNKNOWN, "unknown"}, }; diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h index 104a3351e02..cf6a90ed42a 100644 --- a/drivers/s390/net/qeth_core_mpc.h +++ b/drivers/s390/net/qeth_core_mpc.h @@ -1,6 +1,4 @@ /* - * drivers/s390/net/qeth_core_mpc.h - * * Copyright IBM Corp. 
2007 * Author(s): Frank Pavlic <fpavlic@de.ibm.com>, * Thomas Spatzier <tspat@de.ibm.com>, @@ -48,9 +46,11 @@ extern unsigned char IPA_PDU_HEADER[]; enum qeth_card_types { QETH_CARD_TYPE_UNKNOWN = 0, - QETH_CARD_TYPE_OSAE = 10, - QETH_CARD_TYPE_IQD = 1234, - QETH_CARD_TYPE_OSN = 11, + QETH_CARD_TYPE_OSD = 1, + QETH_CARD_TYPE_IQD = 5, + QETH_CARD_TYPE_OSN = 6, + QETH_CARD_TYPE_OSM = 3, + QETH_CARD_TYPE_OSX = 2, }; #define QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE 0x18 @@ -68,24 +68,6 @@ enum qeth_link_types { QETH_LINK_TYPE_ATM_NATIVE = 0x90, }; -enum qeth_tr_macaddr_modes { - QETH_TR_MACADDR_NONCANONICAL = 0, - QETH_TR_MACADDR_CANONICAL = 1, -}; - -enum qeth_tr_broadcast_modes { - QETH_TR_BROADCAST_ALLRINGS = 0, - QETH_TR_BROADCAST_LOCAL = 1, -}; - -/* these values match CHECKSUM_* in include/linux/skbuff.h */ -enum qeth_checksum_types { - SW_CHECKSUMMING = 0, /* TODO: set to bit flag used in IPA Command */ - HW_CHECKSUMMING = 1, - NO_CHECKSUMMING = 2, -}; -#define QETH_CHECKSUM_DEFAULT SW_CHECKSUMMING - /* * Routing stuff */ @@ -122,10 +104,12 @@ enum qeth_ipa_cmds { IPA_CMD_DELIP = 0xb7, IPA_CMD_SETADAPTERPARMS = 0xb8, IPA_CMD_SET_DIAG_ASS = 0xb9, + IPA_CMD_SETBRIDGEPORT = 0xbe, IPA_CMD_CREATE_ADDR = 0xc3, IPA_CMD_DESTROY_ADDR = 0xc4, IPA_CMD_REGISTER_LOCAL_ADDR = 0xd1, IPA_CMD_UNREGISTER_LOCAL_ADDR = 0xd2, + IPA_CMD_ADDRESS_CHANGE_NOTIF = 0xd3, IPA_CMD_UNKNOWN = 0x00 }; @@ -195,7 +179,9 @@ enum qeth_ipa_return_codes { IPA_RC_INVALID_SETRTG_INDICATOR = 0xe012, IPA_RC_MC_ADDR_ALREADY_DEFINED = 0xe013, IPA_RC_LAN_OFFLINE = 0xe080, + IPA_RC_VEPA_TO_VEB_TRANSITION = 0xe090, IPA_RC_INVALID_IP_VERSION2 = 0xf001, + IPA_RC_ENOMEM = 0xfffe, IPA_RC_FFFF = 0xffff }; /* for DELIP */ @@ -255,6 +241,7 @@ enum qeth_ipa_setadp_cmd { IPA_SETADP_SET_PROMISC_MODE = 0x00000800L, IPA_SETADP_SET_DIAG_ASSIST = 0x00002000L, IPA_SETADP_SET_ACCESS_CONTROL = 0x00010000L, + IPA_SETADP_QUERY_OAT = 0x00080000L, }; enum qeth_ipa_mac_ops { CHANGE_ADDR_READ_MAC = 0, @@ -285,8 +272,28 @@ enum qeth_ipa_set_access_mode_rc { SET_ACCESS_CTRL_RC_ALREADY_ISOLATED = 0x0010, SET_ACCESS_CTRL_RC_NONE_SHARED_ADAPTER = 0x0014, SET_ACCESS_CTRL_RC_ACTIVE_CHECKSUM_OFF = 0x0018, + SET_ACCESS_CTRL_RC_REFLREL_UNSUPPORTED = 0x0022, + SET_ACCESS_CTRL_RC_REFLREL_FAILED = 0x0024, + SET_ACCESS_CTRL_RC_REFLREL_DEACT_FAILED = 0x0028, +}; +enum qeth_card_info_card_type { + CARD_INFO_TYPE_1G_COPPER_A = 0x61, + CARD_INFO_TYPE_1G_FIBRE_A = 0x71, + CARD_INFO_TYPE_10G_FIBRE_A = 0x91, + CARD_INFO_TYPE_1G_COPPER_B = 0xb1, + CARD_INFO_TYPE_1G_FIBRE_B = 0xa1, + CARD_INFO_TYPE_10G_FIBRE_B = 0xc1, +}; +enum qeth_card_info_port_mode { + CARD_INFO_PORTM_HALFDUPLEX = 0x0002, + CARD_INFO_PORTM_FULLDUPLEX = 0x0003, +}; +enum qeth_card_info_port_speed { + CARD_INFO_PORTS_10M = 0x00000005, + CARD_INFO_PORTS_100M = 0x00000006, + CARD_INFO_PORTS_1G = 0x00000007, + CARD_INFO_PORTS_10G = 0x00000008, }; - /* (SET)DELIP(M) IPA stuff ***************************************************/ struct qeth_ipacmd_setdelip4 { @@ -331,7 +338,7 @@ struct qeth_arp_query_data { __u16 request_bits; __u16 reply_bits; __u32 no_entries; - char data; + char data; /* only for replies */ } __attribute__((packed)); /* used as parameter for arp_query reply */ @@ -402,8 +409,28 @@ struct qeth_snmp_ureq { /* SET_ACCESS_CONTROL: same format for request and reply */ struct qeth_set_access_ctrl { __u32 subcmd_code; + __u8 reserved[8]; } __attribute__((packed)); +struct qeth_query_oat { + __u32 subcmd_code; + __u8 reserved[12]; +} __packed; + +struct qeth_qoat_priv { + __u32 buffer_len; + __u32 
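/*
 * The new IPA structures -- qeth_query_oat above, qeth_query_card_info
 * just below, and the SBP structs further down -- describe on-the-wire
 * layouts, which is why most are declared __packed. A cheap guard against
 * padding creeping in would be compile-time size assertions; the checks
 * below are hypothetical, not part of this patch:
 */
static void __maybe_unused qeth_mpc_size_checks(void)
{
	BUILD_BUG_ON(sizeof(struct qeth_query_oat) != 16);	  /* 4 + 12 */
	BUILD_BUG_ON(sizeof(struct qeth_query_card_info) != 12); /* 1+1+2+4+4 */
}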
response_len; + char *buffer; +}; + +struct qeth_query_card_info { + __u8 card_type; + __u8 reserved1; + __u16 port_mode; + __u32 port_speed; + __u32 reserved2; +}; + struct qeth_ipacmd_setadpparms_hdr { __u32 supp_hw_cmds; __u32 reserved1; @@ -423,6 +450,8 @@ struct qeth_ipacmd_setadpparms { struct qeth_change_addr change_addr; struct qeth_snmp_cmd snmp; struct qeth_set_access_ctrl set_access_ctrl; + struct qeth_query_oat query_oat; + struct qeth_query_card_info card_info; __u32 mode; } data; } __attribute__ ((packed)); @@ -454,6 +483,12 @@ enum qeth_diags_trace_cmds { QETH_DIAGS_CMD_TRACE_QUERY = 0x0010, }; +enum qeth_diags_trap_action { + QETH_DIAGS_TRAP_ARM = 0x01, + QETH_DIAGS_TRAP_DISARM = 0x02, + QETH_DIAGS_TRAP_CAPTURE = 0x04, +}; + struct qeth_ipacmd_diagass { __u32 host_tod2; __u32:32; @@ -463,9 +498,128 @@ struct qeth_ipacmd_diagass { __u8 type; __u8 action; __u16 options; - __u32:32; + __u32 ext; + __u8 cdata[64]; } __attribute__ ((packed)); +/* SETBRIDGEPORT IPA Command: *********************************************/ +enum qeth_ipa_sbp_cmd { + IPA_SBP_QUERY_COMMANDS_SUPPORTED = 0x00000000L, + IPA_SBP_RESET_BRIDGE_PORT_ROLE = 0x00000001L, + IPA_SBP_SET_PRIMARY_BRIDGE_PORT = 0x00000002L, + IPA_SBP_SET_SECONDARY_BRIDGE_PORT = 0x00000004L, + IPA_SBP_QUERY_BRIDGE_PORTS = 0x00000008L, + IPA_SBP_BRIDGE_PORT_STATE_CHANGE = 0x00000010L, +}; + +struct net_if_token { + __u16 devnum; + __u8 cssid; + __u8 iid; + __u8 ssid; + __u8 chpid; + __u16 chid; +} __packed; + +struct mac_addr_lnid { + __u8 mac[6]; + __u16 lnid; +} __packed; + +struct qeth_ipacmd_sbp_hdr { + __u32 supported_sbp_cmds; + __u32 enabled_sbp_cmds; + __u16 cmdlength; + __u16 reserved1; + __u32 command_code; + __u16 return_code; + __u8 used_total; + __u8 seq_no; + __u32 reserved2; +} __packed; + +struct qeth_sbp_query_cmds_supp { + __u32 supported_cmds; + __u32 reserved; +} __packed; + +struct qeth_sbp_reset_role { +} __packed; + +struct qeth_sbp_set_primary { + struct net_if_token token; +} __packed; + +struct qeth_sbp_set_secondary { +} __packed; + +struct qeth_sbp_port_entry { + __u8 role; + __u8 state; + __u8 reserved1; + __u8 reserved2; + struct net_if_token token; +} __packed; + +struct qeth_sbp_query_ports { + __u8 primary_bp_supported; + __u8 secondary_bp_supported; + __u8 num_entries; + __u8 entry_length; + struct qeth_sbp_port_entry entry[]; +} __packed; + +struct qeth_sbp_state_change { + __u8 primary_bp_supported; + __u8 secondary_bp_supported; + __u8 num_entries; + __u8 entry_length; + struct qeth_sbp_port_entry entry[]; +} __packed; + +struct qeth_ipacmd_setbridgeport { + struct qeth_ipacmd_sbp_hdr hdr; + union { + struct qeth_sbp_query_cmds_supp query_cmds_supp; + struct qeth_sbp_reset_role reset_role; + struct qeth_sbp_set_primary set_primary; + struct qeth_sbp_set_secondary set_secondary; + struct qeth_sbp_query_ports query_ports; + struct qeth_sbp_state_change state_change; + } data; +} __packed; + +/* ADDRESS_CHANGE_NOTIFICATION adapter-initiated "command" *******************/ +/* Bitmask for entry->change_code. Both bits may be raised. 
*/ +enum qeth_ipa_addr_change_code { + IPA_ADDR_CHANGE_CODE_VLANID = 0x01, + IPA_ADDR_CHANGE_CODE_MACADDR = 0x02, + IPA_ADDR_CHANGE_CODE_REMOVAL = 0x80, /* else addition */ +}; +enum qeth_ipa_addr_change_retcode { + IPA_ADDR_CHANGE_RETCODE_OK = 0x0000, + IPA_ADDR_CHANGE_RETCODE_LOSTEVENTS = 0x0010, +}; +enum qeth_ipa_addr_change_lostmask { + IPA_ADDR_CHANGE_MASK_OVERFLOW = 0x01, + IPA_ADDR_CHANGE_MASK_STATECHANGE = 0x02, +}; + +struct qeth_ipacmd_addr_change_entry { + struct net_if_token token; + struct mac_addr_lnid addr_lnid; + __u8 change_code; + __u8 reserved1; + __u16 reserved2; +} __packed; + +struct qeth_ipacmd_addr_change { + __u8 lost_event_mask; + __u8 reserved; + __u16 num_entries; + struct qeth_ipacmd_addr_change_entry entry[]; +} __packed; + /* Header for each IPA command */ struct qeth_ipacmd_hdr { __u8 command; @@ -495,6 +649,8 @@ struct qeth_ipa_cmd { struct qeth_ipacmd_setadpparms setadapterparms; struct qeth_set_routing setrtg; struct qeth_ipacmd_diagass diagass; + struct qeth_ipacmd_setbridgeport sbp; + struct qeth_ipacmd_addr_change addrchange; } data; } __attribute__ ((packed)); @@ -614,6 +770,9 @@ extern unsigned char IDX_ACTIVATE_WRITE[]; #define QETH_IS_IDX_ACT_POS_REPLY(buffer) (((buffer)[0x08] & 3) == 2) #define QETH_IDX_REPLY_LEVEL(buffer) (buffer + 0x12) #define QETH_IDX_ACT_CAUSE_CODE(buffer) (buffer)[0x09] +#define QETH_IDX_ACT_ERR_EXCL 0x19 +#define QETH_IDX_ACT_ERR_AUTH 0x1E +#define QETH_IDX_ACT_ERR_AUTH_USER 0x20 #define PDU_ENCAPSULATION(buffer) \ (buffer + *(buffer + (*(buffer + 0x0b)) + \ diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c index 25dfd5abd19..8a25a2be989 100644 --- a/drivers/s390/net/qeth_core_sys.c +++ b/drivers/s390/net/qeth_core_sys.c @@ -1,6 +1,4 @@ /* - * drivers/s390/net/qeth_core_sys.c - * * Copyright IBM Corp. 2007 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>, * Frank Pavlic <fpavlic@de.ibm.com>, @@ -122,23 +120,32 @@ static ssize_t qeth_dev_portno_store(struct device *dev, struct qeth_card *card = dev_get_drvdata(dev); char *tmp; unsigned int portno, limit; + int rc = 0; if (!card) return -EINVAL; + mutex_lock(&card->conf_mutex); if ((card->state != CARD_STATE_DOWN) && - (card->state != CARD_STATE_RECOVER)) - return -EPERM; + (card->state != CARD_STATE_RECOVER)) { + rc = -EPERM; + goto out; + } portno = simple_strtoul(buf, &tmp, 16); - if (portno > QETH_MAX_PORTNO) - return -EINVAL; + if (portno > QETH_MAX_PORTNO) { + rc = -EINVAL; + goto out; + } limit = (card->ssqd.pcnt ? card->ssqd.pcnt - 1 : card->ssqd.pcnt); - if (portno > limit) - return -EINVAL; - + if (portno > limit) { + rc = -EINVAL; + goto out; + } card->info.portno = portno; - return count; +out: + mutex_unlock(&card->conf_mutex); + return rc ? 
rc : count; } static DEVICE_ATTR(portno, 0644, qeth_dev_portno_show, qeth_dev_portno_store); @@ -165,18 +172,23 @@ static ssize_t qeth_dev_portname_store(struct device *dev, { struct qeth_card *card = dev_get_drvdata(dev); char *tmp; - int i; + int i, rc = 0; if (!card) return -EINVAL; + mutex_lock(&card->conf_mutex); if ((card->state != CARD_STATE_DOWN) && - (card->state != CARD_STATE_RECOVER)) - return -EPERM; + (card->state != CARD_STATE_RECOVER)) { + rc = -EPERM; + goto out; + } tmp = strsep((char **) &buf, "\n"); - if ((strlen(tmp) > 8) || (strlen(tmp) == 0)) - return -EINVAL; + if ((strlen(tmp) > 8) || (strlen(tmp) == 0)) { + rc = -EINVAL; + goto out; + } card->info.portname[0] = strlen(tmp); /* for beauty reasons */ @@ -184,8 +196,9 @@ static ssize_t qeth_dev_portname_store(struct device *dev, card->info.portname[i] = ' '; strcpy(card->info.portname + 1, tmp); ASCEBC(card->info.portname + 1, 8); - - return count; +out: + mutex_unlock(&card->conf_mutex); + return rc ? rc : count; } static DEVICE_ATTR(portname, 0644, qeth_dev_portname_show, @@ -204,6 +217,10 @@ static ssize_t qeth_dev_prioqing_show(struct device *dev, return sprintf(buf, "%s\n", "by precedence"); case QETH_PRIO_Q_ING_TOS: return sprintf(buf, "%s\n", "by type of service"); + case QETH_PRIO_Q_ING_SKB: + return sprintf(buf, "%s\n", "by skb-priority"); + case QETH_PRIO_Q_ING_VLAN: + return sprintf(buf, "%s\n", "by VLAN headers"); default: return sprintf(buf, "always queue %i\n", card->qdio.default_out_queue); @@ -215,28 +232,45 @@ static ssize_t qeth_dev_prioqing_store(struct device *dev, { struct qeth_card *card = dev_get_drvdata(dev); char *tmp; + int rc = 0; if (!card) return -EINVAL; + mutex_lock(&card->conf_mutex); if ((card->state != CARD_STATE_DOWN) && - (card->state != CARD_STATE_RECOVER)) - return -EPERM; + (card->state != CARD_STATE_RECOVER)) { + rc = -EPERM; + goto out; + } /* check if 1920 devices are supported , * if though we have to permit priority queueing */ if (card->qdio.no_out_queues == 1) { card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT; - return -EPERM; + rc = -EPERM; + goto out; } tmp = strsep((char **) &buf, "\n"); - if (!strcmp(tmp, "prio_queueing_prec")) + if (!strcmp(tmp, "prio_queueing_prec")) { card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_PREC; - else if (!strcmp(tmp, "prio_queueing_tos")) + card->qdio.default_out_queue = QETH_DEFAULT_QUEUE; + } else if (!strcmp(tmp, "prio_queueing_skb")) { + card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_SKB; + card->qdio.default_out_queue = QETH_DEFAULT_QUEUE; + } else if (!strcmp(tmp, "prio_queueing_tos")) { card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_TOS; - else if (!strcmp(tmp, "no_prio_queueing:0")) { + card->qdio.default_out_queue = QETH_DEFAULT_QUEUE; + } else if (!strcmp(tmp, "prio_queueing_vlan")) { + if (!card->options.layer2) { + rc = -ENOTSUPP; + goto out; + } + card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_VLAN; + card->qdio.default_out_queue = QETH_DEFAULT_QUEUE; + } else if (!strcmp(tmp, "no_prio_queueing:0")) { card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING; card->qdio.default_out_queue = 0; } else if (!strcmp(tmp, "no_prio_queueing:1")) { @@ -251,10 +285,11 @@ static ssize_t qeth_dev_prioqing_store(struct device *dev, } else if (!strcmp(tmp, "no_prio_queueing")) { card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING; card->qdio.default_out_queue = QETH_DEFAULT_QUEUE; - } else { - return -EINVAL; - } - return count; + } else + rc = -EINVAL; +out: + mutex_unlock(&card->conf_mutex); + return rc ? 
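/*
 * Every store handler in qeth_core_sys.c is reworked above to one shape:
 * take card->conf_mutex, bail out with -EPERM unless the card is DOWN or
 * RECOVER, funnel all exits through a single unlock, and return
 * "rc ? rc : count" so success reports the number of bytes consumed.
 * Skeleton of the pattern, for a hypothetical attribute:
 */
static ssize_t example_store(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct qeth_card *card = dev_get_drvdata(dev);
	int rc = 0;

	if (!card)
		return -EINVAL;
	mutex_lock(&card->conf_mutex);
	if ((card->state != CARD_STATE_DOWN) &&
	    (card->state != CARD_STATE_RECOVER)) {
		rc = -EPERM;
		goto out;
	}
	/* parse buf and update card->options here */
out:
	mutex_unlock(&card->conf_mutex);
	return rc ? rc : count;
}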
rc : count; } static DEVICE_ATTR(priority_queueing, 0644, qeth_dev_prioqing_show, @@ -277,14 +312,17 @@ static ssize_t qeth_dev_bufcnt_store(struct device *dev, struct qeth_card *card = dev_get_drvdata(dev); char *tmp; int cnt, old_cnt; - int rc; + int rc = 0; if (!card) return -EINVAL; + mutex_lock(&card->conf_mutex); if ((card->state != CARD_STATE_DOWN) && - (card->state != CARD_STATE_RECOVER)) - return -EPERM; + (card->state != CARD_STATE_RECOVER)) { + rc = -EPERM; + goto out; + } old_cnt = card->qdio.in_buf_pool.buf_count; cnt = simple_strtoul(buf, &tmp, 10); @@ -293,7 +331,9 @@ static ssize_t qeth_dev_bufcnt_store(struct device *dev, if (old_cnt != cnt) { rc = qeth_realloc_buffer_pool(card, cnt); } - return count; +out: + mutex_unlock(&card->conf_mutex); + return rc ? rc : count; } static DEVICE_ATTR(buffer_count, 0644, qeth_dev_bufcnt_show, @@ -337,25 +377,27 @@ static ssize_t qeth_dev_performance_stats_store(struct device *dev, { struct qeth_card *card = dev_get_drvdata(dev); char *tmp; - int i; + int i, rc = 0; if (!card) return -EINVAL; + mutex_lock(&card->conf_mutex); i = simple_strtoul(buf, &tmp, 16); if ((i == 0) || (i == 1)) { if (i == card->options.performance_stats) - return count; + goto out; card->options.performance_stats = i; if (i == 0) memset(&card->perf_stats, 0, sizeof(struct qeth_perf_stats)); card->perf_stats.initial_rx_packets = card->stats.rx_packets; card->perf_stats.initial_tx_packets = card->stats.tx_packets; - } else { - return -EINVAL; - } - return count; + } else + rc = -EINVAL; +out: + mutex_unlock(&card->conf_mutex); + return rc ? rc : count; } static DEVICE_ATTR(performance_stats, 0644, qeth_dev_performance_stats_show, @@ -377,15 +419,17 @@ static ssize_t qeth_dev_layer2_store(struct device *dev, { struct qeth_card *card = dev_get_drvdata(dev); char *tmp; - int i, rc; + int i, rc = 0; enum qeth_discipline_id newdis; if (!card) return -EINVAL; - if (((card->state != CARD_STATE_DOWN) && - (card->state != CARD_STATE_RECOVER))) - return -EPERM; + mutex_lock(&card->discipline_mutex); + if (card->state != CARD_STATE_DOWN) { + rc = -EPERM; + goto out; + } i = simple_strtoul(buf, &tmp, 16); switch (i) { @@ -396,26 +440,28 @@ static ssize_t qeth_dev_layer2_store(struct device *dev, newdis = QETH_DISCIPLINE_LAYER2; break; default: - return -EINVAL; + rc = -EINVAL; + goto out; } - if (card->options.layer2 == newdis) { - return count; - } else { - if (card->discipline.ccwgdriver) { - card->discipline.ccwgdriver->remove(card->gdev); + if (card->options.layer2 == newdis) + goto out; + else { + card->info.mac_bits = 0; + if (card->discipline) { + card->discipline->remove(card->gdev); qeth_core_free_discipline(card); } } rc = qeth_core_load_discipline(card, newdis); if (rc) - return rc; + goto out; - rc = card->discipline.ccwgdriver->probe(card->gdev); - if (rc) - return rc; - return count; + rc = card->discipline->setup(card->gdev); +out: + mutex_unlock(&card->discipline_mutex); + return rc ? 
rc : count; } static DEVICE_ATTR(layer2, 0644, qeth_dev_layer2_show, @@ -454,13 +500,13 @@ static ssize_t qeth_dev_isolation_store(struct device *dev, char *tmp, *curtoken; curtoken = (char *) buf; - if (!card) { - rc = -EINVAL; - goto out; - } + if (!card) + return -EINVAL; + mutex_lock(&card->conf_mutex); /* check for unknown, too, in case we do not yet know who we are */ - if (card->info.type != QETH_CARD_TYPE_OSAE && + if (card->info.type != QETH_CARD_TYPE_OSD && + card->info.type != QETH_CARD_TYPE_OSX && card->info.type != QETH_CARD_TYPE_UNKNOWN) { rc = -EOPNOTSUPP; dev_err(&card->gdev->dev, "Adapter does not " @@ -483,20 +529,82 @@ static ssize_t qeth_dev_isolation_store(struct device *dev, rc = count; /* defer IP assist if device is offline (until discipline->set_online)*/ + card->options.prev_isolation = card->options.isolation; card->options.isolation = isolation; if (card->state == CARD_STATE_SOFTSETUP || card->state == CARD_STATE_UP) { - int ipa_rc = qeth_set_access_ctrl_online(card); + int ipa_rc = qeth_set_access_ctrl_online(card, 1); if (ipa_rc != 0) rc = ipa_rc; } out: + mutex_unlock(&card->conf_mutex); return rc; } static DEVICE_ATTR(isolation, 0644, qeth_dev_isolation_show, qeth_dev_isolation_store); +static ssize_t qeth_hw_trap_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct qeth_card *card = dev_get_drvdata(dev); + + if (!card) + return -EINVAL; + if (card->info.hwtrap) + return snprintf(buf, 5, "arm\n"); + else + return snprintf(buf, 8, "disarm\n"); +} + +static ssize_t qeth_hw_trap_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct qeth_card *card = dev_get_drvdata(dev); + int rc = 0; + char *tmp, *curtoken; + int state = 0; + curtoken = (char *)buf; + + if (!card) + return -EINVAL; + + mutex_lock(&card->conf_mutex); + if (card->state == CARD_STATE_SOFTSETUP || card->state == CARD_STATE_UP) + state = 1; + tmp = strsep(&curtoken, "\n"); + + if (!strcmp(tmp, "arm") && !card->info.hwtrap) { + if (state) { + if (qeth_is_diagass_supported(card, + QETH_DIAGS_CMD_TRAP)) { + rc = qeth_hw_trap(card, QETH_DIAGS_TRAP_ARM); + if (!rc) + card->info.hwtrap = 1; + } else + rc = -EINVAL; + } else + card->info.hwtrap = 1; + } else if (!strcmp(tmp, "disarm") && card->info.hwtrap) { + if (state) { + rc = qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM); + if (!rc) + card->info.hwtrap = 0; + } else + card->info.hwtrap = 0; + } else if (!strcmp(tmp, "trap") && state && card->info.hwtrap) + rc = qeth_hw_trap(card, QETH_DIAGS_TRAP_CAPTURE); + else + rc = -EINVAL; + + mutex_unlock(&card->conf_mutex); + return rc ? rc : count; +} + +static DEVICE_ATTR(hw_trap, 0644, qeth_hw_trap_show, + qeth_hw_trap_store); + static ssize_t qeth_dev_blkt_show(char *buf, struct qeth_card *card, int value) { @@ -510,22 +618,25 @@ static ssize_t qeth_dev_blkt_store(struct qeth_card *card, const char *buf, size_t count, int *value, int max_value) { char *tmp; - int i; + int i, rc = 0; if (!card) return -EINVAL; + mutex_lock(&card->conf_mutex); if ((card->state != CARD_STATE_DOWN) && - (card->state != CARD_STATE_RECOVER)) - return -EPERM; - + (card->state != CARD_STATE_RECOVER)) { + rc = -EPERM; + goto out; + } i = simple_strtoul(buf, &tmp, 10); - if (i <= max_value) { + if (i <= max_value) *value = i; - } else { - return -EINVAL; - } - return count; + else + rc = -EINVAL; +out: + mutex_unlock(&card->conf_mutex); + return rc ? 
rc : count; } static ssize_t qeth_dev_blkt_total_show(struct device *dev, @@ -597,7 +708,6 @@ static struct attribute *qeth_blkt_device_attrs[] = { &dev_attr_inter_jumbo.attr, NULL, }; - static struct attribute_group qeth_device_blkt_group = { .name = "blkt", .attrs = qeth_blkt_device_attrs, @@ -617,13 +727,19 @@ static struct attribute *qeth_device_attrs[] = { &dev_attr_performance_stats.attr, &dev_attr_layer2.attr, &dev_attr_isolation.attr, + &dev_attr_hw_trap.attr, NULL, }; - static struct attribute_group qeth_device_attr_group = { .attrs = qeth_device_attrs, }; +const struct attribute_group *qeth_generic_attr_groups[] = { + &qeth_device_attr_group, + &qeth_device_blkt_group, + NULL, +}; + static struct attribute *qeth_osn_device_attrs[] = { &dev_attr_state.attr, &dev_attr_chpid.attr, @@ -633,37 +749,10 @@ static struct attribute *qeth_osn_device_attrs[] = { &dev_attr_recover.attr, NULL, }; - static struct attribute_group qeth_osn_device_attr_group = { .attrs = qeth_osn_device_attrs, }; - -int qeth_core_create_device_attributes(struct device *dev) -{ - int ret; - ret = sysfs_create_group(&dev->kobj, &qeth_device_attr_group); - if (ret) - return ret; - ret = sysfs_create_group(&dev->kobj, &qeth_device_blkt_group); - if (ret) - sysfs_remove_group(&dev->kobj, &qeth_device_attr_group); - - return 0; -} - -void qeth_core_remove_device_attributes(struct device *dev) -{ - sysfs_remove_group(&dev->kobj, &qeth_device_attr_group); - sysfs_remove_group(&dev->kobj, &qeth_device_blkt_group); -} - -int qeth_core_create_osn_attributes(struct device *dev) -{ - return sysfs_create_group(&dev->kobj, &qeth_osn_device_attr_group); -} - -void qeth_core_remove_osn_attributes(struct device *dev) -{ - sysfs_remove_group(&dev->kobj, &qeth_osn_device_attr_group); - return; -} +const struct attribute_group *qeth_osn_attr_groups[] = { + &qeth_osn_device_attr_group, + NULL, +}; diff --git a/drivers/s390/net/qeth_l2.h b/drivers/s390/net/qeth_l2.h new file mode 100644 index 00000000000..0767556404b --- /dev/null +++ b/drivers/s390/net/qeth_l2.h @@ -0,0 +1,15 @@ +/* + * Copyright IBM Corp. 2013 + * Author(s): Eugene Crosser <eugene.crosser@ru.ibm.com> + */ + +#ifndef __QETH_L2_H__ +#define __QETH_L2_H__ + +#include "qeth_core.h" + +int qeth_l2_create_device_attributes(struct device *); +void qeth_l2_remove_device_attributes(struct device *); +void qeth_l2_setup_bridgeport_attrs(struct qeth_card *card); + +#endif /* __QETH_L2_H__ */ diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index 6f1e3036baf..5ef5b4f4575 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -1,6 +1,4 @@ /* - * drivers/s390/net/qeth_l2_main.c - * * Copyright IBM Corp. 
2007, 2009 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>, * Frank Pavlic <fpavlic@de.ibm.com>, @@ -16,12 +14,14 @@ #include <linux/string.h> #include <linux/errno.h> #include <linux/kernel.h> +#include <linux/slab.h> #include <linux/etherdevice.h> #include <linux/mii.h> #include <linux/ip.h> #include <linux/list.h> #include "qeth_core.h" +#include "qeth_l2.h" static int qeth_l2_set_offline(struct ccwgroup_device *); static int qeth_l2_stop(struct net_device *); @@ -33,6 +33,11 @@ static int qeth_l2_send_setdelmac(struct qeth_card *, __u8 *, unsigned long)); static void qeth_l2_set_multicast_list(struct net_device *); static int qeth_l2_recover(void *); +static void qeth_bridgeport_query_support(struct qeth_card *card); +static void qeth_bridge_state_change(struct qeth_card *card, + struct qeth_ipa_cmd *cmd); +static void qeth_bridge_host_event(struct qeth_card *card, + struct qeth_ipa_cmd *cmd); static int qeth_l2_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { @@ -55,7 +60,9 @@ static int qeth_l2_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) rc = qeth_snmp_command(card, rq->ifr_ifru.ifru_data); break; case SIOC_QETH_GET_CARD_TYPE: - if ((card->info.type == QETH_CARD_TYPE_OSAE) && + if ((card->info.type == QETH_CARD_TYPE_OSD || + card->info.type == QETH_CARD_TYPE_OSM || + card->info.type == QETH_CARD_TYPE_OSX) && !card->info.guestlan) return 1; return 0; @@ -72,11 +79,14 @@ static int qeth_l2_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) mii_data->val_out = qeth_mdio_read(dev, mii_data->phy_id, mii_data->reg_num); break; + case SIOC_QETH_QUERY_OAT: + rc = qeth_query_oat_command(card, rq->ifr_ifru.ifru_data); + break; default: rc = -EOPNOTSUPP; } if (rc) - QETH_DBF_TEXT_(TRACE, 2, "ioce%d", rc); + QETH_CARD_TEXT_(card, 2, "ioce%d", rc); return rc; } @@ -127,7 +137,7 @@ static int qeth_l2_send_setgroupmac_cb(struct qeth_card *card, struct qeth_ipa_cmd *cmd; __u8 *mac; - QETH_DBF_TEXT(TRACE, 2, "L2Sgmacb"); + QETH_CARD_TEXT(card, 2, "L2Sgmacb"); cmd = (struct qeth_ipa_cmd *) data; mac = &cmd->data.setdelmac.mac[0]; /* MAC already registered, needed in couple/uncouple case */ @@ -144,7 +154,7 @@ static int qeth_l2_send_setgroupmac_cb(struct qeth_card *card, static int qeth_l2_send_setgroupmac(struct qeth_card *card, __u8 *mac) { - QETH_DBF_TEXT(TRACE, 2, "L2Sgmac"); + QETH_CARD_TEXT(card, 2, "L2Sgmac"); return qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETGMAC, qeth_l2_send_setgroupmac_cb); } @@ -156,7 +166,7 @@ static int qeth_l2_send_delgroupmac_cb(struct qeth_card *card, struct qeth_ipa_cmd *cmd; __u8 *mac; - QETH_DBF_TEXT(TRACE, 2, "L2Dgmacb"); + QETH_CARD_TEXT(card, 2, "L2Dgmacb"); cmd = (struct qeth_ipa_cmd *) data; mac = &cmd->data.setdelmac.mac[0]; if (cmd->hdr.return_code) @@ -167,7 +177,7 @@ static int qeth_l2_send_delgroupmac_cb(struct qeth_card *card, static int qeth_l2_send_delgroupmac(struct qeth_card *card, __u8 *mac) { - QETH_DBF_TEXT(TRACE, 2, "L2Dgmac"); + QETH_CARD_TEXT(card, 2, "L2Dgmac"); return qeth_l2_send_setdelmac(card, mac, IPA_CMD_DELGMAC, qeth_l2_send_delgroupmac_cb); } @@ -199,17 +209,19 @@ static void qeth_l2_add_mc(struct qeth_card *card, __u8 *mac, int vmac) kfree(mc); } -static void qeth_l2_del_all_mc(struct qeth_card *card) +static void qeth_l2_del_all_mc(struct qeth_card *card, int del) { struct qeth_mc_mac *mc, *tmp; spin_lock_bh(&card->mclock); list_for_each_entry_safe(mc, tmp, &card->mc_list, list) { - if (mc->is_vmac) - qeth_l2_send_setdelmac(card, mc->mc_addr, + if (del) { + if (mc->is_vmac) + 
qeth_l2_send_setdelmac(card, mc->mc_addr, IPA_CMD_DELVMAC, NULL); - else - qeth_l2_send_delgroupmac(card, mc->mc_addr); + else + qeth_l2_send_delgroupmac(card, mc->mc_addr); + } list_del(&mc->list); kfree(mc); } @@ -229,7 +241,7 @@ static inline int qeth_l2_get_cast_type(struct qeth_card *card, } static void qeth_l2_fill_header(struct qeth_card *card, struct qeth_hdr *hdr, - struct sk_buff *skb, int ipv, int cast_type) + struct sk_buff *skb, int cast_type) { struct vlan_ethhdr *veth = (struct vlan_ethhdr *)skb_mac_header(skb); @@ -259,15 +271,14 @@ static int qeth_l2_send_setdelvlan_cb(struct qeth_card *card, { struct qeth_ipa_cmd *cmd; - QETH_DBF_TEXT(TRACE, 2, "L2sdvcb"); + QETH_CARD_TEXT(card, 2, "L2sdvcb"); cmd = (struct qeth_ipa_cmd *) data; if (cmd->hdr.return_code) { QETH_DBF_MESSAGE(2, "Error in processing VLAN %i on %s: 0x%x. " "Continuing\n", cmd->data.setdelvlan.vlan_id, QETH_CARD_IFNAME(card), cmd->hdr.return_code); - QETH_DBF_TEXT_(TRACE, 2, "L2VL%4x", cmd->hdr.command); - QETH_DBF_TEXT_(TRACE, 2, "L2%s", CARD_BUS_ID(card)); - QETH_DBF_TEXT_(TRACE, 2, "err%d", cmd->hdr.return_code); + QETH_CARD_TEXT_(card, 2, "L2VL%4x", cmd->hdr.command); + QETH_CARD_TEXT_(card, 2, "err%d", cmd->hdr.return_code); } return 0; } @@ -278,7 +289,7 @@ static int qeth_l2_send_setdelvlan(struct qeth_card *card, __u16 i, struct qeth_ipa_cmd *cmd; struct qeth_cmd_buffer *iob; - QETH_DBF_TEXT_(TRACE, 4, "L2sdv%x", ipacmd); + QETH_CARD_TEXT_(card, 4, "L2sdv%x", ipacmd); iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4); cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); cmd->data.setdelvlan.vlan_id = i; @@ -286,31 +297,33 @@ static int qeth_l2_send_setdelvlan(struct qeth_card *card, __u16 i, qeth_l2_send_setdelvlan_cb, NULL); } -static void qeth_l2_process_vlans(struct qeth_card *card, int clear) +static void qeth_l2_process_vlans(struct qeth_card *card) { struct qeth_vlan_vid *id; - QETH_DBF_TEXT(TRACE, 3, "L2prcvln"); + QETH_CARD_TEXT(card, 3, "L2prcvln"); spin_lock_bh(&card->vlanlock); list_for_each_entry(id, &card->vid_list, list) { - if (clear) - qeth_l2_send_setdelvlan(card, id->vid, - IPA_CMD_DELVLAN); - else - qeth_l2_send_setdelvlan(card, id->vid, - IPA_CMD_SETVLAN); + qeth_l2_send_setdelvlan(card, id->vid, IPA_CMD_SETVLAN); } spin_unlock_bh(&card->vlanlock); } -static void qeth_l2_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) +static int qeth_l2_vlan_rx_add_vid(struct net_device *dev, + __be16 proto, u16 vid) { struct qeth_card *card = dev->ml_priv; struct qeth_vlan_vid *id; - QETH_DBF_TEXT_(TRACE, 4, "aid:%d", vid); + QETH_CARD_TEXT_(card, 4, "aid:%d", vid); + if (!vid) + return 0; + if (card->info.type == QETH_CARD_TYPE_OSM) { + QETH_CARD_TEXT(card, 3, "aidOSM"); + return 0; + } if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) { - QETH_DBF_TEXT(TRACE, 3, "aidREC"); - return; + QETH_CARD_TEXT(card, 3, "aidREC"); + return 0; } id = kmalloc(sizeof(struct qeth_vlan_vid), GFP_ATOMIC); if (id) { @@ -319,18 +332,26 @@ static void qeth_l2_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) spin_lock_bh(&card->vlanlock); list_add_tail(&id->list, &card->vid_list); spin_unlock_bh(&card->vlanlock); + } else { + return -ENOMEM; } + return 0; } -static void qeth_l2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) +static int qeth_l2_vlan_rx_kill_vid(struct net_device *dev, + __be16 proto, u16 vid) { struct qeth_vlan_vid *id, *tmpid = NULL; struct qeth_card *card = dev->ml_priv; - QETH_DBF_TEXT_(TRACE, 4, "kid:%d", vid); + QETH_CARD_TEXT_(card, 4, 
"kid:%d", vid); + if (card->info.type == QETH_CARD_TYPE_OSM) { + QETH_CARD_TEXT(card, 3, "kidOSM"); + return 0; + } if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) { - QETH_DBF_TEXT(TRACE, 3, "kidREC"); - return; + QETH_CARD_TEXT(card, 3, "kidREC"); + return 0; } spin_lock_bh(&card->vlanlock); list_for_each_entry(id, &card->vid_list, list) { @@ -346,6 +367,7 @@ static void qeth_l2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) kfree(tmpid); } qeth_l2_set_multicast_list(card->dev); + return 0; } static int qeth_l2_stop_card(struct qeth_card *card, int recovery_mode) @@ -367,19 +389,11 @@ static int qeth_l2_stop_card(struct qeth_card *card, int recovery_mode) dev_close(card->dev); rtnl_unlock(); } - if (!card->use_hard_stop || - recovery_mode) { - __u8 *mac = &card->dev->dev_addr[0]; - rc = qeth_l2_send_delmac(card, mac); - QETH_DBF_TEXT_(SETUP, 2, "Lerr%d", rc); - } + card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED; card->state = CARD_STATE_SOFTSETUP; } if (card->state == CARD_STATE_SOFTSETUP) { - qeth_l2_process_vlans(card, 1); - if (!card->use_hard_stop || - recovery_mode) - qeth_l2_del_all_mc(card); + qeth_l2_del_all_mc(card, 0); qeth_clear_ipacmd_list(card); card->state = CARD_STATE_HARDSETUP; } @@ -393,45 +407,37 @@ static int qeth_l2_stop_card(struct qeth_card *card, int recovery_mode) qeth_clear_cmd_buffers(&card->read); qeth_clear_cmd_buffers(&card->write); } - card->use_hard_stop = 0; return rc; } -static void qeth_l2_process_inbound_buffer(struct qeth_card *card, - struct qeth_qdio_buffer *buf, int index) +static int qeth_l2_process_inbound_buffer(struct qeth_card *card, + int budget, int *done) { - struct qdio_buffer_element *element; + int work_done = 0; struct sk_buff *skb; struct qeth_hdr *hdr; - int offset; unsigned int len; - /* get first element of current buffer */ - element = (struct qdio_buffer_element *)&buf->buffer->element[0]; - offset = 0; - if (card->options.performance_stats) - card->perf_stats.bufs_rec++; - while ((skb = qeth_core_get_next_skb(card, buf->buffer, &element, - &offset, &hdr))) { - skb->dev = card->dev; - /* is device UP ? 
*/ - if (!(card->dev->flags & IFF_UP)) { - dev_kfree_skb_any(skb); - continue; + *done = 0; + WARN_ON_ONCE(!budget); + while (budget) { + skb = qeth_core_get_next_skb(card, + &card->qdio.in_q->bufs[card->rx.b_index], + &card->rx.b_element, &card->rx.e_offset, &hdr); + if (!skb) { + *done = 1; + break; } - + skb->dev = card->dev; switch (hdr->hdr.l2.id) { case QETH_HEADER_TYPE_LAYER2: skb->pkt_type = PACKET_HOST; skb->protocol = eth_type_trans(skb, skb->dev); - if (card->options.checksum_type == NO_CHECKSUMMING) - skb->ip_summed = CHECKSUM_UNNECESSARY; - else - skb->ip_summed = CHECKSUM_NONE; + skb->ip_summed = CHECKSUM_NONE; if (skb->protocol == htons(ETH_P_802_2)) *((__u32 *)skb->cb) = ++card->seqno.pkt_seqno; len = skb->len; - netif_rx(skb); + netif_receive_skb(skb); break; case QETH_HEADER_TYPE_OSN: if (card->info.type == QETH_CARD_TYPE_OSN) { @@ -445,13 +451,91 @@ static void qeth_l2_process_inbound_buffer(struct qeth_card *card, /* else unknown */ default: dev_kfree_skb_any(skb); - QETH_DBF_TEXT(TRACE, 3, "inbunkno"); + QETH_CARD_TEXT(card, 3, "inbunkno"); QETH_DBF_HEX(CTRL, 3, hdr, QETH_DBF_CTRL_LEN); continue; } + work_done++; + budget--; card->stats.rx_packets++; card->stats.rx_bytes += len; } + return work_done; +} + +static int qeth_l2_poll(struct napi_struct *napi, int budget) +{ + struct qeth_card *card = container_of(napi, struct qeth_card, napi); + int work_done = 0; + struct qeth_qdio_buffer *buffer; + int done; + int new_budget = budget; + + if (card->options.performance_stats) { + card->perf_stats.inbound_cnt++; + card->perf_stats.inbound_start_time = qeth_get_micros(); + } + + while (1) { + if (!card->rx.b_count) { + card->rx.qdio_err = 0; + card->rx.b_count = qdio_get_next_buffers( + card->data.ccwdev, 0, &card->rx.b_index, + &card->rx.qdio_err); + if (card->rx.b_count <= 0) { + card->rx.b_count = 0; + break; + } + card->rx.b_element = + &card->qdio.in_q->bufs[card->rx.b_index] + .buffer->element[0]; + card->rx.e_offset = 0; + } + + while (card->rx.b_count) { + buffer = &card->qdio.in_q->bufs[card->rx.b_index]; + if (!(card->rx.qdio_err && + qeth_check_qdio_errors(card, buffer->buffer, + card->rx.qdio_err, "qinerr"))) + work_done += qeth_l2_process_inbound_buffer( + card, new_budget, &done); + else + done = 1; + + if (done) { + if (card->options.performance_stats) + card->perf_stats.bufs_rec++; + qeth_put_buffer_pool_entry(card, + buffer->pool_entry); + qeth_queue_input_buffer(card, card->rx.b_index); + card->rx.b_count--; + if (card->rx.b_count) { + card->rx.b_index = + (card->rx.b_index + 1) % + QDIO_MAX_BUFFERS_PER_Q; + card->rx.b_element = + &card->qdio.in_q + ->bufs[card->rx.b_index] + .buffer->element[0]; + card->rx.e_offset = 0; + } + } + + if (work_done >= budget) + goto out; + else + new_budget = budget - work_done; + } + } + + napi_complete(napi); + if (qdio_start_irq(card->data.ccwdev, 0)) + napi_schedule(&card->napi); +out: + if (card->options.performance_stats) + card->perf_stats.inbound_time += qeth_get_micros() - + card->perf_stats.inbound_start_time; + return work_done; } static int qeth_l2_send_setdelmac(struct qeth_card *card, __u8 *mac, @@ -463,7 +547,7 @@ static int qeth_l2_send_setdelmac(struct qeth_card *card, __u8 *mac, struct qeth_ipa_cmd *cmd; struct qeth_cmd_buffer *iob; - QETH_DBF_TEXT(TRACE, 2, "L2sdmac"); + QETH_CARD_TEXT(card, 2, "L2sdmac"); iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4); cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); cmd->data.setdelmac.mac_length = OSA_ADDR_LEN; @@ -477,28 +561,27 @@ static int 
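
The two hunks above move layer-2 receive processing out of the qdio input handler and into NAPI: qeth_l2_poll() pulls completed buffers with qdio_get_next_buffers(), consumes at most the caller's budget, and re-enables the device interrupt only once the queue is drained. Reduced to its skeleton, with my_dev and the my_dev_*() helpers as hypothetical stand-ins for the qeth-specific buffer walking, the pattern is roughly:

	#include <linux/netdevice.h>

	/* Minimal, hedged sketch of the budget-driven poll loop; not qeth code. */
	static int my_poll(struct napi_struct *napi, int budget)
	{
		struct my_dev *dev = container_of(napi, struct my_dev, napi);
		int work_done = 0;

		/* consume at most 'budget' packets per invocation */
		while (work_done < budget && my_dev_rx_one(dev))
			work_done++;

		if (work_done < budget) {
			/* ring drained: leave polled mode and rearm the IRQ */
			napi_complete(napi);
			if (my_dev_rearm_irq(dev))
				napi_schedule(napi);	/* new data raced in */
		}
		return work_done;
	}

The registration half of this pattern appears in qeth_l2_setup_netdev() further down: netif_napi_add(card->dev, &card->napi, qeth_l2_poll, QETH_NAPI_WEIGHT).
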
qeth_l2_send_setmac_cb(struct qeth_card *card, { struct qeth_ipa_cmd *cmd; - QETH_DBF_TEXT(TRACE, 2, "L2Smaccb"); + QETH_CARD_TEXT(card, 2, "L2Smaccb"); cmd = (struct qeth_ipa_cmd *) data; if (cmd->hdr.return_code) { - QETH_DBF_TEXT_(TRACE, 2, "L2er%x", cmd->hdr.return_code); + QETH_CARD_TEXT_(card, 2, "L2er%x", cmd->hdr.return_code); card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED; switch (cmd->hdr.return_code) { case IPA_RC_L2_DUP_MAC: case IPA_RC_L2_DUP_LAYER3_MAC: dev_warn(&card->gdev->dev, "MAC address %pM already exists\n", - card->dev->dev_addr); + cmd->data.setdelmac.mac); break; case IPA_RC_L2_MAC_NOT_AUTH_BY_HYP: case IPA_RC_L2_MAC_NOT_AUTH_BY_ADP: dev_warn(&card->gdev->dev, "MAC address %pM is not authorized\n", - card->dev->dev_addr); + cmd->data.setdelmac.mac); break; default: break; } - cmd->hdr.return_code = -EIO; } else { card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED; memcpy(card->dev->dev_addr, cmd->data.setdelmac.mac, @@ -512,7 +595,7 @@ static int qeth_l2_send_setmac_cb(struct qeth_card *card, static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac) { - QETH_DBF_TEXT(TRACE, 2, "L2Setmac"); + QETH_CARD_TEXT(card, 2, "L2Setmac"); return qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC, qeth_l2_send_setmac_cb); } @@ -523,11 +606,10 @@ static int qeth_l2_send_delmac_cb(struct qeth_card *card, { struct qeth_ipa_cmd *cmd; - QETH_DBF_TEXT(TRACE, 2, "L2Dmaccb"); + QETH_CARD_TEXT(card, 2, "L2Dmaccb"); cmd = (struct qeth_ipa_cmd *) data; if (cmd->hdr.return_code) { - QETH_DBF_TEXT_(TRACE, 2, "err%d", cmd->hdr.return_code); - cmd->hdr.return_code = -EIO; + QETH_CARD_TEXT_(card, 2, "err%d", cmd->hdr.return_code); return 0; } card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED; @@ -537,7 +619,7 @@ static int qeth_l2_send_delmac_cb(struct qeth_card *card, static int qeth_l2_send_delmac(struct qeth_card *card, __u8 *mac) { - QETH_DBF_TEXT(TRACE, 2, "L2Delmac"); + QETH_CARD_TEXT(card, 2, "L2Delmac"); if (!(card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED)) return 0; return qeth_l2_send_setdelmac(card, mac, IPA_CMD_DELVMAC, @@ -552,14 +634,19 @@ static int qeth_l2_request_initial_mac(struct qeth_card *card) QETH_DBF_TEXT(SETUP, 2, "doL2init"); QETH_DBF_TEXT_(SETUP, 2, "doL2%s", CARD_BUS_ID(card)); - rc = qeth_query_setadapterparms(card); - if (rc) { - QETH_DBF_MESSAGE(2, "could not query adapter parameters on " - "device %s: x%x\n", CARD_BUS_ID(card), rc); + if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) { + rc = qeth_query_setadapterparms(card); + if (rc) { + QETH_DBF_MESSAGE(2, "could not query adapter " + "parameters on device %s: x%x\n", + CARD_BUS_ID(card), rc); + } } - if ((card->info.type == QETH_CARD_TYPE_IQD) || - (card->info.guestlan)) { + if (card->info.type == QETH_CARD_TYPE_IQD || + card->info.type == QETH_CARD_TYPE_OSM || + card->info.type == QETH_CARD_TYPE_OSX || + card->info.guestlan) { rc = qeth_setadpparms_change_macaddr(card); if (rc) { QETH_DBF_MESSAGE(2, "couldn't get MAC address on " @@ -569,7 +656,7 @@ static int qeth_l2_request_initial_mac(struct qeth_card *card) } QETH_DBF_HEX(SETUP, 2, card->dev->dev_addr, OSA_ADDR_LEN); } else { - random_ether_addr(card->dev->dev_addr); + eth_random_addr(card->dev->dev_addr); memcpy(card->dev->dev_addr, vendor_pre, 3); } return 0; @@ -581,46 +668,46 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p) struct qeth_card *card = dev->ml_priv; int rc = 0; - QETH_DBF_TEXT(TRACE, 3, "setmac"); + QETH_CARD_TEXT(card, 3, "setmac"); if (qeth_l2_verify_dev(dev) != QETH_REAL_CARD) { - 
QETH_DBF_TEXT(TRACE, 3, "setmcINV"); + QETH_CARD_TEXT(card, 3, "setmcINV"); return -EOPNOTSUPP; } - if (card->info.type == QETH_CARD_TYPE_OSN) { - QETH_DBF_TEXT(TRACE, 3, "setmcOSN"); + if (card->info.type == QETH_CARD_TYPE_OSN || + card->info.type == QETH_CARD_TYPE_OSM || + card->info.type == QETH_CARD_TYPE_OSX) { + QETH_CARD_TEXT(card, 3, "setmcTYP"); return -EOPNOTSUPP; } - QETH_DBF_TEXT_(TRACE, 3, "%s", CARD_BUS_ID(card)); - QETH_DBF_HEX(TRACE, 3, addr->sa_data, OSA_ADDR_LEN); + QETH_CARD_HEX(card, 3, addr->sa_data, OSA_ADDR_LEN); if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) { - QETH_DBF_TEXT(TRACE, 3, "setmcREC"); + QETH_CARD_TEXT(card, 3, "setmcREC"); return -ERESTARTSYS; } rc = qeth_l2_send_delmac(card, &card->dev->dev_addr[0]); - if (!rc) + if (!rc || (rc == IPA_RC_L2_MAC_NOT_FOUND)) rc = qeth_l2_send_setmac(card, addr->sa_data); - return rc; + return rc ? -EINVAL : 0; } static void qeth_l2_set_multicast_list(struct net_device *dev) { struct qeth_card *card = dev->ml_priv; - struct dev_addr_list *dm; struct netdev_hw_addr *ha; if (card->info.type == QETH_CARD_TYPE_OSN) return ; - QETH_DBF_TEXT(TRACE, 3, "setmulti"); + QETH_CARD_TEXT(card, 3, "setmulti"); if (qeth_threads_running(card, QETH_RECOVER_THREAD) && (card->state != CARD_STATE_UP)) return; - qeth_l2_del_all_mc(card); + qeth_l2_del_all_mc(card, 1); spin_lock_bh(&card->mclock); - for (dm = dev->mc_list; dm; dm = dm->next) - qeth_l2_add_mc(card, dm->da_addr, 0); + netdev_for_each_mc_addr(ha, dev) + qeth_l2_add_mc(card, ha->addr, 0); netdev_for_each_uc_addr(ha, dev) qeth_l2_add_mc(card, ha->addr, 1); @@ -638,15 +725,20 @@ static int qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) int elements = 0; struct qeth_card *card = dev->ml_priv; struct sk_buff *new_skb = skb; - int ipv = qeth_get_ip_version(skb); int cast_type = qeth_l2_get_cast_type(card, skb); - struct qeth_qdio_out_q *queue = card->qdio.out_qs - [qeth_get_priority_queue(card, skb, ipv, cast_type)]; + struct qeth_qdio_out_q *queue; int tx_bytes = skb->len; int data_offset = -1; int elements_needed = 0; int hd_len = 0; + if (card->qdio.do_prio_queueing || (cast_type && + card->info.is_multicast_different)) + queue = card->qdio.out_qs[qeth_get_priority_queue(card, skb, + qeth_get_ip_version(skb), cast_type)]; + else + queue = card->qdio.out_qs[card->qdio.default_out_queue]; + if ((card->state != CARD_STATE_UP) || !card->lan_online) { card->stats.tx_carrier_errors++; goto tx_drop; @@ -675,7 +767,7 @@ static int qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) goto tx_drop; elements_needed++; skb_reset_mac_header(new_skb); - qeth_l2_fill_header(card, hdr, new_skb, ipv, cast_type); + qeth_l2_fill_header(card, hdr, new_skb, cast_type); hdr->hdr.l2.pkt_length = new_skb->len; memcpy(((char *)hdr) + sizeof(struct qeth_hdr), skb_mac_header(new_skb), ETH_HLEN); @@ -688,22 +780,24 @@ static int qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) hdr = (struct qeth_hdr *)skb_push(new_skb, sizeof(struct qeth_hdr)); skb_set_mac_header(new_skb, sizeof(struct qeth_hdr)); - qeth_l2_fill_header(card, hdr, new_skb, ipv, cast_type); + qeth_l2_fill_header(card, hdr, new_skb, cast_type); } } - elements = qeth_get_elements_no(card, (void *)hdr, new_skb, - elements_needed); + elements = qeth_get_elements_no(card, new_skb, elements_needed); if (!elements) { if (data_offset >= 0) kmem_cache_free(qeth_core_header_cache, hdr); goto tx_drop; } - if (card->info.type != QETH_CARD_TYPE_IQD) + if (card->info.type != QETH_CARD_TYPE_IQD) 
{ + if (qeth_hdr_chk_and_bounce(new_skb, &hdr, + sizeof(struct qeth_hdr_layer2))) + goto tx_drop; rc = qeth_do_send_packet(card, queue, new_skb, hdr, elements); - else + } else rc = qeth_do_send_packet_fast(card, queue, new_skb, hdr, elements, data_offset, hd_len); if (!rc) { @@ -740,78 +834,56 @@ tx_drop: return NETDEV_TX_OK; } -static void qeth_l2_qdio_input_handler(struct ccw_device *ccwdev, - unsigned int qdio_err, unsigned int queue, - int first_element, int count, unsigned long card_ptr) -{ - struct net_device *net_dev; - struct qeth_card *card; - struct qeth_qdio_buffer *buffer; - int index; - int i; - - card = (struct qeth_card *) card_ptr; - net_dev = card->dev; - if (card->options.performance_stats) { - card->perf_stats.inbound_cnt++; - card->perf_stats.inbound_start_time = qeth_get_micros(); - } - if (qdio_err & QDIO_ERROR_ACTIVATE_CHECK_CONDITION) { - QETH_DBF_TEXT(TRACE, 1, "qdinchk"); - QETH_DBF_TEXT_(TRACE, 1, "%s", CARD_BUS_ID(card)); - QETH_DBF_TEXT_(TRACE, 1, "%04X%04X", first_element, - count); - QETH_DBF_TEXT_(TRACE, 1, "%04X", queue); - qeth_schedule_recovery(card); - return; - } - for (i = first_element; i < (first_element + count); ++i) { - index = i % QDIO_MAX_BUFFERS_PER_Q; - buffer = &card->qdio.in_q->bufs[index]; - if (!(qdio_err && - qeth_check_qdio_errors(card, buffer->buffer, qdio_err, - "qinerr"))) - qeth_l2_process_inbound_buffer(card, buffer, index); - /* clear buffer and give back to hardware */ - qeth_put_buffer_pool_entry(card, buffer->pool_entry); - qeth_queue_input_buffer(card, index); - } - if (card->options.performance_stats) - card->perf_stats.inbound_time += qeth_get_micros() - - card->perf_stats.inbound_start_time; -} - -static int qeth_l2_open(struct net_device *dev) +static int __qeth_l2_open(struct net_device *dev) { struct qeth_card *card = dev->ml_priv; + int rc = 0; - QETH_DBF_TEXT(TRACE, 4, "qethopen"); + QETH_CARD_TEXT(card, 4, "qethopen"); + if (card->state == CARD_STATE_UP) + return rc; if (card->state != CARD_STATE_SOFTSETUP) return -ENODEV; if ((card->info.type != QETH_CARD_TYPE_OSN) && (!(card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED))) { - QETH_DBF_TEXT(TRACE, 4, "nomacadr"); + QETH_CARD_TEXT(card, 4, "nomacadr"); return -EPERM; } card->data.state = CH_STATE_UP; card->state = CARD_STATE_UP; netif_start_queue(dev); - if (!card->lan_online && netif_carrier_ok(dev)) - netif_carrier_off(dev); - return 0; + if (qdio_stop_irq(card->data.ccwdev, 0) >= 0) { + napi_enable(&card->napi); + napi_schedule(&card->napi); + } else + rc = -EIO; + return rc; } +static int qeth_l2_open(struct net_device *dev) +{ + struct qeth_card *card = dev->ml_priv; + + QETH_CARD_TEXT(card, 5, "qethope_"); + if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) { + QETH_CARD_TEXT(card, 3, "openREC"); + return -ERESTARTSYS; + } + return __qeth_l2_open(dev); +} static int qeth_l2_stop(struct net_device *dev) { struct qeth_card *card = dev->ml_priv; - QETH_DBF_TEXT(TRACE, 4, "qethstop"); + QETH_CARD_TEXT(card, 4, "qethstop"); netif_tx_disable(dev); - if (card->state == CARD_STATE_UP) + if (card->state == CARD_STATE_UP) { card->state = CARD_STATE_SOFTSETUP; + napi_disable(&card->napi); + } return 0; } @@ -819,14 +891,11 @@ static int qeth_l2_probe_device(struct ccwgroup_device *gdev) { struct qeth_card *card = dev_get_drvdata(&gdev->dev); + qeth_l2_create_device_attributes(&gdev->dev); INIT_LIST_HEAD(&card->vid_list); INIT_LIST_HEAD(&card->mc_list); card->options.layer2 = 1; - card->discipline.input_handler = (qdio_handler_t *) - qeth_l2_qdio_input_handler; - 
card->discipline.output_handler = (qdio_handler_t *) - qeth_qdio_output_handler; - card->discipline.recover = qeth_l2_recover; + card->info.hwtrap = 0; return 0; } @@ -834,20 +903,17 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev) { struct qeth_card *card = dev_get_drvdata(&cgdev->dev); + qeth_l2_remove_device_attributes(&cgdev->dev); qeth_set_allowed_threads(card, 0, 1); wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0); - if (cgdev->state == CCWGROUP_ONLINE) { - card->use_hard_stop = 1; + if (cgdev->state == CCWGROUP_ONLINE) qeth_l2_set_offline(cgdev); - } if (card->dev) { unregister_netdev(card->dev); card->dev = NULL; } - - qeth_l2_del_all_mc(card); return; } @@ -873,7 +939,7 @@ static const struct net_device_ops qeth_l2_netdev_ops = { .ndo_get_stats = qeth_get_stats, .ndo_start_xmit = qeth_l2_hard_start_xmit, .ndo_validate_addr = eth_validate_addr, - .ndo_set_multicast_list = qeth_l2_set_multicast_list, + .ndo_set_rx_mode = qeth_l2_set_multicast_list, .ndo_do_ioctl = qeth_l2_do_ioctl, .ndo_set_mac_address = qeth_l2_set_mac_address, .ndo_change_mtu = qeth_change_mtu, @@ -885,9 +951,6 @@ static const struct net_device_ops qeth_l2_netdev_ops = { static int qeth_l2_setup_netdev(struct qeth_card *card) { switch (card->info.type) { - case QETH_CARD_TYPE_OSAE: - card->dev = alloc_etherdev(0); - break; case QETH_CARD_TYPE_IQD: card->dev = alloc_netdev(0, "hsi%d", ether_setup); break; @@ -906,14 +969,14 @@ static int qeth_l2_setup_netdev(struct qeth_card *card) card->dev->watchdog_timeo = QETH_TX_TIMEOUT; card->dev->mtu = card->info.initial_mtu; card->dev->netdev_ops = &qeth_l2_netdev_ops; - if (card->info.type != QETH_CARD_TYPE_OSN) - SET_ETHTOOL_OPS(card->dev, &qeth_l2_ethtool_ops); - else - SET_ETHTOOL_OPS(card->dev, &qeth_l2_osn_ops); - card->dev->features |= NETIF_F_HW_VLAN_FILTER; + card->dev->ethtool_ops = + (card->info.type != QETH_CARD_TYPE_OSN) ? 
+ &qeth_l2_ethtool_ops : &qeth_l2_osn_ops; + card->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; card->info.broadcast_capable = 1; qeth_l2_request_initial_mac(card); SET_NETDEV_DEV(card->dev, &card->gdev->dev); + netif_napi_add(card->dev, &card->napi, qeth_l2_poll, QETH_NAPI_WEIGHT); return register_netdev(card->dev); } @@ -923,7 +986,8 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode) int rc = 0; enum qeth_card_states recover_flag; - BUG_ON(!card); + mutex_lock(&card->discipline_mutex); + mutex_lock(&card->conf_mutex); QETH_DBF_TEXT(SETUP, 2, "setonlin"); QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *)); @@ -934,6 +998,11 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode) rc = -ENODEV; goto out_remove; } + qeth_bridgeport_query_support(card); + if (card->options.sbp.supported_funcs) + dev_info(&card->gdev->dev, + "The device represents a HiperSockets Bridge Capable Port\n"); + qeth_trace_features(card); if (!card->dev && qeth_l2_setup_netdev(card)) { rc = -ENODEV; @@ -943,7 +1012,17 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode) if (card->info.type != QETH_CARD_TYPE_OSN) qeth_l2_send_setmac(card, &card->dev->dev_addr[0]); + if (qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP)) { + if (card->info.hwtrap && + qeth_hw_trap(card, QETH_DIAGS_TRAP_ARM)) + card->info.hwtrap = 0; + } else + card->info.hwtrap = 0; + + qeth_l2_setup_bridgeport_attrs(card); + card->state = CARD_STATE_HARDSETUP; + memset(&card->rx, 0, sizeof(struct qeth_rx)); qeth_print_status_message(card); /* softsetup */ @@ -956,19 +1035,28 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode) dev_warn(&card->gdev->dev, "The LAN is offline\n"); card->lan_online = 0; - return 0; + goto contin; } rc = -ENODEV; goto out_remove; } else card->lan_online = 1; - if (card->info.type != QETH_CARD_TYPE_OSN) { +contin: + if ((card->info.type == QETH_CARD_TYPE_OSD) || + (card->info.type == QETH_CARD_TYPE_OSX)) { /* configure isolation level */ - qeth_set_access_ctrl_online(card); - qeth_l2_process_vlans(card, 0); + rc = qeth_set_access_ctrl_online(card, 0); + if (rc) { + rc = -ENODEV; + goto out_remove; + } } + if (card->info.type != QETH_CARD_TYPE_OSN && + card->info.type != QETH_CARD_TYPE_OSM) + qeth_l2_process_vlans(card); + netif_tx_disable(card->dev); rc = qeth_init_qdio_queues(card); @@ -978,13 +1066,16 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode) goto out_remove; } card->state = CARD_STATE_SOFTSETUP; - netif_carrier_on(card->dev); + if (card->lan_online) + netif_carrier_on(card->dev); + else + netif_carrier_off(card->dev); qeth_set_allowed_threads(card, 0xffffffff, 0); if (recover_flag == CARD_STATE_RECOVER) { if (recovery_mode && card->info.type != QETH_CARD_TYPE_OSN) { - qeth_l2_open(card->dev); + __qeth_l2_open(card->dev); } else { rtnl_lock(); dev_open(card->dev); @@ -995,18 +1086,22 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode) } /* let user_space know that device is online */ kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE); + mutex_unlock(&card->conf_mutex); + mutex_unlock(&card->discipline_mutex); return 0; out_remove: - card->use_hard_stop = 1; qeth_l2_stop_card(card, 0); ccw_device_set_offline(CARD_DDEV(card)); ccw_device_set_offline(CARD_WDEV(card)); ccw_device_set_offline(CARD_RDEV(card)); + qdio_free(CARD_DDEV(card)); if (recover_flag == CARD_STATE_RECOVER) card->state = CARD_STATE_RECOVER; else card->state = 
CARD_STATE_DOWN; + mutex_unlock(&card->conf_mutex); + mutex_unlock(&card->discipline_mutex); return rc; } @@ -1022,12 +1117,18 @@ static int __qeth_l2_set_offline(struct ccwgroup_device *cgdev, int rc = 0, rc2 = 0, rc3 = 0; enum qeth_card_states recover_flag; + mutex_lock(&card->discipline_mutex); + mutex_lock(&card->conf_mutex); QETH_DBF_TEXT(SETUP, 3, "setoffl"); QETH_DBF_HEX(SETUP, 3, &card, sizeof(void *)); if (card->dev && netif_carrier_ok(card->dev)) netif_carrier_off(card->dev); recover_flag = card->state; + if ((!recovery_mode && card->info.hwtrap) || card->info.hwtrap == 2) { + qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM); + card->info.hwtrap = 1; + } qeth_l2_stop_card(card, recovery_mode); rc = ccw_device_set_offline(CARD_DDEV(card)); rc2 = ccw_device_set_offline(CARD_WDEV(card)); @@ -1036,10 +1137,13 @@ static int __qeth_l2_set_offline(struct ccwgroup_device *cgdev, rc = (rc2) ? rc2 : rc3; if (rc) QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); + qdio_free(CARD_DDEV(card)); if (recover_flag == CARD_STATE_UP) card->state = CARD_STATE_RECOVER; /* let user_space know that device is offline */ kobject_uevent(&cgdev->dev.kobj, KOBJ_CHANGE); + mutex_unlock(&card->conf_mutex); + mutex_unlock(&card->discipline_mutex); return 0; } @@ -1054,29 +1158,26 @@ static int qeth_l2_recover(void *ptr) int rc = 0; card = (struct qeth_card *) ptr; - QETH_DBF_TEXT(TRACE, 2, "recover1"); - QETH_DBF_HEX(TRACE, 2, &card, sizeof(void *)); + QETH_CARD_TEXT(card, 2, "recover1"); if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD)) return 0; - QETH_DBF_TEXT(TRACE, 2, "recover2"); + QETH_CARD_TEXT(card, 2, "recover2"); dev_warn(&card->gdev->dev, "A recovery process has been started for the device\n"); - card->use_hard_stop = 1; + qeth_set_recovery_task(card); __qeth_l2_set_offline(card->gdev, 1); rc = __qeth_l2_set_online(card->gdev, 1); - /* don't run another scheduled recovery */ - qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD); - qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD); if (!rc) dev_info(&card->gdev->dev, "Device successfully recovered!\n"); else { - rtnl_lock(); - dev_close(card->dev); - rtnl_unlock(); + qeth_close_dev(card); dev_warn(&card->gdev->dev, "The qeth device driver " - "failed to recover an error on the device\n"); + "failed to recover an error on the device\n"); } + qeth_clear_recovery_task(card); + qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD); + qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD); return 0; } @@ -1094,8 +1195,12 @@ static void __exit qeth_l2_exit(void) static void qeth_l2_shutdown(struct ccwgroup_device *gdev) { struct qeth_card *card = dev_get_drvdata(&gdev->dev); + qeth_set_allowed_threads(card, 0, 1); + if ((gdev->state == CCWGROUP_ONLINE) && card->info.hwtrap) + qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM); qeth_qdio_clear_card(card, 0); qeth_clear_qdio_buffers(card); + qdio_free(CARD_DDEV(card)); } static int qeth_l2_pm_suspend(struct ccwgroup_device *gdev) @@ -1109,7 +1214,8 @@ static int qeth_l2_pm_suspend(struct ccwgroup_device *gdev) if (gdev->state == CCWGROUP_OFFLINE) return 0; if (card->state == CARD_STATE_UP) { - card->use_hard_stop = 1; + if (card->info.hwtrap) + qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM); __qeth_l2_set_offline(card->gdev, 1); } else __qeth_l2_set_offline(card->gdev, 0); @@ -1143,8 +1249,32 @@ out: return rc; } -struct ccwgroup_driver qeth_l2_ccwgroup_driver = { - .probe = qeth_l2_probe_device, +/* Returns zero if the command is successfully "consumed" */ +static int qeth_l2_control_event(struct qeth_card *card, + 
struct qeth_ipa_cmd *cmd) +{ + switch (cmd->hdr.command) { + case IPA_CMD_SETBRIDGEPORT: + if (cmd->data.sbp.hdr.command_code == + IPA_SBP_BRIDGE_PORT_STATE_CHANGE) { + qeth_bridge_state_change(card, cmd); + return 0; + } else + return 1; + case IPA_CMD_ADDRESS_CHANGE_NOTIF: + qeth_bridge_host_event(card, cmd); + return 0; + default: + return 1; + } +} + +struct qeth_discipline qeth_l2_discipline = { + .start_poll = qeth_qdio_start_poll, + .input_handler = (qdio_handler_t *) qeth_qdio_input_handler, + .output_handler = (qdio_handler_t *) qeth_qdio_output_handler, + .recover = qeth_l2_recover, + .setup = qeth_l2_probe_device, .remove = qeth_l2_remove_device, .set_online = qeth_l2_set_online, .set_offline = qeth_l2_set_offline, @@ -1152,8 +1282,9 @@ struct ccwgroup_driver qeth_l2_ccwgroup_driver = { .freeze = qeth_l2_pm_suspend, .thaw = qeth_l2_pm_resume, .restore = qeth_l2_pm_resume, + .control_event_handler = qeth_l2_control_event, }; -EXPORT_SYMBOL_GPL(qeth_l2_ccwgroup_driver); +EXPORT_SYMBOL_GPL(qeth_l2_discipline); static int qeth_osn_send_control_data(struct qeth_card *card, int len, struct qeth_cmd_buffer *iob) @@ -1161,12 +1292,12 @@ static int qeth_osn_send_control_data(struct qeth_card *card, int len, unsigned long flags; int rc = 0; - QETH_DBF_TEXT(TRACE, 5, "osndctrd"); + QETH_CARD_TEXT(card, 5, "osndctrd"); wait_event(card->wait_q, atomic_cmpxchg(&card->write.irq_pending, 0, 1) == 0); qeth_prepare_control_data(card, len, iob); - QETH_DBF_TEXT(TRACE, 6, "osnoirqp"); + QETH_CARD_TEXT(card, 6, "osnoirqp"); spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags); rc = ccw_device_start(card->write.ccwdev, &card->write.ccw, (addr_t) iob, 0, 0); @@ -1174,7 +1305,7 @@ static int qeth_osn_send_control_data(struct qeth_card *card, int len, if (rc) { QETH_DBF_MESSAGE(2, "qeth_osn_send_control_data: " "ccw_device_start rc = %i\n", rc); - QETH_DBF_TEXT_(TRACE, 2, " err%d", rc); + QETH_CARD_TEXT_(card, 2, " err%d", rc); qeth_release_buffer(iob->channel, iob); atomic_set(&card->write.irq_pending, 0); wake_up(&card->wait_q); @@ -1187,7 +1318,7 @@ static int qeth_osn_send_ipa_cmd(struct qeth_card *card, { u16 s1, s2; - QETH_DBF_TEXT(TRACE, 4, "osndipa"); + QETH_CARD_TEXT(card, 4, "osndipa"); qeth_prepare_ipa_cmd(card, iob, QETH_PROT_OSN2); s1 = (u16)(IPA_PDU_HEADER_SIZE + data_len); @@ -1205,12 +1336,12 @@ int qeth_osn_assist(struct net_device *dev, void *data, int data_len) struct qeth_card *card; int rc; - QETH_DBF_TEXT(TRACE, 2, "osnsdmc"); if (!dev) return -ENODEV; card = dev->ml_priv; if (!card) return -ENODEV; + QETH_CARD_TEXT(card, 2, "osnsdmc"); if ((card->state != CARD_STATE_UP) && (card->state != CARD_STATE_SOFTSETUP)) return -ENODEV; @@ -1227,13 +1358,13 @@ int qeth_osn_register(unsigned char *read_dev_no, struct net_device **dev, { struct qeth_card *card; - QETH_DBF_TEXT(TRACE, 2, "osnreg"); *dev = qeth_l2_netdev_by_devno(read_dev_no); if (*dev == NULL) return -ENODEV; card = (*dev)->ml_priv; if (!card) return -ENODEV; + QETH_CARD_TEXT(card, 2, "osnreg"); if ((assist_cb == NULL) || (data_cb == NULL)) return -EINVAL; card->osn_info.assist_cb = assist_cb; @@ -1246,18 +1377,606 @@ void qeth_osn_deregister(struct net_device *dev) { struct qeth_card *card; - QETH_DBF_TEXT(TRACE, 2, "osndereg"); if (!dev) return; card = dev->ml_priv; if (!card) return; + QETH_CARD_TEXT(card, 2, "osndereg"); card->osn_info.assist_cb = NULL; card->osn_info.data_cb = NULL; return; } EXPORT_SYMBOL(qeth_osn_deregister); +/* SETBRIDGEPORT support, async notifications */ + +enum qeth_an_event_type 
{anev_reg_unreg, anev_abort, anev_reset}; + +/** + * qeth_bridge_emit_host_event() - bridgeport address change notification + * @card: qeth_card structure pointer, for udev events. + * @evtype: "normal" register/unregister, or abort, or reset. For abort + * and reset, token and addr_lnid are unused and may be NULL. + * @code: event bitmask: high-order bit 0x80 set means removal of an + * object, cleared means addition of an object. + * 0x01 - VLAN, 0x02 - MAC, 0x03 - VLAN and MAC. + * @token: "network token" structure identifying physical address of the port. + * @addr_lnid: pointer to structure with MAC address and VLAN ID. + * + * This function is called when registrations and deregistrations are + * reported by the hardware, and also, when notifications are enabled, + * for all currently registered addresses. + */ +static void qeth_bridge_emit_host_event(struct qeth_card *card, + enum qeth_an_event_type evtype, + u8 code, struct net_if_token *token, struct mac_addr_lnid *addr_lnid) +{ + char str[7][32]; + char *env[8]; + int i = 0; + + switch (evtype) { + case anev_reg_unreg: + snprintf(str[i], sizeof(str[i]), "BRIDGEDHOST=%s", + (code & IPA_ADDR_CHANGE_CODE_REMOVAL) + ? "deregister" : "register"); + env[i] = str[i]; i++; + if (code & IPA_ADDR_CHANGE_CODE_VLANID) { + snprintf(str[i], sizeof(str[i]), "VLAN=%d", + addr_lnid->lnid); + env[i] = str[i]; i++; + } + if (code & IPA_ADDR_CHANGE_CODE_MACADDR) { + snprintf(str[i], sizeof(str[i]), "MAC=%pM6", + &addr_lnid->mac); + env[i] = str[i]; i++; + } + snprintf(str[i], sizeof(str[i]), "NTOK_BUSID=%x.%x.%04x", + token->cssid, token->ssid, token->devnum); + env[i] = str[i]; i++; + snprintf(str[i], sizeof(str[i]), "NTOK_IID=%02x", token->iid); + env[i] = str[i]; i++; + snprintf(str[i], sizeof(str[i]), "NTOK_CHPID=%02x", + token->chpid); + env[i] = str[i]; i++; + snprintf(str[i], sizeof(str[i]), "NTOK_CHID=%04x", token->chid); + env[i] = str[i]; i++; + break; + case anev_abort: + snprintf(str[i], sizeof(str[i]), "BRIDGEDHOST=abort"); + env[i] = str[i]; i++; + break; + case anev_reset: + snprintf(str[i], sizeof(str[i]), "BRIDGEDHOST=reset"); + env[i] = str[i]; i++; + break; + } + env[i] = NULL; + kobject_uevent_env(&card->gdev->dev.kobj, KOBJ_CHANGE, env); +} + +struct qeth_bridge_state_data { + struct work_struct worker; + struct qeth_card *card; + struct qeth_sbp_state_change qports; +}; + +static void qeth_bridge_state_change_worker(struct work_struct *work) +{ + struct qeth_bridge_state_data *data = + container_of(work, struct qeth_bridge_state_data, worker); + /* We are only interested in the first entry - local port */ + struct qeth_sbp_port_entry *entry = &data->qports.entry[0]; + char env_locrem[32]; + char env_role[32]; + char env_state[32]; + char *env[] = { + env_locrem, + env_role, + env_state, + NULL + }; + + /* Role should not change by itself, but if it did, */ + /* information from the hardware is authoritative. */ + mutex_lock(&data->card->conf_mutex); + data->card->options.sbp.role = entry->role; + mutex_unlock(&data->card->conf_mutex); + + snprintf(env_locrem, sizeof(env_locrem), "BRIDGEPORT=statechange"); + snprintf(env_role, sizeof(env_role), "ROLE=%s", + (entry->role == QETH_SBP_ROLE_NONE) ? "none" : + (entry->role == QETH_SBP_ROLE_PRIMARY) ? "primary" : + (entry->role == QETH_SBP_ROLE_SECONDARY) ? "secondary" : + "<INVALID>"); + snprintf(env_state, sizeof(env_state), "STATE=%s", + (entry->state == QETH_SBP_STATE_INACTIVE) ? "inactive" : + (entry->state == QETH_SBP_STATE_STANDBY) ?
"standby" : + (entry->state == QETH_SBP_STATE_ACTIVE) ? "active" : + "<INVALID>"); + kobject_uevent_env(&data->card->gdev->dev.kobj, + KOBJ_CHANGE, env); + kfree(data); +} + +static void qeth_bridge_state_change(struct qeth_card *card, + struct qeth_ipa_cmd *cmd) +{ + struct qeth_sbp_state_change *qports = + &cmd->data.sbp.data.state_change; + struct qeth_bridge_state_data *data; + int extrasize; + + QETH_CARD_TEXT(card, 2, "brstchng"); + if (qports->entry_length != sizeof(struct qeth_sbp_port_entry)) { + QETH_CARD_TEXT_(card, 2, "BPsz%.8d", qports->entry_length); + return; + } + extrasize = sizeof(struct qeth_sbp_port_entry) * qports->num_entries; + data = kzalloc(sizeof(struct qeth_bridge_state_data) + extrasize, + GFP_ATOMIC); + if (!data) { + QETH_CARD_TEXT(card, 2, "BPSalloc"); + return; + } + INIT_WORK(&data->worker, qeth_bridge_state_change_worker); + data->card = card; + memcpy(&data->qports, qports, + sizeof(struct qeth_sbp_state_change) + extrasize); + queue_work(qeth_wq, &data->worker); +} + +struct qeth_bridge_host_data { + struct work_struct worker; + struct qeth_card *card; + struct qeth_ipacmd_addr_change hostevs; +}; + +static void qeth_bridge_host_event_worker(struct work_struct *work) +{ + struct qeth_bridge_host_data *data = + container_of(work, struct qeth_bridge_host_data, worker); + int i; + + if (data->hostevs.lost_event_mask) { + dev_info(&data->card->gdev->dev, +"Address notification from the HiperSockets Bridge Port stopped %s (%s)\n", + data->card->dev->name, + (data->hostevs.lost_event_mask == 0x01) + ? "Overflow" + : (data->hostevs.lost_event_mask == 0x02) + ? "Bridge port state change" + : "Unknown reason"); + mutex_lock(&data->card->conf_mutex); + data->card->options.sbp.hostnotification = 0; + mutex_unlock(&data->card->conf_mutex); + qeth_bridge_emit_host_event(data->card, anev_abort, + 0, NULL, NULL); + } else + for (i = 0; i < data->hostevs.num_entries; i++) { + struct qeth_ipacmd_addr_change_entry *entry = + &data->hostevs.entry[i]; + qeth_bridge_emit_host_event(data->card, + anev_reg_unreg, + entry->change_code, + &entry->token, &entry->addr_lnid); + } + kfree(data); +} + +static void qeth_bridge_host_event(struct qeth_card *card, + struct qeth_ipa_cmd *cmd) +{ + struct qeth_ipacmd_addr_change *hostevs = + &cmd->data.addrchange; + struct qeth_bridge_host_data *data; + int extrasize; + + QETH_CARD_TEXT(card, 2, "brhostev"); + if (cmd->hdr.return_code != 0x0000) { + if (cmd->hdr.return_code == 0x0010) { + if (hostevs->lost_event_mask == 0x00) + hostevs->lost_event_mask = 0xff; + } else { + QETH_CARD_TEXT_(card, 2, "BPHe%04x", + cmd->hdr.return_code); + return; + } + } + extrasize = sizeof(struct qeth_ipacmd_addr_change_entry) * + hostevs->num_entries; + data = kzalloc(sizeof(struct qeth_bridge_host_data) + extrasize, + GFP_ATOMIC); + if (!data) { + QETH_CARD_TEXT(card, 2, "BPHalloc"); + return; + } + INIT_WORK(&data->worker, qeth_bridge_host_event_worker); + data->card = card; + memcpy(&data->hostevs, hostevs, + sizeof(struct qeth_ipacmd_addr_change) + extrasize); + queue_work(qeth_wq, &data->worker); +} + +/* SETBRIDGEPORT support; sending commands */ + +struct _qeth_sbp_cbctl { + u16 ipa_rc; + u16 cmd_rc; + union { + u32 supported; + struct { + enum qeth_sbp_roles *role; + enum qeth_sbp_states *state; + } qports; + } data; +}; + +/** + * qeth_bridgeport_makerc() - derive "traditional" error from hardware codes. + * @card: qeth_card structure pointer, for debug messages. + * @cbctl: state structure with hardware return codes. 
+ * @setcmd: IPA command code + * + * Returns negative errno-compatible error indication or 0 on success. + */ +static int qeth_bridgeport_makerc(struct qeth_card *card, + struct _qeth_sbp_cbctl *cbctl, enum qeth_ipa_sbp_cmd setcmd) +{ + int rc; + + switch (cbctl->ipa_rc) { + case IPA_RC_SUCCESS: + switch (cbctl->cmd_rc) { + case 0x0000: + rc = 0; + break; + case 0x0004: + rc = -ENOSYS; + break; + case 0x000C: /* Not configured as bridge Port */ + rc = -ENODEV; /* maybe not the best code here? */ + dev_err(&card->gdev->dev, + "The HiperSockets device is not configured as a Bridge Port\n"); + break; + case 0x0014: /* Another device is Primary */ + switch (setcmd) { + case IPA_SBP_SET_PRIMARY_BRIDGE_PORT: + rc = -EEXIST; + dev_err(&card->gdev->dev, + "The HiperSockets LAN already has a primary Bridge Port\n"); + break; + case IPA_SBP_SET_SECONDARY_BRIDGE_PORT: + rc = -EBUSY; + dev_err(&card->gdev->dev, + "The HiperSockets device is already a primary Bridge Port\n"); + break; + default: + rc = -EIO; + } + break; + case 0x0018: /* This device is currently Secondary */ + rc = -EBUSY; + dev_err(&card->gdev->dev, + "The HiperSockets device is already a secondary Bridge Port\n"); + break; + case 0x001C: /* Limit for Secondary devices reached */ + rc = -EEXIST; + dev_err(&card->gdev->dev, + "The HiperSockets LAN cannot have more secondary Bridge Ports\n"); + break; + case 0x0024: /* This device is currently Primary */ + rc = -EBUSY; + dev_err(&card->gdev->dev, + "The HiperSockets device is already a primary Bridge Port\n"); + break; + case 0x0020: /* Not authorized by zManager */ + rc = -EACCES; + dev_err(&card->gdev->dev, + "The HiperSockets device is not authorized to be a Bridge Port\n"); + break; + default: + rc = -EIO; + } + break; + case IPA_RC_NOTSUPP: + rc = -ENOSYS; + break; + case IPA_RC_UNSUPPORTED_COMMAND: + rc = -ENOSYS; + break; + default: + rc = -EIO; + } + if (rc) { + QETH_CARD_TEXT_(card, 2, "SBPi%04x", cbctl->ipa_rc); + QETH_CARD_TEXT_(card, 2, "SBPc%04x", cbctl->cmd_rc); + } + return rc; +} + +static int qeth_bridgeport_query_support_cb(struct qeth_card *card, + struct qeth_reply *reply, unsigned long data) +{ + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; + struct _qeth_sbp_cbctl *cbctl = (struct _qeth_sbp_cbctl *)reply->param; + QETH_CARD_TEXT(card, 2, "brqsupcb"); + cbctl->ipa_rc = cmd->hdr.return_code; + cbctl->cmd_rc = cmd->data.sbp.hdr.return_code; + if ((cbctl->ipa_rc == 0) && (cbctl->cmd_rc == 0)) { + cbctl->data.supported = + cmd->data.sbp.data.query_cmds_supp.supported_cmds; + } else { + cbctl->data.supported = 0; + } + return 0; +} + +/** + * qeth_bridgeport_query_support() - store bitmask of supported subfunctions. + * @card: qeth_card structure pointer. + * + * Sets bitmask of supported setbridgeport subfunctions in the qeth_card + * structure: card->options.sbp.supported_funcs. + */
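
Every SETBRIDGEPORT caller in this file funnels its result through qeth_bridgeport_makerc() above: the completion callback records the transport-level ipa_rc and the subcommand-level cmd_rc in a _qeth_sbp_cbctl, and only afterwards is the pair folded into a single errno value. qeth_bridgeport_query_support() right below is the first user; the generic calling shape is roughly the following, where my_issue_sbp() is a hypothetical helper that builds and sends the subcommand:

	/* Hedged sketch of the two-level status handling used by all callers. */
	static int my_run_sbp(struct qeth_card *card, enum qeth_ipa_sbp_cmd setcmd)
	{
		struct _qeth_sbp_cbctl cbctl;
		int rc;

		rc = my_issue_sbp(card, setcmd, &cbctl);	/* I/O-level errors */
		if (rc)
			return rc;
		/* fold ipa_rc and cmd_rc into one errno, emitting messages */
		return qeth_bridgeport_makerc(card, &cbctl, setcmd);
	}
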
+static void qeth_bridgeport_query_support(struct qeth_card *card) +{ + struct qeth_cmd_buffer *iob; + struct qeth_ipa_cmd *cmd; + struct _qeth_sbp_cbctl cbctl; + + QETH_CARD_TEXT(card, 2, "brqsuppo"); + iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETBRIDGEPORT, 0); + cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); + cmd->data.sbp.hdr.cmdlength = + sizeof(struct qeth_ipacmd_sbp_hdr) + + sizeof(struct qeth_sbp_query_cmds_supp); + cmd->data.sbp.hdr.command_code = + IPA_SBP_QUERY_COMMANDS_SUPPORTED; + cmd->data.sbp.hdr.used_total = 1; + cmd->data.sbp.hdr.seq_no = 1; + if (qeth_send_ipa_cmd(card, iob, qeth_bridgeport_query_support_cb, + (void *)&cbctl) || + qeth_bridgeport_makerc(card, &cbctl, + IPA_SBP_QUERY_COMMANDS_SUPPORTED)) { + /* non-zero makerc signifies failure, and produces messages */ + card->options.sbp.role = QETH_SBP_ROLE_NONE; + return; + } + card->options.sbp.supported_funcs = cbctl.data.supported; +} + +static int qeth_bridgeport_query_ports_cb(struct qeth_card *card, + struct qeth_reply *reply, unsigned long data) +{ + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; + struct qeth_sbp_query_ports *qports = &cmd->data.sbp.data.query_ports; + struct _qeth_sbp_cbctl *cbctl = (struct _qeth_sbp_cbctl *)reply->param; + + QETH_CARD_TEXT(card, 2, "brqprtcb"); + cbctl->ipa_rc = cmd->hdr.return_code; + cbctl->cmd_rc = cmd->data.sbp.hdr.return_code; + if ((cbctl->ipa_rc != 0) || (cbctl->cmd_rc != 0)) + return 0; + if (qports->entry_length != sizeof(struct qeth_sbp_port_entry)) { + cbctl->cmd_rc = 0xffff; + QETH_CARD_TEXT_(card, 2, "SBPs%04x", qports->entry_length); + return 0; + } + /* first entry contains the state of the local port */ + if (qports->num_entries > 0) { + if (cbctl->data.qports.role) + *cbctl->data.qports.role = qports->entry[0].role; + if (cbctl->data.qports.state) + *cbctl->data.qports.state = qports->entry[0].state; + } + return 0; +} + +/** + * qeth_bridgeport_query_ports() - query local bridgeport status. + * @card: qeth_card structure pointer. + * @role: Role of the port: 0-none, 1-primary, 2-secondary. + * @state: State of the port: 0-inactive, 1-standby, 2-active. + * + * Returns negative errno-compatible error indication or 0 on success. + * + * 'role' and 'state' are not updated in case of hardware operation failure.
+ */ +int qeth_bridgeport_query_ports(struct qeth_card *card, + enum qeth_sbp_roles *role, enum qeth_sbp_states *state) +{ + int rc = 0; + struct qeth_cmd_buffer *iob; + struct qeth_ipa_cmd *cmd; + struct _qeth_sbp_cbctl cbctl = { + .data = { + .qports = { + .role = role, + .state = state, + }, + }, + }; + + QETH_CARD_TEXT(card, 2, "brqports"); + if (!(card->options.sbp.supported_funcs & IPA_SBP_QUERY_BRIDGE_PORTS)) + return -EOPNOTSUPP; + iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETBRIDGEPORT, 0); + cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); + cmd->data.sbp.hdr.cmdlength = + sizeof(struct qeth_ipacmd_sbp_hdr); + cmd->data.sbp.hdr.command_code = + IPA_SBP_QUERY_BRIDGE_PORTS; + cmd->data.sbp.hdr.used_total = 1; + cmd->data.sbp.hdr.seq_no = 1; + rc = qeth_send_ipa_cmd(card, iob, qeth_bridgeport_query_ports_cb, + (void *)&cbctl); + if (rc) + return rc; + rc = qeth_bridgeport_makerc(card, &cbctl, IPA_SBP_QUERY_BRIDGE_PORTS); + if (rc) + return rc; + return 0; +} +EXPORT_SYMBOL_GPL(qeth_bridgeport_query_ports); + +static int qeth_bridgeport_set_cb(struct qeth_card *card, + struct qeth_reply *reply, unsigned long data) +{ + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data; + struct _qeth_sbp_cbctl *cbctl = (struct _qeth_sbp_cbctl *)reply->param; + QETH_CARD_TEXT(card, 2, "brsetrcb"); + cbctl->ipa_rc = cmd->hdr.return_code; + cbctl->cmd_rc = cmd->data.sbp.hdr.return_code; + return 0; +} + +/** + * qeth_bridgeport_setrole() - Assign a new role to the port. + * @card: qeth_card structure pointer. + * @role: Role to assign. + * + * Returns negative errno-compatible error indication or 0 on success. + */ +int qeth_bridgeport_setrole(struct qeth_card *card, enum qeth_sbp_roles role) +{ + int rc = 0; + int cmdlength; + struct qeth_cmd_buffer *iob; + struct qeth_ipa_cmd *cmd; + struct _qeth_sbp_cbctl cbctl; + enum qeth_ipa_sbp_cmd setcmd; + + QETH_CARD_TEXT(card, 2, "brsetrol"); + switch (role) { + case QETH_SBP_ROLE_NONE: + setcmd = IPA_SBP_RESET_BRIDGE_PORT_ROLE; + cmdlength = sizeof(struct qeth_ipacmd_sbp_hdr) + + sizeof(struct qeth_sbp_reset_role); + break; + case QETH_SBP_ROLE_PRIMARY: + setcmd = IPA_SBP_SET_PRIMARY_BRIDGE_PORT; + cmdlength = sizeof(struct qeth_ipacmd_sbp_hdr) + + sizeof(struct qeth_sbp_set_primary); + break; + case QETH_SBP_ROLE_SECONDARY: + setcmd = IPA_SBP_SET_SECONDARY_BRIDGE_PORT; + cmdlength = sizeof(struct qeth_ipacmd_sbp_hdr) + + sizeof(struct qeth_sbp_set_secondary); + break; + default: + return -EINVAL; + } + if (!(card->options.sbp.supported_funcs & setcmd)) + return -EOPNOTSUPP; + iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETBRIDGEPORT, 0); + cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); + cmd->data.sbp.hdr.cmdlength = cmdlength; + cmd->data.sbp.hdr.command_code = setcmd; + cmd->data.sbp.hdr.used_total = 1; + cmd->data.sbp.hdr.seq_no = 1; + rc = qeth_send_ipa_cmd(card, iob, qeth_bridgeport_set_cb, + (void *)&cbctl); + if (rc) + return rc; + rc = qeth_bridgeport_makerc(card, &cbctl, setcmd); + return rc; +} + +/** + * qeth_anset_makerc() - derive "traditional" error from hardware codes. + * @card: qeth_card structure pointer, for debug messages. + * + * Returns negative errno-compatible error indication or 0 on success.
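
qeth_bridgeport_query_ports() and qeth_bridgeport_setrole() above also share their request-building boilerplate: obtain a command buffer, fill in the SBP sub-header (command length, subcommand code, used_total and seq_no of 1) and send it with a completion callback. The driver open-codes this in each caller; factored out as a hedged sketch (my_send_sbp() is hypothetical):

	/* Hedged sketch of the shared SETBRIDGEPORT request set-up. */
	static int my_send_sbp(struct qeth_card *card,
			       enum qeth_ipa_sbp_cmd setcmd, int datalen,
			       struct _qeth_sbp_cbctl *cbctl)
	{
		struct qeth_cmd_buffer *iob;
		struct qeth_ipa_cmd *cmd;

		if (!(card->options.sbp.supported_funcs & setcmd))
			return -EOPNOTSUPP;	/* subcommand not announced by hw */
		iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETBRIDGEPORT, 0);
		cmd = (struct qeth_ipa_cmd *)(iob->data + IPA_PDU_HEADER_SIZE);
		cmd->data.sbp.hdr.cmdlength =
			sizeof(struct qeth_ipacmd_sbp_hdr) + datalen;
		cmd->data.sbp.hdr.command_code = setcmd;
		cmd->data.sbp.hdr.used_total = 1;
		cmd->data.sbp.hdr.seq_no = 1;
		return qeth_send_ipa_cmd(card, iob, qeth_bridgeport_set_cb,
					 (void *)cbctl);
	}
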
+ */ +static int qeth_anset_makerc(struct qeth_card *card, int pnso_rc, u16 response) +{ + int rc; + + if (pnso_rc == 0) + switch (response) { + case 0x0001: + rc = 0; + break; + case 0x0004: + case 0x0100: + case 0x0106: + rc = -ENOSYS; + dev_err(&card->gdev->dev, + "Setting address notification failed\n"); + break; + case 0x0107: + rc = -EAGAIN; + break; + default: + rc = -EIO; + } + else + rc = -EIO; + + if (rc) { + QETH_CARD_TEXT_(card, 2, "SBPp%04x", pnso_rc); + QETH_CARD_TEXT_(card, 2, "SBPr%04x", response); + } + return rc; +} + +static void qeth_bridgeport_an_set_cb(void *priv, + enum qdio_brinfo_entry_type type, void *entry) +{ + struct qeth_card *card = (struct qeth_card *)priv; + struct qdio_brinfo_entry_l2 *l2entry; + u8 code; + + if (type != l2_addr_lnid) { + WARN_ON_ONCE(1); + return; + } + + l2entry = (struct qdio_brinfo_entry_l2 *)entry; + code = IPA_ADDR_CHANGE_CODE_MACADDR; + if (l2entry->addr_lnid.lnid) + code |= IPA_ADDR_CHANGE_CODE_VLANID; + qeth_bridge_emit_host_event(card, anev_reg_unreg, code, + (struct net_if_token *)&l2entry->nit, + (struct mac_addr_lnid *)&l2entry->addr_lnid); +} + +/** + * qeth_bridgeport_an_set() - Enable or disable bridgeport address notification + * @card: qeth_card structure pointer. + * @enable: 0 - disable, non-zero - enable notifications + * + * Returns negative errno-compatible error indication or 0 on success. + * + * On enable, emits a series of address notifications udev events for all + * currently registered hosts. + */ +int qeth_bridgeport_an_set(struct qeth_card *card, int enable) +{ + int rc; + u16 response; + struct ccw_device *ddev; + struct subchannel_id schid; + + if (!card) + return -EINVAL; + if (!card->options.sbp.supported_funcs) + return -EOPNOTSUPP; + ddev = CARD_DDEV(card); + ccw_device_get_schid(ddev, &schid); + + if (enable) { + qeth_bridge_emit_host_event(card, anev_reset, 0, NULL, NULL); + rc = qdio_pnso_brinfo(schid, 1, &response, + qeth_bridgeport_an_set_cb, card); + } else + rc = qdio_pnso_brinfo(schid, 0, &response, NULL, NULL); + return qeth_anset_makerc(card, rc, response); +} +EXPORT_SYMBOL_GPL(qeth_bridgeport_an_set); + module_init(qeth_l2_init); module_exit(qeth_l2_exit); MODULE_AUTHOR("Frank Blaschka <frank.blaschka@de.ibm.com>"); diff --git a/drivers/s390/net/qeth_l2_sys.c b/drivers/s390/net/qeth_l2_sys.c new file mode 100644 index 00000000000..ae1bc04b865 --- /dev/null +++ b/drivers/s390/net/qeth_l2_sys.c @@ -0,0 +1,223 @@ +/* + * Copyright IBM Corp. 
2013 + * Author(s): Eugene Crosser <eugene.crosser@ru.ibm.com> + */ + +#include <linux/slab.h> +#include <asm/ebcdic.h> +#include "qeth_l2.h" + +#define QETH_DEVICE_ATTR(_id, _name, _mode, _show, _store) \ +struct device_attribute dev_attr_##_id = __ATTR(_name, _mode, _show, _store) + +static int qeth_card_hw_is_reachable(struct qeth_card *card) +{ + return (card->state == CARD_STATE_SOFTSETUP) || + (card->state == CARD_STATE_UP); +} + +static ssize_t qeth_bridge_port_role_state_show(struct device *dev, + struct device_attribute *attr, char *buf, + int show_state) +{ + struct qeth_card *card = dev_get_drvdata(dev); + enum qeth_sbp_states state = QETH_SBP_STATE_INACTIVE; + int rc = 0; + char *word; + + if (!card) + return -EINVAL; + + mutex_lock(&card->conf_mutex); + + if (qeth_card_hw_is_reachable(card) && + card->options.sbp.supported_funcs) + rc = qeth_bridgeport_query_ports(card, + &card->options.sbp.role, &state); + if (!rc) { + if (show_state) + switch (state) { + case QETH_SBP_STATE_INACTIVE: + word = "inactive"; break; + case QETH_SBP_STATE_STANDBY: + word = "standby"; break; + case QETH_SBP_STATE_ACTIVE: + word = "active"; break; + default: + rc = -EIO; + } + else + switch (card->options.sbp.role) { + case QETH_SBP_ROLE_NONE: + word = "none"; break; + case QETH_SBP_ROLE_PRIMARY: + word = "primary"; break; + case QETH_SBP_ROLE_SECONDARY: + word = "secondary"; break; + default: + rc = -EIO; + } + if (rc) + QETH_CARD_TEXT_(card, 2, "SBP%02x:%02x", + card->options.sbp.role, state); + else + rc = sprintf(buf, "%s\n", word); + } + + mutex_unlock(&card->conf_mutex); + + return rc; +} + +static ssize_t qeth_bridge_port_role_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return qeth_bridge_port_role_state_show(dev, attr, buf, 0); +} + +static ssize_t qeth_bridge_port_role_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct qeth_card *card = dev_get_drvdata(dev); + int rc = 0; + enum qeth_sbp_roles role; + + if (!card) + return -EINVAL; + if (sysfs_streq(buf, "primary")) + role = QETH_SBP_ROLE_PRIMARY; + else if (sysfs_streq(buf, "secondary")) + role = QETH_SBP_ROLE_SECONDARY; + else if (sysfs_streq(buf, "none")) + role = QETH_SBP_ROLE_NONE; + else + return -EINVAL; + + mutex_lock(&card->conf_mutex); + + if (qeth_card_hw_is_reachable(card)) { + rc = qeth_bridgeport_setrole(card, role); + if (!rc) + card->options.sbp.role = role; + } else + card->options.sbp.role = role; + + mutex_unlock(&card->conf_mutex); + + return rc ? 
rc : count; +} + +static DEVICE_ATTR(bridge_role, 0644, qeth_bridge_port_role_show, + qeth_bridge_port_role_store); + +static ssize_t qeth_bridge_port_state_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return qeth_bridge_port_role_state_show(dev, attr, buf, 1); +} + +static DEVICE_ATTR(bridge_state, 0644, qeth_bridge_port_state_show, + NULL); + +static ssize_t qeth_bridgeport_hostnotification_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct qeth_card *card = dev_get_drvdata(dev); + int enabled; + + if (!card) + return -EINVAL; + + mutex_lock(&card->conf_mutex); + + enabled = card->options.sbp.hostnotification; + + mutex_unlock(&card->conf_mutex); + + return sprintf(buf, "%d\n", enabled); +} + +static ssize_t qeth_bridgeport_hostnotification_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct qeth_card *card = dev_get_drvdata(dev); + int rc = 0; + int enable; + + if (!card) + return -EINVAL; + + if (sysfs_streq(buf, "0")) + enable = 0; + else if (sysfs_streq(buf, "1")) + enable = 1; + else + return -EINVAL; + + mutex_lock(&card->conf_mutex); + + if (qeth_card_hw_is_reachable(card)) { + rc = qeth_bridgeport_an_set(card, enable); + if (!rc) + card->options.sbp.hostnotification = enable; + } else + card->options.sbp.hostnotification = enable; + + mutex_unlock(&card->conf_mutex); + + return rc ? rc : count; +} + +static DEVICE_ATTR(bridge_hostnotify, 0644, + qeth_bridgeport_hostnotification_show, + qeth_bridgeport_hostnotification_store); + +static struct attribute *qeth_l2_bridgeport_attrs[] = { + &dev_attr_bridge_role.attr, + &dev_attr_bridge_state.attr, + &dev_attr_bridge_hostnotify.attr, + NULL, +}; + +static struct attribute_group qeth_l2_bridgeport_attr_group = { + .attrs = qeth_l2_bridgeport_attrs, +}; + +int qeth_l2_create_device_attributes(struct device *dev) +{ + return sysfs_create_group(&dev->kobj, &qeth_l2_bridgeport_attr_group); +} + +void qeth_l2_remove_device_attributes(struct device *dev) +{ + sysfs_remove_group(&dev->kobj, &qeth_l2_bridgeport_attr_group); +} + +/** + * qeth_l2_setup_bridgeport_attrs() - set/restore attrs when turning online. + * @card: qeth_card structure pointer + * + * Note: this function is called with conf_mutex held by the caller + */ +void qeth_l2_setup_bridgeport_attrs(struct qeth_card *card) +{ + int rc; + + if (!card) + return; + if (!card->options.sbp.supported_funcs) + return; + if (card->options.sbp.role != QETH_SBP_ROLE_NONE) { + /* Conditional to avoid spurious error messages */ + qeth_bridgeport_setrole(card, card->options.sbp.role); + /* Let the callback function refresh the stored role value. */ + qeth_bridgeport_query_ports(card, + &card->options.sbp.role, NULL); + } + if (card->options.sbp.hostnotification) { + rc = qeth_bridgeport_an_set(card, 1); + if (rc) + card->options.sbp.hostnotification = 0; + } else + qeth_bridgeport_an_set(card, 0); +} diff --git a/drivers/s390/net/qeth_l3.h b/drivers/s390/net/qeth_l3.h index 8447d233d0b..29c1c00e3a0 100644 --- a/drivers/s390/net/qeth_l3.h +++ b/drivers/s390/net/qeth_l3.h @@ -1,6 +1,4 @@ /* - * drivers/s390/net/qeth_l3.h - * * Copyright IBM Corp. 
2007 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>, * Frank Pavlic <fpavlic@de.ibm.com>, @@ -62,7 +60,10 @@ void qeth_l3_del_vipa(struct qeth_card *, enum qeth_prot_versions, const u8 *); int qeth_l3_add_rxip(struct qeth_card *, enum qeth_prot_versions, const u8 *); void qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions, const u8 *); -int qeth_l3_set_large_send(struct qeth_card *, enum qeth_large_send_types); -int qeth_l3_set_rx_csum(struct qeth_card *, enum qeth_checksum_types); +int qeth_l3_is_addr_covered_by_ipato(struct qeth_card *, struct qeth_ipaddr *); +struct qeth_ipaddr *qeth_l3_get_addr_buffer(enum qeth_prot_versions); +int qeth_l3_add_ip(struct qeth_card *, struct qeth_ipaddr *); +int qeth_l3_delete_ip(struct qeth_card *, struct qeth_ipaddr *); +void qeth_l3_set_ip_addr_list(struct qeth_card *); #endif /* __QETH_L3_H__ */ diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index b3b6e872d80..14e0b5810e8 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -1,6 +1,4 @@ /* - * drivers/s390/net/qeth_l3_main.c - * * Copyright IBM Corp. 2007, 2009 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>, * Frank Pavlic <fpavlic@de.ibm.com>, @@ -13,6 +11,7 @@ #include <linux/module.h> #include <linux/moduleparam.h> +#include <linux/bitops.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/kernel.h> @@ -22,13 +21,19 @@ #include <linux/ipv6.h> #include <linux/inetdevice.h> #include <linux/igmp.h> +#include <linux/slab.h> +#include <linux/if_vlan.h> #include <net/ip.h> #include <net/arp.h> +#include <net/route.h> +#include <net/ip6_fib.h> #include <net/ip6_checksum.h> +#include <net/iucv/af_iucv.h> #include "qeth_l3.h" + static int qeth_l3_set_offline(struct ccwgroup_device *); static int qeth_l3_recover(void *); static int qeth_l3_stop(struct net_device *); @@ -41,33 +46,6 @@ static int qeth_l3_deregister_addr_entry(struct qeth_card *, static int __qeth_l3_set_online(struct ccwgroup_device *, int); static int __qeth_l3_set_offline(struct ccwgroup_device *, int); -int qeth_l3_set_large_send(struct qeth_card *card, - enum qeth_large_send_types type) -{ - int rc = 0; - - card->options.large_send = type; - if (card->dev == NULL) - return 0; - - if (card->options.large_send == QETH_LARGE_SEND_TSO) { - if (qeth_is_supported(card, IPA_OUTBOUND_TSO)) { - card->dev->features |= NETIF_F_TSO | NETIF_F_SG | - NETIF_F_HW_CSUM; - } else { - card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG | - NETIF_F_HW_CSUM); - card->options.large_send = QETH_LARGE_SEND_NO; - rc = -EOPNOTSUPP; - } - } else { - card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG | - NETIF_F_HW_CSUM); - card->options.large_send = QETH_LARGE_SEND_NO; - } - return rc; -} - static int qeth_l3_isxdigit(char *buf) { while (*buf) { @@ -85,7 +63,7 @@ void qeth_l3_ipaddr4_to_string(const __u8 *addr, char *buf) int qeth_l3_string_to_ipaddr4(const char *buf, __u8 *addr) { int count = 0, rc = 0; - int in[4]; + unsigned int in[4]; char c; rc = sscanf(buf, "%u.%u.%u.%u%c", @@ -102,12 +80,7 @@ int qeth_l3_string_to_ipaddr4(const char *buf, __u8 *addr) void qeth_l3_ipaddr6_to_string(const __u8 *addr, char *buf) { - sprintf(buf, "%02x%02x:%02x%02x:%02x%02x:%02x%02x" - ":%02x%02x:%02x%02x:%02x%02x:%02x%02x", - addr[0], addr[1], addr[2], addr[3], - addr[4], addr[5], addr[6], addr[7], - addr[8], addr[9], addr[10], addr[11], - addr[12], addr[13], addr[14], addr[15]); + sprintf(buf, "%pI6", addr); } int qeth_l3_string_to_ipaddr6(const char *buf, __u8 *addr) @@ -194,7 
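Two small but easy-to-miss points in the address-parsing hunks above: the IPv6 formatter now leans on the kernel's %pI6 printf extension instead of open-coding sixteen hex pairs, and the IPv4 scratch array became unsigned int because the "%u" conversion in sscanf() requires a pointer to unsigned storage. A sketch of the corrected parse, with the same validation the driver applies:

	#include <linux/kernel.h>

	static int parse_ipv4(const char *buf, u8 *addr)
	{
		unsigned int in[4];	/* must be unsigned to match "%u" */
		char c;
		int i, rc;

		rc = sscanf(buf, "%u.%u.%u.%u%c",
			    &in[0], &in[1], &in[2], &in[3], &c);
		if (rc != 4 && (rc != 5 || c != '\n'))
			return -EINVAL;	/* allow exactly one trailing newline */
		for (i = 0; i < 4; i++) {
			if (in[i] > 255)
				return -EINVAL;
			addr[i] = in[i];
		}
		return 0;
	}

	static void format_ipv6(const u8 *addr, char *buf)
	{
		sprintf(buf, "%pI6", addr);	/* 16 bytes, colon-grouped output */
	}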
+167,7 @@ static void qeth_l3_convert_addr_to_bits(u8 *addr, u8 *bits, int len) } } -static int qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card, +int qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card, struct qeth_ipaddr *addr) { struct qeth_ipato_entry *ipatoe; @@ -286,7 +259,7 @@ static int __qeth_l3_insert_ip_todo(struct qeth_card *card, addr->users += add ? 1 : -1; if (add && (addr->type == QETH_IP_TYPE_NORMAL) && qeth_l3_is_addr_covered_by_ipato(card, addr)) { - QETH_DBF_TEXT(TRACE, 2, "tkovaddr"); + QETH_CARD_TEXT(card, 2, "tkovaddr"); addr->set_flags |= QETH_IPA_SETIP_TAKEOVER_FLAG; } list_add_tail(&addr->entry, card->ip_tbd_list); @@ -295,18 +268,18 @@ static int __qeth_l3_insert_ip_todo(struct qeth_card *card, } } -static int qeth_l3_delete_ip(struct qeth_card *card, struct qeth_ipaddr *addr) +int qeth_l3_delete_ip(struct qeth_card *card, struct qeth_ipaddr *addr) { unsigned long flags; int rc = 0; - QETH_DBF_TEXT(TRACE, 4, "delip"); + QETH_CARD_TEXT(card, 4, "delip"); if (addr->proto == QETH_PROT_IPV4) - QETH_DBF_HEX(TRACE, 4, &addr->u.a4.addr, 4); + QETH_CARD_HEX(card, 4, &addr->u.a4.addr, 4); else { - QETH_DBF_HEX(TRACE, 4, &addr->u.a6.addr, 8); - QETH_DBF_HEX(TRACE, 4, ((char *)&addr->u.a6.addr) + 8, 8); + QETH_CARD_HEX(card, 4, &addr->u.a6.addr, 8); + QETH_CARD_HEX(card, 4, ((char *)&addr->u.a6.addr) + 8, 8); } spin_lock_irqsave(&card->ip_lock, flags); rc = __qeth_l3_insert_ip_todo(card, addr, 0); @@ -314,17 +287,17 @@ static int qeth_l3_delete_ip(struct qeth_card *card, struct qeth_ipaddr *addr) return rc; } -static int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *addr) +int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *addr) { unsigned long flags; int rc = 0; - QETH_DBF_TEXT(TRACE, 4, "addip"); + QETH_CARD_TEXT(card, 4, "addip"); if (addr->proto == QETH_PROT_IPV4) - QETH_DBF_HEX(TRACE, 4, &addr->u.a4.addr, 4); + QETH_CARD_HEX(card, 4, &addr->u.a4.addr, 4); else { - QETH_DBF_HEX(TRACE, 4, &addr->u.a6.addr, 8); - QETH_DBF_HEX(TRACE, 4, ((char *)&addr->u.a6.addr) + 8, 8); + QETH_CARD_HEX(card, 4, &addr->u.a6.addr, 8); + QETH_CARD_HEX(card, 4, ((char *)&addr->u.a6.addr) + 8, 8); } spin_lock_irqsave(&card->ip_lock, flags); rc = __qeth_l3_insert_ip_todo(card, addr, 1); @@ -333,7 +306,7 @@ static int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *addr) } -static struct qeth_ipaddr *qeth_l3_get_addr_buffer( +struct qeth_ipaddr *qeth_l3_get_addr_buffer( enum qeth_prot_versions prot) { struct qeth_ipaddr *addr; @@ -352,10 +325,10 @@ static void qeth_l3_delete_mc_addresses(struct qeth_card *card) struct qeth_ipaddr *iptodo; unsigned long flags; - QETH_DBF_TEXT(TRACE, 4, "delmc"); + QETH_CARD_TEXT(card, 4, "delmc"); iptodo = qeth_l3_get_addr_buffer(QETH_PROT_IPV4); if (!iptodo) { - QETH_DBF_TEXT(TRACE, 2, "dmcnomem"); + QETH_CARD_TEXT(card, 2, "dmcnomem"); return; } iptodo->type = QETH_IP_TYPE_DEL_ALL_MC; @@ -449,23 +422,26 @@ again: list_splice(&fail_list, &card->ip_list); } -static void qeth_l3_set_ip_addr_list(struct qeth_card *card) +void qeth_l3_set_ip_addr_list(struct qeth_card *card) { struct list_head *tbd_list; struct qeth_ipaddr *todo, *addr; unsigned long flags; int rc; - QETH_DBF_TEXT(TRACE, 2, "sdiplist"); - QETH_DBF_HEX(TRACE, 2, &card, sizeof(void *)); + QETH_CARD_TEXT(card, 2, "sdiplist"); + QETH_CARD_HEX(card, 2, &card, sizeof(void *)); - if (card->options.sniffer) + if ((card->state != CARD_STATE_UP && + card->state != CARD_STATE_SOFTSETUP) || card->options.sniffer) { return; + } + spin_lock_irqsave(&card->ip_lock, 
flags); tbd_list = card->ip_tbd_list; - card->ip_tbd_list = kmalloc(sizeof(struct list_head), GFP_ATOMIC); + card->ip_tbd_list = kzalloc(sizeof(struct list_head), GFP_ATOMIC); if (!card->ip_tbd_list) { - QETH_DBF_TEXT(TRACE, 0, "silnomem"); + QETH_CARD_TEXT(card, 0, "silnomem"); card->ip_tbd_list = tbd_list; spin_unlock_irqrestore(&card->ip_lock, flags); return; @@ -510,13 +486,12 @@ static void qeth_l3_set_ip_addr_list(struct qeth_card *card) kfree(tbd_list); } -static void qeth_l3_clear_ip_list(struct qeth_card *card, int clean, - int recover) +static void qeth_l3_clear_ip_list(struct qeth_card *card, int recover) { struct qeth_ipaddr *addr, *tmp; unsigned long flags; - QETH_DBF_TEXT(TRACE, 4, "clearip"); + QETH_CARD_TEXT(card, 4, "clearip"); if (recover && card->options.sniffer) return; spin_lock_irqsave(&card->ip_lock, flags); @@ -530,11 +505,6 @@ static void qeth_l3_clear_ip_list(struct qeth_card *card, int clean, addr = list_entry(card->ip_list.next, struct qeth_ipaddr, entry); list_del_init(&addr->entry); - if (clean) { - spin_unlock_irqrestore(&card->ip_lock, flags); - qeth_l3_deregister_addr_entry(card, addr); - spin_lock_irqsave(&card->ip_lock, flags); - } if (!recover || addr->is_multicast) { kfree(addr); continue; @@ -576,7 +546,7 @@ static int qeth_l3_send_setdelmc(struct qeth_card *card, struct qeth_cmd_buffer *iob; struct qeth_ipa_cmd *cmd; - QETH_DBF_TEXT(TRACE, 4, "setdelmc"); + QETH_CARD_TEXT(card, 4, "setdelmc"); iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto); cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); @@ -614,8 +584,8 @@ static int qeth_l3_send_setdelip(struct qeth_card *card, struct qeth_ipa_cmd *cmd; __u8 netmask[16]; - QETH_DBF_TEXT(TRACE, 4, "setdelip"); - QETH_DBF_TEXT_(TRACE, 4, "flags%02X", flags); + QETH_CARD_TEXT(card, 4, "setdelip"); + QETH_CARD_TEXT_(card, 4, "flags%02X", flags); iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto); cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); @@ -644,7 +614,7 @@ static int qeth_l3_send_setrouting(struct qeth_card *card, struct qeth_ipa_cmd *cmd; struct qeth_cmd_buffer *iob; - QETH_DBF_TEXT(TRACE, 4, "setroutg"); + QETH_CARD_TEXT(card, 4, "setroutg"); iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETRTG, prot); cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); cmd->data.setrtg.type = (type); @@ -653,7 +623,7 @@ static int qeth_l3_send_setrouting(struct qeth_card *card, return rc; } -static void qeth_l3_correct_routing_type(struct qeth_card *card, +static int qeth_l3_correct_routing_type(struct qeth_card *card, enum qeth_routing_types *type, enum qeth_prot_versions prot) { if (card->info.type == QETH_CARD_TYPE_IQD) { @@ -662,7 +632,7 @@ static void qeth_l3_correct_routing_type(struct qeth_card *card, case PRIMARY_CONNECTOR: case SECONDARY_CONNECTOR: case MULTICAST_ROUTER: - return; + return 0; default: goto out_inval; } @@ -671,27 +641,30 @@ static void qeth_l3_correct_routing_type(struct qeth_card *card, case NO_ROUTER: case PRIMARY_ROUTER: case SECONDARY_ROUTER: - return; + return 0; case MULTICAST_ROUTER: if (qeth_is_ipafunc_supported(card, prot, IPA_OSA_MC_ROUTER)) - return; + return 0; default: goto out_inval; } } out_inval: *type = NO_ROUTER; + return -EINVAL; } int qeth_l3_setrouting_v4(struct qeth_card *card) { int rc; - QETH_DBF_TEXT(TRACE, 3, "setrtg4"); + QETH_CARD_TEXT(card, 3, "setrtg4"); - qeth_l3_correct_routing_type(card, &card->options.route4.type, + rc = qeth_l3_correct_routing_type(card, &card->options.route4.type, QETH_PROT_IPV4); + if (rc) + return 
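qeth_l3_set_ip_addr_list() above detaches the whole to-do list under the lock — installing a fresh (now kzalloc'ed) head so concurrent add/delete requests keep queueing — and only then walks the detached batch, because the per-entry IPA commands are far too slow to run under a spinlock. The same swap-and-drain idea, sketched with list_splice_init() in place of the driver's reallocation:

	#include <linux/list.h>
	#include <linux/slab.h>
	#include <linux/spinlock.h>

	struct todo_entry {
		struct list_head entry;
		/* ...address payload... */
	};

	static LIST_HEAD(pending);
	static DEFINE_SPINLOCK(pending_lock);

	static void drain_pending(void)
	{
		LIST_HEAD(batch);
		struct todo_entry *t, *tmp;
		unsigned long flags;

		spin_lock_irqsave(&pending_lock, flags);
		list_splice_init(&pending, &batch);	/* pending is empty again */
		spin_unlock_irqrestore(&pending_lock, flags);

		list_for_each_entry_safe(t, tmp, &batch, entry) {
			list_del(&t->entry);
			/* ...issue the slow SETIP/DELIP work, lock not held... */
			kfree(t);
		}
	}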
rc; rc = qeth_l3_send_setrouting(card, card->options.route4.type, QETH_PROT_IPV4); @@ -708,13 +681,15 @@ int qeth_l3_setrouting_v6(struct qeth_card *card) { int rc = 0; - QETH_DBF_TEXT(TRACE, 3, "setrtg6"); + QETH_CARD_TEXT(card, 3, "setrtg6"); #ifdef CONFIG_QETH_IPV6 if (!qeth_is_supported(card, IPA_IPV6)) return 0; - qeth_l3_correct_routing_type(card, &card->options.route6.type, + rc = qeth_l3_correct_routing_type(card, &card->options.route6.type, QETH_PROT_IPV6); + if (rc) + return rc; rc = qeth_l3_send_setrouting(card, card->options.route6.type, QETH_PROT_IPV6); @@ -752,7 +727,7 @@ int qeth_l3_add_ipato_entry(struct qeth_card *card, unsigned long flags; int rc = 0; - QETH_DBF_TEXT(TRACE, 2, "addipato"); + QETH_CARD_TEXT(card, 2, "addipato"); spin_lock_irqsave(&card->ip_lock, flags); list_for_each_entry(ipatoe, &card->ipato.entries, entry) { if (ipatoe->proto != new->proto) @@ -777,7 +752,7 @@ void qeth_l3_del_ipato_entry(struct qeth_card *card, struct qeth_ipato_entry *ipatoe, *tmp; unsigned long flags; - QETH_DBF_TEXT(TRACE, 2, "delipato"); + QETH_CARD_TEXT(card, 2, "delipato"); spin_lock_irqsave(&card->ip_lock, flags); list_for_each_entry_safe(ipatoe, tmp, &card->ipato.entries, entry) { if (ipatoe->proto != proto) @@ -805,11 +780,11 @@ int qeth_l3_add_vipa(struct qeth_card *card, enum qeth_prot_versions proto, ipaddr = qeth_l3_get_addr_buffer(proto); if (ipaddr) { if (proto == QETH_PROT_IPV4) { - QETH_DBF_TEXT(TRACE, 2, "addvipa4"); + QETH_CARD_TEXT(card, 2, "addvipa4"); memcpy(&ipaddr->u.a4.addr, addr, 4); ipaddr->u.a4.mask = 0; } else if (proto == QETH_PROT_IPV6) { - QETH_DBF_TEXT(TRACE, 2, "addvipa6"); + QETH_CARD_TEXT(card, 2, "addvipa6"); memcpy(&ipaddr->u.a6.addr, addr, 16); ipaddr->u.a6.pfxlen = 0; } @@ -824,6 +799,7 @@ int qeth_l3_add_vipa(struct qeth_card *card, enum qeth_prot_versions proto, rc = -EEXIST; spin_unlock_irqrestore(&card->ip_lock, flags); if (rc) { + kfree(ipaddr); return rc; } if (!qeth_l3_add_ip(card, ipaddr)) @@ -840,11 +816,11 @@ void qeth_l3_del_vipa(struct qeth_card *card, enum qeth_prot_versions proto, ipaddr = qeth_l3_get_addr_buffer(proto); if (ipaddr) { if (proto == QETH_PROT_IPV4) { - QETH_DBF_TEXT(TRACE, 2, "delvipa4"); + QETH_CARD_TEXT(card, 2, "delvipa4"); memcpy(&ipaddr->u.a4.addr, addr, 4); ipaddr->u.a4.mask = 0; } else if (proto == QETH_PROT_IPV6) { - QETH_DBF_TEXT(TRACE, 2, "delvipa6"); + QETH_CARD_TEXT(card, 2, "delvipa6"); memcpy(&ipaddr->u.a6.addr, addr, 16); ipaddr->u.a6.pfxlen = 0; } @@ -869,11 +845,11 @@ int qeth_l3_add_rxip(struct qeth_card *card, enum qeth_prot_versions proto, ipaddr = qeth_l3_get_addr_buffer(proto); if (ipaddr) { if (proto == QETH_PROT_IPV4) { - QETH_DBF_TEXT(TRACE, 2, "addrxip4"); + QETH_CARD_TEXT(card, 2, "addrxip4"); memcpy(&ipaddr->u.a4.addr, addr, 4); ipaddr->u.a4.mask = 0; } else if (proto == QETH_PROT_IPV6) { - QETH_DBF_TEXT(TRACE, 2, "addrxip6"); + QETH_CARD_TEXT(card, 2, "addrxip6"); memcpy(&ipaddr->u.a6.addr, addr, 16); ipaddr->u.a6.pfxlen = 0; } @@ -888,6 +864,7 @@ int qeth_l3_add_rxip(struct qeth_card *card, enum qeth_prot_versions proto, rc = -EEXIST; spin_unlock_irqrestore(&card->ip_lock, flags); if (rc) { + kfree(ipaddr); return rc; } if (!qeth_l3_add_ip(card, ipaddr)) @@ -904,11 +881,11 @@ void qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions proto, ipaddr = qeth_l3_get_addr_buffer(proto); if (ipaddr) { if (proto == QETH_PROT_IPV4) { - QETH_DBF_TEXT(TRACE, 2, "addrxip4"); + QETH_CARD_TEXT(card, 2, "addrxip4"); memcpy(&ipaddr->u.a4.addr, addr, 4); ipaddr->u.a4.mask = 0; } else if (proto 
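Note the added kfree(ipaddr) calls in qeth_l3_add_vipa() and qeth_l3_add_rxip() above: when the address is already registered the function bails out with -EEXIST, and before this fix the freshly allocated candidate buffer leaked on that path. The general shape of the bug and its fix:

	#include <linux/errno.h>
	#include <linux/slab.h>

	struct my_entry {
		int key;
	};

	static int insert_unique(struct my_entry *e);	/* hypothetical,
							 * -EEXIST on duplicates */

	static int add_unique(int key)
	{
		struct my_entry *e = kzalloc(sizeof(*e), GFP_KERNEL);
		int rc;

		if (!e)
			return -ENOMEM;
		e->key = key;

		rc = insert_unique(e);
		if (rc) {
			kfree(e);	/* without this, every duplicate add leaks */
			return rc;
		}
		return 0;
	}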
== QETH_PROT_IPV6) { - QETH_DBF_TEXT(TRACE, 2, "addrxip6"); + QETH_CARD_TEXT(card, 2, "addrxip6"); memcpy(&ipaddr->u.a6.addr, addr, 16); ipaddr->u.a6.pfxlen = 0; } @@ -928,15 +905,15 @@ static int qeth_l3_register_addr_entry(struct qeth_card *card, int cnt = 3; if (addr->proto == QETH_PROT_IPV4) { - QETH_DBF_TEXT(TRACE, 2, "setaddr4"); - QETH_DBF_HEX(TRACE, 3, &addr->u.a4.addr, sizeof(int)); + QETH_CARD_TEXT(card, 2, "setaddr4"); + QETH_CARD_HEX(card, 3, &addr->u.a4.addr, sizeof(int)); } else if (addr->proto == QETH_PROT_IPV6) { - QETH_DBF_TEXT(TRACE, 2, "setaddr6"); - QETH_DBF_HEX(TRACE, 3, &addr->u.a6.addr, 8); - QETH_DBF_HEX(TRACE, 3, ((char *)&addr->u.a6.addr) + 8, 8); + QETH_CARD_TEXT(card, 2, "setaddr6"); + QETH_CARD_HEX(card, 3, &addr->u.a6.addr, 8); + QETH_CARD_HEX(card, 3, ((char *)&addr->u.a6.addr) + 8, 8); } else { - QETH_DBF_TEXT(TRACE, 2, "setaddr?"); - QETH_DBF_HEX(TRACE, 3, addr, sizeof(struct qeth_ipaddr)); + QETH_CARD_TEXT(card, 2, "setaddr?"); + QETH_CARD_HEX(card, 3, addr, sizeof(struct qeth_ipaddr)); } do { if (addr->is_multicast) @@ -945,10 +922,10 @@ static int qeth_l3_register_addr_entry(struct qeth_card *card, rc = qeth_l3_send_setdelip(card, addr, IPA_CMD_SETIP, addr->set_flags); if (rc) - QETH_DBF_TEXT(TRACE, 2, "failed"); + QETH_CARD_TEXT(card, 2, "failed"); } while ((--cnt > 0) && rc); if (rc) { - QETH_DBF_TEXT(TRACE, 2, "FAILED"); + QETH_CARD_TEXT(card, 2, "FAILED"); qeth_l3_ipaddr_to_string(addr->proto, (u8 *)&addr->u, buf); dev_warn(&card->gdev->dev, "Registering IP address %s failed\n", buf); @@ -962,15 +939,15 @@ static int qeth_l3_deregister_addr_entry(struct qeth_card *card, int rc = 0; if (addr->proto == QETH_PROT_IPV4) { - QETH_DBF_TEXT(TRACE, 2, "deladdr4"); - QETH_DBF_HEX(TRACE, 3, &addr->u.a4.addr, sizeof(int)); + QETH_CARD_TEXT(card, 2, "deladdr4"); + QETH_CARD_HEX(card, 3, &addr->u.a4.addr, sizeof(int)); } else if (addr->proto == QETH_PROT_IPV6) { - QETH_DBF_TEXT(TRACE, 2, "deladdr6"); - QETH_DBF_HEX(TRACE, 3, &addr->u.a6.addr, 8); - QETH_DBF_HEX(TRACE, 3, ((char *)&addr->u.a6.addr) + 8, 8); + QETH_CARD_TEXT(card, 2, "deladdr6"); + QETH_CARD_HEX(card, 3, &addr->u.a6.addr, 8); + QETH_CARD_HEX(card, 3, ((char *)&addr->u.a6.addr) + 8, 8); } else { - QETH_DBF_TEXT(TRACE, 2, "deladdr?"); - QETH_DBF_HEX(TRACE, 3, addr, sizeof(struct qeth_ipaddr)); + QETH_CARD_TEXT(card, 2, "deladdr?"); + QETH_CARD_HEX(card, 3, addr, sizeof(struct qeth_ipaddr)); } if (addr->is_multicast) rc = qeth_l3_send_setdelmc(card, addr, IPA_CMD_DELIPM); @@ -978,7 +955,7 @@ static int qeth_l3_deregister_addr_entry(struct qeth_card *card, rc = qeth_l3_send_setdelip(card, addr, IPA_CMD_DELIP, addr->del_flags); if (rc) - QETH_DBF_TEXT(TRACE, 2, "failed"); + QETH_CARD_TEXT(card, 2, "failed"); return rc; } @@ -1004,57 +981,6 @@ static inline u8 qeth_l3_get_qeth_hdr_flags6(int cast_type) return ct | QETH_CAST_UNICAST; } -static int qeth_l3_send_setadp_mode(struct qeth_card *card, __u32 command, - __u32 mode) -{ - int rc; - struct qeth_cmd_buffer *iob; - struct qeth_ipa_cmd *cmd; - - QETH_DBF_TEXT(TRACE, 4, "adpmode"); - - iob = qeth_get_adapter_cmd(card, command, - sizeof(struct qeth_ipacmd_setadpparms)); - cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); - cmd->data.setadapterparms.data.mode = mode; - rc = qeth_send_ipa_cmd(card, iob, qeth_default_setadapterparms_cb, - NULL); - return rc; -} - -static int qeth_l3_setadapter_hstr(struct qeth_card *card) -{ - int rc; - - QETH_DBF_TEXT(TRACE, 4, "adphstr"); - - if (qeth_adp_supported(card, IPA_SETADP_SET_BROADCAST_MODE)) { - rc 
= qeth_l3_send_setadp_mode(card, - IPA_SETADP_SET_BROADCAST_MODE, - card->options.broadcast_mode); - if (rc) - QETH_DBF_MESSAGE(2, "couldn't set broadcast mode on " - "device %s: x%x\n", - CARD_BUS_ID(card), rc); - rc = qeth_l3_send_setadp_mode(card, - IPA_SETADP_ALTER_MAC_ADDRESS, - card->options.macaddr_mode); - if (rc) - QETH_DBF_MESSAGE(2, "couldn't set macaddr mode on " - "device %s: x%x\n", CARD_BUS_ID(card), rc); - return rc; - } - if (card->options.broadcast_mode == QETH_TR_BROADCAST_LOCAL) - QETH_DBF_MESSAGE(2, "set adapter parameters not available " - "to set broadcast mode, using ALLRINGS " - "on device %s:\n", CARD_BUS_ID(card)); - if (card->options.macaddr_mode == QETH_TR_MACADDR_CANONICAL) - QETH_DBF_MESSAGE(2, "set adapter parameters not available " - "to set macaddr mode, using NONCANONICAL " - "on device %s:\n", CARD_BUS_ID(card)); - return 0; -} - static int qeth_l3_setadapter_parms(struct qeth_card *card) { int rc; @@ -1080,10 +1006,6 @@ static int qeth_l3_setadapter_parms(struct qeth_card *card) " address failed\n"); } - if ((card->info.link_type == QETH_LINK_TYPE_HSTR) || - (card->info.link_type == QETH_LINK_TYPE_LANE_TR)) - rc = qeth_l3_setadapter_hstr(card); - return rc; } @@ -1092,7 +1014,7 @@ static int qeth_l3_default_setassparms_cb(struct qeth_card *card, { struct qeth_ipa_cmd *cmd; - QETH_DBF_TEXT(TRACE, 4, "defadpcb"); + QETH_CARD_TEXT(card, 4, "defadpcb"); cmd = (struct qeth_ipa_cmd *) data; if (cmd->hdr.return_code == 0) { @@ -1105,8 +1027,15 @@ static int qeth_l3_default_setassparms_cb(struct qeth_card *card, if (cmd->data.setassparms.hdr.assist_no == IPA_INBOUND_CHECKSUM && cmd->data.setassparms.hdr.command_code == IPA_CMD_ASS_START) { card->info.csum_mask = cmd->data.setassparms.data.flags_32bit; - QETH_DBF_TEXT_(TRACE, 3, "csum:%d", card->info.csum_mask); + QETH_CARD_TEXT_(card, 3, "csum:%d", card->info.csum_mask); } + if (cmd->data.setassparms.hdr.assist_no == IPA_OUTBOUND_CHECKSUM && + cmd->data.setassparms.hdr.command_code == IPA_CMD_ASS_START) { + card->info.tx_csum_mask = + cmd->data.setassparms.data.flags_32bit; + QETH_CARD_TEXT_(card, 3, "tcsu:%d", card->info.tx_csum_mask); + } + return 0; } @@ -1117,7 +1046,7 @@ static struct qeth_cmd_buffer *qeth_l3_get_setassparms_cmd( struct qeth_cmd_buffer *iob; struct qeth_ipa_cmd *cmd; - QETH_DBF_TEXT(TRACE, 4, "getasscm"); + QETH_CARD_TEXT(card, 4, "getasscm"); iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETASSPARMS, prot); cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); @@ -1139,7 +1068,7 @@ static int qeth_l3_send_setassparms(struct qeth_card *card, int rc; struct qeth_ipa_cmd *cmd; - QETH_DBF_TEXT(TRACE, 4, "sendassp"); + QETH_CARD_TEXT(card, 4, "sendassp"); cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); if (len <= sizeof(__u32)) @@ -1158,7 +1087,7 @@ static int qeth_l3_send_simple_setassparms_ipv6(struct qeth_card *card, int rc; struct qeth_cmd_buffer *iob; - QETH_DBF_TEXT(TRACE, 4, "simassp6"); + QETH_CARD_TEXT(card, 4, "simassp6"); iob = qeth_l3_get_setassparms_cmd(card, ipa_func, cmd_code, 0, QETH_PROT_IPV6); rc = qeth_l3_send_setassparms(card, iob, 0, 0, @@ -1174,7 +1103,7 @@ static int qeth_l3_send_simple_setassparms(struct qeth_card *card, int length = 0; struct qeth_cmd_buffer *iob; - QETH_DBF_TEXT(TRACE, 4, "simassp4"); + QETH_CARD_TEXT(card, 4, "simassp4"); if (data) length = sizeof(__u32); iob = qeth_l3_get_setassparms_cmd(card, ipa_func, cmd_code, @@ -1188,7 +1117,7 @@ static int qeth_l3_start_ipa_arp_processing(struct qeth_card *card) { int rc; - 
QETH_DBF_TEXT(TRACE, 3, "ipaarp"); + QETH_CARD_TEXT(card, 3, "ipaarp"); if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) { dev_info(&card->gdev->dev, @@ -1210,7 +1139,7 @@ static int qeth_l3_start_ipa_ip_fragmentation(struct qeth_card *card) { int rc; - QETH_DBF_TEXT(TRACE, 3, "ipaipfrg"); + QETH_CARD_TEXT(card, 3, "ipaipfrg"); if (!qeth_is_supported(card, IPA_IP_FRAGMENTATION)) { dev_info(&card->gdev->dev, @@ -1235,7 +1164,7 @@ static int qeth_l3_start_ipa_source_mac(struct qeth_card *card) { int rc; - QETH_DBF_TEXT(TRACE, 3, "stsrcmac"); + QETH_CARD_TEXT(card, 3, "stsrcmac"); if (!qeth_is_supported(card, IPA_SOURCE_MAC)) { dev_info(&card->gdev->dev, @@ -1257,7 +1186,7 @@ static int qeth_l3_start_ipa_vlan(struct qeth_card *card) { int rc = 0; - QETH_DBF_TEXT(TRACE, 3, "strtvlan"); + QETH_CARD_TEXT(card, 3, "strtvlan"); if (!qeth_is_supported(card, IPA_FULL_VLAN)) { dev_info(&card->gdev->dev, @@ -1281,7 +1210,7 @@ static int qeth_l3_start_ipa_multicast(struct qeth_card *card) { int rc; - QETH_DBF_TEXT(TRACE, 3, "stmcast"); + QETH_CARD_TEXT(card, 3, "stmcast"); if (!qeth_is_supported(card, IPA_MULTICASTING)) { dev_info(&card->gdev->dev, @@ -1303,50 +1232,17 @@ static int qeth_l3_start_ipa_multicast(struct qeth_card *card) return rc; } -static int qeth_l3_query_ipassists_cb(struct qeth_card *card, - struct qeth_reply *reply, unsigned long data) -{ - struct qeth_ipa_cmd *cmd; - - QETH_DBF_TEXT(SETUP, 2, "qipasscb"); - - cmd = (struct qeth_ipa_cmd *) data; - if (cmd->hdr.prot_version == QETH_PROT_IPV4) { - card->options.ipa4.supported_funcs = cmd->hdr.ipa_supported; - card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled; - } else { - card->options.ipa6.supported_funcs = cmd->hdr.ipa_supported; - card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled; - } - QETH_DBF_TEXT(SETUP, 2, "suppenbl"); - QETH_DBF_TEXT_(SETUP, 2, "%x", cmd->hdr.ipa_supported); - QETH_DBF_TEXT_(SETUP, 2, "%x", cmd->hdr.ipa_enabled); - return 0; -} - -static int qeth_l3_query_ipassists(struct qeth_card *card, - enum qeth_prot_versions prot) -{ - int rc; - struct qeth_cmd_buffer *iob; - - QETH_DBF_TEXT_(SETUP, 2, "qipassi%i", prot); - iob = qeth_get_ipacmd_buffer(card, IPA_CMD_QIPASSIST, prot); - rc = qeth_send_ipa_cmd(card, iob, qeth_l3_query_ipassists_cb, NULL); - return rc; -} - #ifdef CONFIG_QETH_IPV6 static int qeth_l3_softsetup_ipv6(struct qeth_card *card) { int rc; - QETH_DBF_TEXT(TRACE, 3, "softipv6"); + QETH_CARD_TEXT(card, 3, "softipv6"); if (card->info.type == QETH_CARD_TYPE_IQD) goto out; - rc = qeth_l3_query_ipassists(card, QETH_PROT_IPV6); + rc = qeth_query_ipassists(card, QETH_PROT_IPV6); if (rc) { dev_err(&card->gdev->dev, "Activating IPv6 support for %s failed\n", @@ -1387,7 +1283,7 @@ static int qeth_l3_start_ipa_ipv6(struct qeth_card *card) { int rc = 0; - QETH_DBF_TEXT(TRACE, 3, "strtipv6"); + QETH_CARD_TEXT(card, 3, "strtipv6"); if (!qeth_is_supported(card, IPA_IPV6)) { dev_info(&card->gdev->dev, @@ -1404,7 +1300,7 @@ static int qeth_l3_start_ipa_broadcast(struct qeth_card *card) { int rc; - QETH_DBF_TEXT(TRACE, 3, "stbrdcst"); + QETH_CARD_TEXT(card, 3, "stbrdcst"); card->info.broadcast_capable = 0; if (!qeth_is_supported(card, IPA_FILTERING)) { dev_info(&card->gdev->dev, @@ -1471,67 +1367,59 @@ static int qeth_l3_send_checksum_command(struct qeth_card *card) return 0; } -int qeth_l3_set_rx_csum(struct qeth_card *card, - enum qeth_checksum_types csum_type) +static int qeth_l3_set_rx_csum(struct qeth_card *card, int on) { int rc = 0; - if (card->options.checksum_type == HW_CHECKSUMMING) { - if 
((csum_type != HW_CHECKSUMMING) && - (card->state != CARD_STATE_DOWN)) { - rc = qeth_l3_send_simple_setassparms(card, - IPA_INBOUND_CHECKSUM, IPA_CMD_ASS_STOP, 0); - if (rc) - return -EIO; - } + if (on) { + rc = qeth_l3_send_checksum_command(card); + if (rc) + return -EIO; + dev_info(&card->gdev->dev, + "HW Checksumming (inbound) enabled\n"); } else { - if (csum_type == HW_CHECKSUMMING) { - if (card->state != CARD_STATE_DOWN) { - if (!qeth_is_supported(card, - IPA_INBOUND_CHECKSUM)) - return -EPERM; - rc = qeth_l3_send_checksum_command(card); - if (rc) - return -EIO; - } - } + rc = qeth_l3_send_simple_setassparms(card, + IPA_INBOUND_CHECKSUM, IPA_CMD_ASS_STOP, 0); + if (rc) + return -EIO; } - card->options.checksum_type = csum_type; - return rc; + + return 0; } static int qeth_l3_start_ipa_checksum(struct qeth_card *card) { - int rc = 0; + QETH_CARD_TEXT(card, 3, "strtcsum"); - QETH_DBF_TEXT(TRACE, 3, "strtcsum"); - - if (card->options.checksum_type == NO_CHECKSUMMING) { - dev_info(&card->gdev->dev, - "Using no checksumming on %s.\n", - QETH_CARD_IFNAME(card)); - return 0; - } - if (card->options.checksum_type == SW_CHECKSUMMING) { - dev_info(&card->gdev->dev, - "Using SW checksumming on %s.\n", - QETH_CARD_IFNAME(card)); - return 0; - } - if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM)) { - dev_info(&card->gdev->dev, - "Inbound HW Checksumming not " - "supported on %s,\ncontinuing " - "using Inbound SW Checksumming\n", - QETH_CARD_IFNAME(card)); - card->options.checksum_type = SW_CHECKSUMMING; - return 0; + if (card->dev->features & NETIF_F_RXCSUM) { + rtnl_lock(); + /* force set_features call */ + card->dev->features &= ~NETIF_F_RXCSUM; + netdev_update_features(card->dev); + rtnl_unlock(); } - rc = qeth_l3_send_checksum_command(card); - if (!rc) - dev_info(&card->gdev->dev, - "HW Checksumming (inbound) enabled\n"); + return 0; +} + +static int qeth_l3_start_ipa_tx_checksum(struct qeth_card *card) +{ + int rc = 0; + if (!qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM)) + return rc; + rc = qeth_l3_send_simple_setassparms(card, IPA_OUTBOUND_CHECKSUM, + IPA_CMD_ASS_START, 0); + if (rc) + goto err_out; + rc = qeth_l3_send_simple_setassparms(card, IPA_OUTBOUND_CHECKSUM, + IPA_CMD_ASS_ENABLE, card->info.tx_csum_mask); + if (rc) + goto err_out; + dev_info(&card->gdev->dev, "HW TX Checksumming enabled\n"); + return rc; +err_out: + dev_warn(&card->gdev->dev, "Enabling HW TX checksumming for %s " + "failed, using SW TX checksumming\n", QETH_CARD_IFNAME(card)); return rc; } @@ -1539,7 +1427,7 @@ static int qeth_l3_start_ipa_tso(struct qeth_card *card) { int rc; - QETH_DBF_TEXT(TRACE, 3, "sttso"); + QETH_CARD_TEXT(card, 3, "sttso"); if (!qeth_is_supported(card, IPA_OUTBOUND_TSO)) { dev_info(&card->gdev->dev, @@ -1557,18 +1445,17 @@ static int qeth_l3_start_ipa_tso(struct qeth_card *card) dev_info(&card->gdev->dev, "Outbound TSO enabled\n"); } - if (rc && (card->options.large_send == QETH_LARGE_SEND_TSO)) { - card->options.large_send = QETH_LARGE_SEND_NO; - card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG); - } + if (rc) + card->dev->features &= ~NETIF_F_TSO; return rc; } static int qeth_l3_start_ipassists(struct qeth_card *card) { - QETH_DBF_TEXT(TRACE, 3, "strtipas"); + QETH_CARD_TEXT(card, 3, "strtipas"); - qeth_set_access_ctrl_online(card); /* go on*/ + if (qeth_set_access_ctrl_online(card, 0)) + return -EIO; qeth_l3_start_ipa_arp_processing(card); /* go on*/ qeth_l3_start_ipa_ip_fragmentation(card); /* go on*/ qeth_l3_start_ipa_source_mac(card); /* go on*/ @@ -1577,33 +1464,11 @@ static int 
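The rewritten qeth_l3_start_ipa_checksum() above uses a common trick to re-run the driver's ndo_set_features() callback: under the RTNL lock it clears NETIF_F_RXCSUM so that netdev_update_features() sees a delta and renegotiates the flag against hw_features. In isolation:

	#include <linux/netdevice.h>
	#include <linux/rtnetlink.h>

	static void force_rx_csum_renegotiation(struct net_device *dev)
	{
		rtnl_lock();			/* netdev_update_features() demands it */
		dev->features &= ~NETIF_F_RXCSUM;
		netdev_update_features(dev);	/* re-runs ndo_fix/set_features */
		rtnl_unlock();
	}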
qeth_l3_start_ipassists(struct qeth_card *card) qeth_l3_start_ipa_ipv6(card); /* go on*/ qeth_l3_start_ipa_broadcast(card); /* go on*/ qeth_l3_start_ipa_checksum(card); /* go on*/ + qeth_l3_start_ipa_tx_checksum(card); qeth_l3_start_ipa_tso(card); /* go on*/ return 0; } -static int qeth_l3_put_unique_id(struct qeth_card *card) -{ - - int rc = 0; - struct qeth_cmd_buffer *iob; - struct qeth_ipa_cmd *cmd; - - QETH_DBF_TEXT(TRACE, 2, "puniqeid"); - - if ((card->info.unique_id & UNIQUE_ID_NOT_BY_CARD) == - UNIQUE_ID_NOT_BY_CARD) - return -1; - iob = qeth_get_ipacmd_buffer(card, IPA_CMD_DESTROY_ADDR, - QETH_PROT_IPV6); - cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); - *((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) = - card->info.unique_id; - memcpy(&cmd->data.create_destroy_addr.unique_id[0], - card->dev->dev_addr, OSA_ADDR_LEN); - rc = qeth_send_ipa_cmd(card, iob, NULL, NULL); - return rc; -} - static int qeth_l3_iqd_read_initial_mac_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) { @@ -1614,7 +1479,7 @@ static int qeth_l3_iqd_read_initial_mac_cb(struct qeth_card *card, memcpy(card->dev->dev_addr, cmd->data.create_destroy_addr.unique_id, ETH_ALEN); else - random_ether_addr(card->dev->dev_addr); + eth_random_addr(card->dev->dev_addr); return 0; } @@ -1692,7 +1557,7 @@ qeth_diags_trace_cb(struct qeth_card *card, struct qeth_reply *reply, cmd = (struct qeth_ipa_cmd *)data; rc = cmd->hdr.return_code; if (rc) - QETH_DBF_TEXT_(TRACE, 2, "dxter%x", rc); + QETH_CARD_TEXT_(card, 2, "dxter%x", rc); switch (cmd->data.diagass.action) { case QETH_DIAGS_CMD_TRACE_QUERY: break; @@ -1757,10 +1622,7 @@ qeth_diags_trace(struct qeth_card *card, enum qeth_diags_trace_cmds diags_cmd) static void qeth_l3_get_mac_for_ipm(__u32 ipm, char *mac, struct net_device *dev) { - if (dev->type == ARPHRD_IEEE802_TR) - ip_tr_mc_map(ipm, mac); - else - ip_eth_mc_map(ipm, mac); + ip_eth_mc_map(ipm, mac); } static void qeth_l3_add_mc(struct qeth_card *card, struct in_device *in4_dev) @@ -1769,8 +1631,9 @@ static void qeth_l3_add_mc(struct qeth_card *card, struct in_device *in4_dev) struct ip_mc_list *im4; char buf[MAX_ADDR_LEN]; - QETH_DBF_TEXT(TRACE, 4, "addmc"); - for (im4 = in4_dev->mc_list; im4; im4 = im4->next) { + QETH_CARD_TEXT(card, 4, "addmc"); + for (im4 = rcu_dereference(in4_dev->mc_list); im4 != NULL; + im4 = rcu_dereference(im4->next_rcu)) { qeth_l3_get_mac_for_ipm(im4->multiaddr, buf, in4_dev->dev); ipm = qeth_l3_get_addr_buffer(QETH_PROT_IPV4); if (!ipm) @@ -1783,29 +1646,28 @@ static void qeth_l3_add_mc(struct qeth_card *card, struct in_device *in4_dev) } } +/* called with rcu_read_lock */ static void qeth_l3_add_vlan_mc(struct qeth_card *card) { struct in_device *in_dev; - struct vlan_group *vg; - int i; + u16 vid; - QETH_DBF_TEXT(TRACE, 4, "addmcvl"); - if (!qeth_is_supported(card, IPA_FULL_VLAN) || (card->vlangrp == NULL)) + QETH_CARD_TEXT(card, 4, "addmcvl"); + if (!qeth_is_supported(card, IPA_FULL_VLAN)) return; - vg = card->vlangrp; - for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) { - struct net_device *netdev = vlan_group_get_device(vg, i); + for_each_set_bit(vid, card->active_vlans, VLAN_N_VID) { + struct net_device *netdev; + + netdev = __vlan_find_dev_deep_rcu(card->dev, htons(ETH_P_8021Q), + vid); if (netdev == NULL || !(netdev->flags & IFF_UP)) continue; - in_dev = in_dev_get(netdev); + in_dev = __in_dev_get_rcu(netdev); if (!in_dev) continue; - read_lock(&in_dev->mc_list_lock); qeth_l3_add_mc(card, in_dev); - read_unlock(&in_dev->mc_list_lock); - 
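Throughout this patch the old vlan_group iteration (VLAN_GROUP_ARRAY_LEN plus vlan_group_get_device()) gives way to a per-card active_vlans bitmap walked with for_each_set_bit(), resolving each vid to its VLAN device under RCU. The pattern, reduced to essentials:

	#include <linux/bitops.h>
	#include <linux/if_vlan.h>
	#include <linux/netdevice.h>

	static void for_each_active_vlan(struct net_device *real_dev,
					 unsigned long *active_vlans)
	{
		u16 vid;

		rcu_read_lock();
		for_each_set_bit(vid, active_vlans, VLAN_N_VID) {
			struct net_device *vdev;

			vdev = __vlan_find_dev_deep_rcu(real_dev,
							htons(ETH_P_8021Q), vid);
			if (!vdev || !(vdev->flags & IFF_UP))
				continue;
			/* ...per-VLAN work, still under rcu_read_lock()... */
		}
		rcu_read_unlock();
	}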
in_dev_put(in_dev); } } @@ -1813,15 +1675,15 @@ static void qeth_l3_add_multicast_ipv4(struct qeth_card *card) { struct in_device *in4_dev; - QETH_DBF_TEXT(TRACE, 4, "chkmcv4"); - in4_dev = in_dev_get(card->dev); + QETH_CARD_TEXT(card, 4, "chkmcv4"); + rcu_read_lock(); + in4_dev = __in_dev_get_rcu(card->dev); if (in4_dev == NULL) - return; - read_lock(&in4_dev->mc_list_lock); + goto unlock; qeth_l3_add_mc(card, in4_dev); qeth_l3_add_vlan_mc(card); - read_unlock(&in4_dev->mc_list_lock); - in_dev_put(in4_dev); +unlock: + rcu_read_unlock(); } #ifdef CONFIG_QETH_IPV6 @@ -1831,7 +1693,7 @@ static void qeth_l3_add_mc6(struct qeth_card *card, struct inet6_dev *in6_dev) struct ifmcaddr6 *im6; char buf[MAX_ADDR_LEN]; - QETH_DBF_TEXT(TRACE, 4, "addmc6"); + QETH_CARD_TEXT(card, 4, "addmc6"); for (im6 = in6_dev->mc_list; im6 != NULL; im6 = im6->next) { ndisc_mc_map(&im6->mca_addr, buf, in6_dev->dev, 0); ipm = qeth_l3_get_addr_buffer(QETH_PROT_IPV6); @@ -1846,19 +1708,21 @@ static void qeth_l3_add_mc6(struct qeth_card *card, struct inet6_dev *in6_dev) } } +/* called with rcu_read_lock */ static void qeth_l3_add_vlan_mc6(struct qeth_card *card) { struct inet6_dev *in_dev; - struct vlan_group *vg; - int i; + u16 vid; - QETH_DBF_TEXT(TRACE, 4, "admc6vl"); - if (!qeth_is_supported(card, IPA_FULL_VLAN) || (card->vlangrp == NULL)) + QETH_CARD_TEXT(card, 4, "admc6vl"); + if (!qeth_is_supported(card, IPA_FULL_VLAN)) return; - vg = card->vlangrp; - for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) { - struct net_device *netdev = vlan_group_get_device(vg, i); + for_each_set_bit(vid, card->active_vlans, VLAN_N_VID) { + struct net_device *netdev; + + netdev = __vlan_find_dev_deep_rcu(card->dev, htons(ETH_P_8021Q), + vid); if (netdev == NULL || !(netdev->flags & IFF_UP)) continue; @@ -1876,16 +1740,18 @@ static void qeth_l3_add_multicast_ipv6(struct qeth_card *card) { struct inet6_dev *in6_dev; - QETH_DBF_TEXT(TRACE, 4, "chkmcv6"); + QETH_CARD_TEXT(card, 4, "chkmcv6"); if (!qeth_is_supported(card, IPA_IPV6)) return ; in6_dev = in6_dev_get(card->dev); if (in6_dev == NULL) return; + rcu_read_lock(); read_lock_bh(&in6_dev->lock); qeth_l3_add_mc6(card, in6_dev); qeth_l3_add_vlan_mc6(card); read_unlock_bh(&in6_dev->lock); + rcu_read_unlock(); in6_dev_put(in6_dev); } #endif /* CONFIG_QETH_IPV6 */ @@ -1896,10 +1762,14 @@ static void qeth_l3_free_vlan_addresses4(struct qeth_card *card, struct in_device *in_dev; struct in_ifaddr *ifa; struct qeth_ipaddr *addr; + struct net_device *netdev; - QETH_DBF_TEXT(TRACE, 4, "frvaddr4"); + QETH_CARD_TEXT(card, 4, "frvaddr4"); - in_dev = in_dev_get(vlan_group_get_device(card->vlangrp, vid)); + netdev = __vlan_find_dev_deep_rcu(card->dev, htons(ETH_P_8021Q), vid); + if (!netdev) + return; + in_dev = in_dev_get(netdev); if (!in_dev) return; for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) { @@ -1922,13 +1792,17 @@ static void qeth_l3_free_vlan_addresses6(struct qeth_card *card, struct inet6_dev *in6_dev; struct inet6_ifaddr *ifa; struct qeth_ipaddr *addr; + struct net_device *netdev; - QETH_DBF_TEXT(TRACE, 4, "frvaddr6"); + QETH_CARD_TEXT(card, 4, "frvaddr6"); - in6_dev = in6_dev_get(vlan_group_get_device(card->vlangrp, vid)); + netdev = __vlan_find_dev_deep_rcu(card->dev, htons(ETH_P_8021Q), vid); + if (!netdev) + return; + in6_dev = in6_dev_get(netdev); if (!in6_dev) return; - for (ifa = in6_dev->addr_list; ifa; ifa = ifa->lst_next) { + list_for_each_entry(ifa, &in6_dev->addr_list, if_list) { addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV6); if (addr) { memcpy(&addr->u.a6.addr, 
&ifa->addr, @@ -1946,54 +1820,49 @@ static void qeth_l3_free_vlan_addresses6(struct qeth_card *card, static void qeth_l3_free_vlan_addresses(struct qeth_card *card, unsigned short vid) { - if (!card->vlangrp) - return; + rcu_read_lock(); qeth_l3_free_vlan_addresses4(card, vid); qeth_l3_free_vlan_addresses6(card, vid); + rcu_read_unlock(); } -static void qeth_l3_vlan_rx_register(struct net_device *dev, - struct vlan_group *grp) +static int qeth_l3_vlan_rx_add_vid(struct net_device *dev, + __be16 proto, u16 vid) { struct qeth_card *card = dev->ml_priv; - unsigned long flags; - - QETH_DBF_TEXT(TRACE, 4, "vlanreg"); - spin_lock_irqsave(&card->vlanlock, flags); - card->vlangrp = grp; - spin_unlock_irqrestore(&card->vlanlock, flags); -} -static void qeth_l3_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) -{ - return; + set_bit(vid, card->active_vlans); + return 0; } -static void qeth_l3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) +static int qeth_l3_vlan_rx_kill_vid(struct net_device *dev, + __be16 proto, u16 vid) { struct qeth_card *card = dev->ml_priv; unsigned long flags; - QETH_DBF_TEXT_(TRACE, 4, "kid:%d", vid); + QETH_CARD_TEXT_(card, 4, "kid:%d", vid); if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) { - QETH_DBF_TEXT(TRACE, 3, "kidREC"); - return; + QETH_CARD_TEXT(card, 3, "kidREC"); + return 0; } spin_lock_irqsave(&card->vlanlock, flags); /* unregister IP addresses of vlan device */ qeth_l3_free_vlan_addresses(card, vid); - vlan_group_set_device(card->vlangrp, vid, NULL); + clear_bit(vid, card->active_vlans); spin_unlock_irqrestore(&card->vlanlock, flags); qeth_l3_set_multicast_list(card->dev); + return 0; } -static inline __u16 qeth_l3_rebuild_skb(struct qeth_card *card, - struct sk_buff *skb, struct qeth_hdr *hdr) +static inline int qeth_l3_rebuild_skb(struct qeth_card *card, + struct sk_buff *skb, struct qeth_hdr *hdr, + unsigned short *vlan_id) { - unsigned short vlan_id = 0; __be16 prot; struct iphdr *ip_hdr; unsigned char tg_addr[MAX_ADDR_LEN]; + int is_vlan = 0; if (!(hdr->hdr.l3.flags & QETH_HDR_PASSTHRU)) { prot = htons((hdr->hdr.l3.flags & QETH_HDR_IPV6)? ETH_P_IPV6 : @@ -2010,8 +1879,6 @@ static inline __u16 qeth_l3_rebuild_skb(struct qeth_card *card, #endif case __constant_htons(ETH_P_IP): ip_hdr = (struct iphdr *)skb->data; - (card->dev->type == ARPHRD_IEEE802_TR) ? - ip_tr_mc_map(ip_hdr->daddr, tg_addr): ip_eth_mc_map(ip_hdr->daddr, tg_addr); break; default: @@ -2047,27 +1914,16 @@ static inline __u16 qeth_l3_rebuild_skb(struct qeth_card *card, tg_addr, "FAKELL", card->dev->addr_len); } -#ifdef CONFIG_TR - if (card->dev->type == ARPHRD_IEEE802_TR) - skb->protocol = tr_type_trans(skb, card->dev); - else -#endif - skb->protocol = eth_type_trans(skb, card->dev); + skb->protocol = eth_type_trans(skb, card->dev); if (hdr->hdr.l3.ext_flags & (QETH_HDR_EXT_VLAN_FRAME | QETH_HDR_EXT_INCLUDE_VLAN_TAG)) { - vlan_id = (hdr->hdr.l3.ext_flags & QETH_HDR_EXT_VLAN_FRAME)? + *vlan_id = (hdr->hdr.l3.ext_flags & QETH_HDR_EXT_VLAN_FRAME) ? 
hdr->hdr.l3.vlan_id : *((u16 *)&hdr->hdr.l3.dest_addr[12]); + is_vlan = 1; } - switch (card->options.checksum_type) { - case SW_CHECKSUMMING: - skb->ip_summed = CHECKSUM_NONE; - break; - case NO_CHECKSUMMING: - skb->ip_summed = CHECKSUM_UNNECESSARY; - break; - case HW_CHECKSUMMING: + if (card->dev->features & NETIF_F_RXCSUM) { if ((hdr->hdr.l3.ext_flags & (QETH_HDR_EXT_CSUM_HDR_REQ | QETH_HDR_EXT_CSUM_TRANSP_REQ)) == @@ -2076,84 +1932,167 @@ static inline __u16 qeth_l3_rebuild_skb(struct qeth_card *card, skb->ip_summed = CHECKSUM_UNNECESSARY; else skb->ip_summed = CHECKSUM_NONE; - } + } else + skb->ip_summed = CHECKSUM_NONE; - return vlan_id; + return is_vlan; } -static void qeth_l3_process_inbound_buffer(struct qeth_card *card, - struct qeth_qdio_buffer *buf, int index) +static int qeth_l3_process_inbound_buffer(struct qeth_card *card, + int budget, int *done) { - struct qdio_buffer_element *element; + int work_done = 0; struct sk_buff *skb; struct qeth_hdr *hdr; - int offset; __u16 vlan_tag = 0; + int is_vlan; unsigned int len; - /* get first element of current buffer */ - element = (struct qdio_buffer_element *)&buf->buffer->element[0]; - offset = 0; - if (card->options.performance_stats) - card->perf_stats.bufs_rec++; - while ((skb = qeth_core_get_next_skb(card, buf->buffer, &element, - &offset, &hdr))) { - skb->dev = card->dev; - /* is device UP ? */ - if (!(card->dev->flags & IFF_UP)) { - dev_kfree_skb_any(skb); - continue; + __u16 magic; + + *done = 0; + WARN_ON_ONCE(!budget); + while (budget) { + skb = qeth_core_get_next_skb(card, + &card->qdio.in_q->bufs[card->rx.b_index], + &card->rx.b_element, &card->rx.e_offset, &hdr); + if (!skb) { + *done = 1; + break; } - + skb->dev = card->dev; switch (hdr->hdr.l3.id) { case QETH_HEADER_TYPE_LAYER3: - vlan_tag = qeth_l3_rebuild_skb(card, skb, hdr); - len = skb->len; - if (vlan_tag && !card->options.sniffer) - if (card->vlangrp) - vlan_hwaccel_rx(skb, card->vlangrp, - vlan_tag); - else { - dev_kfree_skb_any(skb); - continue; - } - else - netif_rx(skb); + magic = *(__u16 *)skb->data; + if ((card->info.type == QETH_CARD_TYPE_IQD) && + (magic == ETH_P_AF_IUCV)) { + skb->protocol = ETH_P_AF_IUCV; + skb->pkt_type = PACKET_HOST; + skb->mac_header = NET_SKB_PAD; + skb->dev = card->dev; + len = skb->len; + card->dev->header_ops->create(skb, card->dev, 0, + card->dev->dev_addr, "FAKELL", + card->dev->addr_len); + netif_receive_skb(skb); + } else { + is_vlan = qeth_l3_rebuild_skb(card, skb, hdr, + &vlan_tag); + len = skb->len; + if (is_vlan && !card->options.sniffer) + __vlan_hwaccel_put_tag(skb, + htons(ETH_P_8021Q), vlan_tag); + napi_gro_receive(&card->napi, skb); + } break; case QETH_HEADER_TYPE_LAYER2: /* for HiperSockets sniffer */ skb->pkt_type = PACKET_HOST; skb->protocol = eth_type_trans(skb, skb->dev); - if (card->options.checksum_type == NO_CHECKSUMMING) - skb->ip_summed = CHECKSUM_UNNECESSARY; - else - skb->ip_summed = CHECKSUM_NONE; len = skb->len; netif_receive_skb(skb); break; default: dev_kfree_skb_any(skb); - QETH_DBF_TEXT(TRACE, 3, "inbunkno"); + QETH_CARD_TEXT(card, 3, "inbunkno"); QETH_DBF_HEX(CTRL, 3, hdr, QETH_DBF_CTRL_LEN); continue; } - + work_done++; + budget--; card->stats.rx_packets++; card->stats.rx_bytes += len; } + return work_done; +} + +static int qeth_l3_poll(struct napi_struct *napi, int budget) +{ + struct qeth_card *card = container_of(napi, struct qeth_card, napi); + int work_done = 0; + struct qeth_qdio_buffer *buffer; + int done; + int new_budget = budget; + + if (card->options.performance_stats) { + 
card->perf_stats.inbound_cnt++; + card->perf_stats.inbound_start_time = qeth_get_micros(); + } + + while (1) { + if (!card->rx.b_count) { + card->rx.qdio_err = 0; + card->rx.b_count = qdio_get_next_buffers( + card->data.ccwdev, 0, &card->rx.b_index, + &card->rx.qdio_err); + if (card->rx.b_count <= 0) { + card->rx.b_count = 0; + break; + } + card->rx.b_element = + &card->qdio.in_q->bufs[card->rx.b_index] + .buffer->element[0]; + card->rx.e_offset = 0; + } + + while (card->rx.b_count) { + buffer = &card->qdio.in_q->bufs[card->rx.b_index]; + if (!(card->rx.qdio_err && + qeth_check_qdio_errors(card, buffer->buffer, + card->rx.qdio_err, "qinerr"))) + work_done += qeth_l3_process_inbound_buffer( + card, new_budget, &done); + else + done = 1; + + if (done) { + if (card->options.performance_stats) + card->perf_stats.bufs_rec++; + qeth_put_buffer_pool_entry(card, + buffer->pool_entry); + qeth_queue_input_buffer(card, card->rx.b_index); + card->rx.b_count--; + if (card->rx.b_count) { + card->rx.b_index = + (card->rx.b_index + 1) % + QDIO_MAX_BUFFERS_PER_Q; + card->rx.b_element = + &card->qdio.in_q + ->bufs[card->rx.b_index] + .buffer->element[0]; + card->rx.e_offset = 0; + } + } + + if (work_done >= budget) + goto out; + else + new_budget = budget - work_done; + } + } + + napi_complete(napi); + if (qdio_start_irq(card->data.ccwdev, 0)) + napi_schedule(&card->napi); +out: + if (card->options.performance_stats) + card->perf_stats.inbound_time += qeth_get_micros() - + card->perf_stats.inbound_start_time; + return work_done; } static int qeth_l3_verify_vlan_dev(struct net_device *dev, struct qeth_card *card) { int rc = 0; - struct vlan_group *vg; - int i; + u16 vid; - vg = card->vlangrp; - if (!vg) - return rc; + for_each_set_bit(vid, card->active_vlans, VLAN_N_VID) { + struct net_device *netdev; - for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) { - if (vlan_group_get_device(vg, i) == dev) { + rcu_read_lock(); + netdev = __vlan_find_dev_deep_rcu(card->dev, htons(ETH_P_8021Q), + vid); + rcu_read_unlock(); + if (netdev == dev) { rc = QETH_VLAN_CARD; break; } @@ -2198,7 +2137,8 @@ static struct qeth_card *qeth_l3_get_card_from_dev(struct net_device *dev) card = vlan_dev_real_dev(dev)->ml_priv; if (card && card->options.layer2) card = NULL; - QETH_DBF_TEXT_(TRACE, 4, "%d", rc); + if (card) + QETH_CARD_TEXT_(card, 4, "%d", rc); return card ; } @@ -2223,25 +2163,14 @@ static int qeth_l3_stop_card(struct qeth_card *card, int recovery_mode) dev_close(card->dev); rtnl_unlock(); } - if (!card->use_hard_stop) { - rc = qeth_send_stoplan(card); - if (rc) - QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); - } card->state = CARD_STATE_SOFTSETUP; } if (card->state == CARD_STATE_SOFTSETUP) { - qeth_l3_clear_ip_list(card, !card->use_hard_stop, 1); + qeth_l3_clear_ip_list(card, 1); qeth_clear_ipacmd_list(card); card->state = CARD_STATE_HARDSETUP; } if (card->state == CARD_STATE_HARDSETUP) { - if (!card->use_hard_stop && - (card->info.type != QETH_CARD_TYPE_IQD)) { - rc = qeth_l3_put_unique_id(card); - if (rc) - QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc); - } qeth_qdio_clear_card(card, 0); qeth_clear_qdio_buffers(card); qeth_clear_working_pool_list(card); @@ -2251,7 +2180,6 @@ static int qeth_l3_stop_card(struct qeth_card *card, int recovery_mode) qeth_clear_cmd_buffers(&card->read); qeth_clear_cmd_buffers(&card->write); } - card->use_hard_stop = 0; return rc; } @@ -2276,10 +2204,10 @@ qeth_l3_handle_promisc_mode(struct qeth_card *card) } else if (card->options.sniffer && /* HiperSockets trace */ qeth_adp_supported(card, 
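The tail of qeth_l3_poll() above handles the classic NAPI completion race: after napi_complete() the poll re-arms the QDIO interrupt, and if qdio_start_irq() reports that new data slipped in meanwhile it immediately reschedules itself so no buffer is stranded. The skeleton, with process_rx() and rearm_reports_pending() as hypothetical stand-ins:

	#include <linux/netdevice.h>

	static int my_poll(struct napi_struct *napi, int budget)
	{
		int work_done = process_rx(napi, budget);	/* hypothetical */

		if (work_done < budget) {
			napi_complete(napi);
			/* re-arm; nonzero means work arrived in the gap */
			if (rearm_reports_pending())		/* hypothetical */
				napi_schedule(napi);
		}
		return work_done;
	}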
IPA_SETADP_SET_DIAG_ASSIST)) { if (dev->flags & IFF_PROMISC) { - QETH_DBF_TEXT(TRACE, 3, "+promisc"); + QETH_CARD_TEXT(card, 3, "+promisc"); qeth_diags_trace(card, QETH_DIAGS_CMD_TRACE_ENABLE); } else { - QETH_DBF_TEXT(TRACE, 3, "-promisc"); + QETH_CARD_TEXT(card, 3, "-promisc"); qeth_diags_trace(card, QETH_DIAGS_CMD_TRACE_DISABLE); } } @@ -2289,7 +2217,7 @@ static void qeth_l3_set_multicast_list(struct net_device *dev) { struct qeth_card *card = dev->ml_priv; - QETH_DBF_TEXT(TRACE, 3, "setmulti"); + QETH_CARD_TEXT(card, 3, "setmulti"); if (qeth_threads_running(card, QETH_RECOVER_THREAD) && (card->state != CARD_STATE_UP)) return; @@ -2334,7 +2262,7 @@ static int qeth_l3_arp_set_no_entries(struct qeth_card *card, int no_entries) int tmp; int rc; - QETH_DBF_TEXT(TRACE, 3, "arpstnoe"); + QETH_CARD_TEXT(card, 3, "arpstnoe"); /* * currently GuestLAN only supports the ARP assist function @@ -2358,22 +2286,46 @@ static int qeth_l3_arp_set_no_entries(struct qeth_card *card, int no_entries) return rc; } -static void qeth_l3_copy_arp_entries_stripped(struct qeth_arp_query_info *qinfo, - struct qeth_arp_query_data *qdata, int entry_size, - int uentry_size) +static __u32 get_arp_entry_size(struct qeth_card *card, + struct qeth_arp_query_data *qdata, + struct qeth_arp_entrytype *type, __u8 strip_entries) { - char *entry_ptr; - char *uentry_ptr; - int i; + __u32 rc; + __u8 is_hsi; - entry_ptr = (char *)&qdata->data; - uentry_ptr = (char *)(qinfo->udata + qinfo->udata_offset); - for (i = 0; i < qdata->no_entries; ++i) { - /* strip off 32 bytes "media specific information" */ - memcpy(uentry_ptr, (entry_ptr + 32), entry_size - 32); - entry_ptr += entry_size; - uentry_ptr += uentry_size; + is_hsi = qdata->reply_bits == 5; + if (type->ip == QETHARP_IP_ADDR_V4) { + QETH_CARD_TEXT(card, 4, "arpev4"); + if (strip_entries) { + rc = is_hsi ? sizeof(struct qeth_arp_qi_entry5_short) : + sizeof(struct qeth_arp_qi_entry7_short); + } else { + rc = is_hsi ? sizeof(struct qeth_arp_qi_entry5) : + sizeof(struct qeth_arp_qi_entry7); + } + } else if (type->ip == QETHARP_IP_ADDR_V6) { + QETH_CARD_TEXT(card, 4, "arpev6"); + if (strip_entries) { + rc = is_hsi ? + sizeof(struct qeth_arp_qi_entry5_short_ipv6) : + sizeof(struct qeth_arp_qi_entry7_short_ipv6); + } else { + rc = is_hsi ? 
+ sizeof(struct qeth_arp_qi_entry5_ipv6) : + sizeof(struct qeth_arp_qi_entry7_ipv6); + } + } else { + QETH_CARD_TEXT(card, 4, "arpinv"); + rc = 0; } + + return rc; +} + +static int arpentry_matches_prot(struct qeth_arp_entrytype *type, __u16 prot) +{ + return (type->ip == QETHARP_IP_ADDR_V4 && prot == QETH_PROT_IPV4) || + (type->ip == QETHARP_IP_ADDR_V6 && prot == QETH_PROT_IPV6); } static int qeth_l3_arp_query_cb(struct qeth_card *card, @@ -2382,72 +2334,77 @@ static int qeth_l3_arp_query_cb(struct qeth_card *card, struct qeth_ipa_cmd *cmd; struct qeth_arp_query_data *qdata; struct qeth_arp_query_info *qinfo; - int entry_size; - int uentry_size; int i; + int e; + int entrybytes_done; + int stripped_bytes; + __u8 do_strip_entries; - QETH_DBF_TEXT(TRACE, 4, "arpquecb"); + QETH_CARD_TEXT(card, 3, "arpquecb"); qinfo = (struct qeth_arp_query_info *) reply->param; cmd = (struct qeth_ipa_cmd *) data; + QETH_CARD_TEXT_(card, 4, "%i", cmd->hdr.prot_version); if (cmd->hdr.return_code) { - QETH_DBF_TEXT_(TRACE, 4, "qaer1%i", cmd->hdr.return_code); + QETH_CARD_TEXT(card, 4, "arpcberr"); + QETH_CARD_TEXT_(card, 4, "%i", cmd->hdr.return_code); return 0; } if (cmd->data.setassparms.hdr.return_code) { cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code; - QETH_DBF_TEXT_(TRACE, 4, "qaer2%i", cmd->hdr.return_code); + QETH_CARD_TEXT(card, 4, "setaperr"); + QETH_CARD_TEXT_(card, 4, "%i", cmd->hdr.return_code); return 0; } qdata = &cmd->data.setassparms.data.query_arp; - switch (qdata->reply_bits) { - case 5: - uentry_size = entry_size = sizeof(struct qeth_arp_qi_entry5); - if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES) - uentry_size = sizeof(struct qeth_arp_qi_entry5_short); - break; - case 7: - /* fall through to default */ - default: - /* tr is the same as eth -> entry7 */ - uentry_size = entry_size = sizeof(struct qeth_arp_qi_entry7); - if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES) - uentry_size = sizeof(struct qeth_arp_qi_entry7_short); - break; - } - /* check if there is enough room in userspace */ - if ((qinfo->udata_len - qinfo->udata_offset) < - qdata->no_entries * uentry_size){ - QETH_DBF_TEXT_(TRACE, 4, "qaer3%i", -ENOMEM); - cmd->hdr.return_code = -ENOMEM; - goto out_error; - } - QETH_DBF_TEXT_(TRACE, 4, "anore%i", - cmd->data.setassparms.hdr.number_of_replies); - QETH_DBF_TEXT_(TRACE, 4, "aseqn%i", cmd->data.setassparms.hdr.seq_no); - QETH_DBF_TEXT_(TRACE, 4, "anoen%i", qdata->no_entries); - - if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES) { - /* strip off "media specific information" */ - qeth_l3_copy_arp_entries_stripped(qinfo, qdata, entry_size, - uentry_size); - } else - /*copy entries to user buffer*/ - memcpy(qinfo->udata + qinfo->udata_offset, - (char *)&qdata->data, qdata->no_entries*uentry_size); + QETH_CARD_TEXT_(card, 4, "anoen%i", qdata->no_entries); + + do_strip_entries = (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES) > 0; + stripped_bytes = do_strip_entries ? 
QETH_QARP_MEDIASPECIFIC_BYTES : 0; + entrybytes_done = 0; + for (e = 0; e < qdata->no_entries; ++e) { + char *cur_entry; + __u32 esize; + struct qeth_arp_entrytype *etype; + + cur_entry = &qdata->data + entrybytes_done; + etype = &((struct qeth_arp_qi_entry5 *) cur_entry)->type; + if (!arpentry_matches_prot(etype, cmd->hdr.prot_version)) { + QETH_CARD_TEXT(card, 4, "pmis"); + QETH_CARD_TEXT_(card, 4, "%i", etype->ip); + break; + } + esize = get_arp_entry_size(card, qdata, etype, + do_strip_entries); + QETH_CARD_TEXT_(card, 5, "esz%i", esize); + if (!esize) + break; - qinfo->no_entries += qdata->no_entries; - qinfo->udata_offset += (qdata->no_entries*uentry_size); + if ((qinfo->udata_len - qinfo->udata_offset) < esize) { + QETH_CARD_TEXT_(card, 4, "qaer3%i", -ENOMEM); + cmd->hdr.return_code = IPA_RC_ENOMEM; + goto out_error; + } + + memcpy(qinfo->udata + qinfo->udata_offset, + &qdata->data + entrybytes_done + stripped_bytes, + esize); + entrybytes_done += esize + stripped_bytes; + qinfo->udata_offset += esize; + ++qinfo->no_entries; + } /* check if all replies received ... */ if (cmd->data.setassparms.hdr.seq_no < cmd->data.setassparms.hdr.number_of_replies) return 1; + QETH_CARD_TEXT_(card, 4, "nove%i", qinfo->no_entries); memcpy(qinfo->udata, &qinfo->no_entries, 4); /* keep STRIP_ENTRIES flag so the user program can distinguish * stripped entries from normal ones */ if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES) qdata->reply_bits |= QETH_QARP_STRIP_ENTRIES; memcpy(qinfo->udata + QETH_QARP_MASK_OFFSET, &qdata->reply_bits, 2); + QETH_CARD_TEXT_(card, 4, "rc%i", 0); return 0; out_error: i = 0; @@ -2461,7 +2418,7 @@ static int qeth_l3_send_ipa_arp_cmd(struct qeth_card *card, unsigned long), void *reply_param) { - QETH_DBF_TEXT(TRACE, 4, "sendarp"); + QETH_CARD_TEXT(card, 4, "sendarp"); memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE); memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data), @@ -2470,45 +2427,86 @@ static int qeth_l3_send_ipa_arp_cmd(struct qeth_card *card, reply_cb, reply_param); } -static int qeth_l3_arp_query(struct qeth_card *card, char __user *udata) +static int qeth_l3_query_arp_cache_info(struct qeth_card *card, + enum qeth_prot_versions prot, + struct qeth_arp_query_info *qinfo) { struct qeth_cmd_buffer *iob; - struct qeth_arp_query_info qinfo = {0, }; + struct qeth_ipa_cmd *cmd; int tmp; int rc; - QETH_DBF_TEXT(TRACE, 3, "arpquery"); + QETH_CARD_TEXT_(card, 3, "qarpipv%i", prot); + + iob = qeth_l3_get_setassparms_cmd(card, IPA_ARP_PROCESSING, + IPA_CMD_ASS_ARP_QUERY_INFO, + sizeof(struct qeth_arp_query_data) - sizeof(char), + prot); + cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); + cmd->data.setassparms.data.query_arp.request_bits = 0x000F; + cmd->data.setassparms.data.query_arp.reply_bits = 0; + cmd->data.setassparms.data.query_arp.no_entries = 0; + rc = qeth_l3_send_ipa_arp_cmd(card, iob, + QETH_SETASS_BASE_LEN+QETH_ARP_CMD_LEN, + qeth_l3_arp_query_cb, (void *)qinfo); + if (rc) { + tmp = rc; + QETH_DBF_MESSAGE(2, + "Error while querying ARP cache on %s: %s " + "(0x%x/%d)\n", QETH_CARD_IFNAME(card), + qeth_l3_arp_get_error_cause(&rc), tmp, tmp); + } + + return rc; +} + +static int qeth_l3_arp_query(struct qeth_card *card, char __user *udata) +{ + struct qeth_arp_query_info qinfo = {0, }; + int rc; + + QETH_CARD_TEXT(card, 3, "arpquery"); if (!qeth_is_supported(card,/*IPA_QUERY_ARP_ADDR_INFO*/ IPA_ARP_PROCESSING)) { - return -EOPNOTSUPP; + QETH_CARD_TEXT(card, 3, "arpqnsup"); + rc = -EOPNOTSUPP; + goto out; } /* get size of userspace buffer and mask_bits -> 6 
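qeth_l3_arp_query() above was restructured around the single-exit goto style: each failure jumps to the label that releases exactly what has been set up so far (out when nothing is allocated yet, free_and_out once qinfo.udata exists). The shape of that error handling, with do_query() as a hypothetical worker:

	#include <linux/slab.h>
	#include <linux/uaccess.h>

	static int my_query(char __user *udata, size_t len)
	{
		void *buf;
		int rc;

		buf = kzalloc(len, GFP_KERNEL);
		if (!buf) {
			rc = -ENOMEM;
			goto out;		/* nothing to undo yet */
		}

		rc = do_query(buf, len);	/* hypothetical */
		if (rc)
			goto free_and_out;

		if (copy_to_user(udata, buf, len))
			rc = -EFAULT;

	free_and_out:
		kfree(buf);
	out:
		return rc;
	}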
bytes */ - if (copy_from_user(&qinfo, udata, 6)) - return -EFAULT; + if (copy_from_user(&qinfo, udata, 6)) { + rc = -EFAULT; + goto out; + } qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL); - if (!qinfo.udata) - return -ENOMEM; + if (!qinfo.udata) { + rc = -ENOMEM; + goto out; + } qinfo.udata_offset = QETH_QARP_ENTRIES_OFFSET; - iob = qeth_l3_get_setassparms_cmd(card, IPA_ARP_PROCESSING, - IPA_CMD_ASS_ARP_QUERY_INFO, - sizeof(int), QETH_PROT_IPV4); - - rc = qeth_l3_send_ipa_arp_cmd(card, iob, - QETH_SETASS_BASE_LEN+QETH_ARP_CMD_LEN, - qeth_l3_arp_query_cb, (void *)&qinfo); + rc = qeth_l3_query_arp_cache_info(card, QETH_PROT_IPV4, &qinfo); if (rc) { - tmp = rc; - QETH_DBF_MESSAGE(2, "Error while querying ARP cache on %s: %s " - "(0x%x/%d)\n", QETH_CARD_IFNAME(card), - qeth_l3_arp_get_error_cause(&rc), tmp, tmp); if (copy_to_user(udata, qinfo.udata, 4)) rc = -EFAULT; + goto free_and_out; } else { - if (copy_to_user(udata, qinfo.udata, qinfo.udata_len)) +#ifdef CONFIG_QETH_IPV6 + if (qinfo.mask_bits & QETH_QARP_WITH_IPV6) { + /* fails in case of GuestLAN QDIO mode */ + qeth_l3_query_arp_cache_info(card, QETH_PROT_IPV6, + &qinfo); + } +#endif + if (copy_to_user(udata, qinfo.udata, qinfo.udata_len)) { + QETH_CARD_TEXT(card, 4, "qactf"); rc = -EFAULT; + goto free_and_out; + } + QETH_CARD_TEXT_(card, 4, "qacts"); } +free_and_out: kfree(qinfo.udata); +out: return rc; } @@ -2520,7 +2518,7 @@ static int qeth_l3_arp_add_entry(struct qeth_card *card, int tmp; int rc; - QETH_DBF_TEXT(TRACE, 3, "arpadent"); + QETH_CARD_TEXT(card, 3, "arpadent"); /* * currently GuestLAN only supports the ARP assist function @@ -2559,7 +2557,7 @@ static int qeth_l3_arp_remove_entry(struct qeth_card *card, int tmp; int rc; - QETH_DBF_TEXT(TRACE, 3, "arprment"); + QETH_CARD_TEXT(card, 3, "arprment"); /* * currently GuestLAN only supports the ARP assist function @@ -2595,7 +2593,7 @@ static int qeth_l3_arp_flush_cache(struct qeth_card *card) int rc; int tmp; - QETH_DBF_TEXT(TRACE, 3, "arpflush"); + QETH_CARD_TEXT(card, 3, "arpflush"); /* * currently GuestLAN only supports the ARP assist function @@ -2680,7 +2678,8 @@ static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) rc = qeth_snmp_command(card, rq->ifr_ifru.ifru_data); break; case SIOC_QETH_GET_CARD_TYPE: - if ((card->info.type == QETH_CARD_TYPE_OSAE) && + if ((card->info.type == QETH_CARD_TYPE_OSD || + card->info.type == QETH_CARD_TYPE_OSX) && !card->info.guestlan) return 1; return 0; @@ -2698,20 +2697,31 @@ static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) mii_data->phy_id, mii_data->reg_num); break; + case SIOC_QETH_QUERY_OAT: + rc = qeth_query_oat_command(card, rq->ifr_ifru.ifru_data); + break; default: rc = -EOPNOTSUPP; } if (rc) - QETH_DBF_TEXT_(TRACE, 2, "ioce%d", rc); + QETH_CARD_TEXT_(card, 2, "ioce%d", rc); return rc; } int inline qeth_l3_get_cast_type(struct qeth_card *card, struct sk_buff *skb) { int cast_type = RTN_UNSPEC; - - if (skb_dst(skb) && skb_dst(skb)->neighbour) { - cast_type = skb_dst(skb)->neighbour->type; + struct neighbour *n = NULL; + struct dst_entry *dst; + + rcu_read_lock(); + dst = skb_dst(skb); + if (dst) + n = dst_neigh_lookup_skb(dst, skb); + if (n) { + cast_type = n->type; + rcu_read_unlock(); + neigh_release(n); if ((cast_type == RTN_BROADCAST) || (cast_type == RTN_MULTICAST) || (cast_type == RTN_ANYCAST)) @@ -2719,6 +2729,8 @@ int inline qeth_l3_get_cast_type(struct qeth_card *card, struct sk_buff *skb) else return RTN_UNSPEC; } + rcu_read_unlock(); + /* try something else */ if 
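qeth_l3_get_cast_type() above replaces the removed dst->neighbour pointer with dst_neigh_lookup_skb(), which returns a referenced neighbour, so the success path must pair it with neigh_release(); the dst itself is only dereferenced under rcu_read_lock(). In isolation:

	#include <linux/rtnetlink.h>
	#include <net/dst.h>
	#include <net/neighbour.h>

	static int skb_neigh_type(struct sk_buff *skb)
	{
		struct neighbour *n = NULL;
		struct dst_entry *dst;
		int type = RTN_UNSPEC;

		rcu_read_lock();
		dst = skb_dst(skb);
		if (dst)
			n = dst_neigh_lookup_skb(dst, skb);
		rcu_read_unlock();

		if (n) {
			type = n->type;
			neigh_release(n);	/* drop the lookup's reference */
		}
		return type;
	}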
(skb->protocol == ETH_P_IPV6) return (skb_network_header(skb)[24] == 0xff) ? @@ -2751,9 +2763,35 @@ int inline qeth_l3_get_cast_type(struct qeth_card *card, struct sk_buff *skb) return cast_type; } +static void qeth_l3_fill_af_iucv_hdr(struct qeth_card *card, + struct qeth_hdr *hdr, struct sk_buff *skb) +{ + char daddr[16]; + struct af_iucv_trans_hdr *iucv_hdr; + + skb_pull(skb, 14); + card->dev->header_ops->create(skb, card->dev, 0, + card->dev->dev_addr, card->dev->dev_addr, + card->dev->addr_len); + skb_pull(skb, 14); + iucv_hdr = (struct af_iucv_trans_hdr *)skb->data; + memset(hdr, 0, sizeof(struct qeth_hdr)); + hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3; + hdr->hdr.l3.ext_flags = 0; + hdr->hdr.l3.length = skb->len; + hdr->hdr.l3.flags = QETH_HDR_IPV6 | QETH_CAST_UNICAST; + memset(daddr, 0, sizeof(daddr)); + daddr[0] = 0xfe; + daddr[1] = 0x80; + memcpy(&daddr[8], iucv_hdr->destUserID, 8); + memcpy(hdr->hdr.l3.dest_addr, daddr, 16); +} + static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr, struct sk_buff *skb, int ipv, int cast_type) { + struct dst_entry *dst; + memset(hdr, 0, sizeof(struct qeth_hdr)); hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3; hdr->hdr.l3.ext_flags = 0; @@ -2762,7 +2800,7 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr, * before we're going to overwrite this location with next hop ip. * v6 uses passthrough, v4 sets the tag in the QDIO header. */ - if (card->vlangrp && vlan_tx_tag_present(skb)) { + if (vlan_tx_tag_present(skb)) { if ((ipv == 4) || (card->info.type == QETH_CARD_TYPE_IQD)) hdr->hdr.l3.ext_flags = QETH_HDR_EXT_VLAN_FRAME; else @@ -2771,39 +2809,34 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr, } hdr->hdr.l3.length = skb->len - sizeof(struct qeth_hdr); + + rcu_read_lock(); + dst = skb_dst(skb); if (ipv == 4) { + struct rtable *rt = (struct rtable *) dst; + __be32 *pkey = &ip_hdr(skb)->daddr; + + if (rt->rt_gateway) + pkey = &rt->rt_gateway; + /* IPv4 */ hdr->hdr.l3.flags = qeth_l3_get_qeth_hdr_flags4(cast_type); memset(hdr->hdr.l3.dest_addr, 0, 12); - if ((skb_dst(skb)) && (skb_dst(skb)->neighbour)) { - *((u32 *) (&hdr->hdr.l3.dest_addr[12])) = - *((u32 *) skb_dst(skb)->neighbour->primary_key); - } else { - /* fill in destination address used in ip header */ - *((u32 *) (&hdr->hdr.l3.dest_addr[12])) = - ip_hdr(skb)->daddr; - } + *((__be32 *) (&hdr->hdr.l3.dest_addr[12])) = *pkey; } else if (ipv == 6) { + struct rt6_info *rt = (struct rt6_info *) dst; + struct in6_addr *pkey = &ipv6_hdr(skb)->daddr; + + if (!ipv6_addr_any(&rt->rt6i_gateway)) + pkey = &rt->rt6i_gateway; + /* IPv6 */ hdr->hdr.l3.flags = qeth_l3_get_qeth_hdr_flags6(cast_type); if (card->info.type == QETH_CARD_TYPE_IQD) hdr->hdr.l3.flags &= ~QETH_HDR_PASSTHRU; - if ((skb_dst(skb)) && (skb_dst(skb)->neighbour)) { - memcpy(hdr->hdr.l3.dest_addr, - skb_dst(skb)->neighbour->primary_key, 16); - } else { - /* fill in destination address used in ip header */ - memcpy(hdr->hdr.l3.dest_addr, - &ipv6_hdr(skb)->daddr, 16); - } + memcpy(hdr->hdr.l3.dest_addr, pkey, 16); } else { - /* passthrough */ - if ((skb->dev->type == ARPHRD_IEEE802_TR) && - !memcmp(skb->data + sizeof(struct qeth_hdr) + - sizeof(__u16), skb->dev->broadcast, 6)) { - hdr->hdr.l3.flags = QETH_CAST_BROADCAST | - QETH_HDR_PASSTHRU; - } else if (!memcmp(skb->data + sizeof(struct qeth_hdr), + if (!memcmp(skb->data + sizeof(struct qeth_hdr), skb->dev->broadcast, 6)) { /* broadcast? 
*/ hdr->hdr.l3.flags = QETH_CAST_BROADCAST | @@ -2814,6 +2847,24 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr, QETH_CAST_UNICAST | QETH_HDR_PASSTHRU; } } + rcu_read_unlock(); +} + +static inline void qeth_l3_hdr_csum(struct qeth_card *card, + struct qeth_hdr *hdr, struct sk_buff *skb) +{ + struct iphdr *iph = ip_hdr(skb); + + /* tcph->check contains already the pseudo hdr checksum + * so just set the header flags + */ + if (iph->protocol == IPPROTO_UDP) + hdr->hdr.l3.ext_flags |= QETH_HDR_EXT_UDP; + hdr->hdr.l3.ext_flags |= QETH_HDR_EXT_CSUM_TRANSP_REQ | + QETH_HDR_EXT_CSUM_HDR_REQ; + iph->check = 0; + if (card->options.performance_stats) + card->perf_stats.tx_csum++; } static void qeth_tso_fill_header(struct qeth_card *card, @@ -2826,6 +2877,7 @@ static void qeth_tso_fill_header(struct qeth_card *card, /*fix header to TSO values ...*/ hdr->hdr.hdr.l3.id = QETH_HEADER_TYPE_TSO; + hdr->hdr.hdr.l3.length = skb->len - sizeof(struct qeth_hdr_tso); /*set values which are fix for the first approach ...*/ hdr->ext.hdr_tot_len = (__u16) sizeof(struct qeth_hdr_ext_tso); hdr->ext.imb_hdr_no = 1; @@ -2851,37 +2903,16 @@ static void qeth_tso_fill_header(struct qeth_card *card, } } -static void qeth_tx_csum(struct sk_buff *skb) -{ - __wsum csum; - int offset; - - skb_set_transport_header(skb, skb->csum_start - skb_headroom(skb)); - offset = skb->csum_start - skb_headroom(skb); - BUG_ON(offset >= skb_headlen(skb)); - csum = skb_checksum(skb, offset, skb->len - offset, 0); - - offset += skb->csum_offset; - BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb)); - *(__sum16 *)(skb->data + offset) = csum_fold(csum); -} - static inline int qeth_l3_tso_elements(struct sk_buff *skb) { unsigned long tcpd = (unsigned long)tcp_hdr(skb) + tcp_hdr(skb)->doff * 4; int tcpd_len = skb->len - (tcpd - (unsigned long)skb->data); - int elements = PFN_UP(tcpd + tcpd_len) - PFN_DOWN(tcpd); - elements += skb_shinfo(skb)->nr_frags; - return elements; -} + int elements = PFN_UP(tcpd + tcpd_len - 1) - PFN_DOWN(tcpd); -static inline int qeth_l3_tso_check(struct sk_buff *skb) -{ - int len = ((unsigned long)tcp_hdr(skb) + tcp_hdr(skb)->doff * 4) - - (unsigned long)skb->data; - return (((unsigned long)skb->data & PAGE_MASK) != - (((unsigned long)skb->data + len) & PAGE_MASK)); + elements += qeth_get_elements_for_frags(skb); + + return elements; } static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) @@ -2895,15 +2926,21 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) struct sk_buff *new_skb = NULL; int ipv = qeth_get_ip_version(skb); int cast_type = qeth_l3_get_cast_type(card, skb); - struct qeth_qdio_out_q *queue = card->qdio.out_qs - [qeth_get_priority_queue(card, skb, ipv, cast_type)]; + struct qeth_qdio_out_q *queue = + card->qdio.out_qs[card->qdio.do_prio_queueing + || (cast_type && card->info.is_multicast_different) ? 
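qeth_l3_hdr_csum(), added above, replaces the deleted software fallback qeth_tx_csum(): for CHECKSUM_PARTIAL skbs the stack has already seeded the transport checksum field with the pseudo-header sum, so the driver merely requests hardware completion and clears the IP header checksum for the card to regenerate. Reduced to the generic pattern (the HW_CSUM_* flag bits are hypothetical, not qeth's):

#include <linux/in.h>
#include <linux/ip.h>
#include <linux/skbuff.h>

#define HW_CSUM_TRANSPORT	0x01	/* hypothetical device flag bits */
#define HW_CSUM_IPHDR		0x02
#define HW_CSUM_UDP		0x04

static u8 example_tx_csum_flags(struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);
	u8 flags = 0;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;
	if (iph->protocol == IPPROTO_UDP)
		flags |= HW_CSUM_UDP;	/* tell the card it is not TCP */
	flags |= HW_CSUM_TRANSPORT | HW_CSUM_IPHDR;
	iph->check = 0;			/* recomputed by the adapter */
	return flags;
}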
+ qeth_get_priority_queue(card, skb, ipv, cast_type) : + card->qdio.default_out_queue]; int tx_bytes = skb->len; - enum qeth_large_send_types large_send = QETH_LARGE_SEND_NO; + bool large_send; int data_offset = -1; int nr_frags; - if (((card->info.type == QETH_CARD_TYPE_IQD) && (!ipv)) || - card->options.sniffer) + if (((card->info.type == QETH_CARD_TYPE_IQD) && + (((card->options.cq != QETH_CQ_ENABLED) && !ipv) || + ((card->options.cq == QETH_CQ_ENABLED) && + (skb->protocol != ETH_P_AF_IUCV)))) || + card->options.sniffer) goto tx_drop; if ((card->state != CARD_STATE_UP) || !card->lan_online) { @@ -2920,19 +2957,15 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) card->perf_stats.outbound_start_time = qeth_get_micros(); } - if (skb_is_gso(skb)) - large_send = card->options.large_send; - else - if (skb->ip_summed == CHECKSUM_PARTIAL) { - qeth_tx_csum(skb); - if (card->options.performance_stats) - card->perf_stats.tx_csum++; - } + large_send = skb_is_gso(skb); if ((card->info.type == QETH_CARD_TYPE_IQD) && (!large_send) && (skb_shinfo(skb)->nr_frags == 0)) { new_skb = skb; - data_offset = ETH_HLEN; + if (new_skb->protocol == ETH_P_AF_IUCV) + data_offset = 0; + else + data_offset = ETH_HLEN; hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC); if (!hdr) goto tx_drop; @@ -2950,14 +2983,10 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) skb_pull(new_skb, ETH_HLEN); } else { if (ipv == 4) { - if (card->dev->type == ARPHRD_IEEE802_TR) - skb_pull(new_skb, TR_HLEN); - else - skb_pull(new_skb, ETH_HLEN); + skb_pull(new_skb, ETH_HLEN); } - if (ipv == 6 && card->vlangrp && - vlan_tx_tag_present(new_skb)) { + if (ipv != 4 && vlan_tx_tag_present(new_skb)) { skb_push(new_skb, VLAN_HLEN); skb_copy_to_linear_data(new_skb, new_skb->data + 4, 4); skb_copy_to_linear_data_offset(new_skb, 4, @@ -2967,7 +2996,6 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) tag = (u16 *)(new_skb->data + 12); *tag = __constant_htons(ETH_P_8021Q); *(tag + 1) = htons(vlan_tx_tag_get(new_skb)); - new_skb->vlan_tci = 0; } } @@ -2976,7 +3004,7 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) /* fix hardware limitation: as long as we do not have sbal * chaining we can not send long frag lists */ - if (large_send == QETH_LARGE_SEND_TSO) { + if (large_send) { if (qeth_l3_tso_elements(new_skb) + 1 > 16) { if (skb_linearize(new_skb)) goto tx_drop; @@ -2985,12 +3013,9 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) } } - if ((large_send == QETH_LARGE_SEND_TSO) && - (cast_type == RTN_UNSPEC)) { + if (large_send && (cast_type == RTN_UNSPEC)) { hdr = (struct qeth_hdr *)skb_push(new_skb, sizeof(struct qeth_hdr_tso)); - if (qeth_l3_tso_check(new_skb)) - QETH_DBF_MESSAGE(2, "tso skb misaligned\n"); memset(hdr, 0, sizeof(struct qeth_hdr_tso)); qeth_l3_fill_header(card, hdr, new_skb, ipv, cast_type); qeth_tso_fill_header(card, hdr, new_skb); @@ -3002,14 +3027,20 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) qeth_l3_fill_header(card, hdr, new_skb, ipv, cast_type); } else { - qeth_l3_fill_header(card, hdr, new_skb, ipv, - cast_type); - hdr->hdr.l3.length = new_skb->len - data_offset; + if (new_skb->protocol == ETH_P_AF_IUCV) + qeth_l3_fill_af_iucv_hdr(card, hdr, new_skb); + else { + qeth_l3_fill_header(card, hdr, new_skb, ipv, + cast_type); + hdr->hdr.l3.length = new_skb->len - data_offset; + } } + + if (skb->ip_summed == 
CHECKSUM_PARTIAL) + qeth_l3_hdr_csum(card, hdr, new_skb); } - elems = qeth_get_elements_no(card, (void *)hdr, new_skb, - elements_needed); + elems = qeth_get_elements_no(card, new_skb, elements_needed); if (!elems) { if (data_offset >= 0) kmem_cache_free(qeth_core_header_cache, hdr); @@ -3018,10 +3049,20 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) elements_needed += elems; nr_frags = skb_shinfo(new_skb)->nr_frags; - if (card->info.type != QETH_CARD_TYPE_IQD) + if (card->info.type != QETH_CARD_TYPE_IQD) { + int len; + if (large_send) + len = ((unsigned long)tcp_hdr(new_skb) + + tcp_hdr(new_skb)->doff * 4) - + (unsigned long)new_skb->data; + else + len = sizeof(struct qeth_hdr_layer3); + + if (qeth_hdr_chk_and_bounce(new_skb, &hdr, len)) + goto tx_drop; rc = qeth_do_send_packet(card, queue, new_skb, hdr, elements_needed); - else + } else rc = qeth_do_send_packet_fast(card, queue, new_skb, hdr, elements_needed, data_offset, 0); @@ -3031,7 +3072,7 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) if (new_skb != skb) dev_kfree_skb_any(skb); if (card->options.performance_stats) { - if (large_send != QETH_LARGE_SEND_NO) { + if (large_send) { card->perf_stats.large_send_bytes += tx_bytes; card->perf_stats.large_send_cnt++; } @@ -3070,77 +3111,91 @@ tx_drop: return NETDEV_TX_OK; } -static int qeth_l3_open(struct net_device *dev) +static int __qeth_l3_open(struct net_device *dev) { struct qeth_card *card = dev->ml_priv; + int rc = 0; - QETH_DBF_TEXT(TRACE, 4, "qethopen"); + QETH_CARD_TEXT(card, 4, "qethopen"); + if (card->state == CARD_STATE_UP) + return rc; if (card->state != CARD_STATE_SOFTSETUP) return -ENODEV; card->data.state = CH_STATE_UP; card->state = CARD_STATE_UP; netif_start_queue(dev); - if (!card->lan_online && netif_carrier_ok(dev)) - netif_carrier_off(dev); - return 0; + if (qdio_stop_irq(card->data.ccwdev, 0) >= 0) { + napi_enable(&card->napi); + napi_schedule(&card->napi); + } else + rc = -EIO; + return rc; +} + +static int qeth_l3_open(struct net_device *dev) +{ + struct qeth_card *card = dev->ml_priv; + + QETH_CARD_TEXT(card, 5, "qethope_"); + if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) { + QETH_CARD_TEXT(card, 3, "openREC"); + return -ERESTARTSYS; + } + return __qeth_l3_open(dev); } static int qeth_l3_stop(struct net_device *dev) { struct qeth_card *card = dev->ml_priv; - QETH_DBF_TEXT(TRACE, 4, "qethstop"); + QETH_CARD_TEXT(card, 4, "qethstop"); netif_tx_disable(dev); - if (card->state == CARD_STATE_UP) + if (card->state == CARD_STATE_UP) { card->state = CARD_STATE_SOFTSETUP; + napi_disable(&card->napi); + } return 0; } -static u32 qeth_l3_ethtool_get_rx_csum(struct net_device *dev) +static netdev_features_t qeth_l3_fix_features(struct net_device *dev, + netdev_features_t features) { struct qeth_card *card = dev->ml_priv; - return (card->options.checksum_type == HW_CHECKSUMMING); + if (!qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM)) + features &= ~NETIF_F_IP_CSUM; + if (!qeth_is_supported(card, IPA_OUTBOUND_TSO)) + features &= ~NETIF_F_TSO; + if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM)) + features &= ~NETIF_F_RXCSUM; + + return features; } -static int qeth_l3_ethtool_set_rx_csum(struct net_device *dev, u32 data) +static int qeth_l3_set_features(struct net_device *dev, + netdev_features_t features) { struct qeth_card *card = dev->ml_priv; - enum qeth_checksum_types csum_type; + u32 changed = dev->features ^ features; + int err; - if (data) - csum_type = HW_CHECKSUMMING; - else - csum_type = 
SW_CHECKSUMMING; + if (!(changed & NETIF_F_RXCSUM)) + return 0; - return qeth_l3_set_rx_csum(card, csum_type); -} + if (card->state == CARD_STATE_DOWN || + card->state == CARD_STATE_RECOVER) + return 0; -static int qeth_l3_ethtool_set_tso(struct net_device *dev, u32 data) -{ - struct qeth_card *card = dev->ml_priv; - int rc = 0; + err = qeth_l3_set_rx_csum(card, features & NETIF_F_RXCSUM); + if (err) + dev->features = features ^ NETIF_F_RXCSUM; - if (data) { - rc = qeth_l3_set_large_send(card, QETH_LARGE_SEND_TSO); - } else { - dev->features &= ~NETIF_F_TSO; - card->options.large_send = QETH_LARGE_SEND_NO; - } - return rc; + return err; } static const struct ethtool_ops qeth_l3_ethtool_ops = { .get_link = ethtool_op_get_link, - .get_tx_csum = ethtool_op_get_tx_csum, - .set_tx_csum = ethtool_op_set_tx_hw_csum, - .get_rx_csum = qeth_l3_ethtool_get_rx_csum, - .set_rx_csum = qeth_l3_ethtool_set_rx_csum, - .get_sg = ethtool_op_get_sg, - .set_sg = ethtool_op_set_sg, - .get_tso = ethtool_op_get_tso, - .set_tso = qeth_l3_ethtool_set_tso, .get_strings = qeth_core_get_strings, .get_ethtool_stats = qeth_core_get_ethtool_stats, .get_sset_count = qeth_core_get_sset_count, @@ -3178,13 +3233,14 @@ static const struct net_device_ops qeth_l3_netdev_ops = { .ndo_get_stats = qeth_get_stats, .ndo_start_xmit = qeth_l3_hard_start_xmit, .ndo_validate_addr = eth_validate_addr, - .ndo_set_multicast_list = qeth_l3_set_multicast_list, - .ndo_do_ioctl = qeth_l3_do_ioctl, - .ndo_change_mtu = qeth_change_mtu, - .ndo_vlan_rx_register = qeth_l3_vlan_rx_register, + .ndo_set_rx_mode = qeth_l3_set_multicast_list, + .ndo_do_ioctl = qeth_l3_do_ioctl, + .ndo_change_mtu = qeth_change_mtu, + .ndo_fix_features = qeth_l3_fix_features, + .ndo_set_features = qeth_l3_set_features, .ndo_vlan_rx_add_vid = qeth_l3_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = qeth_l3_vlan_rx_kill_vid, - .ndo_tx_timeout = qeth_tx_timeout, + .ndo_tx_timeout = qeth_tx_timeout, }; static const struct net_device_ops qeth_l3_osa_netdev_ops = { @@ -3193,27 +3249,25 @@ static const struct net_device_ops qeth_l3_osa_netdev_ops = { .ndo_get_stats = qeth_get_stats, .ndo_start_xmit = qeth_l3_hard_start_xmit, .ndo_validate_addr = eth_validate_addr, - .ndo_set_multicast_list = qeth_l3_set_multicast_list, - .ndo_do_ioctl = qeth_l3_do_ioctl, - .ndo_change_mtu = qeth_change_mtu, - .ndo_vlan_rx_register = qeth_l3_vlan_rx_register, + .ndo_set_rx_mode = qeth_l3_set_multicast_list, + .ndo_do_ioctl = qeth_l3_do_ioctl, + .ndo_change_mtu = qeth_change_mtu, + .ndo_fix_features = qeth_l3_fix_features, + .ndo_set_features = qeth_l3_set_features, .ndo_vlan_rx_add_vid = qeth_l3_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = qeth_l3_vlan_rx_kill_vid, - .ndo_tx_timeout = qeth_tx_timeout, + .ndo_tx_timeout = qeth_tx_timeout, .ndo_neigh_setup = qeth_l3_neigh_setup, }; static int qeth_l3_setup_netdev(struct qeth_card *card) { - if (card->info.type == QETH_CARD_TYPE_OSAE) { + if (card->info.type == QETH_CARD_TYPE_OSD || + card->info.type == QETH_CARD_TYPE_OSX) { if ((card->info.link_type == QETH_LINK_TYPE_LANE_TR) || (card->info.link_type == QETH_LINK_TYPE_HSTR)) { -#ifdef CONFIG_TR - card->dev = alloc_trdev(0); -#endif - if (!card->dev) - return -ENODEV; - card->dev->netdev_ops = &qeth_l3_netdev_ops; + pr_info("qeth_l3: ignoring TR device\n"); + return -ENODEV; } else { card->dev = alloc_etherdev(0); if (!card->dev) @@ -3225,6 +3279,12 @@ static int qeth_l3_setup_netdev(struct qeth_card *card) if (!(card->info.unique_id & UNIQUE_ID_NOT_BY_CARD)) card->dev->dev_id = card->info.unique_id & 0xffff; + 
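The block above swaps the per-feature ethtool hooks (get_rx_csum, set_tso and friends) for the two netdev_features_t callbacks wired into the net_device_ops: ndo_fix_features masks out bits the hardware cannot honour before the core commits them, and ndo_set_features reacts only to the bits that actually changed. A condensed sketch of that division of labour (the two hw_* helpers are placeholders for a real capability check and toggle):

#include <linux/netdevice.h>

static bool hw_supports_rxcsum(struct net_device *dev)
{
	return true;	/* placeholder for a device capability query */
}

static int hw_toggle_rxcsum(struct net_device *dev, bool on)
{
	return 0;	/* placeholder for programming the hardware */
}

static netdev_features_t ex_fix_features(struct net_device *dev,
					 netdev_features_t features)
{
	if (!hw_supports_rxcsum(dev))
		features &= ~NETIF_F_RXCSUM;
	return features;
}

static int ex_set_features(struct net_device *dev, netdev_features_t features)
{
	netdev_features_t changed = dev->features ^ features;

	if (!(changed & NETIF_F_RXCSUM))
		return 0;
	return hw_toggle_rxcsum(dev, !!(features & NETIF_F_RXCSUM));
}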
if (!card->info.guestlan) { + card->dev->hw_features = NETIF_F_SG | + NETIF_F_RXCSUM | NETIF_F_IP_CSUM | + NETIF_F_TSO; + card->dev->features = NETIF_F_RXCSUM; + } } } else if (card->info.type == QETH_CARD_TYPE_IQD) { card->dev = alloc_netdev(0, "hsi%d", ether_setup); @@ -3233,75 +3293,33 @@ static int qeth_l3_setup_netdev(struct qeth_card *card) card->dev->flags |= IFF_NOARP; card->dev->netdev_ops = &qeth_l3_netdev_ops; qeth_l3_iqd_read_initial_mac(card); + if (card->options.hsuid[0]) + memcpy(card->dev->perm_addr, card->options.hsuid, 9); } else return -ENODEV; card->dev->ml_priv = card; card->dev->watchdog_timeo = QETH_TX_TIMEOUT; card->dev->mtu = card->info.initial_mtu; - SET_ETHTOOL_OPS(card->dev, &qeth_l3_ethtool_ops); - card->dev->features |= NETIF_F_HW_VLAN_TX | - NETIF_F_HW_VLAN_RX | - NETIF_F_HW_VLAN_FILTER; + card->dev->ethtool_ops = &qeth_l3_ethtool_ops; + card->dev->features |= NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_FILTER; card->dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; card->dev->gso_max_size = 15 * PAGE_SIZE; SET_NETDEV_DEV(card->dev, &card->gdev->dev); + netif_napi_add(card->dev, &card->napi, qeth_l3_poll, QETH_NAPI_WEIGHT); return register_netdev(card->dev); } -static void qeth_l3_qdio_input_handler(struct ccw_device *ccwdev, - unsigned int qdio_err, unsigned int queue, int first_element, - int count, unsigned long card_ptr) -{ - struct net_device *net_dev; - struct qeth_card *card; - struct qeth_qdio_buffer *buffer; - int index; - int i; - - card = (struct qeth_card *) card_ptr; - net_dev = card->dev; - if (card->options.performance_stats) { - card->perf_stats.inbound_cnt++; - card->perf_stats.inbound_start_time = qeth_get_micros(); - } - if (qdio_err & QDIO_ERROR_ACTIVATE_CHECK_CONDITION) { - QETH_DBF_TEXT(TRACE, 1, "qdinchk"); - QETH_DBF_TEXT_(TRACE, 1, "%s", CARD_BUS_ID(card)); - QETH_DBF_TEXT_(TRACE, 1, "%04X%04X", - first_element, count); - QETH_DBF_TEXT_(TRACE, 1, "%04X", queue); - qeth_schedule_recovery(card); - return; - } - for (i = first_element; i < (first_element + count); ++i) { - index = i % QDIO_MAX_BUFFERS_PER_Q; - buffer = &card->qdio.in_q->bufs[index]; - if (!(qdio_err && - qeth_check_qdio_errors(card, buffer->buffer, - qdio_err, "qinerr"))) - qeth_l3_process_inbound_buffer(card, buffer, index); - /* clear buffer and give back to hardware */ - qeth_put_buffer_pool_entry(card, buffer->pool_entry); - qeth_queue_input_buffer(card, index); - } - if (card->options.performance_stats) - card->perf_stats.inbound_time += qeth_get_micros() - - card->perf_stats.inbound_start_time; -} - static int qeth_l3_probe_device(struct ccwgroup_device *gdev) { struct qeth_card *card = dev_get_drvdata(&gdev->dev); qeth_l3_create_device_attributes(&gdev->dev); card->options.layer2 = 0; - card->discipline.input_handler = (qdio_handler_t *) - qeth_l3_qdio_input_handler; - card->discipline.output_handler = (qdio_handler_t *) - qeth_qdio_output_handler; - card->discipline.recover = qeth_l3_recover; + card->info.hwtrap = 0; return 0; } @@ -3309,21 +3327,20 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev) { struct qeth_card *card = dev_get_drvdata(&cgdev->dev); + qeth_l3_remove_device_attributes(&cgdev->dev); + qeth_set_allowed_threads(card, 0, 1); wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0); - if (cgdev->state == CCWGROUP_ONLINE) { - card->use_hard_stop = 1; + if (cgdev->state == CCWGROUP_ONLINE) qeth_l3_set_offline(cgdev); - } if (card->dev) { unregister_netdev(card->dev); card->dev = NULL; } - 
qeth_l3_remove_device_attributes(&cgdev->dev); - qeth_l3_clear_ip_list(card, 0, 0); + qeth_l3_clear_ip_list(card, 0); qeth_l3_clear_ipato_list(card); return; } @@ -3334,7 +3351,8 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode) int rc = 0; enum qeth_card_states recover_flag; - BUG_ON(!card); + mutex_lock(&card->discipline_mutex); + mutex_lock(&card->conf_mutex); QETH_DBF_TEXT(SETUP, 2, "setonlin"); QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *)); @@ -3346,14 +3364,20 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode) goto out_remove; } - qeth_l3_query_ipassists(card, QETH_PROT_IPV4); - if (!card->dev && qeth_l3_setup_netdev(card)) { rc = -ENODEV; goto out_remove; } + if (qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP)) { + if (card->info.hwtrap && + qeth_hw_trap(card, QETH_DIAGS_TRAP_ARM)) + card->info.hwtrap = 0; + } else + card->info.hwtrap = 0; + card->state = CARD_STATE_HARDSETUP; + memset(&card->rx, 0, sizeof(struct qeth_rx)); qeth_print_status_message(card); /* softsetup */ @@ -3366,21 +3390,23 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode) dev_warn(&card->gdev->dev, "The LAN is offline\n"); card->lan_online = 0; - return 0; + goto contin; } rc = -ENODEV; goto out_remove; } else card->lan_online = 1; +contin: rc = qeth_l3_setadapter_parms(card); if (rc) QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc); if (!card->options.sniffer) { rc = qeth_l3_start_ipassists(card); - if (rc) + if (rc) { QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc); - qeth_l3_set_large_send(card, card->options.large_send); + goto out_remove; + } rc = qeth_l3_setrouting_v4(card); if (rc) QETH_DBF_TEXT_(SETUP, 2, "4err%d", rc); @@ -3397,33 +3423,40 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode) goto out_remove; } card->state = CARD_STATE_SOFTSETUP; - netif_carrier_on(card->dev); qeth_set_allowed_threads(card, 0xffffffff, 0); qeth_l3_set_ip_addr_list(card); + if (card->lan_online) + netif_carrier_on(card->dev); + else + netif_carrier_off(card->dev); if (recover_flag == CARD_STATE_RECOVER) { + rtnl_lock(); if (recovery_mode) - qeth_l3_open(card->dev); - else { - rtnl_lock(); + __qeth_l3_open(card->dev); + else dev_open(card->dev); - rtnl_unlock(); - } qeth_l3_set_multicast_list(card->dev); + rtnl_unlock(); } + qeth_trace_features(card); /* let user_space know that device is online */ kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE); + mutex_unlock(&card->conf_mutex); + mutex_unlock(&card->discipline_mutex); return 0; out_remove: - card->use_hard_stop = 1; qeth_l3_stop_card(card, 0); ccw_device_set_offline(CARD_DDEV(card)); ccw_device_set_offline(CARD_WDEV(card)); ccw_device_set_offline(CARD_RDEV(card)); + qdio_free(CARD_DDEV(card)); if (recover_flag == CARD_STATE_RECOVER) card->state = CARD_STATE_RECOVER; else card->state = CARD_STATE_DOWN; + mutex_unlock(&card->conf_mutex); + mutex_unlock(&card->discipline_mutex); return rc; } @@ -3439,13 +3472,24 @@ static int __qeth_l3_set_offline(struct ccwgroup_device *cgdev, int rc = 0, rc2 = 0, rc3 = 0; enum qeth_card_states recover_flag; + mutex_lock(&card->discipline_mutex); + mutex_lock(&card->conf_mutex); QETH_DBF_TEXT(SETUP, 3, "setoffl"); QETH_DBF_HEX(SETUP, 3, &card, sizeof(void *)); if (card->dev && netif_carrier_ok(card->dev)) netif_carrier_off(card->dev); recover_flag = card->state; + if ((!recovery_mode && card->info.hwtrap) || card->info.hwtrap == 2) { + qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM); + card->info.hwtrap = 1; + } 
qeth_l3_stop_card(card, recovery_mode); + if ((card->options.cq == QETH_CQ_ENABLED) && card->dev) { + rtnl_lock(); + call_netdevice_notifiers(NETDEV_REBOOT, card->dev); + rtnl_unlock(); + } rc = ccw_device_set_offline(CARD_DDEV(card)); rc2 = ccw_device_set_offline(CARD_WDEV(card)); rc3 = ccw_device_set_offline(CARD_RDEV(card)); @@ -3453,10 +3497,13 @@ static int __qeth_l3_set_offline(struct ccwgroup_device *cgdev, rc = (rc2) ? rc2 : rc3; if (rc) QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); + qdio_free(CARD_DDEV(card)); if (recover_flag == CARD_STATE_UP) card->state = CARD_STATE_RECOVER; /* let user_space know that device is offline */ kobject_uevent(&cgdev->dev.kobj, KOBJ_CHANGE); + mutex_unlock(&card->conf_mutex); + mutex_unlock(&card->discipline_mutex); return 0; } @@ -3471,38 +3518,39 @@ static int qeth_l3_recover(void *ptr) int rc = 0; card = (struct qeth_card *) ptr; - QETH_DBF_TEXT(TRACE, 2, "recover1"); - QETH_DBF_HEX(TRACE, 2, &card, sizeof(void *)); + QETH_CARD_TEXT(card, 2, "recover1"); + QETH_CARD_HEX(card, 2, &card, sizeof(void *)); if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD)) return 0; - QETH_DBF_TEXT(TRACE, 2, "recover2"); + QETH_CARD_TEXT(card, 2, "recover2"); dev_warn(&card->gdev->dev, "A recovery process has been started for the device\n"); - card->use_hard_stop = 1; + qeth_set_recovery_task(card); __qeth_l3_set_offline(card->gdev, 1); rc = __qeth_l3_set_online(card->gdev, 1); - /* don't run another scheduled recovery */ - qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD); - qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD); if (!rc) dev_info(&card->gdev->dev, "Device successfully recovered!\n"); else { - rtnl_lock(); - dev_close(card->dev); - rtnl_unlock(); + qeth_close_dev(card); dev_warn(&card->gdev->dev, "The qeth device driver " - "failed to recover an error on the device\n"); + "failed to recover an error on the device\n"); } + qeth_clear_recovery_task(card); + qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD); + qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD); return 0; } static void qeth_l3_shutdown(struct ccwgroup_device *gdev) { struct qeth_card *card = dev_get_drvdata(&gdev->dev); - qeth_l3_clear_ip_list(card, 0, 0); + qeth_set_allowed_threads(card, 0, 1); + if ((gdev->state == CCWGROUP_ONLINE) && card->info.hwtrap) + qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM); qeth_qdio_clear_card(card, 0); qeth_clear_qdio_buffers(card); + qdio_free(CARD_DDEV(card)); } static int qeth_l3_pm_suspend(struct ccwgroup_device *gdev) @@ -3516,7 +3564,8 @@ static int qeth_l3_pm_suspend(struct ccwgroup_device *gdev) if (gdev->state == CCWGROUP_OFFLINE) return 0; if (card->state == CARD_STATE_UP) { - card->use_hard_stop = 1; + if (card->info.hwtrap) + qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM); __qeth_l3_set_offline(card->gdev, 1); } else __qeth_l3_set_offline(card->gdev, 0); @@ -3550,8 +3599,19 @@ out: return rc; } -struct ccwgroup_driver qeth_l3_ccwgroup_driver = { - .probe = qeth_l3_probe_device, +/* Returns zero if the command is successfully "consumed" */ +static int qeth_l3_control_event(struct qeth_card *card, + struct qeth_ipa_cmd *cmd) +{ + return 1; +} + +struct qeth_discipline qeth_l3_discipline = { + .start_poll = qeth_qdio_start_poll, + .input_handler = (qdio_handler_t *) qeth_qdio_input_handler, + .output_handler = (qdio_handler_t *) qeth_qdio_output_handler, + .recover = qeth_l3_recover, + .setup = qeth_l3_probe_device, .remove = qeth_l3_remove_device, .set_online = qeth_l3_set_online, .set_offline = qeth_l3_set_offline, @@ -3559,8 +3619,9 @@ 
struct ccwgroup_driver qeth_l3_ccwgroup_driver = { .freeze = qeth_l3_pm_suspend, .thaw = qeth_l3_pm_resume, .restore = qeth_l3_pm_resume, + .control_event_handler = qeth_l3_control_event, }; -EXPORT_SYMBOL_GPL(qeth_l3_ccwgroup_driver); +EXPORT_SYMBOL_GPL(qeth_l3_discipline); static int qeth_l3_ip_event(struct notifier_block *this, unsigned long event, void *ptr) @@ -3573,10 +3634,10 @@ static int qeth_l3_ip_event(struct notifier_block *this, if (dev_net(dev) != &init_net) return NOTIFY_DONE; - QETH_DBF_TEXT(TRACE, 3, "ipevent"); card = qeth_l3_get_card_from_dev(dev); if (!card) return NOTIFY_DONE; + QETH_CARD_TEXT(card, 3, "ipevent"); addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV4); if (addr != NULL) { @@ -3620,11 +3681,10 @@ static int qeth_l3_ip6_event(struct notifier_block *this, struct qeth_ipaddr *addr; struct qeth_card *card; - QETH_DBF_TEXT(TRACE, 3, "ip6event"); - card = qeth_l3_get_card_from_dev(dev); if (!card) return NOTIFY_DONE; + QETH_CARD_TEXT(card, 3, "ip6event"); if (!qeth_is_supported(card, IPA_IPV6)) return NOTIFY_DONE; @@ -3663,7 +3723,7 @@ static int qeth_l3_register_notifiers(void) { int rc; - QETH_DBF_TEXT(TRACE, 5, "regnotif"); + QETH_DBF_TEXT(SETUP, 5, "regnotif"); rc = register_inetaddr_notifier(&qeth_l3_ip_notifier); if (rc) return rc; @@ -3682,10 +3742,10 @@ static int qeth_l3_register_notifiers(void) static void qeth_l3_unregister_notifiers(void) { - QETH_DBF_TEXT(TRACE, 5, "unregnot"); - BUG_ON(unregister_inetaddr_notifier(&qeth_l3_ip_notifier)); + QETH_DBF_TEXT(SETUP, 5, "unregnot"); + WARN_ON(unregister_inetaddr_notifier(&qeth_l3_ip_notifier)); #ifdef CONFIG_QETH_IPV6 - BUG_ON(unregister_inet6addr_notifier(&qeth_l3_ip6_notifier)); + WARN_ON(unregister_inet6addr_notifier(&qeth_l3_ip6_notifier)); #endif /* QETH_IPV6 */ } diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c index 3f08b11274a..adef5f5de11 100644 --- a/drivers/s390/net/qeth_l3_sys.c +++ b/drivers/s390/net/qeth_l3_sys.c @@ -1,6 +1,4 @@ /* - * drivers/s390/net/qeth_l3_sys.c - * * Copyright IBM Corp. 
2007 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>, * Frank Pavlic <fpavlic@de.ibm.com>, @@ -8,21 +6,13 @@ * Frank Blaschka <frank.blaschka@de.ibm.com> */ +#include <linux/slab.h> +#include <asm/ebcdic.h> #include "qeth_l3.h" #define QETH_DEVICE_ATTR(_id, _name, _mode, _show, _store) \ struct device_attribute dev_attr_##_id = __ATTR(_name, _mode, _show, _store) -static const char *qeth_l3_get_checksum_str(struct qeth_card *card) -{ - if (card->options.checksum_type == SW_CHECKSUMMING) - return "sw"; - else if (card->options.checksum_type == HW_CHECKSUMMING) - return "hw"; - else - return "no"; -} - static ssize_t qeth_l3_dev_route_show(struct qeth_card *card, struct qeth_routing_info *route, char *buf) { @@ -68,10 +58,10 @@ static ssize_t qeth_l3_dev_route_store(struct qeth_card *card, { enum qeth_routing_types old_route_type = route->type; char *tmp; - int rc; + int rc = 0; tmp = strsep((char **) &buf, "\n"); - + mutex_lock(&card->conf_mutex); if (!strcmp(tmp, "no_router")) { route->type = NO_ROUTER; } else if (!strcmp(tmp, "primary_connector")) { @@ -85,7 +75,8 @@ static ssize_t qeth_l3_dev_route_store(struct qeth_card *card, } else if (!strcmp(tmp, "multicast_router")) { route->type = MULTICAST_ROUTER; } else { - return -EINVAL; + rc = -EINVAL; + goto out; } if (((card->state == CARD_STATE_SOFTSETUP) || (card->state == CARD_STATE_UP)) && @@ -95,7 +86,11 @@ static ssize_t qeth_l3_dev_route_store(struct qeth_card *card, else if (prot == QETH_PROT_IPV6) rc = qeth_l3_setrouting_v6(card); } - return count; +out: + if (rc) + route->type = old_route_type; + mutex_unlock(&card->conf_mutex); + return rc ? rc : count; } static ssize_t qeth_l3_dev_route4_store(struct device *dev, @@ -155,170 +150,31 @@ static ssize_t qeth_l3_dev_fake_broadcast_store(struct device *dev, { struct qeth_card *card = dev_get_drvdata(dev); char *tmp; - int i; + int i, rc = 0; if (!card) return -EINVAL; + mutex_lock(&card->conf_mutex); if ((card->state != CARD_STATE_DOWN) && - (card->state != CARD_STATE_RECOVER)) - return -EPERM; + (card->state != CARD_STATE_RECOVER)) { + rc = -EPERM; + goto out; + } i = simple_strtoul(buf, &tmp, 16); if ((i == 0) || (i == 1)) card->options.fake_broadcast = i; - else { - return -EINVAL; - } - return count; + else + rc = -EINVAL; +out: + mutex_unlock(&card->conf_mutex); + return rc ? rc : count; } static DEVICE_ATTR(fake_broadcast, 0644, qeth_l3_dev_fake_broadcast_show, qeth_l3_dev_fake_broadcast_store); -static ssize_t qeth_l3_dev_broadcast_mode_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct qeth_card *card = dev_get_drvdata(dev); - - if (!card) - return -EINVAL; - - if (!((card->info.link_type == QETH_LINK_TYPE_HSTR) || - (card->info.link_type == QETH_LINK_TYPE_LANE_TR))) - return sprintf(buf, "n/a\n"); - - return sprintf(buf, "%s\n", (card->options.broadcast_mode == - QETH_TR_BROADCAST_ALLRINGS)? 
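qeth_l3_dev_route_store() above sets the shape that the rest of this file's store handlers are converted to: hold card->conf_mutex across the whole parse-and-apply sequence, restore the previous value when applying fails, and return either the error or the full count. The skeleton, with generic names standing in for the qeth specifics:

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/mutex.h>

struct example_priv {
	struct mutex conf_mutex;
	int value;
};

static int example_apply(struct example_priv *priv)
{
	return 0;	/* placeholder: push priv->value to the hardware */
}

static ssize_t example_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct example_priv *priv = dev_get_drvdata(dev);
	int old, val, rc;

	rc = kstrtoint(buf, 0, &val);
	if (rc)
		return rc;

	mutex_lock(&priv->conf_mutex);
	old = priv->value;
	priv->value = val;
	rc = example_apply(priv);
	if (rc)
		priv->value = old;	/* roll back on failure */
	mutex_unlock(&priv->conf_mutex);
	return rc ? rc : count;
}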
- "all rings":"local"); -} - -static ssize_t qeth_l3_dev_broadcast_mode_store(struct device *dev, - struct device_attribute *attr, const char *buf, size_t count) -{ - struct qeth_card *card = dev_get_drvdata(dev); - char *tmp; - - if (!card) - return -EINVAL; - - if ((card->state != CARD_STATE_DOWN) && - (card->state != CARD_STATE_RECOVER)) - return -EPERM; - - if (!((card->info.link_type == QETH_LINK_TYPE_HSTR) || - (card->info.link_type == QETH_LINK_TYPE_LANE_TR))) { - return -EINVAL; - } - - tmp = strsep((char **) &buf, "\n"); - - if (!strcmp(tmp, "local")) { - card->options.broadcast_mode = QETH_TR_BROADCAST_LOCAL; - return count; - } else if (!strcmp(tmp, "all_rings")) { - card->options.broadcast_mode = QETH_TR_BROADCAST_ALLRINGS; - return count; - } else { - return -EINVAL; - } - return count; -} - -static DEVICE_ATTR(broadcast_mode, 0644, qeth_l3_dev_broadcast_mode_show, - qeth_l3_dev_broadcast_mode_store); - -static ssize_t qeth_l3_dev_canonical_macaddr_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct qeth_card *card = dev_get_drvdata(dev); - - if (!card) - return -EINVAL; - - if (!((card->info.link_type == QETH_LINK_TYPE_HSTR) || - (card->info.link_type == QETH_LINK_TYPE_LANE_TR))) - return sprintf(buf, "n/a\n"); - - return sprintf(buf, "%i\n", (card->options.macaddr_mode == - QETH_TR_MACADDR_CANONICAL)? 1:0); -} - -static ssize_t qeth_l3_dev_canonical_macaddr_store(struct device *dev, - struct device_attribute *attr, const char *buf, size_t count) -{ - struct qeth_card *card = dev_get_drvdata(dev); - char *tmp; - int i; - - if (!card) - return -EINVAL; - - if ((card->state != CARD_STATE_DOWN) && - (card->state != CARD_STATE_RECOVER)) - return -EPERM; - - if (!((card->info.link_type == QETH_LINK_TYPE_HSTR) || - (card->info.link_type == QETH_LINK_TYPE_LANE_TR))) { - return -EINVAL; - } - - i = simple_strtoul(buf, &tmp, 16); - if ((i == 0) || (i == 1)) - card->options.macaddr_mode = i? 
- QETH_TR_MACADDR_CANONICAL : - QETH_TR_MACADDR_NONCANONICAL; - else { - return -EINVAL; - } - return count; -} - -static DEVICE_ATTR(canonical_macaddr, 0644, qeth_l3_dev_canonical_macaddr_show, - qeth_l3_dev_canonical_macaddr_store); - -static ssize_t qeth_l3_dev_checksum_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct qeth_card *card = dev_get_drvdata(dev); - - if (!card) - return -EINVAL; - - return sprintf(buf, "%s checksumming\n", - qeth_l3_get_checksum_str(card)); -} - -static ssize_t qeth_l3_dev_checksum_store(struct device *dev, - struct device_attribute *attr, const char *buf, size_t count) -{ - struct qeth_card *card = dev_get_drvdata(dev); - enum qeth_checksum_types csum_type; - char *tmp; - int rc; - - if (!card) - return -EINVAL; - - tmp = strsep((char **) &buf, "\n"); - if (!strcmp(tmp, "sw_checksumming")) - csum_type = SW_CHECKSUMMING; - else if (!strcmp(tmp, "hw_checksumming")) - csum_type = HW_CHECKSUMMING; - else if (!strcmp(tmp, "no_checksumming")) - csum_type = NO_CHECKSUMMING; - else - return -EINVAL; - - rc = qeth_l3_set_rx_csum(card, csum_type); - if (rc) - return rc; - return count; -} - -static DEVICE_ATTR(checksumming, 0644, qeth_l3_dev_checksum_show, - qeth_l3_dev_checksum_store); - static ssize_t qeth_l3_dev_sniffer_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -334,7 +190,7 @@ static ssize_t qeth_l3_dev_sniffer_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct qeth_card *card = dev_get_drvdata(dev); - int ret; + int rc = 0; unsigned long i; if (!card) @@ -342,94 +198,156 @@ static ssize_t qeth_l3_dev_sniffer_store(struct device *dev, if (card->info.type != QETH_CARD_TYPE_IQD) return -EPERM; + if (card->options.cq == QETH_CQ_ENABLED) + return -EPERM; + mutex_lock(&card->conf_mutex); if ((card->state != CARD_STATE_DOWN) && - (card->state != CARD_STATE_RECOVER)) - return -EPERM; + (card->state != CARD_STATE_RECOVER)) { + rc = -EPERM; + goto out; + } - ret = strict_strtoul(buf, 16, &i); - if (ret) - return -EINVAL; + rc = kstrtoul(buf, 16, &i); + if (rc) { + rc = -EINVAL; + goto out; + } switch (i) { case 0: card->options.sniffer = i; break; case 1: - ret = qdio_get_ssqd_desc(CARD_DDEV(card), &card->ssqd); + qdio_get_ssqd_desc(CARD_DDEV(card), &card->ssqd); if (card->ssqd.qdioac2 & QETH_SNIFF_AVAIL) { card->options.sniffer = i; if (card->qdio.init_pool.buf_count != QETH_IN_BUF_COUNT_MAX) qeth_realloc_buffer_pool(card, QETH_IN_BUF_COUNT_MAX); - break; } else - return -EPERM; - default: /* fall through */ - return -EINVAL; + rc = -EPERM; + break; + default: + rc = -EINVAL; } - return count; +out: + mutex_unlock(&card->conf_mutex); + return rc ? 
rc : count; } static DEVICE_ATTR(sniffer, 0644, qeth_l3_dev_sniffer_show, qeth_l3_dev_sniffer_store); -static ssize_t qeth_l3_dev_large_send_show(struct device *dev, - struct device_attribute *attr, char *buf) + +static ssize_t qeth_l3_dev_hsuid_show(struct device *dev, + struct device_attribute *attr, char *buf) { struct qeth_card *card = dev_get_drvdata(dev); + char tmp_hsuid[9]; if (!card) return -EINVAL; - switch (card->options.large_send) { - case QETH_LARGE_SEND_NO: - return sprintf(buf, "%s\n", "no"); - case QETH_LARGE_SEND_TSO: - return sprintf(buf, "%s\n", "TSO"); - default: - return sprintf(buf, "%s\n", "N/A"); - } + if (card->info.type != QETH_CARD_TYPE_IQD) + return -EPERM; + + if (card->state == CARD_STATE_DOWN) + return -EPERM; + + memcpy(tmp_hsuid, card->options.hsuid, sizeof(tmp_hsuid)); + EBCASC(tmp_hsuid, 8); + return sprintf(buf, "%s\n", tmp_hsuid); } -static ssize_t qeth_l3_dev_large_send_store(struct device *dev, +static ssize_t qeth_l3_dev_hsuid_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct qeth_card *card = dev_get_drvdata(dev); - enum qeth_large_send_types type; - int rc = 0; + struct qeth_ipaddr *addr; char *tmp; + int i; if (!card) return -EINVAL; - tmp = strsep((char **) &buf, "\n"); - if (!strcmp(tmp, "no")) - type = QETH_LARGE_SEND_NO; - else if (!strcmp(tmp, "TSO")) - type = QETH_LARGE_SEND_TSO; - else + + if (card->info.type != QETH_CARD_TYPE_IQD) + return -EPERM; + if (card->state != CARD_STATE_DOWN && + card->state != CARD_STATE_RECOVER) + return -EPERM; + if (card->options.sniffer) + return -EPERM; + if (card->options.cq == QETH_CQ_NOTAVAILABLE) + return -EPERM; + + tmp = strsep((char **)&buf, "\n"); + if (strlen(tmp) > 8) return -EINVAL; - if (card->options.large_send == type) + if (card->options.hsuid[0]) { + /* delete old ip address */ + addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV6); + if (addr != NULL) { + addr->u.a6.addr.s6_addr32[0] = 0xfe800000; + addr->u.a6.addr.s6_addr32[1] = 0x00000000; + for (i = 8; i < 16; i++) + addr->u.a6.addr.s6_addr[i] = + card->options.hsuid[i - 8]; + addr->u.a6.pfxlen = 0; + addr->type = QETH_IP_TYPE_NORMAL; + } else + return -ENOMEM; + if (!qeth_l3_delete_ip(card, addr)) + kfree(addr); + qeth_l3_set_ip_addr_list(card); + } + + if (strlen(tmp) == 0) { + /* delete ip address only */ + card->options.hsuid[0] = '\0'; + if (card->dev) + memcpy(card->dev->perm_addr, card->options.hsuid, 9); + qeth_configure_cq(card, QETH_CQ_DISABLED); return count; - rc = qeth_l3_set_large_send(card, type); - if (rc) - return rc; + } + + if (qeth_configure_cq(card, QETH_CQ_ENABLED)) + return -EPERM; + + snprintf(card->options.hsuid, sizeof(card->options.hsuid), + "%-8s", tmp); + ASCEBC(card->options.hsuid, 8); + if (card->dev) + memcpy(card->dev->perm_addr, card->options.hsuid, 9); + + addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV6); + if (addr != NULL) { + addr->u.a6.addr.s6_addr32[0] = 0xfe800000; + addr->u.a6.addr.s6_addr32[1] = 0x00000000; + for (i = 8; i < 16; i++) + addr->u.a6.addr.s6_addr[i] = card->options.hsuid[i - 8]; + addr->u.a6.pfxlen = 0; + addr->type = QETH_IP_TYPE_NORMAL; + } else + return -ENOMEM; + if (!qeth_l3_add_ip(card, addr)) + kfree(addr); + qeth_l3_set_ip_addr_list(card); + return count; } -static DEVICE_ATTR(large_send, 0644, qeth_l3_dev_large_send_show, - qeth_l3_dev_large_send_store); +static DEVICE_ATTR(hsuid, 0644, qeth_l3_dev_hsuid_show, + qeth_l3_dev_hsuid_store); + static struct attribute *qeth_l3_device_attrs[] = { &dev_attr_route4.attr, &dev_attr_route6.attr, 
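The new hsuid attribute above derives an IPv6 link-local address from the HiperSockets user id: the name is space-padded to 8 characters, converted to EBCDIC with ASCEBC(), and those 8 bytes become the interface identifier of an fe80::/64 address that is then registered through qeth_l3_add_ip(). The address construction, reduced to its core (byte-wise here, whereas the driver stores s6_addr32 words and relies on s390 being big-endian):

#include <linux/in6.h>
#include <linux/string.h>

static void example_hsuid_to_linklocal(struct in6_addr *a, const u8 ebc_id[8])
{
	memset(a, 0, sizeof(*a));
	a->s6_addr[0] = 0xfe;			/* fe80::/64 prefix */
	a->s6_addr[1] = 0x80;
	memcpy(&a->s6_addr[8], ebc_id, 8);	/* EBCDIC user id as IID */
}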
&dev_attr_fake_broadcast.attr, - &dev_attr_broadcast_mode.attr, - &dev_attr_canonical_macaddr.attr, - &dev_attr_checksumming.attr, &dev_attr_sniffer.attr, - &dev_attr_large_send.attr, + &dev_attr_hsuid.attr, NULL, }; @@ -452,26 +370,45 @@ static ssize_t qeth_l3_dev_ipato_enable_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct qeth_card *card = dev_get_drvdata(dev); + struct qeth_ipaddr *tmpipa, *t; char *tmp; + int rc = 0; if (!card) return -EINVAL; + mutex_lock(&card->conf_mutex); if ((card->state != CARD_STATE_DOWN) && - (card->state != CARD_STATE_RECOVER)) - return -EPERM; + (card->state != CARD_STATE_RECOVER)) { + rc = -EPERM; + goto out; + } tmp = strsep((char **) &buf, "\n"); if (!strcmp(tmp, "toggle")) { card->ipato.enabled = (card->ipato.enabled)? 0 : 1; } else if (!strcmp(tmp, "1")) { card->ipato.enabled = 1; + list_for_each_entry_safe(tmpipa, t, card->ip_tbd_list, entry) { + if ((tmpipa->type == QETH_IP_TYPE_NORMAL) && + qeth_l3_is_addr_covered_by_ipato(card, tmpipa)) + tmpipa->set_flags |= + QETH_IPA_SETIP_TAKEOVER_FLAG; + } + } else if (!strcmp(tmp, "0")) { card->ipato.enabled = 0; - } else { - return -EINVAL; - } - return count; + list_for_each_entry_safe(tmpipa, t, card->ip_tbd_list, entry) { + if (tmpipa->set_flags & + QETH_IPA_SETIP_TAKEOVER_FLAG) + tmpipa->set_flags &= + ~QETH_IPA_SETIP_TAKEOVER_FLAG; + } + } else + rc = -EINVAL; +out: + mutex_unlock(&card->conf_mutex); + return rc ? rc : count; } static QETH_DEVICE_ATTR(ipato_enable, enable, 0644, @@ -495,10 +432,12 @@ static ssize_t qeth_l3_dev_ipato_invert4_store(struct device *dev, { struct qeth_card *card = dev_get_drvdata(dev); char *tmp; + int rc = 0; if (!card) return -EINVAL; + mutex_lock(&card->conf_mutex); tmp = strsep((char **) &buf, "\n"); if (!strcmp(tmp, "toggle")) { card->ipato.invert4 = (card->ipato.invert4)? 0 : 1; @@ -506,10 +445,10 @@ static ssize_t qeth_l3_dev_ipato_invert4_store(struct device *dev, card->ipato.invert4 = 1; } else if (!strcmp(tmp, "0")) { card->ipato.invert4 = 0; - } else { - return -EINVAL; - } - return count; + } else + rc = -EINVAL; + mutex_unlock(&card->conf_mutex); + return rc ? rc : count; } static QETH_DEVICE_ATTR(ipato_invert4, invert4, 0644, @@ -591,27 +530,28 @@ static ssize_t qeth_l3_dev_ipato_add_store(const char *buf, size_t count, struct qeth_ipato_entry *ipatoe; u8 addr[16]; int mask_bits; - int rc; + int rc = 0; + mutex_lock(&card->conf_mutex); rc = qeth_l3_parse_ipatoe(buf, proto, addr, &mask_bits); if (rc) - return rc; + goto out; ipatoe = kzalloc(sizeof(struct qeth_ipato_entry), GFP_KERNEL); if (!ipatoe) { - return -ENOMEM; + rc = -ENOMEM; + goto out; } ipatoe->proto = proto; memcpy(ipatoe->addr, addr, (proto == QETH_PROT_IPV4)? 4:16); ipatoe->mask_bits = mask_bits; rc = qeth_l3_add_ipato_entry(card, ipatoe); - if (rc) { + if (rc) kfree(ipatoe); - return rc; - } - - return count; +out: + mutex_unlock(&card->conf_mutex); + return rc ? rc : count; } static ssize_t qeth_l3_dev_ipato_add4_store(struct device *dev, @@ -634,15 +574,14 @@ static ssize_t qeth_l3_dev_ipato_del_store(const char *buf, size_t count, { u8 addr[16]; int mask_bits; - int rc; + int rc = 0; + mutex_lock(&card->conf_mutex); rc = qeth_l3_parse_ipatoe(buf, proto, addr, &mask_bits); - if (rc) - return rc; - - qeth_l3_del_ipato_entry(card, proto, addr, mask_bits); - - return count; + if (!rc) + qeth_l3_del_ipato_entry(card, proto, addr, mask_bits); + mutex_unlock(&card->conf_mutex); + return rc ? 
rc : count; } static ssize_t qeth_l3_dev_ipato_del4_store(struct device *dev, @@ -675,10 +614,12 @@ static ssize_t qeth_l3_dev_ipato_invert6_store(struct device *dev, { struct qeth_card *card = dev_get_drvdata(dev); char *tmp; + int rc = 0; if (!card) return -EINVAL; + mutex_lock(&card->conf_mutex); tmp = strsep((char **) &buf, "\n"); if (!strcmp(tmp, "toggle")) { card->ipato.invert6 = (card->ipato.invert6)? 0 : 1; @@ -686,10 +627,10 @@ static ssize_t qeth_l3_dev_ipato_invert6_store(struct device *dev, card->ipato.invert6 = 1; } else if (!strcmp(tmp, "0")) { card->ipato.invert6 = 0; - } else { - return -EINVAL; - } - return count; + } else + rc = -EINVAL; + mutex_unlock(&card->conf_mutex); + return rc ? rc : count; } static QETH_DEVICE_ATTR(ipato_invert6, invert6, 0644, @@ -811,15 +752,12 @@ static ssize_t qeth_l3_dev_vipa_add_store(const char *buf, size_t count, u8 addr[16] = {0, }; int rc; + mutex_lock(&card->conf_mutex); rc = qeth_l3_parse_vipae(buf, proto, addr); - if (rc) - return rc; - - rc = qeth_l3_add_vipa(card, proto, addr); - if (rc) - return rc; - - return count; + if (!rc) + rc = qeth_l3_add_vipa(card, proto, addr); + mutex_unlock(&card->conf_mutex); + return rc ? rc : count; } static ssize_t qeth_l3_dev_vipa_add4_store(struct device *dev, @@ -843,13 +781,12 @@ static ssize_t qeth_l3_dev_vipa_del_store(const char *buf, size_t count, u8 addr[16]; int rc; + mutex_lock(&card->conf_mutex); rc = qeth_l3_parse_vipae(buf, proto, addr); - if (rc) - return rc; - - qeth_l3_del_vipa(card, proto, addr); - - return count; + if (!rc) + qeth_l3_del_vipa(card, proto, addr); + mutex_unlock(&card->conf_mutex); + return rc ? rc : count; } static ssize_t qeth_l3_dev_vipa_del4_store(struct device *dev, @@ -977,15 +914,12 @@ static ssize_t qeth_l3_dev_rxip_add_store(const char *buf, size_t count, u8 addr[16] = {0, }; int rc; + mutex_lock(&card->conf_mutex); rc = qeth_l3_parse_rxipe(buf, proto, addr); - if (rc) - return rc; - - rc = qeth_l3_add_rxip(card, proto, addr); - if (rc) - return rc; - - return count; + if (!rc) + rc = qeth_l3_add_rxip(card, proto, addr); + mutex_unlock(&card->conf_mutex); + return rc ? rc : count; } static ssize_t qeth_l3_dev_rxip_add4_store(struct device *dev, @@ -1009,13 +943,12 @@ static ssize_t qeth_l3_dev_rxip_del_store(const char *buf, size_t count, u8 addr[16]; int rc; + mutex_lock(&card->conf_mutex); rc = qeth_l3_parse_rxipe(buf, proto, addr); - if (rc) - return rc; - - qeth_l3_del_rxip(card, proto, addr); - - return count; + if (!rc) + qeth_l3_del_rxip(card, proto, addr); + mutex_unlock(&card->conf_mutex); + return rc ? 
rc : count; } static ssize_t qeth_l3_dev_rxip_del4_store(struct device *dev, diff --git a/drivers/s390/net/smsgiucv.c b/drivers/s390/net/smsgiucv.c index ecef1edee70..d8f990b6b33 100644 --- a/drivers/s390/net/smsgiucv.c +++ b/drivers/s390/net/smsgiucv.c @@ -24,6 +24,7 @@ #include <linux/init.h> #include <linux/errno.h> #include <linux/device.h> +#include <linux/slab.h> #include <net/iucv/iucv.h> #include <asm/cpcmd.h> #include <asm/ebcdic.h> @@ -46,6 +47,7 @@ static struct device *smsg_dev; static DEFINE_SPINLOCK(smsg_list_lock); static LIST_HEAD(smsg_list); +static int iucv_path_connected; static int smsg_path_pending(struct iucv_path *, u8 ipvmid[8], u8 ipuser[16]); static void smsg_message_pending(struct iucv_path *, struct iucv_message *); @@ -58,7 +60,7 @@ static struct iucv_handler smsg_handler = { static int smsg_path_pending(struct iucv_path *path, u8 ipvmid[8], u8 ipuser[16]) { - if (strncmp(ipvmid, "*MSG ", sizeof(ipvmid)) != 0) + if (strncmp(ipvmid, "*MSG ", 8) != 0) return -EINVAL; /* Path pending from *MSG. */ return iucv_path_accept(path, &smsg_handler, "SMSGIUCV ", NULL); @@ -141,8 +143,10 @@ static int smsg_pm_freeze(struct device *dev) #ifdef CONFIG_PM_DEBUG printk(KERN_WARNING "smsg_pm_freeze\n"); #endif - if (smsg_path) + if (smsg_path && iucv_path_connected) { iucv_path_sever(smsg_path, NULL); + iucv_path_connected = 0; + } return 0; } @@ -153,7 +157,7 @@ static int smsg_pm_restore_thaw(struct device *dev) #ifdef CONFIG_PM_DEBUG printk(KERN_WARNING "smsg_pm_restore_thaw\n"); #endif - if (smsg_path) { + if (smsg_path && !iucv_path_connected) { memset(smsg_path, 0, sizeof(*smsg_path)); smsg_path->msglim = 255; smsg_path->flags = 0; @@ -164,6 +168,8 @@ static int smsg_pm_restore_thaw(struct device *dev) printk(KERN_ERR "iucv_path_connect returned with rc %i\n", rc); #endif + if (!rc) + iucv_path_connected = 1; cpcmd("SET SMSG IUCV", NULL, 0, NULL); } return 0; @@ -213,6 +219,8 @@ static int __init smsg_init(void) NULL, NULL, NULL); if (rc) goto out_free_path; + else + iucv_path_connected = 1; smsg_dev = kzalloc(sizeof(struct device), GFP_KERNEL); if (!smsg_dev) { rc = -ENOMEM; diff --git a/drivers/s390/net/smsgiucv.h b/drivers/s390/net/smsgiucv.h index 149a1151608..45bc925928c 100644 --- a/drivers/s390/net/smsgiucv.h +++ b/drivers/s390/net/smsgiucv.h @@ -1,7 +1,7 @@ /* * IUCV special message driver * - * Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Copyright IBM Corp. 
2003 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com) */ diff --git a/drivers/s390/net/smsgiucv_app.c b/drivers/s390/net/smsgiucv_app.c index 91579dc6a2b..32515a201bb 100644 --- a/drivers/s390/net/smsgiucv_app.c +++ b/drivers/s390/net/smsgiucv_app.c @@ -18,6 +18,7 @@ #include <linux/list.h> #include <linux/kobject.h> #include <linux/module.h> +#include <linux/slab.h> #include <linux/spinlock.h> #include <linux/workqueue.h> #include <net/iucv/iucv.h> @@ -167,7 +168,7 @@ static int __init smsgiucv_app_init(void) rc = dev_set_name(smsg_app_dev, KMSG_COMPONENT); if (rc) { kfree(smsg_app_dev); - goto fail_put_driver; + goto fail; } smsg_app_dev->bus = &iucv_bus; smsg_app_dev->parent = iucv_root; @@ -176,19 +177,25 @@ static int __init smsgiucv_app_init(void) rc = device_register(smsg_app_dev); if (rc) { put_device(smsg_app_dev); - goto fail_put_driver; + goto fail; + } + + /* convert sender to uppercase characters */ + if (sender) { + int len = strlen(sender); + while (len--) + sender[len] = toupper(sender[len]); } /* register with the smsgiucv device driver */ rc = smsg_register_callback(SMSG_PREFIX, smsg_app_callback); if (rc) { device_unregister(smsg_app_dev); - goto fail_put_driver; + goto fail; } rc = 0; -fail_put_driver: - put_driver(smsgiucv_drv); +fail: return rc; } module_init(smsgiucv_app_init); diff --git a/drivers/s390/scsi/Makefile b/drivers/s390/scsi/Makefile index cb301cc6178..9259039e886 100644 --- a/drivers/s390/scsi/Makefile +++ b/drivers/s390/scsi/Makefile @@ -2,7 +2,8 @@ # Makefile for the S/390 specific device drivers # -zfcp-objs := zfcp_aux.o zfcp_ccw.o zfcp_scsi.o zfcp_erp.o zfcp_qdio.o \ - zfcp_fsf.o zfcp_dbf.o zfcp_sysfs.o zfcp_fc.o zfcp_cfdc.o +zfcp-objs := zfcp_aux.o zfcp_ccw.o zfcp_dbf.o zfcp_erp.o \ + zfcp_fc.o zfcp_fsf.o zfcp_qdio.o zfcp_scsi.o zfcp_sysfs.o \ + zfcp_unit.o obj-$(CONFIG_ZFCP) += zfcp.o diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c index 66d6c01fcf3..8004b071a9f 100644 --- a/drivers/s390/scsi/zfcp_aux.c +++ b/drivers/s390/scsi/zfcp_aux.c @@ -3,7 +3,7 @@ * * Module interface and handling of zfcp data structures. * - * Copyright IBM Corporation 2002, 2010 + * Copyright IBM Corp. 
2002, 2013 */ /* @@ -23,6 +23,7 @@ * Christof Schmitt * Martin Petermann * Sven Schuetz + * Steffen Maier */ #define KMSG_COMPONENT "zfcp" @@ -30,6 +31,8 @@ #include <linux/miscdevice.h> #include <linux/seq_file.h> +#include <linux/slab.h> +#include <linux/module.h> #include "zfcp_ext.h" #include "zfcp_fc.h" #include "zfcp_reqlist.h" @@ -44,8 +47,8 @@ static char *init_device; module_param_named(device, init_device, charp, 0400); MODULE_PARM_DESC(device, "specify initial device"); -static struct kmem_cache *zfcp_cache_hw_align(const char *name, - unsigned long size) +static struct kmem_cache * __init zfcp_cache_hw_align(const char *name, + unsigned long size) { return kmem_cache_create(name, size, roundup_pow_of_two(size), 0, NULL); } @@ -55,7 +58,6 @@ static void __init zfcp_init_device_configure(char *busid, u64 wwpn, u64 lun) struct ccw_device *cdev; struct zfcp_adapter *adapter; struct zfcp_port *port; - struct zfcp_unit *unit; cdev = get_ccwdev_by_busid(&zfcp_ccw_driver, busid); if (!cdev) @@ -71,17 +73,11 @@ static void __init zfcp_init_device_configure(char *busid, u64 wwpn, u64 lun) port = zfcp_get_port_by_wwpn(adapter, wwpn); if (!port) goto out_port; + flush_work(&port->rport_work); - unit = zfcp_unit_enqueue(port, lun); - if (IS_ERR(unit)) - goto out_unit; - - zfcp_erp_unit_reopen(unit, 0, "auidc_1", NULL); - zfcp_erp_wait(adapter); - flush_work(&unit->scsi_work); - -out_unit: + zfcp_unit_add(port, lun); put_device(&port->dev); + out_port: zfcp_ccw_adapter_put(adapter); out_ccw_device: @@ -97,24 +93,22 @@ static void __init zfcp_init_device_setup(char *devstr) u64 wwpn, lun; /* duplicate devstr and keep the original for sysfs presentation*/ - str_saved = kmalloc(strlen(devstr) + 1, GFP_KERNEL); + str_saved = kstrdup(devstr, GFP_KERNEL); str = str_saved; if (!str) return; - strcpy(str, devstr); - token = strsep(&str, ","); if (!token || strlen(token) >= ZFCP_BUS_ID_SIZE) goto err_out; strncpy(busid, token, ZFCP_BUS_ID_SIZE); token = strsep(&str, ","); - if (!token || strict_strtoull(token, 0, (unsigned long long *) &wwpn)) + if (!token || kstrtoull(token, 0, (unsigned long long *) &wwpn)) goto err_out; token = strsep(&str, ","); - if (!token || strict_strtoull(token, 0, (unsigned long long *) &lun)) + if (!token || kstrtoull(token, 0, (unsigned long long *) &lun)) goto err_out; kfree(str_saved); @@ -130,41 +124,22 @@ static int __init zfcp_module_init(void) { int retval = -ENOMEM; - zfcp_data.gpn_ft_cache = zfcp_cache_hw_align("zfcp_gpn", - sizeof(struct zfcp_fc_gpn_ft_req)); - if (!zfcp_data.gpn_ft_cache) - goto out; - - zfcp_data.qtcb_cache = zfcp_cache_hw_align("zfcp_qtcb", - sizeof(struct fsf_qtcb)); - if (!zfcp_data.qtcb_cache) + zfcp_fsf_qtcb_cache = zfcp_cache_hw_align("zfcp_fsf_qtcb", + sizeof(struct fsf_qtcb)); + if (!zfcp_fsf_qtcb_cache) goto out_qtcb_cache; - zfcp_data.sr_buffer_cache = zfcp_cache_hw_align("zfcp_sr", - sizeof(struct fsf_status_read_buffer)); - if (!zfcp_data.sr_buffer_cache) - goto out_sr_cache; + zfcp_fc_req_cache = zfcp_cache_hw_align("zfcp_fc_req", + sizeof(struct zfcp_fc_req)); + if (!zfcp_fc_req_cache) + goto out_fc_cache; - zfcp_data.gid_pn_cache = zfcp_cache_hw_align("zfcp_gid", - sizeof(struct zfcp_fc_gid_pn)); - if (!zfcp_data.gid_pn_cache) - goto out_gid_cache; - - zfcp_data.adisc_cache = zfcp_cache_hw_align("zfcp_adisc", - sizeof(struct zfcp_fc_els_adisc)); - if (!zfcp_data.adisc_cache) - goto out_adisc_cache; - - zfcp_data.scsi_transport_template = + zfcp_scsi_transport_template = fc_attach_transport(&zfcp_transport_functions); - if 
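In zfcp_init_device_setup() above, the open-coded kmalloc()+strcpy() duplication becomes kstrdup(), and the deprecated strict_strtoull() becomes kstrtoull(). The resulting parse of a "busid,wwpn,lun" triple looks roughly like this (a sketch; the real function also keeps the duplicate string for sysfs presentation):

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>

static int example_parse_device(const char *devstr, char *busid,
				size_t busid_len, unsigned long long *wwpn,
				unsigned long long *lun)
{
	char *saved, *str, *token;
	int rc = -EINVAL;

	saved = kstrdup(devstr, GFP_KERNEL);	/* strsep() modifies it */
	if (!saved)
		return -ENOMEM;
	str = saved;

	token = strsep(&str, ",");
	if (!token || strlen(token) >= busid_len)
		goto out;
	strcpy(busid, token);			/* length checked above */

	token = strsep(&str, ",");
	if (!token || kstrtoull(token, 0, wwpn))
		goto out;

	token = strsep(&str, ",");
	if (!token || kstrtoull(token, 0, lun))
		goto out;
	rc = 0;
out:
	kfree(saved);
	return rc;
}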
(!zfcp_data.scsi_transport_template) + if (!zfcp_scsi_transport_template) goto out_transport; - - retval = misc_register(&zfcp_cfdc_misc); - if (retval) { - pr_err("Registering the misc device zfcp_cfdc failed\n"); - goto out_misc; - } + scsi_transport_reserve_device(zfcp_scsi_transport_template, + sizeof(struct zfcp_scsi_dev)); retval = ccw_driver_register(&zfcp_ccw_driver); if (retval) { @@ -178,20 +153,12 @@ static int __init zfcp_module_init(void) return 0; out_ccw_register: - misc_deregister(&zfcp_cfdc_misc); -out_misc: - fc_release_transport(zfcp_data.scsi_transport_template); + fc_release_transport(zfcp_scsi_transport_template); out_transport: - kmem_cache_destroy(zfcp_data.adisc_cache); -out_adisc_cache: - kmem_cache_destroy(zfcp_data.gid_pn_cache); -out_gid_cache: - kmem_cache_destroy(zfcp_data.sr_buffer_cache); -out_sr_cache: - kmem_cache_destroy(zfcp_data.qtcb_cache); + kmem_cache_destroy(zfcp_fc_req_cache); +out_fc_cache: + kmem_cache_destroy(zfcp_fsf_qtcb_cache); out_qtcb_cache: - kmem_cache_destroy(zfcp_data.gpn_ft_cache); -out: return retval; } @@ -200,42 +167,14 @@ module_init(zfcp_module_init); static void __exit zfcp_module_exit(void) { ccw_driver_unregister(&zfcp_ccw_driver); - misc_deregister(&zfcp_cfdc_misc); - fc_release_transport(zfcp_data.scsi_transport_template); - kmem_cache_destroy(zfcp_data.adisc_cache); - kmem_cache_destroy(zfcp_data.gid_pn_cache); - kmem_cache_destroy(zfcp_data.sr_buffer_cache); - kmem_cache_destroy(zfcp_data.qtcb_cache); - kmem_cache_destroy(zfcp_data.gpn_ft_cache); + fc_release_transport(zfcp_scsi_transport_template); + kmem_cache_destroy(zfcp_fc_req_cache); + kmem_cache_destroy(zfcp_fsf_qtcb_cache); } module_exit(zfcp_module_exit); /** - * zfcp_get_unit_by_lun - find unit in unit list of port by FCP LUN - * @port: pointer to port to search for unit - * @fcp_lun: FCP LUN to search for - * - * Returns: pointer to zfcp_unit or NULL - */ -struct zfcp_unit *zfcp_get_unit_by_lun(struct zfcp_port *port, u64 fcp_lun) -{ - unsigned long flags; - struct zfcp_unit *unit; - - read_lock_irqsave(&port->unit_list_lock, flags); - list_for_each_entry(unit, &port->unit_list, list) - if (unit->fcp_lun == fcp_lun) { - if (!get_device(&unit->dev)) - unit = NULL; - read_unlock_irqrestore(&port->unit_list_lock, flags); - return unit; - } - read_unlock_irqrestore(&port->unit_list_lock, flags); - return NULL; -} - -/** * zfcp_get_port_by_wwpn - find port in port list of adapter by wwpn * @adapter: pointer to adapter to search for port * @wwpn: wwpn to search for @@ -260,92 +199,6 @@ struct zfcp_port *zfcp_get_port_by_wwpn(struct zfcp_adapter *adapter, return NULL; } -/** - * zfcp_unit_release - dequeue unit - * @dev: pointer to device - * - * waits until all work is done on unit and removes it then from the unit->list - * of the associated port. - */ -static void zfcp_unit_release(struct device *dev) -{ - struct zfcp_unit *unit = container_of(dev, struct zfcp_unit, dev); - - put_device(&unit->port->dev); - kfree(unit); -} - -/** - * zfcp_unit_enqueue - enqueue unit to unit list of a port. - * @port: pointer to port where unit is added - * @fcp_lun: FCP LUN of unit to be enqueued - * Returns: pointer to enqueued unit on success, ERR_PTR on error - * - * Sets up some unit internal structures and creates sysfs entry. 
- */ -struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, u64 fcp_lun) -{ - struct zfcp_unit *unit; - int retval = -ENOMEM; - - get_device(&port->dev); - - unit = zfcp_get_unit_by_lun(port, fcp_lun); - if (unit) { - put_device(&unit->dev); - retval = -EEXIST; - goto err_out; - } - - unit = kzalloc(sizeof(struct zfcp_unit), GFP_KERNEL); - if (!unit) - goto err_out; - - unit->port = port; - unit->fcp_lun = fcp_lun; - unit->dev.parent = &port->dev; - unit->dev.release = zfcp_unit_release; - - if (dev_set_name(&unit->dev, "0x%016llx", - (unsigned long long) fcp_lun)) { - kfree(unit); - goto err_out; - } - retval = -EINVAL; - - INIT_WORK(&unit->scsi_work, zfcp_scsi_scan); - - spin_lock_init(&unit->latencies.lock); - unit->latencies.write.channel.min = 0xFFFFFFFF; - unit->latencies.write.fabric.min = 0xFFFFFFFF; - unit->latencies.read.channel.min = 0xFFFFFFFF; - unit->latencies.read.fabric.min = 0xFFFFFFFF; - unit->latencies.cmd.channel.min = 0xFFFFFFFF; - unit->latencies.cmd.fabric.min = 0xFFFFFFFF; - - if (device_register(&unit->dev)) { - put_device(&unit->dev); - goto err_out; - } - - if (sysfs_create_group(&unit->dev.kobj, &zfcp_sysfs_unit_attrs)) - goto err_out_put; - - write_lock_irq(&port->unit_list_lock); - list_add_tail(&unit->list, &port->unit_list); - write_unlock_irq(&port->unit_list_lock); - - atomic_set_mask(ZFCP_STATUS_COMMON_RUNNING, &unit->status); - - return unit; - -err_out_put: - device_unregister(&unit->dev); -err_out: - put_device(&port->dev); - return ERR_PTR(retval); -} - static int zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter) { adapter->pool.erp_req = @@ -375,18 +228,18 @@ static int zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter) return -ENOMEM; adapter->pool.qtcb_pool = - mempool_create_slab_pool(4, zfcp_data.qtcb_cache); + mempool_create_slab_pool(4, zfcp_fsf_qtcb_cache); if (!adapter->pool.qtcb_pool) return -ENOMEM; - adapter->pool.status_read_data = - mempool_create_slab_pool(FSF_STATUS_READS_RECOM, - zfcp_data.sr_buffer_cache); - if (!adapter->pool.status_read_data) + BUILD_BUG_ON(sizeof(struct fsf_status_read_buffer) > PAGE_SIZE); + adapter->pool.sr_data = + mempool_create_page_pool(FSF_STATUS_READS_RECOM, 0); + if (!adapter->pool.sr_data) return -ENOMEM; adapter->pool.gid_pn = - mempool_create_slab_pool(1, zfcp_data.gid_pn_cache); + mempool_create_slab_pool(1, zfcp_fc_req_cache); if (!adapter->pool.gid_pn) return -ENOMEM; @@ -405,8 +258,8 @@ static void zfcp_free_low_mem_buffers(struct zfcp_adapter *adapter) mempool_destroy(adapter->pool.qtcb_pool); if (adapter->pool.status_read_req) mempool_destroy(adapter->pool.status_read_req); - if (adapter->pool.status_read_data) - mempool_destroy(adapter->pool.status_read_data); + if (adapter->pool.sr_data) + mempool_destroy(adapter->pool.sr_data); if (adapter->pool.gid_pn) mempool_destroy(adapter->pool.gid_pn); } @@ -424,9 +277,9 @@ int zfcp_status_read_refill(struct zfcp_adapter *adapter) { while (atomic_read(&adapter->stat_miss) > 0) if (zfcp_fsf_status_read(adapter->qdio)) { - if (atomic_read(&adapter->stat_miss) >= 16) { - zfcp_erp_adapter_reopen(adapter, 0, "axsref1", - NULL); + if (atomic_read(&adapter->stat_miss) >= + adapter->stat_read_buf_num) { + zfcp_erp_adapter_reopen(adapter, 0, "axsref1"); return 1; } break; @@ -501,6 +354,7 @@ struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device) INIT_WORK(&adapter->stat_work, _zfcp_status_read_scheduler); INIT_WORK(&adapter->scan_work, zfcp_fc_scan_ports); + INIT_WORK(&adapter->ns_up_work, zfcp_fc_sym_name_update); if 
(zfcp_qdio_setup(adapter)) goto failed; @@ -524,6 +378,10 @@ struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device) rwlock_init(&adapter->port_list_lock); INIT_LIST_HEAD(&adapter->port_list); + INIT_LIST_HEAD(&adapter->events.list); + INIT_WORK(&adapter->events.work, zfcp_fc_post_event); + spin_lock_init(&adapter->events.list_lock); + init_waitqueue_head(&adapter->erp_ready_wq); init_waitqueue_head(&adapter->erp_done_wqh); @@ -544,7 +402,13 @@ struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device) &zfcp_sysfs_adapter_attrs)) goto failed; - if (!zfcp_adapter_scsi_register(adapter)) + /* report size limit per scatter-gather segment */ + adapter->dma_parms.max_segment_size = ZFCP_QDIO_SBALE_LEN; + adapter->ccw_device->dev.dma_parms = &adapter->dma_parms; + + adapter->stat_read_buf_num = FSF_STATUS_READS_RECOM; + + if (!zfcp_scsi_adapter_register(adapter)) return adapter; failed: @@ -558,14 +422,15 @@ void zfcp_adapter_unregister(struct zfcp_adapter *adapter) cancel_work_sync(&adapter->scan_work); cancel_work_sync(&adapter->stat_work); + cancel_work_sync(&adapter->ns_up_work); zfcp_destroy_adapter_work_queue(adapter); zfcp_fc_wka_ports_force_offline(adapter->gs); - zfcp_adapter_scsi_unregister(adapter); + zfcp_scsi_adapter_unregister(adapter); sysfs_remove_group(&cdev->dev.kobj, &zfcp_sysfs_adapter_attrs); zfcp_erp_thread_kill(adapter); - zfcp_dbf_adapter_unregister(adapter->dbf); + zfcp_dbf_adapter_unregister(adapter); zfcp_qdio_destroy(adapter->qdio); zfcp_ccw_adapter_put(adapter); /* final put to release */ @@ -592,20 +457,6 @@ void zfcp_adapter_release(struct kref *ref) put_device(&cdev->dev); } -/** - * zfcp_device_unregister - remove port, unit from system - * @dev: reference to device which is to be removed - * @grp: related reference to attribute group - * - * Helper function to unregister port, unit from system - */ -void zfcp_device_unregister(struct device *dev, - const struct attribute_group *grp) -{ - sysfs_remove_group(&dev->kobj, grp); - device_unregister(dev); -} - static void zfcp_port_release(struct device *dev) { struct zfcp_port *port = container_of(dev, struct zfcp_port, dev); @@ -647,6 +498,7 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn, rwlock_init(&port->unit_list_lock); INIT_LIST_HEAD(&port->unit_list); + atomic_set(&port->units, 0); INIT_WORK(&port->gid_pn_work, zfcp_fc_port_did_lookup); INIT_WORK(&port->test_link_work, zfcp_fc_link_test_work); @@ -657,6 +509,7 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn, port->wwpn = wwpn; port->rport_task = RPORT_NONE; port->dev.parent = &adapter->ccw_device->dev; + port->dev.groups = zfcp_port_attr_groups; port->dev.release = zfcp_port_release; if (dev_set_name(&port->dev, "0x%016llx", (unsigned long long)wwpn)) { @@ -670,10 +523,6 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn, goto err_out; } - if (sysfs_create_group(&port->dev.kobj, - &zfcp_sysfs_port_attrs)) - goto err_out_put; - write_lock_irq(&adapter->port_list_lock); list_add_tail(&port->list, &adapter->port_list); write_unlock_irq(&adapter->port_list_lock); @@ -682,8 +531,6 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn, return port; -err_out_put: - device_unregister(&port->dev); err_out: zfcp_ccw_adapter_put(adapter); return ERR_PTR(retval); diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c index ce1cc7a11fb..f9879d400d0 100644 --- a/drivers/s390/scsi/zfcp_ccw.c +++ 
b/drivers/s390/scsi/zfcp_ccw.c @@ -3,12 +3,13 @@ * * Registration and callback for the s390 common I/O layer. * - * Copyright IBM Corporation 2002, 2010 + * Copyright IBM Corp. 2002, 2010 */ #define KMSG_COMPONENT "zfcp" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt +#include <linux/module.h> #include "zfcp_ext.h" #include "zfcp_reqlist.h" @@ -38,20 +39,25 @@ void zfcp_ccw_adapter_put(struct zfcp_adapter *adapter) spin_unlock_irqrestore(&zfcp_ccw_adapter_ref_lock, flags); } -static int zfcp_ccw_activate(struct ccw_device *cdev) - +/** + * zfcp_ccw_activate - activate adapter and wait for it to finish + * @cdev: pointer to belonging ccw device + * @clear: Status flags to clear. + * @tag: s390dbf trace record tag + */ +static int zfcp_ccw_activate(struct ccw_device *cdev, int clear, char *tag) { struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev); if (!adapter) return 0; - zfcp_erp_modify_adapter_status(adapter, "ccresu1", NULL, - ZFCP_STATUS_COMMON_RUNNING, ZFCP_SET); + zfcp_erp_clear_adapter_status(adapter, clear); + zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING); zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, - "ccresu2", NULL); + tag); zfcp_erp_wait(adapter); - flush_work(&adapter->scan_work); + flush_work(&adapter->scan_work); /* ok to call even if nothing queued */ zfcp_ccw_adapter_put(adapter); @@ -66,15 +72,6 @@ static struct ccw_device_id zfcp_ccw_device_id[] = { MODULE_DEVICE_TABLE(ccw, zfcp_ccw_device_id); /** - * zfcp_ccw_priv_sch - check if subchannel is privileged - * @adapter: Adapter/Subchannel to check - */ -int zfcp_ccw_priv_sch(struct zfcp_adapter *adapter) -{ - return adapter->ccw_device->id.dev_model == ZFCP_MODEL_PRIV; -} - -/** * zfcp_ccw_probe - probe function of zfcp driver * @cdev: pointer to belonging ccw device * @@ -123,10 +120,10 @@ static void zfcp_ccw_remove(struct ccw_device *cdev) zfcp_ccw_adapter_put(adapter); /* put from zfcp_ccw_adapter_by_cdev */ list_for_each_entry_safe(unit, u, &unit_remove_lh, list) - zfcp_device_unregister(&unit->dev, &zfcp_sysfs_unit_attrs); + device_unregister(&unit->dev); list_for_each_entry_safe(port, p, &port_remove_lh, list) - zfcp_device_unregister(&port->dev, &zfcp_sysfs_port_attrs); + device_unregister(&port->dev); zfcp_adapter_unregister(adapter); } @@ -164,33 +161,34 @@ static int zfcp_ccw_set_online(struct ccw_device *cdev) BUG_ON(!zfcp_reqlist_isempty(adapter->req_list)); adapter->req_no = 0; - zfcp_erp_modify_adapter_status(adapter, "ccsonl1", NULL, - ZFCP_STATUS_COMMON_RUNNING, ZFCP_SET); - zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, - "ccsonl2", NULL); - zfcp_erp_wait(adapter); - - flush_work(&adapter->scan_work); - + zfcp_ccw_activate(cdev, 0, "ccsonl1"); + /* scan for remote ports + either at the end of any successful adapter recovery + or only after the adapter recovery for setting a device online */ + zfcp_fc_inverse_conditional_port_scan(adapter); + flush_work(&adapter->scan_work); /* ok to call even if nothing queued */ zfcp_ccw_adapter_put(adapter); return 0; } /** - * zfcp_ccw_set_offline - set_offline function of zfcp driver + * zfcp_ccw_offline_sync - shut down adapter and wait for it to finish * @cdev: pointer to belonging ccw device + * @set: Status flags to set. + * @tag: s390dbf trace record tag * * This function gets called by the common i/o layer and sets an adapter * into state offline. 
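The online/resume paths in this file now funnel through zfcp_ccw_activate and the offline/suspend paths through zfcp_ccw_offline_sync, and both follow the same trigger-then-wait shape. Condensed from the hunks above (a sketch only, with refcounting and NULL checks elided):

    /* bring the adapter up and block until recovery has run */
    zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING);
    zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, tag);
    zfcp_erp_wait(adapter);                 /* returns once ERP has finished */
    flush_work(&adapter->scan_work);        /* ok to call even if nothing queued */

    /* take it down symmetrically */
    zfcp_erp_set_adapter_status(adapter, set);
    zfcp_erp_adapter_shutdown(adapter, 0, tag);
    zfcp_erp_wait(adapter);

Folding the status manipulation into these two helpers is what lets the suspend/thaw/restore callbacks later in this file become one-line wrappers that differ only in the status flags and trace tag they pass.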
*/ -static int zfcp_ccw_set_offline(struct ccw_device *cdev) +static int zfcp_ccw_offline_sync(struct ccw_device *cdev, int set, char *tag) { struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev); if (!adapter) return 0; - zfcp_erp_adapter_shutdown(adapter, 0, "ccsoff1", NULL); + zfcp_erp_set_adapter_status(adapter, set); + zfcp_erp_adapter_shutdown(adapter, 0, tag); zfcp_erp_wait(adapter); zfcp_ccw_adapter_put(adapter); @@ -198,6 +196,18 @@ static int zfcp_ccw_set_offline(struct ccw_device *cdev) } /** + * zfcp_ccw_set_offline - set_offline function of zfcp driver + * @cdev: pointer to belonging ccw device + * + * This function gets called by the common i/o layer and sets an adapter + * into state offline. + */ +static int zfcp_ccw_set_offline(struct ccw_device *cdev) +{ + return zfcp_ccw_offline_sync(cdev, 0, "ccsoff1"); +} + +/** * zfcp_ccw_notify - ccw notify function * @cdev: pointer to belonging ccw device * @event: indicates if adapter was detached or attached @@ -214,26 +224,35 @@ static int zfcp_ccw_notify(struct ccw_device *cdev, int event) switch (event) { case CIO_GONE: + if (atomic_read(&adapter->status) & + ZFCP_STATUS_ADAPTER_SUSPENDED) { /* notification ignore */ + zfcp_dbf_hba_basic("ccnigo1", adapter); + break; + } dev_warn(&cdev->dev, "The FCP device has been detached\n"); - zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti1", NULL); + zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti1"); break; case CIO_NO_PATH: dev_warn(&cdev->dev, "The CHPID for the FCP device is offline\n"); - zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti2", NULL); + zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti2"); break; case CIO_OPER: + if (atomic_read(&adapter->status) & + ZFCP_STATUS_ADAPTER_SUSPENDED) { /* notification ignore */ + zfcp_dbf_hba_basic("ccniop1", adapter); + break; + } dev_info(&cdev->dev, "The FCP device is operational again\n"); - zfcp_erp_modify_adapter_status(adapter, "ccnoti3", NULL, - ZFCP_STATUS_COMMON_RUNNING, - ZFCP_SET); + zfcp_erp_set_adapter_status(adapter, + ZFCP_STATUS_COMMON_RUNNING); zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, - "ccnoti4", NULL); + "ccnoti4"); break; case CIO_BOXED: dev_warn(&cdev->dev, "The FCP device did not respond within " "the specified time\n"); - zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti5", NULL); + zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti5"); break; } @@ -252,16 +271,40 @@ static void zfcp_ccw_shutdown(struct ccw_device *cdev) if (!adapter) return; - zfcp_erp_adapter_shutdown(adapter, 0, "ccshut1", NULL); + zfcp_erp_adapter_shutdown(adapter, 0, "ccshut1"); zfcp_erp_wait(adapter); zfcp_erp_thread_kill(adapter); zfcp_ccw_adapter_put(adapter); } +static int zfcp_ccw_suspend(struct ccw_device *cdev) +{ + zfcp_ccw_offline_sync(cdev, ZFCP_STATUS_ADAPTER_SUSPENDED, "ccsusp1"); + return 0; +} + +static int zfcp_ccw_thaw(struct ccw_device *cdev) +{ + /* trace records for thaw and final shutdown during suspend + can only be found in system dump until the end of suspend + but not after resume because it's based on the memory image + right after the very first suspend (freeze) callback */ + zfcp_ccw_activate(cdev, 0, "ccthaw1"); + return 0; +} + +static int zfcp_ccw_resume(struct ccw_device *cdev) +{ + zfcp_ccw_activate(cdev, ZFCP_STATUS_ADAPTER_SUSPENDED, "ccresu1"); + return 0; +} + struct ccw_driver zfcp_ccw_driver = { - .owner = THIS_MODULE, - .name = "zfcp", + .driver = { + .owner = THIS_MODULE, + .name = "zfcp", + }, .ids = zfcp_ccw_device_id, .probe = zfcp_ccw_probe, .remove = zfcp_ccw_remove, @@ -269,7 +312,7 @@ 
struct ccw_driver zfcp_ccw_driver = { .set_offline = zfcp_ccw_set_offline, .notify = zfcp_ccw_notify, .shutdown = zfcp_ccw_shutdown, - .freeze = zfcp_ccw_set_offline, - .thaw = zfcp_ccw_activate, - .restore = zfcp_ccw_activate, + .freeze = zfcp_ccw_suspend, + .thaw = zfcp_ccw_thaw, + .restore = zfcp_ccw_resume, }; diff --git a/drivers/s390/scsi/zfcp_cfdc.c b/drivers/s390/scsi/zfcp_cfdc.c deleted file mode 100644 index 0eb6eefd2c1..00000000000 --- a/drivers/s390/scsi/zfcp_cfdc.c +++ /dev/null @@ -1,266 +0,0 @@ -/* - * zfcp device driver - * - * Userspace interface for accessing the - * Access Control Lists / Control File Data Channel - * - * Copyright IBM Corporation 2008, 2009 - */ - -#define KMSG_COMPONENT "zfcp" -#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt - -#include <linux/types.h> -#include <linux/miscdevice.h> -#include <asm/compat.h> -#include <asm/ccwdev.h> -#include "zfcp_def.h" -#include "zfcp_ext.h" -#include "zfcp_fsf.h" - -#define ZFCP_CFDC_CMND_DOWNLOAD_NORMAL 0x00010001 -#define ZFCP_CFDC_CMND_DOWNLOAD_FORCE 0x00010101 -#define ZFCP_CFDC_CMND_FULL_ACCESS 0x00000201 -#define ZFCP_CFDC_CMND_RESTRICTED_ACCESS 0x00000401 -#define ZFCP_CFDC_CMND_UPLOAD 0x00010002 - -#define ZFCP_CFDC_DOWNLOAD 0x00000001 -#define ZFCP_CFDC_UPLOAD 0x00000002 -#define ZFCP_CFDC_WITH_CONTROL_FILE 0x00010000 - -#define ZFCP_CFDC_IOC_MAGIC 0xDD -#define ZFCP_CFDC_IOC \ - _IOWR(ZFCP_CFDC_IOC_MAGIC, 0, struct zfcp_cfdc_data) - -/** - * struct zfcp_cfdc_data - data for ioctl cfdc interface - * @signature: request signature - * @devno: FCP adapter device number - * @command: command code - * @fsf_status: returns status of FSF command to userspace - * @fsf_status_qual: returned to userspace - * @payloads: access conflicts list - * @control_file: access control table - */ -struct zfcp_cfdc_data { - u32 signature; - u32 devno; - u32 command; - u32 fsf_status; - u8 fsf_status_qual[FSF_STATUS_QUALIFIER_SIZE]; - u8 payloads[256]; - u8 control_file[0]; -}; - -static int zfcp_cfdc_copy_from_user(struct scatterlist *sg, - void __user *user_buffer) -{ - unsigned int length; - unsigned int size = ZFCP_CFDC_MAX_SIZE; - - while (size) { - length = min((unsigned int)size, sg->length); - if (copy_from_user(sg_virt(sg++), user_buffer, length)) - return -EFAULT; - user_buffer += length; - size -= length; - } - return 0; -} - -static int zfcp_cfdc_copy_to_user(void __user *user_buffer, - struct scatterlist *sg) -{ - unsigned int length; - unsigned int size = ZFCP_CFDC_MAX_SIZE; - - while (size) { - length = min((unsigned int) size, sg->length); - if (copy_to_user(user_buffer, sg_virt(sg++), length)) - return -EFAULT; - user_buffer += length; - size -= length; - } - return 0; -} - -static struct zfcp_adapter *zfcp_cfdc_get_adapter(u32 devno) -{ - char busid[9]; - struct ccw_device *cdev; - struct zfcp_adapter *adapter; - - snprintf(busid, sizeof(busid), "0.0.%04x", devno); - cdev = get_ccwdev_by_busid(&zfcp_ccw_driver, busid); - if (!cdev) - return NULL; - - adapter = zfcp_ccw_adapter_by_cdev(cdev); - - put_device(&cdev->dev); - return adapter; -} - -static int zfcp_cfdc_set_fsf(struct zfcp_fsf_cfdc *fsf_cfdc, int command) -{ - switch (command) { - case ZFCP_CFDC_CMND_DOWNLOAD_NORMAL: - fsf_cfdc->command = FSF_QTCB_DOWNLOAD_CONTROL_FILE; - fsf_cfdc->option = FSF_CFDC_OPTION_NORMAL_MODE; - break; - case ZFCP_CFDC_CMND_DOWNLOAD_FORCE: - fsf_cfdc->command = FSF_QTCB_DOWNLOAD_CONTROL_FILE; - fsf_cfdc->option = FSF_CFDC_OPTION_FORCE; - break; - case ZFCP_CFDC_CMND_FULL_ACCESS: - fsf_cfdc->command = FSF_QTCB_DOWNLOAD_CONTROL_FILE; 
- fsf_cfdc->option = FSF_CFDC_OPTION_FULL_ACCESS; - break; - case ZFCP_CFDC_CMND_RESTRICTED_ACCESS: - fsf_cfdc->command = FSF_QTCB_DOWNLOAD_CONTROL_FILE; - fsf_cfdc->option = FSF_CFDC_OPTION_RESTRICTED_ACCESS; - break; - case ZFCP_CFDC_CMND_UPLOAD: - fsf_cfdc->command = FSF_QTCB_UPLOAD_CONTROL_FILE; - fsf_cfdc->option = 0; - break; - default: - return -EINVAL; - } - - return 0; -} - -static int zfcp_cfdc_sg_setup(int command, struct scatterlist *sg, - u8 __user *control_file) -{ - int retval; - retval = zfcp_sg_setup_table(sg, ZFCP_CFDC_PAGES); - if (retval) - return retval; - - sg[ZFCP_CFDC_PAGES - 1].length = ZFCP_CFDC_MAX_SIZE % PAGE_SIZE; - - if (command & ZFCP_CFDC_WITH_CONTROL_FILE && - command & ZFCP_CFDC_DOWNLOAD) { - retval = zfcp_cfdc_copy_from_user(sg, control_file); - if (retval) { - zfcp_sg_free_table(sg, ZFCP_CFDC_PAGES); - return -EFAULT; - } - } - - return 0; -} - -static void zfcp_cfdc_req_to_sense(struct zfcp_cfdc_data *data, - struct zfcp_fsf_req *req) -{ - data->fsf_status = req->qtcb->header.fsf_status; - memcpy(&data->fsf_status_qual, &req->qtcb->header.fsf_status_qual, - sizeof(union fsf_status_qual)); - memcpy(&data->payloads, &req->qtcb->bottom.support.els, - sizeof(req->qtcb->bottom.support.els)); -} - -static long zfcp_cfdc_dev_ioctl(struct file *file, unsigned int command, - unsigned long arg) -{ - struct zfcp_cfdc_data *data; - struct zfcp_cfdc_data __user *data_user; - struct zfcp_adapter *adapter; - struct zfcp_fsf_req *req; - struct zfcp_fsf_cfdc *fsf_cfdc; - int retval; - - if (command != ZFCP_CFDC_IOC) - return -ENOTTY; - - if (is_compat_task()) - data_user = compat_ptr(arg); - else - data_user = (void __user *)arg; - - if (!data_user) - return -EINVAL; - - fsf_cfdc = kmalloc(sizeof(struct zfcp_fsf_cfdc), GFP_KERNEL); - if (!fsf_cfdc) - return -ENOMEM; - - data = kmalloc(sizeof(struct zfcp_cfdc_data), GFP_KERNEL); - if (!data) { - retval = -ENOMEM; - goto no_mem_sense; - } - - retval = copy_from_user(data, data_user, sizeof(*data)); - if (retval) { - retval = -EFAULT; - goto free_buffer; - } - - if (data->signature != 0xCFDCACDF) { - retval = -EINVAL; - goto free_buffer; - } - - retval = zfcp_cfdc_set_fsf(fsf_cfdc, data->command); - - adapter = zfcp_cfdc_get_adapter(data->devno); - if (!adapter) { - retval = -ENXIO; - goto free_buffer; - } - - retval = zfcp_cfdc_sg_setup(data->command, fsf_cfdc->sg, - data_user->control_file); - if (retval) - goto adapter_put; - req = zfcp_fsf_control_file(adapter, fsf_cfdc); - if (IS_ERR(req)) { - retval = PTR_ERR(req); - goto free_sg; - } - - if (req->status & ZFCP_STATUS_FSFREQ_ERROR) { - retval = -ENXIO; - goto free_fsf; - } - - zfcp_cfdc_req_to_sense(data, req); - retval = copy_to_user(data_user, data, sizeof(*data_user)); - if (retval) { - retval = -EFAULT; - goto free_fsf; - } - - if (data->command & ZFCP_CFDC_UPLOAD) - retval = zfcp_cfdc_copy_to_user(&data_user->control_file, - fsf_cfdc->sg); - - free_fsf: - zfcp_fsf_req_free(req); - free_sg: - zfcp_sg_free_table(fsf_cfdc->sg, ZFCP_CFDC_PAGES); - adapter_put: - zfcp_ccw_adapter_put(adapter); - free_buffer: - kfree(data); - no_mem_sense: - kfree(fsf_cfdc); - return retval; -} - -static const struct file_operations zfcp_cfdc_fops = { - .unlocked_ioctl = zfcp_cfdc_dev_ioctl, -#ifdef CONFIG_COMPAT - .compat_ioctl = zfcp_cfdc_dev_ioctl -#endif -}; - -struct miscdevice zfcp_cfdc_misc = { - .minor = MISC_DYNAMIC_MINOR, - .name = "zfcp_cfdc", - .fops = &zfcp_cfdc_fops, -}; diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c index 
7a149fd85f6..0ca64484cfa 100644 --- a/drivers/s390/scsi/zfcp_dbf.c +++ b/drivers/s390/scsi/zfcp_dbf.c @@ -3,13 +3,15 @@ * * Debug traces for zfcp. * - * Copyright IBM Corporation 2002, 2009 + * Copyright IBM Corp. 2002, 2013 */ #define KMSG_COMPONENT "zfcp" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt +#include <linux/module.h> #include <linux/ctype.h> +#include <linux/slab.h> #include <asm/debug.h> #include "zfcp_dbf.h" #include "zfcp_ext.h" @@ -21,979 +23,455 @@ module_param(dbfsize, uint, 0400); MODULE_PARM_DESC(dbfsize, "number of pages for each debug feature area (default 4)"); -static void zfcp_dbf_hexdump(debug_info_t *dbf, void *to, int to_len, - int level, char *from, int from_len) -{ - int offset; - struct zfcp_dbf_dump *dump = to; - int room = to_len - sizeof(*dump); - - for (offset = 0; offset < from_len; offset += dump->size) { - memset(to, 0, to_len); - strncpy(dump->tag, "dump", ZFCP_DBF_TAG_SIZE); - dump->total_size = from_len; - dump->offset = offset; - dump->size = min(from_len - offset, room); - memcpy(dump->data, from + offset, dump->size); - debug_event(dbf, level, dump, dump->size + sizeof(*dump)); - } -} +static u32 dbflevel = 3; -static void zfcp_dbf_tag(char **p, const char *label, const char *tag) -{ - int i; - - *p += sprintf(*p, "%-24s", label); - for (i = 0; i < ZFCP_DBF_TAG_SIZE; i++) - *p += sprintf(*p, "%c", tag[i]); - *p += sprintf(*p, "\n"); -} +module_param(dbflevel, uint, 0400); +MODULE_PARM_DESC(dbflevel, + "log level for each debug feature area " + "(default 3, range 0..6)"); -static void zfcp_dbf_outs(char **buf, const char *s1, const char *s2) +static inline unsigned int zfcp_dbf_plen(unsigned int offset) { - *buf += sprintf(*buf, "%-24s%s\n", s1, s2); + return sizeof(struct zfcp_dbf_pay) + offset - ZFCP_DBF_PAY_MAX_REC; } -static void zfcp_dbf_out(char **buf, const char *s, const char *format, ...) 
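zfcp_dbf_pl_write, added above, is the building block for all long-payload traces in this file: it slices a buffer into records of at most ZFCP_DBF_PAY_MAX_REC bytes and emits one debug_event per slice. The core loop, reduced to its essentials (same names as in the hunk):

    u16 offset = 0, rec_length;

    while (offset < length) {
            rec_length = min((u16)ZFCP_DBF_PAY_MAX_REC,
                             (u16)(length - offset));
            memcpy(pl->data, data + offset, rec_length);
            debug_event(dbf->pay, 1, pl, zfcp_dbf_plen(rec_length));
            offset += rec_length;
            pl->counter++;  /* consumers reassemble slices in counter order */
    }

Because pl->fsf_req_id and pl->area are filled in once before the loop, every slice of one payload carries the same correlation key, and dbf->pay_lock keeps concurrent writers from interleaving their slices.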
+static inline +void zfcp_dbf_pl_write(struct zfcp_dbf *dbf, void *data, u16 length, char *area, + u64 req_id) { - va_list arg; + struct zfcp_dbf_pay *pl = &dbf->pay_buf; + u16 offset = 0, rec_length; - *buf += sprintf(*buf, "%-24s", s); - va_start(arg, format); - *buf += vsprintf(*buf, format, arg); - va_end(arg); - *buf += sprintf(*buf, "\n"); -} + spin_lock(&dbf->pay_lock); + memset(pl, 0, sizeof(*pl)); + pl->fsf_req_id = req_id; + memcpy(pl->area, area, ZFCP_DBF_TAG_LEN); -static void zfcp_dbf_outd(char **p, const char *label, char *buffer, - int buflen, int offset, int total_size) -{ - if (!offset) - *p += sprintf(*p, "%-24s ", label); - while (buflen--) { - if (offset > 0) { - if ((offset % 32) == 0) - *p += sprintf(*p, "\n%-24c ", ' '); - else if ((offset % 4) == 0) - *p += sprintf(*p, " "); - } - *p += sprintf(*p, "%02x", *buffer++); - if (++offset == total_size) { - *p += sprintf(*p, "\n"); - break; - } - } - if (!total_size) - *p += sprintf(*p, "\n"); -} + while (offset < length) { + rec_length = min((u16) ZFCP_DBF_PAY_MAX_REC, + (u16) (length - offset)); + memcpy(pl->data, data + offset, rec_length); + debug_event(dbf->pay, 1, pl, zfcp_dbf_plen(rec_length)); -static int zfcp_dbf_view_header(debug_info_t *id, struct debug_view *view, - int area, debug_entry_t *entry, char *out_buf) -{ - struct zfcp_dbf_dump *dump = (struct zfcp_dbf_dump *)DEBUG_DATA(entry); - struct timespec t; - char *p = out_buf; - - if (strncmp(dump->tag, "dump", ZFCP_DBF_TAG_SIZE) != 0) { - stck_to_timespec(entry->id.stck, &t); - zfcp_dbf_out(&p, "timestamp", "%011lu:%06lu", - t.tv_sec, t.tv_nsec); - zfcp_dbf_out(&p, "cpu", "%02i", entry->id.fields.cpuid); - } else { - zfcp_dbf_outd(&p, "", dump->data, dump->size, dump->offset, - dump->total_size); - if ((dump->offset + dump->size) == dump->total_size) - p += sprintf(p, "\n"); + offset += rec_length; + pl->counter++; } - return p - out_buf; + + spin_unlock(&dbf->pay_lock); } -void _zfcp_dbf_hba_fsf_response(const char *tag2, int level, - struct zfcp_fsf_req *fsf_req, - struct zfcp_dbf *dbf) +/** + * zfcp_dbf_hba_fsf_res - trace event for fsf responses + * @tag: tag indicating which kind of unsolicited status has been received + * @req: request for which a response was received + */ +void zfcp_dbf_hba_fsf_res(char *tag, struct zfcp_fsf_req *req) { - struct fsf_qtcb *qtcb = fsf_req->qtcb; - union fsf_prot_status_qual *prot_status_qual = - &qtcb->prefix.prot_status_qual; - union fsf_status_qual *fsf_status_qual = &qtcb->header.fsf_status_qual; - struct scsi_cmnd *scsi_cmnd; - struct zfcp_port *port; - struct zfcp_unit *unit; - struct zfcp_send_els *send_els; - struct zfcp_dbf_hba_record *rec = &dbf->hba_buf; - struct zfcp_dbf_hba_record_response *response = &rec->u.response; + struct zfcp_dbf *dbf = req->adapter->dbf; + struct fsf_qtcb_prefix *q_pref = &req->qtcb->prefix; + struct fsf_qtcb_header *q_head = &req->qtcb->header; + struct zfcp_dbf_hba *rec = &dbf->hba_buf; unsigned long flags; spin_lock_irqsave(&dbf->hba_lock, flags); memset(rec, 0, sizeof(*rec)); - strncpy(rec->tag, "resp", ZFCP_DBF_TAG_SIZE); - strncpy(rec->tag2, tag2, ZFCP_DBF_TAG_SIZE); - - response->fsf_command = fsf_req->fsf_command; - response->fsf_reqid = fsf_req->req_id; - response->fsf_seqno = fsf_req->seq_no; - response->fsf_issued = fsf_req->issued; - response->fsf_prot_status = qtcb->prefix.prot_status; - response->fsf_status = qtcb->header.fsf_status; - memcpy(response->fsf_prot_status_qual, - prot_status_qual, FSF_PROT_STATUS_QUAL_SIZE); - memcpy(response->fsf_status_qual, - 
fsf_status_qual, FSF_STATUS_QUALIFIER_SIZE); - response->fsf_req_status = fsf_req->status; - response->sbal_first = fsf_req->qdio_req.sbal_first; - response->sbal_last = fsf_req->qdio_req.sbal_last; - response->sbal_response = fsf_req->qdio_req.sbal_response; - response->pool = fsf_req->pool != NULL; - response->erp_action = (unsigned long)fsf_req->erp_action; - - switch (fsf_req->fsf_command) { - case FSF_QTCB_FCP_CMND: - if (fsf_req->status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT) - break; - scsi_cmnd = (struct scsi_cmnd *)fsf_req->data; - if (scsi_cmnd) { - response->u.fcp.cmnd = (unsigned long)scsi_cmnd; - response->u.fcp.serial = scsi_cmnd->serial_number; - } - break; - - case FSF_QTCB_OPEN_PORT_WITH_DID: - case FSF_QTCB_CLOSE_PORT: - case FSF_QTCB_CLOSE_PHYSICAL_PORT: - port = (struct zfcp_port *)fsf_req->data; - response->u.port.wwpn = port->wwpn; - response->u.port.d_id = port->d_id; - response->u.port.port_handle = qtcb->header.port_handle; - break; - - case FSF_QTCB_OPEN_LUN: - case FSF_QTCB_CLOSE_LUN: - unit = (struct zfcp_unit *)fsf_req->data; - port = unit->port; - response->u.unit.wwpn = port->wwpn; - response->u.unit.fcp_lun = unit->fcp_lun; - response->u.unit.port_handle = qtcb->header.port_handle; - response->u.unit.lun_handle = qtcb->header.lun_handle; - break; - - case FSF_QTCB_SEND_ELS: - send_els = (struct zfcp_send_els *)fsf_req->data; - response->u.els.d_id = ntoh24(qtcb->bottom.support.d_id); - break; - - case FSF_QTCB_ABORT_FCP_CMND: - case FSF_QTCB_SEND_GENERIC: - case FSF_QTCB_EXCHANGE_CONFIG_DATA: - case FSF_QTCB_EXCHANGE_PORT_DATA: - case FSF_QTCB_DOWNLOAD_CONTROL_FILE: - case FSF_QTCB_UPLOAD_CONTROL_FILE: - break; - } - - debug_event(dbf->hba, level, rec, sizeof(*rec)); - /* have fcp channel microcode fixed to use as little as possible */ - if (fsf_req->fsf_command != FSF_QTCB_FCP_CMND) { - /* adjust length skipping trailing zeros */ - char *buf = (char *)qtcb + qtcb->header.log_start; - int len = qtcb->header.log_length; - for (; len && !buf[len - 1]; len--); - zfcp_dbf_hexdump(dbf->hba, rec, sizeof(*rec), level, buf, - len); + memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN); + rec->id = ZFCP_DBF_HBA_RES; + rec->fsf_req_id = req->req_id; + rec->fsf_req_status = req->status; + rec->fsf_cmd = req->fsf_command; + rec->fsf_seq_no = req->seq_no; + rec->u.res.req_issued = req->issued; + rec->u.res.prot_status = q_pref->prot_status; + rec->u.res.fsf_status = q_head->fsf_status; + + memcpy(rec->u.res.prot_status_qual, &q_pref->prot_status_qual, + FSF_PROT_STATUS_QUAL_SIZE); + memcpy(rec->u.res.fsf_status_qual, &q_head->fsf_status_qual, + FSF_STATUS_QUALIFIER_SIZE); + + if (req->fsf_command != FSF_QTCB_FCP_CMND) { + rec->pl_len = q_head->log_length; + zfcp_dbf_pl_write(dbf, (char *)q_pref + q_head->log_start, + rec->pl_len, "fsf_res", req->req_id); } + debug_event(dbf->hba, 1, rec, sizeof(*rec)); spin_unlock_irqrestore(&dbf->hba_lock, flags); } -void _zfcp_dbf_hba_fsf_unsol(const char *tag, int level, struct zfcp_dbf *dbf, - struct fsf_status_read_buffer *status_buffer) +/** + * zfcp_dbf_hba_fsf_uss - trace event for an unsolicited status buffer + * @tag: tag indicating which kind of unsolicited status has been received + * @req: request providing the unsolicited status + */ +void zfcp_dbf_hba_fsf_uss(char *tag, struct zfcp_fsf_req *req) { - struct zfcp_dbf_hba_record *rec = &dbf->hba_buf; + struct zfcp_dbf *dbf = req->adapter->dbf; + struct fsf_status_read_buffer *srb = req->data; + struct zfcp_dbf_hba *rec = &dbf->hba_buf; unsigned long flags; spin_lock_irqsave(&dbf->hba_lock, 
flags); memset(rec, 0, sizeof(*rec)); - strncpy(rec->tag, "stat", ZFCP_DBF_TAG_SIZE); - strncpy(rec->tag2, tag, ZFCP_DBF_TAG_SIZE); - - rec->u.status.failed = atomic_read(&dbf->adapter->stat_miss); - if (status_buffer != NULL) { - rec->u.status.status_type = status_buffer->status_type; - rec->u.status.status_subtype = status_buffer->status_subtype; - memcpy(&rec->u.status.queue_designator, - &status_buffer->queue_designator, - sizeof(struct fsf_queue_designator)); - - switch (status_buffer->status_type) { - case FSF_STATUS_READ_SENSE_DATA_AVAIL: - rec->u.status.payload_size = - ZFCP_DBF_UNSOL_PAYLOAD_SENSE_DATA_AVAIL; - break; - - case FSF_STATUS_READ_BIT_ERROR_THRESHOLD: - rec->u.status.payload_size = - ZFCP_DBF_UNSOL_PAYLOAD_BIT_ERROR_THRESHOLD; - break; - - case FSF_STATUS_READ_LINK_DOWN: - switch (status_buffer->status_subtype) { - case FSF_STATUS_READ_SUB_NO_PHYSICAL_LINK: - case FSF_STATUS_READ_SUB_FDISC_FAILED: - rec->u.status.payload_size = - sizeof(struct fsf_link_down_info); - } - break; - - case FSF_STATUS_READ_FEATURE_UPDATE_ALERT: - rec->u.status.payload_size = - ZFCP_DBF_UNSOL_PAYLOAD_FEATURE_UPDATE_ALERT; - break; - } - memcpy(&rec->u.status.payload, - &status_buffer->payload, rec->u.status.payload_size); - } - debug_event(dbf->hba, level, rec, sizeof(*rec)); + memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN); + rec->id = ZFCP_DBF_HBA_USS; + rec->fsf_req_id = req->req_id; + rec->fsf_req_status = req->status; + rec->fsf_cmd = req->fsf_command; + + if (!srb) + goto log; + + rec->u.uss.status_type = srb->status_type; + rec->u.uss.status_subtype = srb->status_subtype; + rec->u.uss.d_id = ntoh24(srb->d_id); + rec->u.uss.lun = srb->fcp_lun; + memcpy(&rec->u.uss.queue_designator, &srb->queue_designator, + sizeof(rec->u.uss.queue_designator)); + + /* status read buffer payload length */ + rec->pl_len = (!srb->length) ? 
0 : srb->length - + offsetof(struct fsf_status_read_buffer, payload); + + if (rec->pl_len) + zfcp_dbf_pl_write(dbf, srb->payload.data, rec->pl_len, + "fsf_uss", req->req_id); +log: + debug_event(dbf->hba, 2, rec, sizeof(*rec)); spin_unlock_irqrestore(&dbf->hba_lock, flags); } /** - * zfcp_dbf_hba_qdio - trace event for QDIO related failure - * @qdio: qdio structure affected by this QDIO related event - * @qdio_error: as passed by qdio module - * @sbal_index: first buffer with error condition, as passed by qdio module - * @sbal_count: number of buffers affected, as passed by qdio module + * zfcp_dbf_hba_bit_err - trace event for bit error conditions + * @tag: tag indicating which kind of unsolicited status has been received + * @req: request which caused the bit_error condition */ -void zfcp_dbf_hba_qdio(struct zfcp_dbf *dbf, unsigned int qdio_error, - int sbal_index, int sbal_count) +void zfcp_dbf_hba_bit_err(char *tag, struct zfcp_fsf_req *req) { - struct zfcp_dbf_hba_record *r = &dbf->hba_buf; + struct zfcp_dbf *dbf = req->adapter->dbf; + struct zfcp_dbf_hba *rec = &dbf->hba_buf; + struct fsf_status_read_buffer *sr_buf = req->data; unsigned long flags; spin_lock_irqsave(&dbf->hba_lock, flags); - memset(r, 0, sizeof(*r)); - strncpy(r->tag, "qdio", ZFCP_DBF_TAG_SIZE); - r->u.qdio.qdio_error = qdio_error; - r->u.qdio.sbal_index = sbal_index; - r->u.qdio.sbal_count = sbal_count; - debug_event(dbf->hba, 0, r, sizeof(*r)); + memset(rec, 0, sizeof(*rec)); + + memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN); + rec->id = ZFCP_DBF_HBA_BIT; + rec->fsf_req_id = req->req_id; + rec->fsf_req_status = req->status; + rec->fsf_cmd = req->fsf_command; + memcpy(&rec->u.be, &sr_buf->payload.bit_error, + sizeof(struct fsf_bit_error_payload)); + + debug_event(dbf->hba, 1, rec, sizeof(*rec)); spin_unlock_irqrestore(&dbf->hba_lock, flags); } /** - * zfcp_dbf_hba_berr - trace event for bit error threshold - * @dbf: dbf structure affected by this QDIO related event - * @req: fsf request + * zfcp_dbf_hba_def_err - trace event for deferred error messages + * @adapter: pointer to struct zfcp_adapter + * @req_id: request id which caused the deferred error message + * @scount: number of sbals incl. 
the signaling sbal + * @pl: array of all involved sbals */ -void zfcp_dbf_hba_berr(struct zfcp_dbf *dbf, struct zfcp_fsf_req *req) +void zfcp_dbf_hba_def_err(struct zfcp_adapter *adapter, u64 req_id, u16 scount, + void **pl) { - struct zfcp_dbf_hba_record *r = &dbf->hba_buf; - struct fsf_status_read_buffer *sr_buf = req->data; - struct fsf_bit_error_payload *err = &sr_buf->payload.bit_error; + struct zfcp_dbf *dbf = adapter->dbf; + struct zfcp_dbf_pay *payload = &dbf->pay_buf; unsigned long flags; + u16 length; - spin_lock_irqsave(&dbf->hba_lock, flags); - memset(r, 0, sizeof(*r)); - strncpy(r->tag, "berr", ZFCP_DBF_TAG_SIZE); - memcpy(&r->u.berr, err, sizeof(struct fsf_bit_error_payload)); - debug_event(dbf->hba, 0, r, sizeof(*r)); - spin_unlock_irqrestore(&dbf->hba_lock, flags); -} -static void zfcp_dbf_hba_view_response(char **p, - struct zfcp_dbf_hba_record_response *r) -{ - struct timespec t; - - zfcp_dbf_out(p, "fsf_command", "0x%08x", r->fsf_command); - zfcp_dbf_out(p, "fsf_reqid", "0x%0Lx", r->fsf_reqid); - zfcp_dbf_out(p, "fsf_seqno", "0x%08x", r->fsf_seqno); - stck_to_timespec(r->fsf_issued, &t); - zfcp_dbf_out(p, "fsf_issued", "%011lu:%06lu", t.tv_sec, t.tv_nsec); - zfcp_dbf_out(p, "fsf_prot_status", "0x%08x", r->fsf_prot_status); - zfcp_dbf_out(p, "fsf_status", "0x%08x", r->fsf_status); - zfcp_dbf_outd(p, "fsf_prot_status_qual", r->fsf_prot_status_qual, - FSF_PROT_STATUS_QUAL_SIZE, 0, FSF_PROT_STATUS_QUAL_SIZE); - zfcp_dbf_outd(p, "fsf_status_qual", r->fsf_status_qual, - FSF_STATUS_QUALIFIER_SIZE, 0, FSF_STATUS_QUALIFIER_SIZE); - zfcp_dbf_out(p, "fsf_req_status", "0x%08x", r->fsf_req_status); - zfcp_dbf_out(p, "sbal_first", "0x%02x", r->sbal_first); - zfcp_dbf_out(p, "sbal_last", "0x%02x", r->sbal_last); - zfcp_dbf_out(p, "sbal_response", "0x%02x", r->sbal_response); - zfcp_dbf_out(p, "pool", "0x%02x", r->pool); - - switch (r->fsf_command) { - case FSF_QTCB_FCP_CMND: - if (r->fsf_req_status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT) - break; - zfcp_dbf_out(p, "scsi_cmnd", "0x%0Lx", r->u.fcp.cmnd); - zfcp_dbf_out(p, "scsi_serial", "0x%016Lx", r->u.fcp.serial); - *p += sprintf(*p, "\n"); - break; - - case FSF_QTCB_OPEN_PORT_WITH_DID: - case FSF_QTCB_CLOSE_PORT: - case FSF_QTCB_CLOSE_PHYSICAL_PORT: - zfcp_dbf_out(p, "wwpn", "0x%016Lx", r->u.port.wwpn); - zfcp_dbf_out(p, "d_id", "0x%06x", r->u.port.d_id); - zfcp_dbf_out(p, "port_handle", "0x%08x", r->u.port.port_handle); - break; - - case FSF_QTCB_OPEN_LUN: - case FSF_QTCB_CLOSE_LUN: - zfcp_dbf_out(p, "wwpn", "0x%016Lx", r->u.unit.wwpn); - zfcp_dbf_out(p, "fcp_lun", "0x%016Lx", r->u.unit.fcp_lun); - zfcp_dbf_out(p, "port_handle", "0x%08x", r->u.unit.port_handle); - zfcp_dbf_out(p, "lun_handle", "0x%08x", r->u.unit.lun_handle); - break; - - case FSF_QTCB_SEND_ELS: - zfcp_dbf_out(p, "d_id", "0x%06x", r->u.els.d_id); - break; - - case FSF_QTCB_ABORT_FCP_CMND: - case FSF_QTCB_SEND_GENERIC: - case FSF_QTCB_EXCHANGE_CONFIG_DATA: - case FSF_QTCB_EXCHANGE_PORT_DATA: - case FSF_QTCB_DOWNLOAD_CONTROL_FILE: - case FSF_QTCB_UPLOAD_CONTROL_FILE: - break; - } -} - -static void zfcp_dbf_hba_view_status(char **p, - struct zfcp_dbf_hba_record_status *r) -{ - zfcp_dbf_out(p, "failed", "0x%02x", r->failed); - zfcp_dbf_out(p, "status_type", "0x%08x", r->status_type); - zfcp_dbf_out(p, "status_subtype", "0x%08x", r->status_subtype); - zfcp_dbf_outd(p, "queue_designator", (char *)&r->queue_designator, - sizeof(struct fsf_queue_designator), 0, - sizeof(struct fsf_queue_designator)); - zfcp_dbf_outd(p, "payload", (char *)&r->payload, r->payload_size, 0, - 
r->payload_size); -} - -static void zfcp_dbf_hba_view_qdio(char **p, struct zfcp_dbf_hba_record_qdio *r) -{ - zfcp_dbf_out(p, "qdio_error", "0x%08x", r->qdio_error); - zfcp_dbf_out(p, "sbal_index", "0x%02x", r->sbal_index); - zfcp_dbf_out(p, "sbal_count", "0x%02x", r->sbal_count); -} + if (!pl) + return; -static void zfcp_dbf_hba_view_berr(char **p, struct fsf_bit_error_payload *r) -{ - zfcp_dbf_out(p, "link_failures", "%d", r->link_failure_error_count); - zfcp_dbf_out(p, "loss_of_sync_err", "%d", r->loss_of_sync_error_count); - zfcp_dbf_out(p, "loss_of_sig_err", "%d", r->loss_of_signal_error_count); - zfcp_dbf_out(p, "prim_seq_err", "%d", - r->primitive_sequence_error_count); - zfcp_dbf_out(p, "inval_trans_word_err", "%d", - r->invalid_transmission_word_error_count); - zfcp_dbf_out(p, "CRC_errors", "%d", r->crc_error_count); - zfcp_dbf_out(p, "prim_seq_event_to", "%d", - r->primitive_sequence_event_timeout_count); - zfcp_dbf_out(p, "elast_buf_overrun_err", "%d", - r->elastic_buffer_overrun_error_count); - zfcp_dbf_out(p, "adv_rec_buf2buf_cred", "%d", - r->advertised_receive_b2b_credit); - zfcp_dbf_out(p, "curr_rec_buf2buf_cred", "%d", - r->current_receive_b2b_credit); - zfcp_dbf_out(p, "adv_trans_buf2buf_cred", "%d", - r->advertised_transmit_b2b_credit); - zfcp_dbf_out(p, "curr_trans_buf2buf_cred", "%d", - r->current_transmit_b2b_credit); -} + spin_lock_irqsave(&dbf->pay_lock, flags); + memset(payload, 0, sizeof(*payload)); -static int zfcp_dbf_hba_view_format(debug_info_t *id, struct debug_view *view, - char *out_buf, const char *in_buf) -{ - struct zfcp_dbf_hba_record *r = (struct zfcp_dbf_hba_record *)in_buf; - char *p = out_buf; - - if (strncmp(r->tag, "dump", ZFCP_DBF_TAG_SIZE) == 0) - return 0; - - zfcp_dbf_tag(&p, "tag", r->tag); - if (isalpha(r->tag2[0])) - zfcp_dbf_tag(&p, "tag2", r->tag2); - - if (strncmp(r->tag, "resp", ZFCP_DBF_TAG_SIZE) == 0) - zfcp_dbf_hba_view_response(&p, &r->u.response); - else if (strncmp(r->tag, "stat", ZFCP_DBF_TAG_SIZE) == 0) - zfcp_dbf_hba_view_status(&p, &r->u.status); - else if (strncmp(r->tag, "qdio", ZFCP_DBF_TAG_SIZE) == 0) - zfcp_dbf_hba_view_qdio(&p, &r->u.qdio); - else if (strncmp(r->tag, "berr", ZFCP_DBF_TAG_SIZE) == 0) - zfcp_dbf_hba_view_berr(&p, &r->u.berr); - - if (strncmp(r->tag, "resp", ZFCP_DBF_TAG_SIZE) != 0) - p += sprintf(p, "\n"); - return p - out_buf; -} + memcpy(payload->area, "def_err", 7); + payload->fsf_req_id = req_id; + payload->counter = 0; + length = min((u16)sizeof(struct qdio_buffer), + (u16)ZFCP_DBF_PAY_MAX_REC); -static struct debug_view zfcp_dbf_hba_view = { - .name = "structured", - .header_proc = zfcp_dbf_view_header, - .format_proc = zfcp_dbf_hba_view_format, -}; - -static const char *zfcp_dbf_rec_tags[] = { - [ZFCP_REC_DBF_ID_THREAD] = "thread", - [ZFCP_REC_DBF_ID_TARGET] = "target", - [ZFCP_REC_DBF_ID_TRIGGER] = "trigger", - [ZFCP_REC_DBF_ID_ACTION] = "action", -}; - -static int zfcp_dbf_rec_view_format(debug_info_t *id, struct debug_view *view, - char *buf, const char *_rec) -{ - struct zfcp_dbf_rec_record *r = (struct zfcp_dbf_rec_record *)_rec; - char *p = buf; - char hint[ZFCP_DBF_ID_SIZE + 1]; - - memcpy(hint, r->id2, ZFCP_DBF_ID_SIZE); - hint[ZFCP_DBF_ID_SIZE] = 0; - zfcp_dbf_outs(&p, "tag", zfcp_dbf_rec_tags[r->id]); - zfcp_dbf_outs(&p, "hint", hint); - switch (r->id) { - case ZFCP_REC_DBF_ID_THREAD: - zfcp_dbf_out(&p, "total", "%d", r->u.thread.total); - zfcp_dbf_out(&p, "ready", "%d", r->u.thread.ready); - zfcp_dbf_out(&p, "running", "%d", r->u.thread.running); - break; - case ZFCP_REC_DBF_ID_TARGET: - 
zfcp_dbf_out(&p, "reference", "0x%016Lx", r->u.target.ref); - zfcp_dbf_out(&p, "status", "0x%08x", r->u.target.status); - zfcp_dbf_out(&p, "erp_count", "%d", r->u.target.erp_count); - zfcp_dbf_out(&p, "d_id", "0x%06x", r->u.target.d_id); - zfcp_dbf_out(&p, "wwpn", "0x%016Lx", r->u.target.wwpn); - zfcp_dbf_out(&p, "fcp_lun", "0x%016Lx", r->u.target.fcp_lun); - break; - case ZFCP_REC_DBF_ID_TRIGGER: - zfcp_dbf_out(&p, "reference", "0x%016Lx", r->u.trigger.ref); - zfcp_dbf_out(&p, "erp_action", "0x%016Lx", r->u.trigger.action); - zfcp_dbf_out(&p, "requested", "%d", r->u.trigger.want); - zfcp_dbf_out(&p, "executed", "%d", r->u.trigger.need); - zfcp_dbf_out(&p, "wwpn", "0x%016Lx", r->u.trigger.wwpn); - zfcp_dbf_out(&p, "fcp_lun", "0x%016Lx", r->u.trigger.fcp_lun); - zfcp_dbf_out(&p, "adapter_status", "0x%08x", r->u.trigger.as); - zfcp_dbf_out(&p, "port_status", "0x%08x", r->u.trigger.ps); - zfcp_dbf_out(&p, "unit_status", "0x%08x", r->u.trigger.us); - break; - case ZFCP_REC_DBF_ID_ACTION: - zfcp_dbf_out(&p, "erp_action", "0x%016Lx", r->u.action.action); - zfcp_dbf_out(&p, "fsf_req", "0x%016Lx", r->u.action.fsf_req); - zfcp_dbf_out(&p, "status", "0x%08Lx", r->u.action.status); - zfcp_dbf_out(&p, "step", "0x%08Lx", r->u.action.step); - break; + while (payload->counter < scount && (char *)pl[payload->counter]) { + memcpy(payload->data, (char *)pl[payload->counter], length); + debug_event(dbf->pay, 1, payload, zfcp_dbf_plen(length)); + payload->counter++; } - p += sprintf(p, "\n"); - return p - buf; -} -static struct debug_view zfcp_dbf_rec_view = { - .name = "structured", - .header_proc = zfcp_dbf_view_header, - .format_proc = zfcp_dbf_rec_view_format, -}; - -/** - * zfcp_dbf_rec_thread - trace event related to recovery thread operation - * @id2: identifier for event - * @dbf: reference to dbf structure - * This function assumes that the caller is holding erp_lock. - */ -void zfcp_dbf_rec_thread(char *id2, struct zfcp_dbf *dbf) -{ - struct zfcp_adapter *adapter = dbf->adapter; - struct zfcp_dbf_rec_record *r = &dbf->rec_buf; - unsigned long flags = 0; - struct list_head *entry; - unsigned ready = 0, running = 0, total; - - list_for_each(entry, &adapter->erp_ready_head) - ready++; - list_for_each(entry, &adapter->erp_running_head) - running++; - total = adapter->erp_total_count; - - spin_lock_irqsave(&dbf->rec_lock, flags); - memset(r, 0, sizeof(*r)); - r->id = ZFCP_REC_DBF_ID_THREAD; - memcpy(r->id2, id2, ZFCP_DBF_ID_SIZE); - r->u.thread.total = total; - r->u.thread.ready = ready; - r->u.thread.running = running; - debug_event(dbf->rec, 6, r, sizeof(*r)); - spin_unlock_irqrestore(&dbf->rec_lock, flags); + spin_unlock_irqrestore(&dbf->pay_lock, flags); } /** - * zfcp_dbf_rec_thread - trace event related to recovery thread operation - * @id2: identifier for event - * @adapter: adapter - * This function assumes that the caller does not hold erp_lock. 
+ * zfcp_dbf_hba_basic - trace event for basic adapter events + * @adapter: pointer to struct zfcp_adapter */ -void zfcp_dbf_rec_thread_lock(char *id2, struct zfcp_dbf *dbf) +void zfcp_dbf_hba_basic(char *tag, struct zfcp_adapter *adapter) { - struct zfcp_adapter *adapter = dbf->adapter; + struct zfcp_dbf *dbf = adapter->dbf; + struct zfcp_dbf_hba *rec = &dbf->hba_buf; unsigned long flags; - read_lock_irqsave(&adapter->erp_lock, flags); - zfcp_dbf_rec_thread(id2, dbf); - read_unlock_irqrestore(&adapter->erp_lock, flags); -} + spin_lock_irqsave(&dbf->hba_lock, flags); + memset(rec, 0, sizeof(*rec)); -static void zfcp_dbf_rec_target(char *id2, void *ref, struct zfcp_dbf *dbf, - atomic_t *status, atomic_t *erp_count, u64 wwpn, - u32 d_id, u64 fcp_lun) -{ - struct zfcp_dbf_rec_record *r = &dbf->rec_buf; - unsigned long flags; + memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN); + rec->id = ZFCP_DBF_HBA_BASIC; - spin_lock_irqsave(&dbf->rec_lock, flags); - memset(r, 0, sizeof(*r)); - r->id = ZFCP_REC_DBF_ID_TARGET; - memcpy(r->id2, id2, ZFCP_DBF_ID_SIZE); - r->u.target.ref = (unsigned long)ref; - r->u.target.status = atomic_read(status); - r->u.target.wwpn = wwpn; - r->u.target.d_id = d_id; - r->u.target.fcp_lun = fcp_lun; - r->u.target.erp_count = atomic_read(erp_count); - debug_event(dbf->rec, 3, r, sizeof(*r)); - spin_unlock_irqrestore(&dbf->rec_lock, flags); + debug_event(dbf->hba, 1, rec, sizeof(*rec)); + spin_unlock_irqrestore(&dbf->hba_lock, flags); } -/** - * zfcp_dbf_rec_adapter - trace event for adapter state change - * @id: identifier for trigger of state change - * @ref: additional reference (e.g. request) - * @dbf: reference to dbf structure - */ -void zfcp_dbf_rec_adapter(char *id, void *ref, struct zfcp_dbf *dbf) +static void zfcp_dbf_set_common(struct zfcp_dbf_rec *rec, + struct zfcp_adapter *adapter, + struct zfcp_port *port, + struct scsi_device *sdev) { - struct zfcp_adapter *adapter = dbf->adapter; - - zfcp_dbf_rec_target(id, ref, dbf, &adapter->status, - &adapter->erp_counter, 0, 0, - ZFCP_DBF_INVALID_LUN); + rec->adapter_status = atomic_read(&adapter->status); + if (port) { + rec->port_status = atomic_read(&port->status); + rec->wwpn = port->wwpn; + rec->d_id = port->d_id; + } + if (sdev) { + rec->lun_status = atomic_read(&sdev_to_zfcp(sdev)->status); + rec->lun = zfcp_scsi_dev_lun(sdev); + } } /** - * zfcp_dbf_rec_port - trace event for port state change - * @id: identifier for trigger of state change - * @ref: additional reference (e.g. request) - * @port: port + * zfcp_dbf_rec_trig - trace event related to triggered recovery + * @tag: identifier for event + * @adapter: adapter on which the erp_action should run + * @port: remote port involved in the erp_action + * @sdev: scsi device involved in the erp_action + * @want: wanted erp_action + * @need: required erp_action + * + * The adapter->erp_lock has to be held. 
*/ -void zfcp_dbf_rec_port(char *id, void *ref, struct zfcp_port *port) +void zfcp_dbf_rec_trig(char *tag, struct zfcp_adapter *adapter, + struct zfcp_port *port, struct scsi_device *sdev, + u8 want, u8 need) { - struct zfcp_dbf *dbf = port->adapter->dbf; + struct zfcp_dbf *dbf = adapter->dbf; + struct zfcp_dbf_rec *rec = &dbf->rec_buf; + struct list_head *entry; + unsigned long flags; - zfcp_dbf_rec_target(id, ref, dbf, &port->status, - &port->erp_counter, port->wwpn, port->d_id, - ZFCP_DBF_INVALID_LUN); -} + spin_lock_irqsave(&dbf->rec_lock, flags); + memset(rec, 0, sizeof(*rec)); -/** - * zfcp_dbf_rec_unit - trace event for unit state change - * @id: identifier for trigger of state change - * @ref: additional reference (e.g. request) - * @unit: unit - */ -void zfcp_dbf_rec_unit(char *id, void *ref, struct zfcp_unit *unit) -{ - struct zfcp_port *port = unit->port; - struct zfcp_dbf *dbf = port->adapter->dbf; + rec->id = ZFCP_DBF_REC_TRIG; + memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN); + zfcp_dbf_set_common(rec, adapter, port, sdev); - zfcp_dbf_rec_target(id, ref, dbf, &unit->status, - &unit->erp_counter, port->wwpn, port->d_id, - unit->fcp_lun); -} + list_for_each(entry, &adapter->erp_ready_head) + rec->u.trig.ready++; -/** - * zfcp_dbf_rec_trigger - trace event for triggered error recovery - * @id2: identifier for error recovery trigger - * @ref: additional reference (e.g. request) - * @want: originally requested error recovery action - * @need: error recovery action actually initiated - * @action: address of error recovery action struct - * @adapter: adapter - * @port: port - * @unit: unit - */ -void zfcp_dbf_rec_trigger(char *id2, void *ref, u8 want, u8 need, void *action, - struct zfcp_adapter *adapter, struct zfcp_port *port, - struct zfcp_unit *unit) -{ - struct zfcp_dbf *dbf = adapter->dbf; - struct zfcp_dbf_rec_record *r = &dbf->rec_buf; - unsigned long flags; + list_for_each(entry, &adapter->erp_running_head) + rec->u.trig.running++; - spin_lock_irqsave(&dbf->rec_lock, flags); - memset(r, 0, sizeof(*r)); - r->id = ZFCP_REC_DBF_ID_TRIGGER; - memcpy(r->id2, id2, ZFCP_DBF_ID_SIZE); - r->u.trigger.ref = (unsigned long)ref; - r->u.trigger.want = want; - r->u.trigger.need = need; - r->u.trigger.action = (unsigned long)action; - r->u.trigger.as = atomic_read(&adapter->status); - if (port) { - r->u.trigger.ps = atomic_read(&port->status); - r->u.trigger.wwpn = port->wwpn; - } - if (unit) - r->u.trigger.us = atomic_read(&unit->status); - r->u.trigger.fcp_lun = unit ? unit->fcp_lun : ZFCP_DBF_INVALID_LUN; - debug_event(dbf->rec, action ? 
1 : 4, r, sizeof(*r)); + rec->u.trig.want = want; + rec->u.trig.need = need; + + debug_event(dbf->rec, 1, rec, sizeof(*rec)); spin_unlock_irqrestore(&dbf->rec_lock, flags); } + /** - * zfcp_dbf_rec_action - trace event showing progress of recovery action - * @id2: identifier - * @erp_action: error recovery action struct pointer + * zfcp_dbf_rec_run - trace event related to running recovery + * @tag: identifier for event + * @erp: erp_action running */ -void zfcp_dbf_rec_action(char *id2, struct zfcp_erp_action *erp_action) +void zfcp_dbf_rec_run(char *tag, struct zfcp_erp_action *erp) { - struct zfcp_dbf *dbf = erp_action->adapter->dbf; - struct zfcp_dbf_rec_record *r = &dbf->rec_buf; + struct zfcp_dbf *dbf = erp->adapter->dbf; + struct zfcp_dbf_rec *rec = &dbf->rec_buf; unsigned long flags; spin_lock_irqsave(&dbf->rec_lock, flags); - memset(r, 0, sizeof(*r)); - r->id = ZFCP_REC_DBF_ID_ACTION; - memcpy(r->id2, id2, ZFCP_DBF_ID_SIZE); - r->u.action.action = (unsigned long)erp_action; - r->u.action.status = erp_action->status; - r->u.action.step = erp_action->step; - r->u.action.fsf_req = erp_action->fsf_req_id; - debug_event(dbf->rec, 5, r, sizeof(*r)); - spin_unlock_irqrestore(&dbf->rec_lock, flags); -} + memset(rec, 0, sizeof(*rec)); -/** - * zfcp_dbf_san_ct_request - trace event for issued CT request - * @fsf_req: request containing issued CT data - * @d_id: destination id where ct request is sent to - */ -void zfcp_dbf_san_ct_request(struct zfcp_fsf_req *fsf_req, u32 d_id) -{ - struct zfcp_fsf_ct_els *ct = (struct zfcp_fsf_ct_els *)fsf_req->data; - struct zfcp_adapter *adapter = fsf_req->adapter; - struct zfcp_dbf *dbf = adapter->dbf; - struct fc_ct_hdr *hdr = sg_virt(ct->req); - struct zfcp_dbf_san_record *r = &dbf->san_buf; - struct zfcp_dbf_san_record_ct_request *oct = &r->u.ct_req; - int level = 3; - unsigned long flags; + rec->id = ZFCP_DBF_REC_RUN; + memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN); + zfcp_dbf_set_common(rec, erp->adapter, erp->port, erp->sdev); - spin_lock_irqsave(&dbf->san_lock, flags); - memset(r, 0, sizeof(*r)); - strncpy(r->tag, "octc", ZFCP_DBF_TAG_SIZE); - r->fsf_reqid = fsf_req->req_id; - r->fsf_seqno = fsf_req->seq_no; - oct->d_id = d_id; - oct->cmd_req_code = hdr->ct_cmd; - oct->revision = hdr->ct_rev; - oct->gs_type = hdr->ct_fs_type; - oct->gs_subtype = hdr->ct_fs_subtype; - oct->options = hdr->ct_options; - oct->max_res_size = hdr->ct_mr_size; - oct->len = min((int)ct->req->length - (int)sizeof(struct fc_ct_hdr), - ZFCP_DBF_SAN_MAX_PAYLOAD); - debug_event(dbf->san, level, r, sizeof(*r)); - zfcp_dbf_hexdump(dbf->san, r, sizeof(*r), level, - (void *)hdr + sizeof(struct fc_ct_hdr), oct->len); - spin_unlock_irqrestore(&dbf->san_lock, flags); -} + rec->u.run.fsf_req_id = erp->fsf_req_id; + rec->u.run.rec_status = erp->status; + rec->u.run.rec_step = erp->step; + rec->u.run.rec_action = erp->action; -/** - * zfcp_dbf_san_ct_response - trace event for completion of CT request - * @fsf_req: request containing CT response - */ -void zfcp_dbf_san_ct_response(struct zfcp_fsf_req *fsf_req) -{ - struct zfcp_fsf_ct_els *ct = (struct zfcp_fsf_ct_els *)fsf_req->data; - struct zfcp_adapter *adapter = fsf_req->adapter; - struct fc_ct_hdr *hdr = sg_virt(ct->resp); - struct zfcp_dbf *dbf = adapter->dbf; - struct zfcp_dbf_san_record *r = &dbf->san_buf; - struct zfcp_dbf_san_record_ct_response *rct = &r->u.ct_resp; - int level = 3; - unsigned long flags; + if (erp->sdev) + rec->u.run.rec_count = + atomic_read(&sdev_to_zfcp(erp->sdev)->erp_counter); + else if (erp->port) + 
rec->u.run.rec_count = atomic_read(&erp->port->erp_counter); + else + rec->u.run.rec_count = atomic_read(&erp->adapter->erp_counter); - spin_lock_irqsave(&dbf->san_lock, flags); - memset(r, 0, sizeof(*r)); - strncpy(r->tag, "rctc", ZFCP_DBF_TAG_SIZE); - r->fsf_reqid = fsf_req->req_id; - r->fsf_seqno = fsf_req->seq_no; - rct->cmd_rsp_code = hdr->ct_cmd; - rct->revision = hdr->ct_rev; - rct->reason_code = hdr->ct_reason; - rct->expl = hdr->ct_explan; - rct->vendor_unique = hdr->ct_vendor; - rct->max_res_size = hdr->ct_mr_size; - rct->len = min((int)ct->resp->length - (int)sizeof(struct fc_ct_hdr), - ZFCP_DBF_SAN_MAX_PAYLOAD); - debug_event(dbf->san, level, r, sizeof(*r)); - zfcp_dbf_hexdump(dbf->san, r, sizeof(*r), level, - (void *)hdr + sizeof(struct fc_ct_hdr), rct->len); - spin_unlock_irqrestore(&dbf->san_lock, flags); + debug_event(dbf->rec, 1, rec, sizeof(*rec)); + spin_unlock_irqrestore(&dbf->rec_lock, flags); } -static void zfcp_dbf_san_els(const char *tag, int level, - struct zfcp_fsf_req *fsf_req, u32 d_id, - void *buffer, int buflen) +static inline +void zfcp_dbf_san(char *tag, struct zfcp_dbf *dbf, void *data, u8 id, u16 len, + u64 req_id, u32 d_id) { - struct zfcp_adapter *adapter = fsf_req->adapter; - struct zfcp_dbf *dbf = adapter->dbf; - struct zfcp_dbf_san_record *rec = &dbf->san_buf; + struct zfcp_dbf_san *rec = &dbf->san_buf; + u16 rec_len; unsigned long flags; spin_lock_irqsave(&dbf->san_lock, flags); memset(rec, 0, sizeof(*rec)); - strncpy(rec->tag, tag, ZFCP_DBF_TAG_SIZE); - rec->fsf_reqid = fsf_req->req_id; - rec->fsf_seqno = fsf_req->seq_no; - rec->u.els.d_id = d_id; - debug_event(dbf->san, level, rec, sizeof(*rec)); - zfcp_dbf_hexdump(dbf->san, rec, sizeof(*rec), level, - buffer, min(buflen, ZFCP_DBF_SAN_MAX_PAYLOAD)); + + rec->id = id; + rec->fsf_req_id = req_id; + rec->d_id = d_id; + rec_len = min(len, (u16)ZFCP_DBF_SAN_MAX_PAYLOAD); + memcpy(rec->payload, data, rec_len); + memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN); + + debug_event(dbf->san, 1, rec, sizeof(*rec)); spin_unlock_irqrestore(&dbf->san_lock, flags); } /** - * zfcp_dbf_san_els_request - trace event for issued ELS - * @fsf_req: request containing issued ELS + * zfcp_dbf_san_req - trace event for issued SAN request + * @tag: identifier for event + * @fsf_req: request containing issued CT data + * d_id: destination ID */ -void zfcp_dbf_san_els_request(struct zfcp_fsf_req *fsf_req) +void zfcp_dbf_san_req(char *tag, struct zfcp_fsf_req *fsf, u32 d_id) { - struct zfcp_fsf_ct_els *els = (struct zfcp_fsf_ct_els *)fsf_req->data; - u32 d_id = ntoh24(fsf_req->qtcb->bottom.support.d_id); + struct zfcp_dbf *dbf = fsf->adapter->dbf; + struct zfcp_fsf_ct_els *ct_els = fsf->data; + u16 length; - zfcp_dbf_san_els("oels", 2, fsf_req, d_id, - sg_virt(els->req), els->req->length); + length = (u16)(ct_els->req->length + FC_CT_HDR_LEN); + zfcp_dbf_san(tag, dbf, sg_virt(ct_els->req), ZFCP_DBF_SAN_REQ, length, + fsf->req_id, d_id); } /** - * zfcp_dbf_san_els_response - trace event for completed ELS - * @fsf_req: request containing ELS response + * zfcp_dbf_san_res - trace event for received SAN request + * @tag: identifier for event + * @fsf_req: request containing issued CT data */ -void zfcp_dbf_san_els_response(struct zfcp_fsf_req *fsf_req) +void zfcp_dbf_san_res(char *tag, struct zfcp_fsf_req *fsf) { - struct zfcp_fsf_ct_els *els = (struct zfcp_fsf_ct_els *)fsf_req->data; - u32 d_id = ntoh24(fsf_req->qtcb->bottom.support.d_id); + struct zfcp_dbf *dbf = fsf->adapter->dbf; + struct zfcp_fsf_ct_els *ct_els = fsf->data; + u16 
length; - zfcp_dbf_san_els("rels", 2, fsf_req, d_id, - sg_virt(els->resp), els->resp->length); + length = (u16)(ct_els->resp->length + FC_CT_HDR_LEN); + zfcp_dbf_san(tag, dbf, sg_virt(ct_els->resp), ZFCP_DBF_SAN_RES, length, + fsf->req_id, 0); } /** - * zfcp_dbf_san_incoming_els - trace event for incomig ELS - * @fsf_req: request containing unsolicited status buffer with incoming ELS + * zfcp_dbf_san_in_els - trace event for incoming ELS + * @tag: identifier for event + * @fsf_req: request containing issued CT data */ -void zfcp_dbf_san_incoming_els(struct zfcp_fsf_req *fsf_req) +void zfcp_dbf_san_in_els(char *tag, struct zfcp_fsf_req *fsf) { - struct fsf_status_read_buffer *buf = - (struct fsf_status_read_buffer *)fsf_req->data; - int length = (int)buf->length - - (int)((void *)&buf->payload - (void *)buf); + struct zfcp_dbf *dbf = fsf->adapter->dbf; + struct fsf_status_read_buffer *srb = + (struct fsf_status_read_buffer *) fsf->data; + u16 length; - zfcp_dbf_san_els("iels", 1, fsf_req, ntoh24(buf->d_id), - (void *)buf->payload.data, length); -} - -static int zfcp_dbf_san_view_format(debug_info_t *id, struct debug_view *view, - char *out_buf, const char *in_buf) -{ - struct zfcp_dbf_san_record *r = (struct zfcp_dbf_san_record *)in_buf; - char *p = out_buf; - - if (strncmp(r->tag, "dump", ZFCP_DBF_TAG_SIZE) == 0) - return 0; - - zfcp_dbf_tag(&p, "tag", r->tag); - zfcp_dbf_out(&p, "fsf_reqid", "0x%0Lx", r->fsf_reqid); - zfcp_dbf_out(&p, "fsf_seqno", "0x%08x", r->fsf_seqno); - - if (strncmp(r->tag, "octc", ZFCP_DBF_TAG_SIZE) == 0) { - struct zfcp_dbf_san_record_ct_request *ct = &r->u.ct_req; - zfcp_dbf_out(&p, "d_id", "0x%06x", ct->d_id); - zfcp_dbf_out(&p, "cmd_req_code", "0x%04x", ct->cmd_req_code); - zfcp_dbf_out(&p, "revision", "0x%02x", ct->revision); - zfcp_dbf_out(&p, "gs_type", "0x%02x", ct->gs_type); - zfcp_dbf_out(&p, "gs_subtype", "0x%02x", ct->gs_subtype); - zfcp_dbf_out(&p, "options", "0x%02x", ct->options); - zfcp_dbf_out(&p, "max_res_size", "0x%04x", ct->max_res_size); - } else if (strncmp(r->tag, "rctc", ZFCP_DBF_TAG_SIZE) == 0) { - struct zfcp_dbf_san_record_ct_response *ct = &r->u.ct_resp; - zfcp_dbf_out(&p, "cmd_rsp_code", "0x%04x", ct->cmd_rsp_code); - zfcp_dbf_out(&p, "revision", "0x%02x", ct->revision); - zfcp_dbf_out(&p, "reason_code", "0x%02x", ct->reason_code); - zfcp_dbf_out(&p, "reason_code_expl", "0x%02x", ct->expl); - zfcp_dbf_out(&p, "vendor_unique", "0x%02x", ct->vendor_unique); - zfcp_dbf_out(&p, "max_res_size", "0x%04x", ct->max_res_size); - } else if (strncmp(r->tag, "oels", ZFCP_DBF_TAG_SIZE) == 0 || - strncmp(r->tag, "rels", ZFCP_DBF_TAG_SIZE) == 0 || - strncmp(r->tag, "iels", ZFCP_DBF_TAG_SIZE) == 0) { - struct zfcp_dbf_san_record_els *els = &r->u.els; - zfcp_dbf_out(&p, "d_id", "0x%06x", els->d_id); - } - return p - out_buf; + length = (u16)(srb->length - + offsetof(struct fsf_status_read_buffer, payload)); + zfcp_dbf_san(tag, dbf, srb->payload.data, ZFCP_DBF_SAN_ELS, length, + fsf->req_id, ntoh24(srb->d_id)); } -static struct debug_view zfcp_dbf_san_view = { - .name = "structured", - .header_proc = zfcp_dbf_view_header, - .format_proc = zfcp_dbf_san_view_format, -}; - -void _zfcp_dbf_scsi(const char *tag, const char *tag2, int level, - struct zfcp_dbf *dbf, struct scsi_cmnd *scsi_cmnd, - struct zfcp_fsf_req *fsf_req, unsigned long old_req_id) +/** + * zfcp_dbf_scsi - trace event for scsi commands + * @tag: identifier for event + * @sc: pointer to struct scsi_cmnd + * @fsf: pointer to struct zfcp_fsf_req + */ +void zfcp_dbf_scsi(char *tag, struct 
scsi_cmnd *sc, struct zfcp_fsf_req *fsf) { - struct zfcp_dbf_scsi_record *rec = &dbf->scsi_buf; - struct zfcp_dbf_dump *dump = (struct zfcp_dbf_dump *)rec; - unsigned long flags; + struct zfcp_adapter *adapter = + (struct zfcp_adapter *) sc->device->host->hostdata[0]; + struct zfcp_dbf *dbf = adapter->dbf; + struct zfcp_dbf_scsi *rec = &dbf->scsi_buf; struct fcp_resp_with_ext *fcp_rsp; - struct fcp_resp_rsp_info *fcp_rsp_info = NULL; - char *fcp_sns_info = NULL; - int offset = 0, buflen = 0; + struct fcp_resp_rsp_info *fcp_rsp_info; + unsigned long flags; spin_lock_irqsave(&dbf->scsi_lock, flags); - do { - memset(rec, 0, sizeof(*rec)); - if (offset == 0) { - strncpy(rec->tag, tag, ZFCP_DBF_TAG_SIZE); - strncpy(rec->tag2, tag2, ZFCP_DBF_TAG_SIZE); - if (scsi_cmnd != NULL) { - if (scsi_cmnd->device) { - rec->scsi_id = scsi_cmnd->device->id; - rec->scsi_lun = scsi_cmnd->device->lun; - } - rec->scsi_result = scsi_cmnd->result; - rec->scsi_cmnd = (unsigned long)scsi_cmnd; - rec->scsi_serial = scsi_cmnd->serial_number; - memcpy(rec->scsi_opcode, scsi_cmnd->cmnd, - min((int)scsi_cmnd->cmd_len, - ZFCP_DBF_SCSI_OPCODE)); - rec->scsi_retries = scsi_cmnd->retries; - rec->scsi_allowed = scsi_cmnd->allowed; - } - if (fsf_req != NULL) { - fcp_rsp = (struct fcp_resp_with_ext *) - &(fsf_req->qtcb->bottom.io.fcp_rsp); - fcp_rsp_info = (struct fcp_resp_rsp_info *) - &fcp_rsp[1]; - fcp_sns_info = (char *) &fcp_rsp[1]; - if (fcp_rsp->resp.fr_flags & FCP_RSP_LEN_VAL) - fcp_sns_info += fcp_rsp->ext.fr_sns_len; - - rec->rsp_validity = fcp_rsp->resp.fr_flags; - rec->rsp_scsi_status = fcp_rsp->resp.fr_status; - rec->rsp_resid = fcp_rsp->ext.fr_resid; - if (fcp_rsp->resp.fr_flags & FCP_RSP_LEN_VAL) - rec->rsp_code = fcp_rsp_info->rsp_code; - if (fcp_rsp->resp.fr_flags & FCP_SNS_LEN_VAL) { - buflen = min(fcp_rsp->ext.fr_sns_len, - (u32)ZFCP_DBF_SCSI_MAX_FCP_SNS_INFO); - rec->sns_info_len = buflen; - memcpy(rec->sns_info, fcp_sns_info, - min(buflen, - ZFCP_DBF_SCSI_FCP_SNS_INFO)); - offset += min(buflen, - ZFCP_DBF_SCSI_FCP_SNS_INFO); - } - - rec->fsf_reqid = fsf_req->req_id; - rec->fsf_seqno = fsf_req->seq_no; - rec->fsf_issued = fsf_req->issued; - } - rec->old_fsf_reqid = old_req_id; - } else { - strncpy(dump->tag, "dump", ZFCP_DBF_TAG_SIZE); - dump->total_size = buflen; - dump->offset = offset; - dump->size = min(buflen - offset, - (int)sizeof(struct - zfcp_dbf_scsi_record) - - (int)sizeof(struct zfcp_dbf_dump)); - memcpy(dump->data, fcp_sns_info + offset, dump->size); - offset += dump->size; - } - debug_event(dbf->scsi, level, rec, sizeof(*rec)); - } while (offset < buflen); - spin_unlock_irqrestore(&dbf->scsi_lock, flags); -} + memset(rec, 0, sizeof(*rec)); -static int zfcp_dbf_scsi_view_format(debug_info_t *id, struct debug_view *view, - char *out_buf, const char *in_buf) -{ - struct zfcp_dbf_scsi_record *r = (struct zfcp_dbf_scsi_record *)in_buf; - struct timespec t; - char *p = out_buf; - - if (strncmp(r->tag, "dump", ZFCP_DBF_TAG_SIZE) == 0) - return 0; - - zfcp_dbf_tag(&p, "tag", r->tag); - zfcp_dbf_tag(&p, "tag2", r->tag2); - zfcp_dbf_out(&p, "scsi_id", "0x%08x", r->scsi_id); - zfcp_dbf_out(&p, "scsi_lun", "0x%08x", r->scsi_lun); - zfcp_dbf_out(&p, "scsi_result", "0x%08x", r->scsi_result); - zfcp_dbf_out(&p, "scsi_cmnd", "0x%0Lx", r->scsi_cmnd); - zfcp_dbf_out(&p, "scsi_serial", "0x%016Lx", r->scsi_serial); - zfcp_dbf_outd(&p, "scsi_opcode", r->scsi_opcode, ZFCP_DBF_SCSI_OPCODE, - 0, ZFCP_DBF_SCSI_OPCODE); - zfcp_dbf_out(&p, "scsi_retries", "0x%02x", r->scsi_retries); - zfcp_dbf_out(&p, "scsi_allowed", 
"0x%02x", r->scsi_allowed); - if (strncmp(r->tag, "abrt", ZFCP_DBF_TAG_SIZE) == 0) - zfcp_dbf_out(&p, "old_fsf_reqid", "0x%0Lx", r->old_fsf_reqid); - zfcp_dbf_out(&p, "fsf_reqid", "0x%0Lx", r->fsf_reqid); - zfcp_dbf_out(&p, "fsf_seqno", "0x%08x", r->fsf_seqno); - stck_to_timespec(r->fsf_issued, &t); - zfcp_dbf_out(&p, "fsf_issued", "%011lu:%06lu", t.tv_sec, t.tv_nsec); - - if (strncmp(r->tag, "rslt", ZFCP_DBF_TAG_SIZE) == 0) { - zfcp_dbf_out(&p, "fcp_rsp_validity", "0x%02x", r->rsp_validity); - zfcp_dbf_out(&p, "fcp_rsp_scsi_status", "0x%02x", - r->rsp_scsi_status); - zfcp_dbf_out(&p, "fcp_rsp_resid", "0x%08x", r->rsp_resid); - zfcp_dbf_out(&p, "fcp_rsp_code", "0x%08x", r->rsp_code); - zfcp_dbf_out(&p, "fcp_sns_info_len", "0x%08x", r->sns_info_len); - zfcp_dbf_outd(&p, "fcp_sns_info", r->sns_info, - min((int)r->sns_info_len, - ZFCP_DBF_SCSI_FCP_SNS_INFO), 0, - r->sns_info_len); + memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN); + rec->id = ZFCP_DBF_SCSI_CMND; + rec->scsi_result = sc->result; + rec->scsi_retries = sc->retries; + rec->scsi_allowed = sc->allowed; + rec->scsi_id = sc->device->id; + rec->scsi_lun = sc->device->lun; + rec->host_scribble = (unsigned long)sc->host_scribble; + + memcpy(rec->scsi_opcode, sc->cmnd, + min((int)sc->cmd_len, ZFCP_DBF_SCSI_OPCODE)); + + if (fsf) { + rec->fsf_req_id = fsf->req_id; + fcp_rsp = (struct fcp_resp_with_ext *) + &(fsf->qtcb->bottom.io.fcp_rsp); + memcpy(&rec->fcp_rsp, fcp_rsp, FCP_RESP_WITH_EXT); + if (fcp_rsp->resp.fr_flags & FCP_RSP_LEN_VAL) { + fcp_rsp_info = (struct fcp_resp_rsp_info *) &fcp_rsp[1]; + rec->fcp_rsp_info = fcp_rsp_info->rsp_code; + } + if (fcp_rsp->resp.fr_flags & FCP_SNS_LEN_VAL) { + rec->pl_len = min((u16)SCSI_SENSE_BUFFERSIZE, + (u16)ZFCP_DBF_PAY_MAX_REC); + zfcp_dbf_pl_write(dbf, sc->sense_buffer, rec->pl_len, + "fcp_sns", fsf->req_id); + } } - p += sprintf(p, "\n"); - return p - out_buf; -} -static struct debug_view zfcp_dbf_scsi_view = { - .name = "structured", - .header_proc = zfcp_dbf_view_header, - .format_proc = zfcp_dbf_scsi_view_format, -}; + debug_event(dbf->scsi, 1, rec, sizeof(*rec)); + spin_unlock_irqrestore(&dbf->scsi_lock, flags); +} -static debug_info_t *zfcp_dbf_reg(const char *name, int level, - struct debug_view *view, int size) +static debug_info_t *zfcp_dbf_reg(const char *name, int size, int rec_size) { struct debug_info *d; - d = debug_register(name, dbfsize, level, size); + d = debug_register(name, size, 1, rec_size); if (!d) return NULL; debug_register_view(d, &debug_hex_ascii_view); - debug_register_view(d, view); - debug_set_level(d, level); + debug_set_level(d, dbflevel); return d; } +static void zfcp_dbf_unregister(struct zfcp_dbf *dbf) +{ + if (!dbf) + return; + + debug_unregister(dbf->scsi); + debug_unregister(dbf->san); + debug_unregister(dbf->hba); + debug_unregister(dbf->pay); + debug_unregister(dbf->rec); + kfree(dbf); +} + /** * zfcp_adapter_debug_register - registers debug feature for an adapter * @adapter: pointer to adapter for which debug features should be registered @@ -1001,69 +479,66 @@ static debug_info_t *zfcp_dbf_reg(const char *name, int level, */ int zfcp_dbf_adapter_register(struct zfcp_adapter *adapter) { - char dbf_name[DEBUG_MAX_NAME_LEN]; + char name[DEBUG_MAX_NAME_LEN]; struct zfcp_dbf *dbf; - dbf = kmalloc(sizeof(struct zfcp_dbf), GFP_KERNEL); + dbf = kzalloc(sizeof(struct zfcp_dbf), GFP_KERNEL); if (!dbf) return -ENOMEM; - dbf->adapter = adapter; - + spin_lock_init(&dbf->pay_lock); spin_lock_init(&dbf->hba_lock); spin_lock_init(&dbf->san_lock); 
spin_lock_init(&dbf->scsi_lock); spin_lock_init(&dbf->rec_lock); /* debug feature area which records recovery activity */ - sprintf(dbf_name, "zfcp_%s_rec", dev_name(&adapter->ccw_device->dev)); - dbf->rec = zfcp_dbf_reg(dbf_name, 3, &zfcp_dbf_rec_view, - sizeof(struct zfcp_dbf_rec_record)); + sprintf(name, "zfcp_%s_rec", dev_name(&adapter->ccw_device->dev)); + dbf->rec = zfcp_dbf_reg(name, dbfsize, sizeof(struct zfcp_dbf_rec)); if (!dbf->rec) goto err_out; /* debug feature area which records HBA (FSF and QDIO) conditions */ - sprintf(dbf_name, "zfcp_%s_hba", dev_name(&adapter->ccw_device->dev)); - dbf->hba = zfcp_dbf_reg(dbf_name, 3, &zfcp_dbf_hba_view, - sizeof(struct zfcp_dbf_hba_record)); + sprintf(name, "zfcp_%s_hba", dev_name(&adapter->ccw_device->dev)); + dbf->hba = zfcp_dbf_reg(name, dbfsize, sizeof(struct zfcp_dbf_hba)); if (!dbf->hba) goto err_out; + /* debug feature area which records payload info */ + sprintf(name, "zfcp_%s_pay", dev_name(&adapter->ccw_device->dev)); + dbf->pay = zfcp_dbf_reg(name, dbfsize * 2, sizeof(struct zfcp_dbf_pay)); + if (!dbf->pay) + goto err_out; + /* debug feature area which records SAN command failures and recovery */ - sprintf(dbf_name, "zfcp_%s_san", dev_name(&adapter->ccw_device->dev)); - dbf->san = zfcp_dbf_reg(dbf_name, 6, &zfcp_dbf_san_view, - sizeof(struct zfcp_dbf_san_record)); + sprintf(name, "zfcp_%s_san", dev_name(&adapter->ccw_device->dev)); + dbf->san = zfcp_dbf_reg(name, dbfsize, sizeof(struct zfcp_dbf_san)); if (!dbf->san) goto err_out; /* debug feature area which records SCSI command failures and recovery */ - sprintf(dbf_name, "zfcp_%s_scsi", dev_name(&adapter->ccw_device->dev)); - dbf->scsi = zfcp_dbf_reg(dbf_name, 3, &zfcp_dbf_scsi_view, - sizeof(struct zfcp_dbf_scsi_record)); + sprintf(name, "zfcp_%s_scsi", dev_name(&adapter->ccw_device->dev)); + dbf->scsi = zfcp_dbf_reg(name, dbfsize, sizeof(struct zfcp_dbf_scsi)); if (!dbf->scsi) goto err_out; adapter->dbf = dbf; - return 0; + return 0; err_out: - zfcp_dbf_adapter_unregister(dbf); + zfcp_dbf_unregister(dbf); return -ENOMEM; } /** * zfcp_adapter_debug_unregister - unregisters debug feature for an adapter - * @dbf: pointer to dbf for which debug features should be unregistered + * @adapter: pointer to adapter for which debug features should be unregistered */ -void zfcp_dbf_adapter_unregister(struct zfcp_dbf *dbf) +void zfcp_dbf_adapter_unregister(struct zfcp_adapter *adapter) { - if (!dbf) - return; - debug_unregister(dbf->scsi); - debug_unregister(dbf->san); - debug_unregister(dbf->hba); - debug_unregister(dbf->rec); - dbf->adapter->dbf = NULL; - kfree(dbf); + struct zfcp_dbf *dbf = adapter->dbf; + + adapter->dbf = NULL; + zfcp_dbf_unregister(dbf); } diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h index 457e046f2d2..0be3d48681a 100644 --- a/drivers/s390/scsi/zfcp_dbf.h +++ b/drivers/s390/scsi/zfcp_dbf.h @@ -1,22 +1,8 @@ /* - * This file is part of the zfcp device driver for - * FCP adapters for IBM System z9 and zSeries. + * zfcp device driver + * debug feature declarations * - * Copyright IBM Corp. 2008, 2009 - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * Copyright IBM Corp. 2008, 2010 */ #ifndef ZFCP_DBF_H @@ -27,338 +13,371 @@ #include "zfcp_fsf.h" #include "zfcp_def.h" -#define ZFCP_DBF_TAG_SIZE 4 -#define ZFCP_DBF_ID_SIZE 7 +#define ZFCP_DBF_TAG_LEN 7 #define ZFCP_DBF_INVALID_LUN 0xFFFFFFFFFFFFFFFFull -struct zfcp_dbf_dump { - u8 tag[ZFCP_DBF_TAG_SIZE]; - u32 total_size; /* size of total dump data */ - u32 offset; /* how much data has being already dumped */ - u32 size; /* how much data comes with this record */ - u8 data[]; /* dump data */ -} __attribute__ ((packed)); - -struct zfcp_dbf_rec_record_thread { - u32 total; +/** + * struct zfcp_dbf_rec_trigger - trace record for triggered recovery action + * @ready: number of ready recovery actions + * @running: number of running recovery actions + * @want: wanted recovery action + * @need: needed recovery action + */ +struct zfcp_dbf_rec_trigger { u32 ready; u32 running; -}; - -struct zfcp_dbf_rec_record_target { - u64 ref; - u32 status; - u32 d_id; - u64 wwpn; - u64 fcp_lun; - u32 erp_count; -}; - -struct zfcp_dbf_rec_record_trigger { u8 want; u8 need; - u32 as; - u32 ps; - u32 us; - u64 ref; - u64 action; - u64 wwpn; - u64 fcp_lun; -}; +} __packed; -struct zfcp_dbf_rec_record_action { - u32 status; - u32 step; - u64 action; - u64 fsf_req; +/** + * struct zfcp_dbf_rec_running - trace record for running recovery + * @fsf_req_id: request id for fsf requests + * @rec_status: status of the fsf request + * @rec_step: current step of the recovery action + * @rec_action: recovery action + * @rec_count: recovery counter + */ +struct zfcp_dbf_rec_running { + u64 fsf_req_id; + u32 rec_status; + u16 rec_step; + u8 rec_action; + u8 rec_count; +} __packed; + +/** + * enum zfcp_dbf_rec_id - recovery trace record id + * @ZFCP_DBF_REC_TRIG: triggered recovery identifier + * @ZFCP_DBF_REC_RUN: running recovery identifier + */ +enum zfcp_dbf_rec_id { + ZFCP_DBF_REC_TRIG = 1, + ZFCP_DBF_REC_RUN = 2, }; -struct zfcp_dbf_rec_record { +/** + * struct zfcp_dbf_rec - trace record for error recovery actions + * @id: unique number of recovery record type + * @tag: identifier string specifying the location of initiation + * @lun: logical unit number + * @wwpn: worldwide port name + * @d_id: destination ID + * @adapter_status: current status of the adapter + * @port_status: current status of the port + * @lun_status: current status of the LUN + * @u.trig: structure zfcp_dbf_rec_trigger + * @u.run: structure zfcp_dbf_rec_running + */ +struct zfcp_dbf_rec { u8 id; - char id2[7]; + char tag[ZFCP_DBF_TAG_LEN]; + u64 lun; + u64 wwpn; + u32 d_id; + u32 adapter_status; + u32 port_status; + u32 lun_status; union { - struct zfcp_dbf_rec_record_action action; - struct zfcp_dbf_rec_record_thread thread; - struct zfcp_dbf_rec_record_target target; - struct zfcp_dbf_rec_record_trigger trigger; + struct zfcp_dbf_rec_trigger trig; + struct zfcp_dbf_rec_running run; } u; -}; +} __packed; -enum { - ZFCP_REC_DBF_ID_ACTION, - ZFCP_REC_DBF_ID_THREAD, - ZFCP_REC_DBF_ID_TARGET, - ZFCP_REC_DBF_ID_TRIGGER, +/** + * enum zfcp_dbf_san_id - SAN trace record identifier + * @ZFCP_DBF_SAN_REQ: request trace record id + * @ZFCP_DBF_SAN_RES: response trace record id + * @ZFCP_DBF_SAN_ELS: extended link service record id + */ +enum zfcp_dbf_san_id { + ZFCP_DBF_SAN_REQ = 1, + ZFCP_DBF_SAN_RES = 2, + ZFCP_DBF_SAN_ELS = 3, };
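A minimal stand-alone sketch (not part of the patch) of the bounded-copy idiom the new SAN trace uses: zfcp_dbf_san() above truncates the payload to ZFCP_DBF_SAN_MAX_PAYLOAD in a fixed-size record instead of chaining "dump" records as the removed code did. The struct mirrors struct zfcp_dbf_san declared below; the EX_-prefixed names and the open-coded min logic are illustrative stand-ins for the kernel macros.

#include <stdint.h>
#include <string.h>

#define EX_DBF_TAG_LEN 7
#define EX_FC_CT_HDR_LEN 16                        /* size of a CT IU header */
#define EX_SAN_MAX_PAYLOAD (EX_FC_CT_HDR_LEN + 32)

struct dbf_san_example {                           /* mirrors struct zfcp_dbf_san */
	uint8_t id;
	char tag[EX_DBF_TAG_LEN];
	uint64_t fsf_req_id;
	uint32_t d_id;
	char payload[EX_SAN_MAX_PAYLOAD];
};

/* Fill one trace record; the payload is truncated, never overrun. */
static void fill_san_record(struct dbf_san_example *rec, const char *tag,
			    uint8_t id, uint64_t req_id, uint32_t d_id,
			    const void *data, uint16_t len)
{
	uint16_t rec_len = len < EX_SAN_MAX_PAYLOAD ? len : EX_SAN_MAX_PAYLOAD;

	memset(rec, 0, sizeof(*rec));
	rec->id = id;
	rec->fsf_req_id = req_id;
	rec->d_id = d_id;
	memcpy(rec->payload, data, rec_len);
	memcpy(rec->tag, tag, EX_DBF_TAG_LEN);     /* tags are fixed 7-char strings */
}

Because every record has the same size, a single hex_ascii debug view can render the whole trace area; oversized payloads go to the separate "pay" area keyed by fsf_req_id.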
-struct zfcp_dbf_hba_record_response { - u32 fsf_command; - u64 fsf_reqid; - u32 fsf_seqno; - u64 fsf_issued; - u32 fsf_prot_status; +/** + * struct zfcp_dbf_san - trace record for SAN requests and responses + * @id: unique number of SAN record type + * @tag: identifier string specifying the location of initiation + * @fsf_req_id: request id for fsf requests + * @payload: unformatted information related to request/response + * @d_id: destination ID + */ +struct zfcp_dbf_san { + u8 id; + char tag[ZFCP_DBF_TAG_LEN]; + u64 fsf_req_id; + u32 d_id; +#define ZFCP_DBF_SAN_MAX_PAYLOAD (FC_CT_HDR_LEN + 32) + char payload[ZFCP_DBF_SAN_MAX_PAYLOAD]; +} __packed; + +/** + * struct zfcp_dbf_hba_res - trace record for hba responses + * @req_issued: timestamp when request was issued + * @prot_status: protocol status + * @prot_status_qual: protocol status qualifier + * @fsf_status: fsf status + * @fsf_status_qual: fsf status qualifier + */ +struct zfcp_dbf_hba_res { + u64 req_issued; + u32 prot_status; + u8 prot_status_qual[FSF_PROT_STATUS_QUAL_SIZE]; u32 fsf_status; - u8 fsf_prot_status_qual[FSF_PROT_STATUS_QUAL_SIZE]; - u8 fsf_status_qual[FSF_STATUS_QUALIFIER_SIZE]; - u32 fsf_req_status; - u8 sbal_first; - u8 sbal_last; - u8 sbal_response; - u8 pool; - u64 erp_action; - union { - struct { - u64 cmnd; - u64 serial; - } fcp; - struct { - u64 wwpn; - u32 d_id; - u32 port_handle; - } port; - struct { - u64 wwpn; - u64 fcp_lun; - u32 port_handle; - u32 lun_handle; - } unit; - struct { - u32 d_id; - } els; - } u; -} __attribute__ ((packed)); + u8 fsf_status_qual[FSF_STATUS_QUALIFIER_SIZE]; +} __packed; -struct zfcp_dbf_hba_record_status { - u8 failed; +/** + * struct zfcp_dbf_hba_uss - trace record for unsolicited status + * @status_type: type of unsolicited status + * @status_subtype: subtype of unsolicited status + * @d_id: destination ID + * @lun: logical unit number + * @queue_designator: queue designator + */ +struct zfcp_dbf_hba_uss { u32 status_type; u32 status_subtype; - struct fsf_queue_designator - queue_designator; - u32 payload_size; -#define ZFCP_DBF_UNSOL_PAYLOAD 80 -#define ZFCP_DBF_UNSOL_PAYLOAD_SENSE_DATA_AVAIL 32 -#define ZFCP_DBF_UNSOL_PAYLOAD_BIT_ERROR_THRESHOLD 56 -#define ZFCP_DBF_UNSOL_PAYLOAD_FEATURE_UPDATE_ALERT 2 * sizeof(u32) - u8 payload[ZFCP_DBF_UNSOL_PAYLOAD]; -} __attribute__ ((packed)); - -struct zfcp_dbf_hba_record_qdio { - u32 qdio_error; - u8 sbal_index; - u8 sbal_count; -} __attribute__ ((packed)); - -struct zfcp_dbf_hba_record { - u8 tag[ZFCP_DBF_TAG_SIZE]; - u8 tag2[ZFCP_DBF_TAG_SIZE]; - union { - struct zfcp_dbf_hba_record_response response; - struct zfcp_dbf_hba_record_status status; - struct zfcp_dbf_hba_record_qdio qdio; - struct fsf_bit_error_payload berr; - } u; -} __attribute__ ((packed)); - -struct zfcp_dbf_san_record_ct_request { - u16 cmd_req_code; - u8 revision; - u8 gs_type; - u8 gs_subtype; - u8 options; - u16 max_res_size; - u32 len; - u32 d_id; -} __attribute__ ((packed)); - -struct zfcp_dbf_san_record_ct_response { - u16 cmd_rsp_code; - u8 revision; - u8 reason_code; - u8 expl; - u8 vendor_unique; - u16 max_res_size; - u32 len; -} __attribute__ ((packed)); - -struct zfcp_dbf_san_record_els { u32 d_id; -} __attribute__ ((packed)); + u64 lun; + u64 queue_designator; +} __packed; + +/** + * enum zfcp_dbf_hba_id - HBA trace record identifier + * @ZFCP_DBF_HBA_RES: response trace record + * @ZFCP_DBF_HBA_USS: unsolicited status trace record + * @ZFCP_DBF_HBA_BIT: bit error trace record + * @ZFCP_DBF_HBA_BASIC: basic trace record + */ +enum zfcp_dbf_hba_id { + ZFCP_DBF_HBA_RES = 1, + ZFCP_DBF_HBA_USS =
2, + ZFCP_DBF_HBA_BIT = 3, + ZFCP_DBF_HBA_BASIC = 4, +}; -struct zfcp_dbf_san_record { - u8 tag[ZFCP_DBF_TAG_SIZE]; - u64 fsf_reqid; - u32 fsf_seqno; +/** + * struct zfcp_dbf_hba - common trace record for HBA records + * @id: unique number of HBA record type + * @tag: identifier string specifying the location of initiation + * @fsf_req_id: request id for fsf requests + * @fsf_req_status: status of fsf request + * @fsf_cmd: fsf command + * @fsf_seq_no: fsf sequence number + * @pl_len: length of payload stored as zfcp_dbf_pay + * @u: record type specific data + */ +struct zfcp_dbf_hba { + u8 id; + char tag[ZFCP_DBF_TAG_LEN]; + u64 fsf_req_id; + u32 fsf_req_status; + u32 fsf_cmd; + u32 fsf_seq_no; + u16 pl_len; union { - struct zfcp_dbf_san_record_ct_request ct_req; - struct zfcp_dbf_san_record_ct_response ct_resp; - struct zfcp_dbf_san_record_els els; + struct zfcp_dbf_hba_res res; + struct zfcp_dbf_hba_uss uss; + struct fsf_bit_error_payload be; } u; -} __attribute__ ((packed)); +} __packed; -#define ZFCP_DBF_SAN_MAX_PAYLOAD 1024 +/** + * enum zfcp_dbf_scsi_id - scsi trace record identifier + * @ZFCP_DBF_SCSI_CMND: scsi command trace record + */ +enum zfcp_dbf_scsi_id { + ZFCP_DBF_SCSI_CMND = 1, +}; -struct zfcp_dbf_scsi_record { - u8 tag[ZFCP_DBF_TAG_SIZE]; - u8 tag2[ZFCP_DBF_TAG_SIZE]; +/** + * struct zfcp_dbf_scsi - common trace record for SCSI records + * @id: unique number of SCSI record type + * @tag: identifier string specifying the location of initiation + * @scsi_id: scsi device id + * @scsi_lun: scsi device logical unit number + * @scsi_result: scsi result + * @scsi_retries: current retry number of scsi request + * @scsi_allowed: allowed retries + * @fcp_rsp_info: FCP response info + * @scsi_opcode: scsi opcode + * @fsf_req_id: request id of fsf request + * @host_scribble: LLD specific data attached to SCSI request + * @pl_len: length of payload stored as zfcp_dbf_pay + * @fcp_rsp: FCP response for the request + */ +struct zfcp_dbf_scsi { + u8 id; + char tag[ZFCP_DBF_TAG_LEN]; u32 scsi_id; u32 scsi_lun; u32 scsi_result; - u64 scsi_cmnd; - u64 scsi_serial; -#define ZFCP_DBF_SCSI_OPCODE 16 - u8 scsi_opcode[ZFCP_DBF_SCSI_OPCODE]; u8 scsi_retries; u8 scsi_allowed; - u64 fsf_reqid; - u32 fsf_seqno; - u64 fsf_issued; - u64 old_fsf_reqid; - u8 rsp_validity; - u8 rsp_scsi_status; - u32 rsp_resid; - u8 rsp_code; -#define ZFCP_DBF_SCSI_FCP_SNS_INFO 16 -#define ZFCP_DBF_SCSI_MAX_FCP_SNS_INFO 256 - u32 sns_info_len; - u8 sns_info[ZFCP_DBF_SCSI_FCP_SNS_INFO]; -} __attribute__ ((packed)); + u8 fcp_rsp_info; +#define ZFCP_DBF_SCSI_OPCODE 16 + u8 scsi_opcode[ZFCP_DBF_SCSI_OPCODE]; + u64 fsf_req_id; + u64 host_scribble; + u16 pl_len; + struct fcp_resp_with_ext fcp_rsp; +} __packed; + +/** + * struct zfcp_dbf_pay - trace record for unformatted payload information + * @area: trace area this record originated from + * @counter: ascending record number + * @fsf_req_id: request id of fsf request + * @data: unformatted data + */ +struct zfcp_dbf_pay { + u8 counter; + char area[ZFCP_DBF_TAG_LEN]; + u64 fsf_req_id; +#define ZFCP_DBF_PAY_MAX_REC 0x100 + char data[ZFCP_DBF_PAY_MAX_REC]; +} __packed; +/** + * struct zfcp_dbf - main dbf trace structure + * @pay: reference to payload trace area + * @rec: reference to recovery trace area + * @hba: reference to hba trace area + * @san: reference to san trace area + * @scsi: reference to scsi trace area + * @pay_lock: lock protecting payload trace buffer + * @rec_lock: lock protecting recovery trace buffer + * @hba_lock: lock protecting hba trace buffer + *
@san_lock: lock protecting san trace buffer + * @scsi_lock: lock protecting scsi trace buffer + * @pay_buf: pre-allocated buffer for payload + * @rec_buf: pre-allocated buffer for recovery + * @hba_buf: pre-allocated buffer for hba + * @san_buf: pre-allocated buffer for san + * @scsi_buf: pre-allocated buffer for scsi + */ struct zfcp_dbf { + debug_info_t *pay; debug_info_t *rec; debug_info_t *hba; debug_info_t *san; debug_info_t *scsi; + spinlock_t pay_lock; spinlock_t rec_lock; spinlock_t hba_lock; spinlock_t san_lock; spinlock_t scsi_lock; - struct zfcp_dbf_rec_record rec_buf; - struct zfcp_dbf_hba_record hba_buf; - struct zfcp_dbf_san_record san_buf; - struct zfcp_dbf_scsi_record scsi_buf; - struct zfcp_adapter *adapter; + struct zfcp_dbf_pay pay_buf; + struct zfcp_dbf_rec rec_buf; + struct zfcp_dbf_hba hba_buf; + struct zfcp_dbf_san san_buf; + struct zfcp_dbf_scsi scsi_buf; }; static inline -void zfcp_dbf_hba_fsf_resp(const char *tag2, int level, - struct zfcp_fsf_req *req, struct zfcp_dbf *dbf) +void zfcp_dbf_hba_fsf_resp(char *tag, int level, struct zfcp_fsf_req *req) { - if (level <= dbf->hba->level) - _zfcp_dbf_hba_fsf_response(tag2, level, req, dbf); + if (debug_level_enabled(req->adapter->dbf->hba, level)) + zfcp_dbf_hba_fsf_res(tag, req); } /** * zfcp_dbf_hba_fsf_response - trace event for request completion - * @fsf_req: request that has been completed + * @req: request that has been completed */ -static inline void zfcp_dbf_hba_fsf_response(struct zfcp_fsf_req *req) +static inline +void zfcp_dbf_hba_fsf_response(struct zfcp_fsf_req *req) { - struct zfcp_dbf *dbf = req->adapter->dbf; struct fsf_qtcb *qtcb = req->qtcb; if ((qtcb->prefix.prot_status != FSF_PROT_GOOD) && (qtcb->prefix.prot_status != FSF_PROT_FSF_STATUS_PRESENTED)) { - zfcp_dbf_hba_fsf_resp("perr", 1, req, dbf); + zfcp_dbf_hba_fsf_resp("fs_perr", 1, req); } else if (qtcb->header.fsf_status != FSF_GOOD) { - zfcp_dbf_hba_fsf_resp("ferr", 1, req, dbf); + zfcp_dbf_hba_fsf_resp("fs_ferr", 1, req); } else if ((req->fsf_command == FSF_QTCB_OPEN_PORT_WITH_DID) || (req->fsf_command == FSF_QTCB_OPEN_LUN)) { - zfcp_dbf_hba_fsf_resp("open", 4, req, dbf); + zfcp_dbf_hba_fsf_resp("fs_open", 4, req); } else if (qtcb->header.log_length) { - zfcp_dbf_hba_fsf_resp("qtcb", 5, req, dbf); + zfcp_dbf_hba_fsf_resp("fs_qtcb", 5, req); } else { - zfcp_dbf_hba_fsf_resp("norm", 6, req, dbf); + zfcp_dbf_hba_fsf_resp("fs_norm", 6, req); } - } - -/** - * zfcp_dbf_hba_fsf_unsol - trace event for an unsolicited status buffer - * @tag: tag indicating which kind of unsolicited status has been received - * @dbf: reference to dbf structure - * @status_buffer: buffer containing payload of unsolicited status - */ -static inline -void zfcp_dbf_hba_fsf_unsol(const char *tag, struct zfcp_dbf *dbf, - struct fsf_status_read_buffer *buf) -{ - int level = 2; - - if (level <= dbf->hba->level) - _zfcp_dbf_hba_fsf_unsol(tag, level, dbf, buf); } static inline -void zfcp_dbf_scsi(const char *tag, const char *tag2, int level, - struct zfcp_dbf *dbf, struct scsi_cmnd *scmd, - struct zfcp_fsf_req *req, unsigned long old_id) +void _zfcp_dbf_scsi(char *tag, int level, struct scsi_cmnd *scmd, + struct zfcp_fsf_req *req) { - if (level <= dbf->scsi->level) - _zfcp_dbf_scsi(tag, tag2, level, dbf, scmd, req, old_id); + struct zfcp_adapter *adapter = (struct zfcp_adapter *) + scmd->device->host->hostdata[0]; + + if (debug_level_enabled(adapter->dbf->scsi, level)) + zfcp_dbf_scsi(tag, scmd, req); } /** * zfcp_dbf_scsi_result - trace event for SCSI command completion - * 
@dbf: adapter dbf trace * @scmd: SCSI command pointer * @req: FSF request used to issue SCSI command */ static inline -void zfcp_dbf_scsi_result(struct zfcp_dbf *dbf, struct scsi_cmnd *scmd, - struct zfcp_fsf_req *req) +void zfcp_dbf_scsi_result(struct scsi_cmnd *scmd, struct zfcp_fsf_req *req) { if (scmd->result != 0) - zfcp_dbf_scsi("rslt", "erro", 3, dbf, scmd, req, 0); + _zfcp_dbf_scsi("rsl_err", 3, scmd, req); else if (scmd->retries > 0) - zfcp_dbf_scsi("rslt", "retr", 4, dbf, scmd, req, 0); + _zfcp_dbf_scsi("rsl_ret", 4, scmd, req); else - zfcp_dbf_scsi("rslt", "norm", 6, dbf, scmd, req, 0); + _zfcp_dbf_scsi("rsl_nor", 6, scmd, req); } /** * zfcp_dbf_scsi_fail_send - trace event for failure to send SCSI command - * @dbf: adapter dbf trace * @scmd: SCSI command pointer */ static inline -void zfcp_dbf_scsi_fail_send(struct zfcp_dbf *dbf, struct scsi_cmnd *scmd) +void zfcp_dbf_scsi_fail_send(struct scsi_cmnd *scmd) { - zfcp_dbf_scsi("rslt", "fail", 4, dbf, scmd, NULL, 0); + _zfcp_dbf_scsi("rsl_fai", 4, scmd, NULL); } /** * zfcp_dbf_scsi_abort - trace event for SCSI command abort * @tag: tag indicating success or failure of abort operation - * @adapter: adapter thas has been used to issue SCSI command to be aborted * @scmd: SCSI command to be aborted - * @new_req: request containing abort (might be NULL) - * @old_id: identifier of request containg SCSI command to be aborted + * @fsf_req: request containing abort (might be NULL) */ static inline -void zfcp_dbf_scsi_abort(const char *tag, struct zfcp_dbf *dbf, - struct scsi_cmnd *scmd, struct zfcp_fsf_req *new_req, - unsigned long old_id) +void zfcp_dbf_scsi_abort(char *tag, struct scsi_cmnd *scmd, + struct zfcp_fsf_req *fsf_req) { - zfcp_dbf_scsi("abrt", tag, 1, dbf, scmd, new_req, old_id); + _zfcp_dbf_scsi(tag, 1, scmd, fsf_req); } /** * zfcp_dbf_scsi_devreset - trace event for Logical Unit or Target Reset * @tag: tag indicating success or failure of reset operation + * @scmnd: SCSI command which caused this error recovery * @flag: indicates type of reset (Target Reset, Logical Unit Reset) - * @unit: unit that needs reset - * @scsi_cmnd: SCSI command which caused this error recovery */ static inline -void zfcp_dbf_scsi_devreset(const char *tag, u8 flag, struct zfcp_unit *unit, - struct scsi_cmnd *scsi_cmnd) +void zfcp_dbf_scsi_devreset(char *tag, struct scsi_cmnd *scmnd, u8 flag) { - zfcp_dbf_scsi(flag == FCP_TMF_TGT_RESET ? "trst" : "lrst", tag, 1, - unit->port->adapter->dbf, scsi_cmnd, NULL, 0); + char tmp_tag[ZFCP_DBF_TAG_LEN]; + + if (flag == FCP_TMF_TGT_RESET) + memcpy(tmp_tag, "tr_", 3); + else + memcpy(tmp_tag, "lr_", 3); + + memcpy(&tmp_tag[3], tag, 4); + _zfcp_dbf_scsi(tmp_tag, 1, scmnd, NULL); } #endif /* ZFCP_DBF_H */ diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h index 7131c7db1f0..d91173f326c 100644 --- a/drivers/s390/scsi/zfcp_def.h +++ b/drivers/s390/scsi/zfcp_def.h @@ -3,7 +3,7 @@ * * Global definitions for the zfcp device driver. * - * Copyright IBM Corporation 2002, 2010 + * Copyright IBM Corp. 
2002, 2010 */ #ifndef ZFCP_DEF_H @@ -37,6 +37,7 @@ #include <asm/ebcdic.h> #include <asm/sysinfo.h> #include "zfcp_fsf.h" +#include "zfcp_fc.h" #include "zfcp_qdio.h" struct zfcp_reqlist; @@ -44,23 +45,6 @@ struct zfcp_reqlist; /********************* SCSI SPECIFIC DEFINES *********************************/ #define ZFCP_SCSI_ER_TIMEOUT (10*HZ) -/********************* CIO/QDIO SPECIFIC DEFINES *****************************/ - -/* DMQ bug workaround: don't use last SBALE */ -#define ZFCP_MAX_SBALES_PER_SBAL (QDIO_MAX_ELEMENTS_PER_BUFFER - 1) - -/* index of last SBALE (with respect to DMQ bug workaround) */ -#define ZFCP_LAST_SBALE_PER_SBAL (ZFCP_MAX_SBALES_PER_SBAL - 1) - -/* max. number of (data buffer) SBALEs in largest SBAL chain */ -#define ZFCP_MAX_SBALES_PER_REQ \ - (FSF_MAX_SBALS_PER_REQ * ZFCP_MAX_SBALES_PER_SBAL - 2) - /* request ID + QTCB in SBALE 0 + 1 of first SBAL in chain */ - -#define ZFCP_MAX_SECTORS (ZFCP_MAX_SBALES_PER_REQ * 8) - /* max. number of (data buffer) SBALEs in largest SBAL chain - multiplied with number of sectors per 4k block */ - /********************* FSF SPECIFIC DEFINES *********************************/ /* ATTENTION: value must not be used by hardware */ @@ -88,22 +72,21 @@ struct zfcp_reqlist; #define ZFCP_STATUS_COMMON_NOESC 0x00200000 /* adapter status */ +#define ZFCP_STATUS_ADAPTER_MB_ACT 0x00000001 #define ZFCP_STATUS_ADAPTER_QDIOUP 0x00000002 +#define ZFCP_STATUS_ADAPTER_SIOSL_ISSUED 0x00000004 #define ZFCP_STATUS_ADAPTER_XCONFIG_OK 0x00000008 #define ZFCP_STATUS_ADAPTER_HOST_CON_INIT 0x00000010 +#define ZFCP_STATUS_ADAPTER_SUSPENDED 0x00000040 #define ZFCP_STATUS_ADAPTER_ERP_PENDING 0x00000100 #define ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED 0x00000200 +#define ZFCP_STATUS_ADAPTER_DATA_DIV_ENABLED 0x00000400 /* remote port status */ #define ZFCP_STATUS_PORT_PHYS_OPEN 0x00000001 #define ZFCP_STATUS_PORT_LINK_TEST 0x00000002 -/* logical unit status */ -#define ZFCP_STATUS_UNIT_SHARED 0x00000004 -#define ZFCP_STATUS_UNIT_READONLY 0x00000008 - /* FSF request status (this does not have a common part) */ -#define ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT 0x00000002 #define ZFCP_STATUS_FSFREQ_ERROR 0x00000008 #define ZFCP_STATUS_FSFREQ_CLEANUP 0x00000010 #define ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED 0x00000040 @@ -122,7 +105,7 @@ struct zfcp_adapter_mempool { mempool_t *scsi_req; mempool_t *scsi_abort; mempool_t *status_read_req; - mempool_t *status_read_data; + mempool_t *sr_data; mempool_t *gid_pn; mempool_t *qtcb_pool; }; @@ -132,7 +115,7 @@ struct zfcp_erp_action { int action; /* requested action code */ struct zfcp_adapter *adapter; /* device which should be recovered */ struct zfcp_port *port; - struct zfcp_unit *unit; + struct scsi_device *sdev; u32 status; /* recovery status */ u32 step; /* active step of this erp action */ unsigned long fsf_req_id; @@ -181,6 +164,7 @@ struct zfcp_adapter { stack abort/command completion races */ atomic_t stat_miss; /* # missing status reads*/ + unsigned int stat_read_buf_num; struct work_struct stat_work; atomic_t status; /* status of this adapter */ struct list_head erp_ready_head; /* error recovery for this @@ -203,8 +187,11 @@ struct zfcp_adapter { struct fsf_qtcb_bottom_port *stats_reset_data; unsigned long stats_reset; struct work_struct scan_work; + struct work_struct ns_up_work; struct service_level service_level; struct workqueue_struct *work_queue; + struct device_dma_parameters dma_parms; + struct zfcp_fc_events events; }; struct zfcp_port { @@ -214,6 +201,7 @@ struct zfcp_port { struct zfcp_adapter *adapter; /* adapter 
used to access port */ struct list_head unit_list; /* head of logical unit list */ rwlock_t unit_list_lock; /* unit list lock */ + atomic_t units; /* zfcp_unit count */ atomic_t status; /* status of this remote port */ u64 wwnn; /* WWNN if known */ u64 wwpn; /* WWPN */ @@ -227,23 +215,69 @@ struct zfcp_port { struct work_struct test_link_work; struct work_struct rport_work; enum { RPORT_NONE, RPORT_ADD, RPORT_DEL } rport_task; + unsigned int starget_id; }; +/** + * struct zfcp_unit - LUN configured via zfcp sysfs + * @dev: struct device for sysfs representation and reference counting + * @list: entry in LUN/unit list per zfcp_port + * @port: reference to zfcp_port where this LUN is configured + * @fcp_lun: 64 bit LUN value + * @scsi_work: for running scsi_scan_target + * + * This is the representation of a LUN that has been configured for + * use. The main data here is the 64 bit LUN value; data for + * running I/O and recovery is in struct zfcp_scsi_dev. + */ struct zfcp_unit { - struct device dev; - struct list_head list; /* list of logical units */ - struct zfcp_port *port; /* remote port of unit */ - atomic_t status; /* status of this logical unit */ - u64 fcp_lun; /* own FCP_LUN */ - u32 handle; /* handle assigned by FSF */ - struct scsi_device *device; /* scsi device struct pointer */ - struct zfcp_erp_action erp_action; /* pending error recovery */ - atomic_t erp_counter; - struct zfcp_latencies latencies; + struct device dev; + struct list_head list; + struct zfcp_port *port; + u64 fcp_lun; struct work_struct scsi_work; }; /** + * struct zfcp_scsi_dev - zfcp data per SCSI device + * @status: zfcp internal status flags + * @lun_handle: handle from "open lun" for issuing FSF requests + * @erp_action: zfcp erp data for opening and recovering this LUN + * @erp_counter: zfcp erp counter for this LUN + * @latencies: FSF channel and fabric latencies + * @port: zfcp_port where this LUN belongs to + */ +struct zfcp_scsi_dev { + atomic_t status; + u32 lun_handle; + struct zfcp_erp_action erp_action; + atomic_t erp_counter; + struct zfcp_latencies latencies; + struct zfcp_port *port; +}; + +/** + * sdev_to_zfcp - Access zfcp LUN data for SCSI device + * @sdev: scsi_device where to get the zfcp_scsi_dev pointer + */ +static inline struct zfcp_scsi_dev *sdev_to_zfcp(struct scsi_device *sdev) +{ + return scsi_transport_device_data(sdev); +} + +/** + * zfcp_scsi_dev_lun - Return SCSI device LUN as 64 bit FCP LUN + * @sdev: SCSI device where to get the LUN from + */ +static inline u64 zfcp_scsi_dev_lun(struct scsi_device *sdev) +{ + u64 fcp_lun; + + int_to_scsilun(sdev->lun, (struct scsi_lun *)&fcp_lun); + return fcp_lun; +} + +/** * struct zfcp_fsf_req - basic FSF request structure * @list: list of FSF requests * @req_id: unique request ID @@ -259,7 +293,6 @@ struct zfcp_unit { * @erp_action: reference to erp action if request issued on behalf of ERP * @pool: reference to memory pool if used for this request * @issued: time when request was sent (STCK) - * @unit: reference to unit if this request is a SCSI request * @handler: handler which should be called to process response */ struct zfcp_fsf_req { @@ -277,24 +310,13 @@ struct zfcp_fsf_req { struct zfcp_erp_action *erp_action; mempool_t *pool; unsigned long long issued; - struct zfcp_unit *unit; void (*handler)(struct zfcp_fsf_req *); }; -/* driver data */ -struct zfcp_data { - struct scsi_host_template scsi_host_template; - struct scsi_transport_template *scsi_transport_template; - struct kmem_cache *gpn_ft_cache; - struct kmem_cache
*qtcb_cache; - struct kmem_cache *sr_buffer_cache; - struct kmem_cache *gid_pn_cache; - struct kmem_cache *adisc_cache; -}; - -/********************** ZFCP SPECIFIC DEFINES ********************************/ - -#define ZFCP_SET 0x00000100 -#define ZFCP_CLEAR 0x00000200 +static inline +int zfcp_adapter_multi_buffer_active(struct zfcp_adapter *adapter) +{ + return atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_MB_ACT; +} #endif /* ZFCP_DEF_H */ diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c index 0be5e7ea282..c82fe65c412 100644 --- a/drivers/s390/scsi/zfcp_erp.c +++ b/drivers/s390/scsi/zfcp_erp.c @@ -3,7 +3,7 @@ * * Error Recovery Procedures (ERP). * - * Copyright IBM Corporation 2002, 2010 + * Copyright IBM Corp. 2002, 2010 */ #define KMSG_COMPONENT "zfcp" @@ -21,6 +21,7 @@ enum zfcp_erp_act_flags { ZFCP_STATUS_ERP_DISMISSING = 0x00100000, ZFCP_STATUS_ERP_DISMISSED = 0x00200000, ZFCP_STATUS_ERP_LOWMEM = 0x00400000, + ZFCP_STATUS_ERP_NO_REF = 0x00800000, }; enum zfcp_erp_steps { @@ -29,12 +30,12 @@ enum zfcp_erp_steps { ZFCP_ERP_STEP_PHYS_PORT_CLOSING = 0x0010, ZFCP_ERP_STEP_PORT_CLOSING = 0x0100, ZFCP_ERP_STEP_PORT_OPENING = 0x0800, - ZFCP_ERP_STEP_UNIT_CLOSING = 0x1000, - ZFCP_ERP_STEP_UNIT_OPENING = 0x2000, + ZFCP_ERP_STEP_LUN_CLOSING = 0x1000, + ZFCP_ERP_STEP_LUN_OPENING = 0x2000, }; enum zfcp_erp_act_type { - ZFCP_ERP_ACTION_REOPEN_UNIT = 1, + ZFCP_ERP_ACTION_REOPEN_LUN = 1, ZFCP_ERP_ACTION_REOPEN_PORT = 2, ZFCP_ERP_ACTION_REOPEN_PORT_FORCED = 3, ZFCP_ERP_ACTION_REOPEN_ADAPTER = 4, @@ -56,9 +57,8 @@ enum zfcp_erp_act_result { static void zfcp_erp_adapter_block(struct zfcp_adapter *adapter, int mask) { - zfcp_erp_modify_adapter_status(adapter, "erablk1", NULL, - ZFCP_STATUS_COMMON_UNBLOCKED | mask, - ZFCP_CLEAR); + zfcp_erp_clear_adapter_status(adapter, + ZFCP_STATUS_COMMON_UNBLOCKED | mask); } static int zfcp_erp_action_exists(struct zfcp_erp_action *act) @@ -76,9 +76,9 @@ static void zfcp_erp_action_ready(struct zfcp_erp_action *act) struct zfcp_adapter *adapter = act->adapter; list_move(&act->list, &act->adapter->erp_ready_head); - zfcp_dbf_rec_action("erardy1", act); + zfcp_dbf_rec_run("erardy1", act); wake_up(&adapter->erp_ready_wq); - zfcp_dbf_rec_thread("erardy2", adapter->dbf); + zfcp_dbf_rec_run("erardy2", act); } static void zfcp_erp_action_dismiss(struct zfcp_erp_action *act) @@ -88,23 +88,26 @@ static void zfcp_erp_action_dismiss(struct zfcp_erp_action *act) zfcp_erp_action_ready(act); } -static void zfcp_erp_action_dismiss_unit(struct zfcp_unit *unit) +static void zfcp_erp_action_dismiss_lun(struct scsi_device *sdev) { - if (atomic_read(&unit->status) & ZFCP_STATUS_COMMON_ERP_INUSE) - zfcp_erp_action_dismiss(&unit->erp_action); + struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); + + if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_ERP_INUSE) + zfcp_erp_action_dismiss(&zfcp_sdev->erp_action); } static void zfcp_erp_action_dismiss_port(struct zfcp_port *port) { - struct zfcp_unit *unit; + struct scsi_device *sdev; if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_INUSE) zfcp_erp_action_dismiss(&port->erp_action); else { - read_lock(&port->unit_list_lock); - list_for_each_entry(unit, &port->unit_list, list) - zfcp_erp_action_dismiss_unit(unit); - read_unlock(&port->unit_list_lock); + spin_lock(port->adapter->scsi_host->host_lock); + __shost_for_each_device(sdev, port->adapter->scsi_host) + if (sdev_to_zfcp(sdev)->port == port) + zfcp_erp_action_dismiss_lun(sdev); + spin_unlock(port->adapter->scsi_host->host_lock); } } @@ -124,15 
+127,17 @@ static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter) static int zfcp_erp_required_act(int want, struct zfcp_adapter *adapter, struct zfcp_port *port, - struct zfcp_unit *unit) + struct scsi_device *sdev) { int need = want; - int u_status, p_status, a_status; + int l_status, p_status, a_status; + struct zfcp_scsi_dev *zfcp_sdev; switch (want) { - case ZFCP_ERP_ACTION_REOPEN_UNIT: - u_status = atomic_read(&unit->status); - if (u_status & ZFCP_STATUS_COMMON_ERP_INUSE) + case ZFCP_ERP_ACTION_REOPEN_LUN: + zfcp_sdev = sdev_to_zfcp(sdev); + l_status = atomic_read(&zfcp_sdev->status); + if (l_status & ZFCP_STATUS_COMMON_ERP_INUSE) return 0; p_status = atomic_read(&port->status); if (!(p_status & ZFCP_STATUS_COMMON_RUNNING) || @@ -141,15 +146,21 @@ static int zfcp_erp_required_act(int want, struct zfcp_adapter *adapter, if (!(p_status & ZFCP_STATUS_COMMON_UNBLOCKED)) need = ZFCP_ERP_ACTION_REOPEN_PORT; /* fall through */ - case ZFCP_ERP_ACTION_REOPEN_PORT: case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: p_status = atomic_read(&port->status); + if (!(p_status & ZFCP_STATUS_COMMON_OPEN)) + need = ZFCP_ERP_ACTION_REOPEN_PORT; + /* fall through */ + case ZFCP_ERP_ACTION_REOPEN_PORT: + p_status = atomic_read(&port->status); if (p_status & ZFCP_STATUS_COMMON_ERP_INUSE) return 0; a_status = atomic_read(&adapter->status); if (!(a_status & ZFCP_STATUS_COMMON_RUNNING) || a_status & ZFCP_STATUS_COMMON_ERP_FAILED) return 0; + if (p_status & ZFCP_STATUS_COMMON_NOESC) + return need; if (!(a_status & ZFCP_STATUS_COMMON_UNBLOCKED)) need = ZFCP_ERP_ACTION_REOPEN_ADAPTER; /* fall through */ @@ -165,22 +176,29 @@ static int zfcp_erp_required_act(int want, struct zfcp_adapter *adapter, return need; } -static struct zfcp_erp_action *zfcp_erp_setup_act(int need, +static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status, struct zfcp_adapter *adapter, struct zfcp_port *port, - struct zfcp_unit *unit) + struct scsi_device *sdev) { struct zfcp_erp_action *erp_action; - u32 status = 0; + struct zfcp_scsi_dev *zfcp_sdev; switch (need) { - case ZFCP_ERP_ACTION_REOPEN_UNIT: - if (!get_device(&unit->dev)) - return NULL; - atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &unit->status); - erp_action = &unit->erp_action; - if (!(atomic_read(&unit->status) & ZFCP_STATUS_COMMON_RUNNING)) - status = ZFCP_STATUS_ERP_CLOSE_ONLY; + case ZFCP_ERP_ACTION_REOPEN_LUN: + zfcp_sdev = sdev_to_zfcp(sdev); + if (!(act_status & ZFCP_STATUS_ERP_NO_REF)) + if (scsi_device_get(sdev)) + return NULL; + atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, + &zfcp_sdev->status); + erp_action = &zfcp_sdev->erp_action; + memset(erp_action, 0, sizeof(struct zfcp_erp_action)); + erp_action->port = port; + erp_action->sdev = sdev; + if (!(atomic_read(&zfcp_sdev->status) & + ZFCP_STATUS_COMMON_RUNNING)) + act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY; break; case ZFCP_ERP_ACTION_REOPEN_PORT: @@ -190,8 +208,10 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, zfcp_erp_action_dismiss_port(port); atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &port->status); erp_action = &port->erp_action; + memset(erp_action, 0, sizeof(struct zfcp_erp_action)); + erp_action->port = port; if (!(atomic_read(&port->status) & ZFCP_STATUS_COMMON_RUNNING)) - status = ZFCP_STATUS_ERP_CLOSE_ONLY; + act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY; break; case ZFCP_ERP_ACTION_REOPEN_ADAPTER: @@ -199,66 +219,65 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, zfcp_erp_action_dismiss_adapter(adapter); 
atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &adapter->status); erp_action = &adapter->erp_action; + memset(erp_action, 0, sizeof(struct zfcp_erp_action)); if (!(atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_RUNNING)) - status = ZFCP_STATUS_ERP_CLOSE_ONLY; + act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY; break; default: return NULL; } - memset(erp_action, 0, sizeof(struct zfcp_erp_action)); erp_action->adapter = adapter; - erp_action->port = port; - erp_action->unit = unit; erp_action->action = need; - erp_action->status = status; + erp_action->status = act_status; return erp_action; } static int zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter, struct zfcp_port *port, - struct zfcp_unit *unit, char *id, void *ref) + struct scsi_device *sdev, + char *id, u32 act_status) { int retval = 1, need; - struct zfcp_erp_action *act = NULL; + struct zfcp_erp_action *act; if (!adapter->erp_thread) return -EIO; - need = zfcp_erp_required_act(want, adapter, port, unit); + need = zfcp_erp_required_act(want, adapter, port, sdev); if (!need) goto out; - atomic_set_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING, &adapter->status); - act = zfcp_erp_setup_act(need, adapter, port, unit); + act = zfcp_erp_setup_act(need, act_status, adapter, port, sdev); if (!act) goto out; + atomic_set_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING, &adapter->status); ++adapter->erp_total_count; list_add_tail(&act->list, &adapter->erp_ready_head); wake_up(&adapter->erp_ready_wq); - zfcp_dbf_rec_thread("eracte1", adapter->dbf); retval = 0; out: - zfcp_dbf_rec_trigger(id, ref, want, need, act, adapter, port, unit); + zfcp_dbf_rec_trig(id, adapter, port, sdev, want, need); return retval; } static int _zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, - int clear_mask, char *id, void *ref) + int clear_mask, char *id) { zfcp_erp_adapter_block(adapter, clear_mask); zfcp_scsi_schedule_rports_block(adapter); /* ensure propagation of failed status to new devices */ if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_FAILED) { - zfcp_erp_adapter_failed(adapter, "erareo1", NULL); + zfcp_erp_set_adapter_status(adapter, + ZFCP_STATUS_COMMON_ERP_FAILED); return -EIO; } return zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER, - adapter, NULL, NULL, id, ref); + adapter, NULL, NULL, id, 0); } /** @@ -266,10 +285,8 @@ static int _zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, * @adapter: Adapter to reopen. * @clear: Status flags to clear. * @id: Id for debug trace event. - * @ref: Reference for debug trace event. */ -void zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, int clear, - char *id, void *ref) +void zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, int clear, char *id) { unsigned long flags; @@ -278,10 +295,11 @@ void zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, int clear, write_lock_irqsave(&adapter->erp_lock, flags); if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_FAILED) - zfcp_erp_adapter_failed(adapter, "erareo1", NULL); + zfcp_erp_set_adapter_status(adapter, + ZFCP_STATUS_COMMON_ERP_FAILED); else zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER, adapter, - NULL, NULL, id, ref); + NULL, NULL, id, 0); write_unlock_irqrestore(&adapter->erp_lock, flags); } @@ -290,13 +308,12 @@ void zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, int clear, * @adapter: Adapter to shut down. * @clear: Status flags to clear. * @id: Id for debug trace event. - * @ref: Reference for debug trace event. 
*/ void zfcp_erp_adapter_shutdown(struct zfcp_adapter *adapter, int clear, - char *id, void *ref) + char *id) { int flags = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED; - zfcp_erp_adapter_reopen(adapter, clear | flags, id, ref); + zfcp_erp_adapter_reopen(adapter, clear | flags, id); } /** @@ -304,38 +321,21 @@ void zfcp_erp_adapter_shutdown(struct zfcp_adapter *adapter, int clear, * @port: Port to shut down. * @clear: Status flags to clear. * @id: Id for debug trace event. - * @ref: Reference for debug trace event. - */ -void zfcp_erp_port_shutdown(struct zfcp_port *port, int clear, char *id, - void *ref) -{ - int flags = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED; - zfcp_erp_port_reopen(port, clear | flags, id, ref); -} - -/** - * zfcp_erp_unit_shutdown - Shutdown unit - * @unit: Unit to shut down. - * @clear: Status flags to clear. - * @id: Id for debug trace event. - * @ref: Reference for debug trace event. */ -void zfcp_erp_unit_shutdown(struct zfcp_unit *unit, int clear, char *id, - void *ref) +void zfcp_erp_port_shutdown(struct zfcp_port *port, int clear, char *id) { int flags = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED; - zfcp_erp_unit_reopen(unit, clear | flags, id, ref); + zfcp_erp_port_reopen(port, clear | flags, id); } static void zfcp_erp_port_block(struct zfcp_port *port, int clear) { - zfcp_erp_modify_port_status(port, "erpblk1", NULL, - ZFCP_STATUS_COMMON_UNBLOCKED | clear, - ZFCP_CLEAR); + zfcp_erp_clear_port_status(port, + ZFCP_STATUS_COMMON_UNBLOCKED | clear); } -static void _zfcp_erp_port_forced_reopen(struct zfcp_port *port, - int clear, char *id, void *ref) +static void _zfcp_erp_port_forced_reopen(struct zfcp_port *port, int clear, + char *id) { zfcp_erp_port_block(port, clear); zfcp_scsi_schedule_rport_block(port); @@ -344,136 +344,171 @@ static void _zfcp_erp_port_forced_reopen(struct zfcp_port *port, return; zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT_FORCED, - port->adapter, port, NULL, id, ref); + port->adapter, port, NULL, id, 0); } /** * zfcp_erp_port_forced_reopen - Forced close of port and open again * @port: Port to force close and to reopen. + * @clear: Status flags to clear. * @id: Id for debug trace event. - * @ref: Reference for debug trace event. */ -void zfcp_erp_port_forced_reopen(struct zfcp_port *port, int clear, char *id, - void *ref) +void zfcp_erp_port_forced_reopen(struct zfcp_port *port, int clear, char *id) { unsigned long flags; struct zfcp_adapter *adapter = port->adapter; write_lock_irqsave(&adapter->erp_lock, flags); - _zfcp_erp_port_forced_reopen(port, clear, id, ref); + _zfcp_erp_port_forced_reopen(port, clear, id); write_unlock_irqrestore(&adapter->erp_lock, flags); } -static int _zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id, - void *ref) +static int _zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id) { zfcp_erp_port_block(port, clear); zfcp_scsi_schedule_rport_block(port); if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_FAILED) { /* ensure propagation of failed status to new devices */ - zfcp_erp_port_failed(port, "erpreo1", NULL); + zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ERP_FAILED); return -EIO; } return zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT, - port->adapter, port, NULL, id, ref); + port->adapter, port, NULL, id, 0); } /** * zfcp_erp_port_reopen - trigger remote port recovery * @port: port to recover * @clear_mask: flags in port status to be cleared + * @id: Id for debug trace event. 
* * Returns 0 if recovery has been triggered, < 0 if not. */ -int zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id, void *ref) +int zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id) { int retval; unsigned long flags; struct zfcp_adapter *adapter = port->adapter; write_lock_irqsave(&adapter->erp_lock, flags); - retval = _zfcp_erp_port_reopen(port, clear, id, ref); + retval = _zfcp_erp_port_reopen(port, clear, id); write_unlock_irqrestore(&adapter->erp_lock, flags); return retval; } -static void zfcp_erp_unit_block(struct zfcp_unit *unit, int clear_mask) +static void zfcp_erp_lun_block(struct scsi_device *sdev, int clear_mask) { - zfcp_erp_modify_unit_status(unit, "erublk1", NULL, - ZFCP_STATUS_COMMON_UNBLOCKED | clear_mask, - ZFCP_CLEAR); + zfcp_erp_clear_lun_status(sdev, + ZFCP_STATUS_COMMON_UNBLOCKED | clear_mask); } -static void _zfcp_erp_unit_reopen(struct zfcp_unit *unit, int clear, char *id, - void *ref) +static void _zfcp_erp_lun_reopen(struct scsi_device *sdev, int clear, char *id, + u32 act_status) { - struct zfcp_adapter *adapter = unit->port->adapter; + struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); + struct zfcp_adapter *adapter = zfcp_sdev->port->adapter; - zfcp_erp_unit_block(unit, clear); + zfcp_erp_lun_block(sdev, clear); - if (atomic_read(&unit->status) & ZFCP_STATUS_COMMON_ERP_FAILED) + if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_ERP_FAILED) return; - zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_UNIT, - adapter, unit->port, unit, id, ref); + zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_LUN, adapter, + zfcp_sdev->port, sdev, id, act_status); } /** - * zfcp_erp_unit_reopen - initiate reopen of a unit - * @unit: unit to be reopened - * @clear_mask: specifies flags in unit status to be cleared + * zfcp_erp_lun_reopen - initiate reopen of a LUN + * @sdev: SCSI device / LUN to be reopened + * @clear: specifies flags in LUN status to be cleared + * @id: Id for debug trace event. + * * Return: 0 on success, < 0 on error */ -void zfcp_erp_unit_reopen(struct zfcp_unit *unit, int clear, char *id, - void *ref) +void zfcp_erp_lun_reopen(struct scsi_device *sdev, int clear, char *id) { unsigned long flags; - struct zfcp_port *port = unit->port; + struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); + struct zfcp_port *port = zfcp_sdev->port; struct zfcp_adapter *adapter = port->adapter; write_lock_irqsave(&adapter->erp_lock, flags); - _zfcp_erp_unit_reopen(unit, clear, id, ref); + _zfcp_erp_lun_reopen(sdev, clear, id, 0); write_unlock_irqrestore(&adapter->erp_lock, flags); } -static int status_change_set(unsigned long mask, atomic_t *status) +/** + * zfcp_erp_lun_shutdown - Shutdown LUN + * @sdev: SCSI device / LUN to shut down. + * @clear: Status flags to clear. + * @id: Id for debug trace event. + */ +void zfcp_erp_lun_shutdown(struct scsi_device *sdev, int clear, char *id) { - return (atomic_read(status) ^ mask) & mask; + int flags = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED; + zfcp_erp_lun_reopen(sdev, clear | flags, id); } -static int status_change_clear(unsigned long mask, atomic_t *status) +/** + * zfcp_erp_lun_shutdown_wait - Shutdown LUN and wait for erp completion + * @sdev: SCSI device / LUN to shut down. + * @id: Id for debug trace event. + * + * Do not acquire a reference for the LUN when creating the ERP + * action. It is safe because this function waits for the ERP to + * complete first.
This allows shutting down the LUN even when the SCSI + device is in the state SDEV_DEL, where scsi_device_get would fail. + */ +void zfcp_erp_lun_shutdown_wait(struct scsi_device *sdev, char *id) { - return atomic_read(status) & mask; + unsigned long flags; + struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); + struct zfcp_port *port = zfcp_sdev->port; + struct zfcp_adapter *adapter = port->adapter; + int clear = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED; + + write_lock_irqsave(&adapter->erp_lock, flags); + _zfcp_erp_lun_reopen(sdev, clear, id, ZFCP_STATUS_ERP_NO_REF); + write_unlock_irqrestore(&adapter->erp_lock, flags); + + zfcp_erp_wait(adapter); +} + +static int status_change_set(unsigned long mask, atomic_t *status) +{ + return (atomic_read(status) ^ mask) & mask; } static void zfcp_erp_adapter_unblock(struct zfcp_adapter *adapter) { if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status)) - zfcp_dbf_rec_adapter("eraubl1", NULL, adapter->dbf); + zfcp_dbf_rec_run("eraubl1", &adapter->erp_action); atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status); } static void zfcp_erp_port_unblock(struct zfcp_port *port) { if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status)) - zfcp_dbf_rec_port("erpubl1", NULL, port); + zfcp_dbf_rec_run("erpubl1", &port->erp_action); atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status); } -static void zfcp_erp_unit_unblock(struct zfcp_unit *unit) +static void zfcp_erp_lun_unblock(struct scsi_device *sdev) { - if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &unit->status)) - zfcp_dbf_rec_unit("eruubl1", NULL, unit); - atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &unit->status); + struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); + + if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &zfcp_sdev->status)) + zfcp_dbf_rec_run("erlubl1", &sdev_to_zfcp(sdev)->erp_action); + atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &zfcp_sdev->status); } static void zfcp_erp_action_to_running(struct zfcp_erp_action *erp_action) { list_move(&erp_action->list, &erp_action->adapter->erp_running_head); - zfcp_dbf_rec_action("erator1", erp_action); + zfcp_dbf_rec_run("erator1", erp_action); } static void zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *act) @@ -490,11 +525,11 @@ static void zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *act) if (act->status & (ZFCP_STATUS_ERP_DISMISSED | ZFCP_STATUS_ERP_TIMEDOUT)) { req->status |= ZFCP_STATUS_FSFREQ_DISMISSED; - zfcp_dbf_rec_action("erscf_1", act); + zfcp_dbf_rec_run("erscf_1", act); req->erp_action = NULL; } if (act->status & ZFCP_STATUS_ERP_TIMEDOUT) - zfcp_dbf_rec_action("erscf_2", act); + zfcp_dbf_rec_run("erscf_2", act); if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) act->fsf_req_id = 0; } else @@ -545,41 +580,42 @@ static void zfcp_erp_strategy_memwait(struct zfcp_erp_action *erp_action) } static void _zfcp_erp_port_reopen_all(struct zfcp_adapter *adapter, - int clear, char *id, void *ref) + int clear, char *id) { struct zfcp_port *port; read_lock(&adapter->port_list_lock); list_for_each_entry(port, &adapter->port_list, list) - _zfcp_erp_port_reopen(port, clear, id, ref); + _zfcp_erp_port_reopen(port, clear, id); read_unlock(&adapter->port_list_lock); } -static void _zfcp_erp_unit_reopen_all(struct zfcp_port *port, int clear, - char *id, void *ref) +static void _zfcp_erp_lun_reopen_all(struct zfcp_port *port, int clear, + char *id) { - struct zfcp_unit *unit; + struct scsi_device *sdev; - read_lock(&port->unit_list_lock); -
list_for_each_entry(unit, &port->unit_list, list) - _zfcp_erp_unit_reopen(unit, clear, id, ref); - read_unlock(&port->unit_list_lock); + spin_lock(port->adapter->scsi_host->host_lock); + __shost_for_each_device(sdev, port->adapter->scsi_host) + if (sdev_to_zfcp(sdev)->port == port) + _zfcp_erp_lun_reopen(sdev, clear, id, 0); + spin_unlock(port->adapter->scsi_host->host_lock); } static void zfcp_erp_strategy_followup_failed(struct zfcp_erp_action *act) { switch (act->action) { case ZFCP_ERP_ACTION_REOPEN_ADAPTER: - _zfcp_erp_adapter_reopen(act->adapter, 0, "ersff_1", NULL); + _zfcp_erp_adapter_reopen(act->adapter, 0, "ersff_1"); break; case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: - _zfcp_erp_port_forced_reopen(act->port, 0, "ersff_2", NULL); + _zfcp_erp_port_forced_reopen(act->port, 0, "ersff_2"); break; case ZFCP_ERP_ACTION_REOPEN_PORT: - _zfcp_erp_port_reopen(act->port, 0, "ersff_3", NULL); + _zfcp_erp_port_reopen(act->port, 0, "ersff_3"); break; - case ZFCP_ERP_ACTION_REOPEN_UNIT: - _zfcp_erp_unit_reopen(act->unit, 0, "ersff_4", NULL); + case ZFCP_ERP_ACTION_REOPEN_LUN: + _zfcp_erp_lun_reopen(act->sdev, 0, "ersff_4", 0); break; } } @@ -588,13 +624,13 @@ static void zfcp_erp_strategy_followup_success(struct zfcp_erp_action *act) { switch (act->action) { case ZFCP_ERP_ACTION_REOPEN_ADAPTER: - _zfcp_erp_port_reopen_all(act->adapter, 0, "ersfs_1", NULL); + _zfcp_erp_port_reopen_all(act->adapter, 0, "ersfs_1"); break; case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: - _zfcp_erp_port_reopen(act->port, 0, "ersfs_2", NULL); + _zfcp_erp_port_reopen(act->port, 0, "ersfs_2"); break; case ZFCP_ERP_ACTION_REOPEN_PORT: - _zfcp_erp_unit_reopen_all(act->port, 0, "ersfs_3", NULL); + _zfcp_erp_lun_reopen_all(act->port, 0, "ersfs_3"); break; } } @@ -613,17 +649,6 @@ static void zfcp_erp_wakeup(struct zfcp_adapter *adapter) read_unlock_irqrestore(&adapter->erp_lock, flags); } -static int zfcp_erp_adapter_strategy_open_qdio(struct zfcp_erp_action *act) -{ - struct zfcp_qdio *qdio = act->adapter->qdio; - - if (zfcp_qdio_open(qdio)) - return ZFCP_ERP_FAILED; - init_waitqueue_head(&qdio->req_q_wq); - atomic_set_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &act->adapter->status); - return ZFCP_ERP_SUCCEEDED; -} - static void zfcp_erp_enqueue_ptp_port(struct zfcp_adapter *adapter) { struct zfcp_port *port; @@ -631,7 +656,7 @@ static void zfcp_erp_enqueue_ptp_port(struct zfcp_adapter *adapter) adapter->peer_d_id); if (IS_ERR(port)) /* error or port already attached */ return; - _zfcp_erp_port_reopen(port, 0, "ereptp1", NULL); + _zfcp_erp_port_reopen(port, 0, "ereptp1"); } static int zfcp_erp_adapter_strat_fsf_xconf(struct zfcp_erp_action *erp_action) @@ -654,10 +679,8 @@ static int zfcp_erp_adapter_strat_fsf_xconf(struct zfcp_erp_action *erp_action) return ZFCP_ERP_FAILED; } - zfcp_dbf_rec_thread_lock("erasfx1", adapter->dbf); wait_event(adapter->erp_ready_wq, !list_empty(&adapter->erp_ready_head)); - zfcp_dbf_rec_thread_lock("erasfx2", adapter->dbf); if (erp_action->status & ZFCP_STATUS_ERP_TIMEDOUT) break; @@ -696,10 +719,10 @@ static int zfcp_erp_adapter_strategy_open_fsf_xport(struct zfcp_erp_action *act) if (ret) return ZFCP_ERP_FAILED; - zfcp_dbf_rec_thread_lock("erasox1", adapter->dbf); + zfcp_dbf_rec_run("erasox1", act); wait_event(adapter->erp_ready_wq, !list_empty(&adapter->erp_ready_head)); - zfcp_dbf_rec_thread_lock("erasox2", adapter->dbf); + zfcp_dbf_rec_run("erasox2", act); if (act->status & ZFCP_STATUS_ERP_TIMEDOUT) return ZFCP_ERP_FAILED; @@ -714,7 +737,15 @@ static int zfcp_erp_adapter_strategy_open_fsf(struct 
zfcp_erp_action *act) if (zfcp_erp_adapter_strategy_open_fsf_xport(act) == ZFCP_ERP_FAILED) return ZFCP_ERP_FAILED; - atomic_set(&act->adapter->stat_miss, 16); + if (mempool_resize(act->adapter->pool.sr_data, + act->adapter->stat_read_buf_num, GFP_KERNEL)) + return ZFCP_ERP_FAILED; + + if (mempool_resize(act->adapter->pool.status_read_req, + act->adapter->stat_read_buf_num, GFP_KERNEL)) + return ZFCP_ERP_FAILED; + + atomic_set(&act->adapter->stat_miss, act->adapter->stat_read_buf_num); if (zfcp_status_read_refill(act->adapter)) return ZFCP_ERP_FAILED; @@ -730,9 +761,8 @@ static void zfcp_erp_adapter_strategy_close(struct zfcp_erp_action *act) zfcp_fsf_req_dismiss_all(adapter); adapter->fsf_req_seq_no = 0; zfcp_fc_wka_ports_force_offline(adapter->gs); - /* all ports and units are closed */ - zfcp_erp_modify_adapter_status(adapter, "erascl1", NULL, - ZFCP_STATUS_COMMON_OPEN, ZFCP_CLEAR); + /* all ports and LUNs are closed */ + zfcp_erp_clear_adapter_status(adapter, ZFCP_STATUS_COMMON_OPEN); atomic_clear_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK | ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status); @@ -742,7 +772,7 @@ static int zfcp_erp_adapter_strategy_open(struct zfcp_erp_action *act) { struct zfcp_adapter *adapter = act->adapter; - if (zfcp_erp_adapter_strategy_open_qdio(act)) { + if (zfcp_qdio_open(adapter->qdio)) { atomic_clear_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK | ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status); @@ -849,7 +879,7 @@ static int zfcp_erp_open_ptp_port(struct zfcp_erp_action *act) struct zfcp_port *port = act->port; if (port->wwpn != adapter->peer_wwpn) { - zfcp_erp_port_failed(port, "eroptp1", NULL); + zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ERP_FAILED); return ZFCP_ERP_FAILED; } port->d_id = adapter->peer_d_id; @@ -885,8 +915,7 @@ static int zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *act) } if (port->d_id && !(p_status & ZFCP_STATUS_COMMON_NOESC)) { port->d_id = 0; - _zfcp_erp_port_reopen(port, 0, "erpsoc1", NULL); - return ZFCP_ERP_EXIT; + return ZFCP_ERP_FAILED; } /* fall through otherwise */ } @@ -922,82 +951,86 @@ close_init_done: return zfcp_erp_port_strategy_open_common(erp_action); } -static void zfcp_erp_unit_strategy_clearstati(struct zfcp_unit *unit) +static void zfcp_erp_lun_strategy_clearstati(struct scsi_device *sdev) { - atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED | - ZFCP_STATUS_UNIT_SHARED | - ZFCP_STATUS_UNIT_READONLY, - &unit->status); + struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); + + atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED, + &zfcp_sdev->status); } -static int zfcp_erp_unit_strategy_close(struct zfcp_erp_action *erp_action) +static int zfcp_erp_lun_strategy_close(struct zfcp_erp_action *erp_action) { - int retval = zfcp_fsf_close_unit(erp_action); + int retval = zfcp_fsf_close_lun(erp_action); if (retval == -ENOMEM) return ZFCP_ERP_NOMEM; - erp_action->step = ZFCP_ERP_STEP_UNIT_CLOSING; + erp_action->step = ZFCP_ERP_STEP_LUN_CLOSING; if (retval) return ZFCP_ERP_FAILED; return ZFCP_ERP_CONTINUES; } -static int zfcp_erp_unit_strategy_open(struct zfcp_erp_action *erp_action) +static int zfcp_erp_lun_strategy_open(struct zfcp_erp_action *erp_action) { - int retval = zfcp_fsf_open_unit(erp_action); + int retval = zfcp_fsf_open_lun(erp_action); if (retval == -ENOMEM) return ZFCP_ERP_NOMEM; - erp_action->step = ZFCP_ERP_STEP_UNIT_OPENING; + erp_action->step = ZFCP_ERP_STEP_LUN_OPENING; if (retval) return ZFCP_ERP_FAILED; return ZFCP_ERP_CONTINUES; } -static int zfcp_erp_unit_strategy(struct 
zfcp_erp_action *erp_action) +static int zfcp_erp_lun_strategy(struct zfcp_erp_action *erp_action) { - struct zfcp_unit *unit = erp_action->unit; + struct scsi_device *sdev = erp_action->sdev; + struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); switch (erp_action->step) { case ZFCP_ERP_STEP_UNINITIALIZED: - zfcp_erp_unit_strategy_clearstati(unit); - if (atomic_read(&unit->status) & ZFCP_STATUS_COMMON_OPEN) - return zfcp_erp_unit_strategy_close(erp_action); + zfcp_erp_lun_strategy_clearstati(sdev); + if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_OPEN) + return zfcp_erp_lun_strategy_close(erp_action); /* already closed, fall through */ - case ZFCP_ERP_STEP_UNIT_CLOSING: - if (atomic_read(&unit->status) & ZFCP_STATUS_COMMON_OPEN) + case ZFCP_ERP_STEP_LUN_CLOSING: + if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_OPEN) return ZFCP_ERP_FAILED; if (erp_action->status & ZFCP_STATUS_ERP_CLOSE_ONLY) return ZFCP_ERP_EXIT; - return zfcp_erp_unit_strategy_open(erp_action); + return zfcp_erp_lun_strategy_open(erp_action); - case ZFCP_ERP_STEP_UNIT_OPENING: - if (atomic_read(&unit->status) & ZFCP_STATUS_COMMON_OPEN) + case ZFCP_ERP_STEP_LUN_OPENING: + if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_OPEN) return ZFCP_ERP_SUCCEEDED; } return ZFCP_ERP_FAILED; } -static int zfcp_erp_strategy_check_unit(struct zfcp_unit *unit, int result) +static int zfcp_erp_strategy_check_lun(struct scsi_device *sdev, int result) { + struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); + switch (result) { case ZFCP_ERP_SUCCEEDED : - atomic_set(&unit->erp_counter, 0); - zfcp_erp_unit_unblock(unit); + atomic_set(&zfcp_sdev->erp_counter, 0); + zfcp_erp_lun_unblock(sdev); break; case ZFCP_ERP_FAILED : - atomic_inc(&unit->erp_counter); - if (atomic_read(&unit->erp_counter) > ZFCP_MAX_ERPS) { - dev_err(&unit->port->adapter->ccw_device->dev, - "ERP failed for unit 0x%016Lx on " + atomic_inc(&zfcp_sdev->erp_counter); + if (atomic_read(&zfcp_sdev->erp_counter) > ZFCP_MAX_ERPS) { + dev_err(&zfcp_sdev->port->adapter->ccw_device->dev, + "ERP failed for LUN 0x%016Lx on " "port 0x%016Lx\n", - (unsigned long long)unit->fcp_lun, - (unsigned long long)unit->port->wwpn); - zfcp_erp_unit_failed(unit, "erusck1", NULL); + (unsigned long long)zfcp_scsi_dev_lun(sdev), + (unsigned long long)zfcp_sdev->port->wwpn); + zfcp_erp_set_lun_status(sdev, + ZFCP_STATUS_COMMON_ERP_FAILED); } break; } - if (atomic_read(&unit->status) & ZFCP_STATUS_COMMON_ERP_FAILED) { - zfcp_erp_unit_block(unit, 0); + if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_ERP_FAILED) { + zfcp_erp_lun_block(sdev, 0); result = ZFCP_ERP_EXIT; } return result; @@ -1021,7 +1054,8 @@ static int zfcp_erp_strategy_check_port(struct zfcp_port *port, int result) dev_err(&port->adapter->ccw_device->dev, "ERP failed for remote port 0x%016Lx\n", (unsigned long long)port->wwpn); - zfcp_erp_port_failed(port, "erpsck1", NULL); + zfcp_erp_set_port_status(port, + ZFCP_STATUS_COMMON_ERP_FAILED); } break; } @@ -1048,7 +1082,8 @@ static int zfcp_erp_strategy_check_adapter(struct zfcp_adapter *adapter, dev_err(&adapter->ccw_device->dev, "ERP cannot recover an error " "on the FCP device\n"); - zfcp_erp_adapter_failed(adapter, "erasck1", NULL); + zfcp_erp_set_adapter_status(adapter, + ZFCP_STATUS_COMMON_ERP_FAILED); } break; } @@ -1065,12 +1100,12 @@ static int zfcp_erp_strategy_check_target(struct zfcp_erp_action *erp_action, { struct zfcp_adapter *adapter = erp_action->adapter; struct zfcp_port *port = erp_action->port; - struct zfcp_unit *unit = 
erp_action->unit; + struct scsi_device *sdev = erp_action->sdev; switch (erp_action->action) { - case ZFCP_ERP_ACTION_REOPEN_UNIT: - result = zfcp_erp_strategy_check_unit(unit, result); + case ZFCP_ERP_ACTION_REOPEN_LUN: + result = zfcp_erp_strategy_check_lun(sdev, result); break; case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: @@ -1105,7 +1140,8 @@ static int zfcp_erp_strategy_statechange(struct zfcp_erp_action *act, int ret) int action = act->action; struct zfcp_adapter *adapter = act->adapter; struct zfcp_port *port = act->port; - struct zfcp_unit *unit = act->unit; + struct scsi_device *sdev = act->sdev; + struct zfcp_scsi_dev *zfcp_sdev; u32 erp_status = act->status; switch (action) { @@ -1113,7 +1149,7 @@ static int zfcp_erp_strategy_statechange(struct zfcp_erp_action *act, int ret) if (zfcp_erp_strat_change_det(&adapter->status, erp_status)) { _zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, - "ersscg1", NULL); + "ersscg1"); return ZFCP_ERP_EXIT; } break; @@ -1123,16 +1159,17 @@ static int zfcp_erp_strategy_statechange(struct zfcp_erp_action *act, int ret) if (zfcp_erp_strat_change_det(&port->status, erp_status)) { _zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, - "ersscg2", NULL); + "ersscg2"); return ZFCP_ERP_EXIT; } break; - case ZFCP_ERP_ACTION_REOPEN_UNIT: - if (zfcp_erp_strat_change_det(&unit->status, erp_status)) { - _zfcp_erp_unit_reopen(unit, - ZFCP_STATUS_COMMON_ERP_FAILED, - "ersscg3", NULL); + case ZFCP_ERP_ACTION_REOPEN_LUN: + zfcp_sdev = sdev_to_zfcp(sdev); + if (zfcp_erp_strat_change_det(&zfcp_sdev->status, erp_status)) { + _zfcp_erp_lun_reopen(sdev, + ZFCP_STATUS_COMMON_ERP_FAILED, + "ersscg3", 0); return ZFCP_ERP_EXIT; } break; @@ -1143,6 +1180,7 @@ static int zfcp_erp_strategy_statechange(struct zfcp_erp_action *act, int ret) static void zfcp_erp_action_dequeue(struct zfcp_erp_action *erp_action) { struct zfcp_adapter *adapter = erp_action->adapter; + struct zfcp_scsi_dev *zfcp_sdev; adapter->erp_total_count--; if (erp_action->status & ZFCP_STATUS_ERP_LOWMEM) { @@ -1151,12 +1189,13 @@ static void zfcp_erp_action_dequeue(struct zfcp_erp_action *erp_action) } list_del(&erp_action->list); - zfcp_dbf_rec_action("eractd1", erp_action); + zfcp_dbf_rec_run("eractd1", erp_action); switch (erp_action->action) { - case ZFCP_ERP_ACTION_REOPEN_UNIT: + case ZFCP_ERP_ACTION_REOPEN_LUN: + zfcp_sdev = sdev_to_zfcp(erp_action->sdev); atomic_clear_mask(ZFCP_STATUS_COMMON_ERP_INUSE, - &erp_action->unit->status); + &zfcp_sdev->status); break; case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: @@ -1176,32 +1215,30 @@ static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result) { struct zfcp_adapter *adapter = act->adapter; struct zfcp_port *port = act->port; - struct zfcp_unit *unit = act->unit; + struct scsi_device *sdev = act->sdev; switch (act->action) { - case ZFCP_ERP_ACTION_REOPEN_UNIT: - if ((result == ZFCP_ERP_SUCCEEDED) && !unit->device) { - get_device(&unit->dev); - if (scsi_queue_work(unit->port->adapter->scsi_host, - &unit->scsi_work) <= 0) - put_device(&unit->dev); - } - put_device(&unit->dev); + case ZFCP_ERP_ACTION_REOPEN_LUN: + if (!(act->status & ZFCP_STATUS_ERP_NO_REF)) + scsi_device_put(sdev); break; - case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: case ZFCP_ERP_ACTION_REOPEN_PORT: if (result == ZFCP_ERP_SUCCEEDED) zfcp_scsi_schedule_rport_register(port); + /* fall through */ + case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: put_device(&port->dev); break; case ZFCP_ERP_ACTION_REOPEN_ADAPTER: if (result == ZFCP_ERP_SUCCEEDED) { 
register_service_level(&adapter->service_level); - queue_work(adapter->work_queue, &adapter->scan_work); + zfcp_fc_conditional_port_scan(adapter); + queue_work(adapter->work_queue, &adapter->ns_up_work); } else unregister_service_level(&adapter->service_level); + kref_put(&adapter->ref, zfcp_adapter_release); break; } @@ -1216,8 +1253,8 @@ static int zfcp_erp_strategy_do_action(struct zfcp_erp_action *erp_action) return zfcp_erp_port_forced_strategy(erp_action); case ZFCP_ERP_ACTION_REOPEN_PORT: return zfcp_erp_port_strategy(erp_action); - case ZFCP_ERP_ACTION_REOPEN_UNIT: - return zfcp_erp_unit_strategy(erp_action); + case ZFCP_ERP_ACTION_REOPEN_LUN: + return zfcp_erp_lun_strategy(erp_action); } return ZFCP_ERP_FAILED; } @@ -1239,6 +1276,11 @@ static int zfcp_erp_strategy(struct zfcp_erp_action *erp_action) goto unlock; } + if (erp_action->status & ZFCP_STATUS_ERP_TIMEDOUT) { + retval = ZFCP_ERP_FAILED; + goto check_target; + } + zfcp_erp_action_to_running(erp_action); /* no lock to allow for blocking operations */ @@ -1256,7 +1298,7 @@ static int zfcp_erp_strategy(struct zfcp_erp_action *erp_action) erp_action->status |= ZFCP_STATUS_ERP_LOWMEM; } if (adapter->erp_total_count == adapter->erp_low_mem_count) - _zfcp_erp_adapter_reopen(adapter, 0, "erstgy1", NULL); + _zfcp_erp_adapter_reopen(adapter, 0, "erstgy1"); else { zfcp_erp_strategy_memwait(erp_action); retval = ZFCP_ERP_CONTINUES; @@ -1271,6 +1313,7 @@ static int zfcp_erp_strategy(struct zfcp_erp_action *erp_action) goto unlock; } +check_target: retval = zfcp_erp_strategy_check_target(erp_action, retval); zfcp_erp_action_dequeue(erp_action); retval = zfcp_erp_strategy_statechange(erp_action, retval); @@ -1299,11 +1342,9 @@ static int zfcp_erp_thread(void *data) unsigned long flags; for (;;) { - zfcp_dbf_rec_thread_lock("erthrd1", adapter->dbf); wait_event_interruptible(adapter->erp_ready_wq, !list_empty(&adapter->erp_ready_head) || kthread_should_stop()); - zfcp_dbf_rec_thread_lock("erthrd2", adapter->dbf); if (kthread_should_stop()) break; @@ -1364,42 +1405,6 @@ void zfcp_erp_thread_kill(struct zfcp_adapter *adapter) } /** - * zfcp_erp_adapter_failed - Set adapter status to failed. - * @adapter: Failed adapter. - * @id: Event id for debug trace. - * @ref: Reference for debug trace. - */ -void zfcp_erp_adapter_failed(struct zfcp_adapter *adapter, char *id, void *ref) -{ - zfcp_erp_modify_adapter_status(adapter, id, ref, - ZFCP_STATUS_COMMON_ERP_FAILED, ZFCP_SET); -} - -/** - * zfcp_erp_port_failed - Set port status to failed. - * @port: Failed port. - * @id: Event id for debug trace. - * @ref: Reference for debug trace. - */ -void zfcp_erp_port_failed(struct zfcp_port *port, char *id, void *ref) -{ - zfcp_erp_modify_port_status(port, id, ref, - ZFCP_STATUS_COMMON_ERP_FAILED, ZFCP_SET); -} - -/** - * zfcp_erp_unit_failed - Set unit status to failed. - * @unit: Failed unit. - * @id: Event id for debug trace. - * @ref: Reference for debug trace. 
- */ -void zfcp_erp_unit_failed(struct zfcp_unit *unit, char *id, void *ref) -{ - zfcp_erp_modify_unit_status(unit, id, ref, - ZFCP_STATUS_COMMON_ERP_FAILED, ZFCP_SET); -} - -/** * zfcp_erp_wait - wait for completion of error recovery on an adapter * @adapter: adapter for which to wait for completion of its error recovery */ @@ -1411,210 +1416,158 @@ void zfcp_erp_wait(struct zfcp_adapter *adapter) } /** - * zfcp_erp_modify_adapter_status - change adapter status bits + * zfcp_erp_set_adapter_status - set adapter status bits * @adapter: adapter to change the status - * @id: id for the debug trace - * @ref: reference for the debug trace * @mask: status bits to change - * @set_or_clear: ZFCP_SET or ZFCP_CLEAR * - * Changes in common status bits are propagated to attached ports and units. + * Changes in common status bits are propagated to attached ports and LUNs. */ -void zfcp_erp_modify_adapter_status(struct zfcp_adapter *adapter, char *id, - void *ref, u32 mask, int set_or_clear) +void zfcp_erp_set_adapter_status(struct zfcp_adapter *adapter, u32 mask) { struct zfcp_port *port; + struct scsi_device *sdev; unsigned long flags; u32 common_mask = mask & ZFCP_COMMON_FLAGS; - if (set_or_clear == ZFCP_SET) { - if (status_change_set(mask, &adapter->status)) - zfcp_dbf_rec_adapter(id, ref, adapter->dbf); - atomic_set_mask(mask, &adapter->status); - } else { - if (status_change_clear(mask, &adapter->status)) - zfcp_dbf_rec_adapter(id, ref, adapter->dbf); - atomic_clear_mask(mask, &adapter->status); - if (mask & ZFCP_STATUS_COMMON_ERP_FAILED) - atomic_set(&adapter->erp_counter, 0); - } + atomic_set_mask(mask, &adapter->status); - if (common_mask) { - read_lock_irqsave(&adapter->port_list_lock, flags); - list_for_each_entry(port, &adapter->port_list, list) - zfcp_erp_modify_port_status(port, id, ref, common_mask, - set_or_clear); - read_unlock_irqrestore(&adapter->port_list_lock, flags); - } + if (!common_mask) + return; + + read_lock_irqsave(&adapter->port_list_lock, flags); + list_for_each_entry(port, &adapter->port_list, list) + atomic_set_mask(common_mask, &port->status); + read_unlock_irqrestore(&adapter->port_list_lock, flags); + + spin_lock_irqsave(adapter->scsi_host->host_lock, flags); + __shost_for_each_device(sdev, adapter->scsi_host) + atomic_set_mask(common_mask, &sdev_to_zfcp(sdev)->status); + spin_unlock_irqrestore(adapter->scsi_host->host_lock, flags); } /** - * zfcp_erp_modify_port_status - change port status bits - * @port: port to change the status bits - * @id: id for the debug trace - * @ref: reference for the debug trace + * zfcp_erp_clear_adapter_status - clear adapter status bits + * @adapter: adapter to change the status * @mask: status bits to change - * @set_or_clear: ZFCP_SET or ZFCP_CLEAR * - * Changes in common status bits are propagated to attached units. + * Changes in common status bits are propagated to attached ports and LUNs. 
*/ -void zfcp_erp_modify_port_status(struct zfcp_port *port, char *id, void *ref, - u32 mask, int set_or_clear) +void zfcp_erp_clear_adapter_status(struct zfcp_adapter *adapter, u32 mask) { - struct zfcp_unit *unit; + struct zfcp_port *port; + struct scsi_device *sdev; unsigned long flags; u32 common_mask = mask & ZFCP_COMMON_FLAGS; + u32 clear_counter = mask & ZFCP_STATUS_COMMON_ERP_FAILED; + + atomic_clear_mask(mask, &adapter->status); + + if (!common_mask) + return; - if (set_or_clear == ZFCP_SET) { - if (status_change_set(mask, &port->status)) - zfcp_dbf_rec_port(id, ref, port); - atomic_set_mask(mask, &port->status); - } else { - if (status_change_clear(mask, &port->status)) - zfcp_dbf_rec_port(id, ref, port); - atomic_clear_mask(mask, &port->status); - if (mask & ZFCP_STATUS_COMMON_ERP_FAILED) + if (clear_counter) + atomic_set(&adapter->erp_counter, 0); + + read_lock_irqsave(&adapter->port_list_lock, flags); + list_for_each_entry(port, &adapter->port_list, list) { + atomic_clear_mask(common_mask, &port->status); + if (clear_counter) atomic_set(&port->erp_counter, 0); } + read_unlock_irqrestore(&adapter->port_list_lock, flags); - if (common_mask) { - read_lock_irqsave(&port->unit_list_lock, flags); - list_for_each_entry(unit, &port->unit_list, list) - zfcp_erp_modify_unit_status(unit, id, ref, common_mask, - set_or_clear); - read_unlock_irqrestore(&port->unit_list_lock, flags); + spin_lock_irqsave(adapter->scsi_host->host_lock, flags); + __shost_for_each_device(sdev, adapter->scsi_host) { + atomic_clear_mask(common_mask, &sdev_to_zfcp(sdev)->status); + if (clear_counter) + atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0); } + spin_unlock_irqrestore(adapter->scsi_host->host_lock, flags); } /** - * zfcp_erp_modify_unit_status - change unit status bits - * @unit: unit to change the status bits - * @id: id for the debug trace - * @ref: reference for the debug trace + * zfcp_erp_set_port_status - set port status bits + * @port: port to change the status * @mask: status bits to change - * @set_or_clear: ZFCP_SET or ZFCP_CLEAR - */ -void zfcp_erp_modify_unit_status(struct zfcp_unit *unit, char *id, void *ref, - u32 mask, int set_or_clear) -{ - if (set_or_clear == ZFCP_SET) { - if (status_change_set(mask, &unit->status)) - zfcp_dbf_rec_unit(id, ref, unit); - atomic_set_mask(mask, &unit->status); - } else { - if (status_change_clear(mask, &unit->status)) - zfcp_dbf_rec_unit(id, ref, unit); - atomic_clear_mask(mask, &unit->status); - if (mask & ZFCP_STATUS_COMMON_ERP_FAILED) { - atomic_set(&unit->erp_counter, 0); - } - } -} - -/** - * zfcp_erp_port_boxed - Mark port as "boxed" and start ERP - * @port: The "boxed" port. - * @id: The debug trace id. - * @id: Reference for the debug trace. + * + * Changes in common status bits are propagated to attached LUNs. */ -void zfcp_erp_port_boxed(struct zfcp_port *port, char *id, void *ref) +void zfcp_erp_set_port_status(struct zfcp_port *port, u32 mask) { - zfcp_erp_modify_port_status(port, id, ref, - ZFCP_STATUS_COMMON_ACCESS_BOXED, ZFCP_SET); - zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, id, ref); -} + struct scsi_device *sdev; + u32 common_mask = mask & ZFCP_COMMON_FLAGS; + unsigned long flags; -/** - * zfcp_erp_unit_boxed - Mark unit as "boxed" and start ERP - * @port: The "boxed" unit. - * @id: The debug trace id. - * @id: Reference for the debug trace. 
- */ -void zfcp_erp_unit_boxed(struct zfcp_unit *unit, char *id, void *ref) -{ - zfcp_erp_modify_unit_status(unit, id, ref, - ZFCP_STATUS_COMMON_ACCESS_BOXED, ZFCP_SET); - zfcp_erp_unit_reopen(unit, ZFCP_STATUS_COMMON_ERP_FAILED, id, ref); -} + atomic_set_mask(mask, &port->status); -/** - * zfcp_erp_port_access_denied - Adapter denied access to port. - * @port: port where access has been denied - * @id: id for debug trace - * @ref: reference for debug trace - * - * Since the adapter has denied access, stop using the port and the - * attached units. - */ -void zfcp_erp_port_access_denied(struct zfcp_port *port, char *id, void *ref) -{ - zfcp_erp_modify_port_status(port, id, ref, - ZFCP_STATUS_COMMON_ERP_FAILED | - ZFCP_STATUS_COMMON_ACCESS_DENIED, ZFCP_SET); + if (!common_mask) + return; + + spin_lock_irqsave(port->adapter->scsi_host->host_lock, flags); + __shost_for_each_device(sdev, port->adapter->scsi_host) + if (sdev_to_zfcp(sdev)->port == port) + atomic_set_mask(common_mask, + &sdev_to_zfcp(sdev)->status); + spin_unlock_irqrestore(port->adapter->scsi_host->host_lock, flags); } /** - * zfcp_erp_unit_access_denied - Adapter denied access to unit. - * @unit: unit where access has been denied - * @id: id for debug trace - * @ref: reference for debug trace + * zfcp_erp_clear_port_status - clear port status bits + * @port: adapter to change the status + * @mask: status bits to change * - * Since the adapter has denied access, stop using the unit. + * Changes in common status bits are propagated to attached LUNs. */ -void zfcp_erp_unit_access_denied(struct zfcp_unit *unit, char *id, void *ref) +void zfcp_erp_clear_port_status(struct zfcp_port *port, u32 mask) { - zfcp_erp_modify_unit_status(unit, id, ref, - ZFCP_STATUS_COMMON_ERP_FAILED | - ZFCP_STATUS_COMMON_ACCESS_DENIED, ZFCP_SET); -} + struct scsi_device *sdev; + u32 common_mask = mask & ZFCP_COMMON_FLAGS; + u32 clear_counter = mask & ZFCP_STATUS_COMMON_ERP_FAILED; + unsigned long flags; -static void zfcp_erp_unit_access_changed(struct zfcp_unit *unit, char *id, - void *ref) -{ - int status = atomic_read(&unit->status); - if (!(status & (ZFCP_STATUS_COMMON_ACCESS_DENIED | - ZFCP_STATUS_COMMON_ACCESS_BOXED))) + atomic_clear_mask(mask, &port->status); + + if (!common_mask) return; - zfcp_erp_unit_reopen(unit, ZFCP_STATUS_COMMON_ERP_FAILED, id, ref); + if (clear_counter) + atomic_set(&port->erp_counter, 0); + + spin_lock_irqsave(port->adapter->scsi_host->host_lock, flags); + __shost_for_each_device(sdev, port->adapter->scsi_host) + if (sdev_to_zfcp(sdev)->port == port) { + atomic_clear_mask(common_mask, + &sdev_to_zfcp(sdev)->status); + if (clear_counter) + atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0); + } + spin_unlock_irqrestore(port->adapter->scsi_host->host_lock, flags); } -static void zfcp_erp_port_access_changed(struct zfcp_port *port, char *id, - void *ref) +/** + * zfcp_erp_set_lun_status - set lun status bits + * @sdev: SCSI device / lun to set the status bits + * @mask: status bits to change + */ +void zfcp_erp_set_lun_status(struct scsi_device *sdev, u32 mask) { - struct zfcp_unit *unit; - unsigned long flags; - int status = atomic_read(&port->status); - - if (!(status & (ZFCP_STATUS_COMMON_ACCESS_DENIED | - ZFCP_STATUS_COMMON_ACCESS_BOXED))) { - read_lock_irqsave(&port->unit_list_lock, flags); - list_for_each_entry(unit, &port->unit_list, list) - zfcp_erp_unit_access_changed(unit, id, ref); - read_unlock_irqrestore(&port->unit_list_lock, flags); - return; - } + struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); - 
zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, id, ref); + atomic_set_mask(mask, &zfcp_sdev->status); } /** - * zfcp_erp_adapter_access_changed - Process change in adapter ACT - * @adapter: Adapter where the Access Control Table (ACT) changed - * @id: Id for debug trace - * @ref: Reference for debug trace + * zfcp_erp_clear_lun_status - clear lun status bits + * @sdev: SCSi device / lun to clear the status bits + * @mask: status bits to change */ -void zfcp_erp_adapter_access_changed(struct zfcp_adapter *adapter, char *id, - void *ref) +void zfcp_erp_clear_lun_status(struct scsi_device *sdev, u32 mask) { - unsigned long flags; - struct zfcp_port *port; + struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); - if (adapter->connection_features & FSF_FEATURE_NPIV_MODE) - return; + atomic_clear_mask(mask, &zfcp_sdev->status); - read_lock_irqsave(&adapter->port_list_lock, flags); - list_for_each_entry(port, &adapter->port_list, list) - zfcp_erp_port_access_changed(port, id, ref); - read_unlock_irqrestore(&adapter->port_list_lock, flags); + if (mask & ZFCP_STATUS_COMMON_ERP_FAILED) + atomic_set(&zfcp_sdev->erp_counter, 0); } + diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h index 8786a79c7f8..a9c570a09b8 100644 --- a/drivers/s390/scsi/zfcp_ext.h +++ b/drivers/s390/scsi/zfcp_ext.h @@ -3,7 +3,7 @@ * * External function declarations. * - * Copyright IBM Corporation 2002, 2009 + * Copyright IBM Corp. 2002, 2010 */ #ifndef ZFCP_EXT_H @@ -15,87 +15,63 @@ #include "zfcp_fc.h" /* zfcp_aux.c */ -extern struct zfcp_unit *zfcp_get_unit_by_lun(struct zfcp_port *, u64); extern struct zfcp_port *zfcp_get_port_by_wwpn(struct zfcp_adapter *, u64); extern struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *); extern struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *, u64, u32, u32); -extern struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *, u64); extern void zfcp_sg_free_table(struct scatterlist *, int); extern int zfcp_sg_setup_table(struct scatterlist *, int); -extern void zfcp_device_unregister(struct device *, - const struct attribute_group *); extern void zfcp_adapter_release(struct kref *); extern void zfcp_adapter_unregister(struct zfcp_adapter *); /* zfcp_ccw.c */ -extern int zfcp_ccw_priv_sch(struct zfcp_adapter *); extern struct ccw_driver zfcp_ccw_driver; extern struct zfcp_adapter *zfcp_ccw_adapter_by_cdev(struct ccw_device *); extern void zfcp_ccw_adapter_put(struct zfcp_adapter *); -/* zfcp_cfdc.c */ -extern struct miscdevice zfcp_cfdc_misc; - /* zfcp_dbf.c */ extern int zfcp_dbf_adapter_register(struct zfcp_adapter *); -extern void zfcp_dbf_adapter_unregister(struct zfcp_dbf *); -extern void zfcp_dbf_rec_thread(char *, struct zfcp_dbf *); -extern void zfcp_dbf_rec_thread_lock(char *, struct zfcp_dbf *); -extern void zfcp_dbf_rec_adapter(char *, void *, struct zfcp_dbf *); -extern void zfcp_dbf_rec_port(char *, void *, struct zfcp_port *); -extern void zfcp_dbf_rec_unit(char *, void *, struct zfcp_unit *); -extern void zfcp_dbf_rec_trigger(char *, void *, u8, u8, void *, - struct zfcp_adapter *, struct zfcp_port *, - struct zfcp_unit *); -extern void zfcp_dbf_rec_action(char *, struct zfcp_erp_action *); -extern void _zfcp_dbf_hba_fsf_response(const char *, int, struct zfcp_fsf_req *, - struct zfcp_dbf *); -extern void _zfcp_dbf_hba_fsf_unsol(const char *, int level, struct zfcp_dbf *, - struct fsf_status_read_buffer *); -extern void zfcp_dbf_hba_qdio(struct zfcp_dbf *, unsigned int, int, int); +extern void zfcp_dbf_adapter_unregister(struct 
zfcp_adapter *); +extern void zfcp_dbf_rec_trig(char *, struct zfcp_adapter *, + struct zfcp_port *, struct scsi_device *, u8, u8); +extern void zfcp_dbf_rec_run(char *, struct zfcp_erp_action *); +extern void zfcp_dbf_hba_fsf_uss(char *, struct zfcp_fsf_req *); +extern void zfcp_dbf_hba_fsf_res(char *, struct zfcp_fsf_req *); +extern void zfcp_dbf_hba_bit_err(char *, struct zfcp_fsf_req *); extern void zfcp_dbf_hba_berr(struct zfcp_dbf *, struct zfcp_fsf_req *); -extern void zfcp_dbf_san_ct_request(struct zfcp_fsf_req *, u32); -extern void zfcp_dbf_san_ct_response(struct zfcp_fsf_req *); -extern void zfcp_dbf_san_els_request(struct zfcp_fsf_req *); -extern void zfcp_dbf_san_els_response(struct zfcp_fsf_req *); -extern void zfcp_dbf_san_incoming_els(struct zfcp_fsf_req *); -extern void _zfcp_dbf_scsi(const char *, const char *, int, struct zfcp_dbf *, - struct scsi_cmnd *, struct zfcp_fsf_req *, - unsigned long); +extern void zfcp_dbf_hba_def_err(struct zfcp_adapter *, u64, u16, void **); +extern void zfcp_dbf_hba_basic(char *, struct zfcp_adapter *); +extern void zfcp_dbf_san_req(char *, struct zfcp_fsf_req *, u32); +extern void zfcp_dbf_san_res(char *, struct zfcp_fsf_req *); +extern void zfcp_dbf_san_in_els(char *, struct zfcp_fsf_req *); +extern void zfcp_dbf_scsi(char *, struct scsi_cmnd *, struct zfcp_fsf_req *); /* zfcp_erp.c */ -extern void zfcp_erp_modify_adapter_status(struct zfcp_adapter *, char *, - void *, u32, int); -extern void zfcp_erp_adapter_reopen(struct zfcp_adapter *, int, char *, void *); -extern void zfcp_erp_adapter_shutdown(struct zfcp_adapter *, int, char *, - void *); -extern void zfcp_erp_adapter_failed(struct zfcp_adapter *, char *, void *); -extern void zfcp_erp_modify_port_status(struct zfcp_port *, char *, void *, u32, - int); -extern int zfcp_erp_port_reopen(struct zfcp_port *, int, char *, void *); -extern void zfcp_erp_port_shutdown(struct zfcp_port *, int, char *, void *); -extern void zfcp_erp_port_forced_reopen(struct zfcp_port *, int, char *, - void *); -extern void zfcp_erp_port_failed(struct zfcp_port *, char *, void *); -extern void zfcp_erp_modify_unit_status(struct zfcp_unit *, char *, void *, u32, - int); -extern void zfcp_erp_unit_reopen(struct zfcp_unit *, int, char *, void *); -extern void zfcp_erp_unit_shutdown(struct zfcp_unit *, int, char *, void *); -extern void zfcp_erp_unit_failed(struct zfcp_unit *, char *, void *); +extern void zfcp_erp_set_adapter_status(struct zfcp_adapter *, u32); +extern void zfcp_erp_clear_adapter_status(struct zfcp_adapter *, u32); +extern void zfcp_erp_adapter_reopen(struct zfcp_adapter *, int, char *); +extern void zfcp_erp_adapter_shutdown(struct zfcp_adapter *, int, char *); +extern void zfcp_erp_set_port_status(struct zfcp_port *, u32); +extern void zfcp_erp_clear_port_status(struct zfcp_port *, u32); +extern int zfcp_erp_port_reopen(struct zfcp_port *, int, char *); +extern void zfcp_erp_port_shutdown(struct zfcp_port *, int, char *); +extern void zfcp_erp_port_forced_reopen(struct zfcp_port *, int, char *); +extern void zfcp_erp_set_lun_status(struct scsi_device *, u32); +extern void zfcp_erp_clear_lun_status(struct scsi_device *, u32); +extern void zfcp_erp_lun_reopen(struct scsi_device *, int, char *); +extern void zfcp_erp_lun_shutdown(struct scsi_device *, int, char *); +extern void zfcp_erp_lun_shutdown_wait(struct scsi_device *, char *); extern int zfcp_erp_thread_setup(struct zfcp_adapter *); extern void zfcp_erp_thread_kill(struct zfcp_adapter *); extern void zfcp_erp_wait(struct zfcp_adapter *); 
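/*
 * A minimal usage sketch of the reworked ERP status interface (editorial
 * illustration, not lines from this patch; the wrapper name below is
 * hypothetical). The removed zfcp_erp_modify_*_status() entry points took
 * a trace id, a reference and a ZFCP_SET/ZFCP_CLEAR flag; the replacement
 * set/clear pairs declared above take only the status mask, as the
 * zfcp_erp.c hunks earlier in this patch show:
 */
static inline void zfcp_sketch_fail_port(struct zfcp_port *port)
{
	/* was: zfcp_erp_modify_port_status(port, id, ref,
	 *		ZFCP_STATUS_COMMON_ERP_FAILED, ZFCP_SET); */
	zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ERP_FAILED);

	/* clearing ERP_FAILED additionally resets the erp_counter */
	zfcp_erp_clear_port_status(port, ZFCP_STATUS_COMMON_ERP_FAILED);
}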
extern void zfcp_erp_notify(struct zfcp_erp_action *, unsigned long); -extern void zfcp_erp_port_boxed(struct zfcp_port *, char *, void *); -extern void zfcp_erp_unit_boxed(struct zfcp_unit *, char *, void *); -extern void zfcp_erp_port_access_denied(struct zfcp_port *, char *, void *); -extern void zfcp_erp_unit_access_denied(struct zfcp_unit *, char *, void *); -extern void zfcp_erp_adapter_access_changed(struct zfcp_adapter *, char *, - void *); extern void zfcp_erp_timeout_handler(unsigned long); /* zfcp_fc.c */ +extern struct kmem_cache *zfcp_fc_req_cache; +extern void zfcp_fc_enqueue_event(struct zfcp_adapter *, + enum fc_host_event_code event_code, u32); +extern void zfcp_fc_post_event(struct work_struct *); extern void zfcp_fc_scan_ports(struct work_struct *); extern void zfcp_fc_incoming_els(struct zfcp_fsf_req *); extern void zfcp_fc_port_did_lookup(struct work_struct *); @@ -108,23 +84,25 @@ extern int zfcp_fc_gs_setup(struct zfcp_adapter *); extern void zfcp_fc_gs_destroy(struct zfcp_adapter *); extern int zfcp_fc_exec_bsg_job(struct fc_bsg_job *); extern int zfcp_fc_timeout_bsg_job(struct fc_bsg_job *); +extern void zfcp_fc_sym_name_update(struct work_struct *); +extern void zfcp_fc_conditional_port_scan(struct zfcp_adapter *); +extern void zfcp_fc_inverse_conditional_port_scan(struct zfcp_adapter *); /* zfcp_fsf.c */ +extern struct kmem_cache *zfcp_fsf_qtcb_cache; extern int zfcp_fsf_open_port(struct zfcp_erp_action *); extern int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *); extern int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *); extern int zfcp_fsf_close_port(struct zfcp_erp_action *); extern int zfcp_fsf_close_physical_port(struct zfcp_erp_action *); -extern int zfcp_fsf_open_unit(struct zfcp_erp_action *); -extern int zfcp_fsf_close_unit(struct zfcp_erp_action *); +extern int zfcp_fsf_open_lun(struct zfcp_erp_action *); +extern int zfcp_fsf_close_lun(struct zfcp_erp_action *); extern int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *); extern int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *, struct fsf_qtcb_bottom_config *); extern int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *); extern int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *, struct fsf_qtcb_bottom_port *); -extern struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *, - struct zfcp_fsf_cfdc *); extern void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *); extern int zfcp_fsf_status_read(struct zfcp_qdio *); extern int zfcp_status_read_refill(struct zfcp_adapter *adapter); @@ -132,40 +110,50 @@ extern int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *, struct zfcp_fsf_ct_els *, mempool_t *, unsigned int); extern int zfcp_fsf_send_els(struct zfcp_adapter *, u32, struct zfcp_fsf_ct_els *, unsigned int); -extern int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *, - struct scsi_cmnd *); +extern int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *); extern void zfcp_fsf_req_free(struct zfcp_fsf_req *); -extern struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *, u8); -extern struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(unsigned long, - struct zfcp_unit *); +extern struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_cmnd *, u8); +extern struct zfcp_fsf_req *zfcp_fsf_abort_fcp_cmnd(struct scsi_cmnd *); extern void zfcp_fsf_reqid_check(struct zfcp_qdio *, int); /* zfcp_qdio.c */ extern int zfcp_qdio_setup(struct zfcp_adapter *); extern void zfcp_qdio_destroy(struct zfcp_qdio *); +extern int zfcp_qdio_sbal_get(struct zfcp_qdio *); extern int 
zfcp_qdio_send(struct zfcp_qdio *, struct zfcp_qdio_req *); -extern int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *, - struct zfcp_qdio_req *, unsigned long, - struct scatterlist *, int); +extern int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *, struct zfcp_qdio_req *, + struct scatterlist *); extern int zfcp_qdio_open(struct zfcp_qdio *); extern void zfcp_qdio_close(struct zfcp_qdio *); +extern void zfcp_qdio_siosl(struct zfcp_adapter *); /* zfcp_scsi.c */ -extern struct zfcp_data zfcp_data; -extern int zfcp_adapter_scsi_register(struct zfcp_adapter *); -extern void zfcp_adapter_scsi_unregister(struct zfcp_adapter *); +extern struct scsi_transport_template *zfcp_scsi_transport_template; +extern int zfcp_scsi_adapter_register(struct zfcp_adapter *); +extern void zfcp_scsi_adapter_unregister(struct zfcp_adapter *); extern struct fc_function_template zfcp_transport_functions; extern void zfcp_scsi_rport_work(struct work_struct *); extern void zfcp_scsi_schedule_rport_register(struct zfcp_port *); extern void zfcp_scsi_schedule_rport_block(struct zfcp_port *); extern void zfcp_scsi_schedule_rports_block(struct zfcp_adapter *); -extern void zfcp_scsi_scan(struct work_struct *); +extern void zfcp_scsi_set_prot(struct zfcp_adapter *); +extern void zfcp_scsi_dif_sense_error(struct scsi_cmnd *, int); /* zfcp_sysfs.c */ -extern struct attribute_group zfcp_sysfs_unit_attrs; +extern const struct attribute_group *zfcp_unit_attr_groups[]; extern struct attribute_group zfcp_sysfs_adapter_attrs; -extern struct attribute_group zfcp_sysfs_port_attrs; +extern const struct attribute_group *zfcp_port_attr_groups[]; +extern struct mutex zfcp_sysfs_port_units_mutex; extern struct device_attribute *zfcp_sysfs_sdev_attrs[]; extern struct device_attribute *zfcp_sysfs_shost_attrs[]; +/* zfcp_unit.c */ +extern int zfcp_unit_add(struct zfcp_port *, u64); +extern int zfcp_unit_remove(struct zfcp_port *, u64); +extern struct zfcp_unit *zfcp_unit_find(struct zfcp_port *, u64); +extern struct scsi_device *zfcp_unit_sdev(struct zfcp_unit *unit); +extern void zfcp_unit_scsi_scan(struct zfcp_unit *); +extern void zfcp_unit_queue_scsi_scan(struct zfcp_port *); +extern unsigned int zfcp_unit_sdev_status(struct zfcp_unit *); + #endif /* ZFCP_EXT_H */ diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c index 5219670f0c9..ca28e1c6611 100644 --- a/drivers/s390/scsi/zfcp_fc.c +++ b/drivers/s390/scsi/zfcp_fc.c @@ -3,18 +3,22 @@ * * Fibre Channel related functions for the zfcp device driver. * - * Copyright IBM Corporation 2008, 2010 + * Copyright IBM Corp. 
2008, 2010 */ #define KMSG_COMPONENT "zfcp" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt #include <linux/types.h> +#include <linux/slab.h> +#include <linux/utsname.h> #include <scsi/fc/fc_els.h> #include <scsi/libfc.h> #include "zfcp_ext.h" #include "zfcp_fc.h" +struct kmem_cache *zfcp_fc_req_cache; + static u32 zfcp_fc_rscn_range_mask[] = { [ELS_ADDR_FMT_PORT] = 0xFFFFFF, [ELS_ADDR_FMT_AREA] = 0xFFFF00, @@ -22,6 +26,79 @@ static u32 zfcp_fc_rscn_range_mask[] = { [ELS_ADDR_FMT_FAB] = 0x000000, }; +static bool no_auto_port_rescan; +module_param_named(no_auto_port_rescan, no_auto_port_rescan, bool, 0600); +MODULE_PARM_DESC(no_auto_port_rescan, + "no automatic port_rescan (default off)"); + +void zfcp_fc_conditional_port_scan(struct zfcp_adapter *adapter) +{ + if (no_auto_port_rescan) + return; + + queue_work(adapter->work_queue, &adapter->scan_work); +} + +void zfcp_fc_inverse_conditional_port_scan(struct zfcp_adapter *adapter) +{ + if (!no_auto_port_rescan) + return; + + queue_work(adapter->work_queue, &adapter->scan_work); +} + +/** + * zfcp_fc_post_event - post event to userspace via fc_transport + * @work: work struct with enqueued events + */ +void zfcp_fc_post_event(struct work_struct *work) +{ + struct zfcp_fc_event *event = NULL, *tmp = NULL; + LIST_HEAD(tmp_lh); + struct zfcp_fc_events *events = container_of(work, + struct zfcp_fc_events, work); + struct zfcp_adapter *adapter = container_of(events, struct zfcp_adapter, + events); + + spin_lock_bh(&events->list_lock); + list_splice_init(&events->list, &tmp_lh); + spin_unlock_bh(&events->list_lock); + + list_for_each_entry_safe(event, tmp, &tmp_lh, list) { + fc_host_post_event(adapter->scsi_host, fc_get_event_number(), + event->code, event->data); + list_del(&event->list); + kfree(event); + } + +} + +/** + * zfcp_fc_enqueue_event - safely enqueue FC HBA API event from irq context + * @adapter: The adapter where to enqueue the event + * @event_code: The event code (as defined in fc_host_event_code in + * scsi_transport_fc.h) + * @event_data: The event data (e.g. 
n_port page in case of els) + */ +void zfcp_fc_enqueue_event(struct zfcp_adapter *adapter, + enum fc_host_event_code event_code, u32 event_data) +{ + struct zfcp_fc_event *event; + + event = kmalloc(sizeof(struct zfcp_fc_event), GFP_ATOMIC); + if (!event) + return; + + event->code = event_code; + event->data = event_data; + + spin_lock(&adapter->events.list_lock); + list_add_tail(&event->list, &adapter->events.list); + spin_unlock(&adapter->events.list_lock); + + queue_work(adapter->work_queue, &adapter->events.work); +} + static int zfcp_fc_wka_port_get(struct zfcp_fc_wka_port *wka_port) { if (mutex_lock_interruptible(&wka_port->mutex)) @@ -121,7 +198,7 @@ static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range, if (!port->d_id) zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, - "fcrscn1", NULL); + "fcrscn1"); } read_unlock_irqrestore(&adapter->port_list_lock, flags); } @@ -147,8 +224,10 @@ static void zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req) afmt = page->rscn_page_flags & ELS_RSCN_ADDR_FMT_MASK; _zfcp_fc_incoming_rscn(fsf_req, zfcp_fc_rscn_range_mask[afmt], page); + zfcp_fc_enqueue_event(fsf_req->adapter, FCH_EVT_RSCN, + *(u32 *)page); } - queue_work(fsf_req->adapter->work_queue, &fsf_req->adapter->scan_work); + zfcp_fc_conditional_port_scan(fsf_req->adapter); } static void zfcp_fc_incoming_wwpn(struct zfcp_fsf_req *req, u64 wwpn) @@ -160,7 +239,7 @@ static void zfcp_fc_incoming_wwpn(struct zfcp_fsf_req *req, u64 wwpn) read_lock_irqsave(&adapter->port_list_lock, flags); list_for_each_entry(port, &adapter->port_list, list) if (port->wwpn == wwpn) { - zfcp_erp_port_forced_reopen(port, 0, "fciwwp1", req); + zfcp_erp_port_forced_reopen(port, 0, "fciwwp1"); break; } read_unlock_irqrestore(&adapter->port_list_lock, flags); @@ -196,7 +275,7 @@ void zfcp_fc_incoming_els(struct zfcp_fsf_req *fsf_req) (struct fsf_status_read_buffer *) fsf_req->data; unsigned int els_type = status_buffer->payload.data[0]; - zfcp_dbf_san_incoming_els(fsf_req); + zfcp_dbf_san_in_els("fciels1", fsf_req); if (els_type == ELS_PLOGI) zfcp_fc_incoming_plogi(fsf_req); else if (els_type == ELS_LOGO) @@ -205,24 +284,18 @@ void zfcp_fc_incoming_els(struct zfcp_fsf_req *fsf_req) zfcp_fc_incoming_rscn(fsf_req); } -static void zfcp_fc_ns_gid_pn_eval(void *data) +static void zfcp_fc_ns_gid_pn_eval(struct zfcp_fc_req *fc_req) { - struct zfcp_fc_gid_pn *gid_pn = data; - struct zfcp_fsf_ct_els *ct = &gid_pn->ct; - struct zfcp_fc_gid_pn_req *gid_pn_req = sg_virt(ct->req); - struct zfcp_fc_gid_pn_resp *gid_pn_resp = sg_virt(ct->resp); - struct zfcp_port *port = gid_pn->port; + struct zfcp_fsf_ct_els *ct_els = &fc_req->ct_els; + struct zfcp_fc_gid_pn_rsp *gid_pn_rsp = &fc_req->u.gid_pn.rsp; - if (ct->status) + if (ct_els->status) return; - if (gid_pn_resp->ct_hdr.ct_cmd != FC_FS_ACC) + if (gid_pn_rsp->ct_hdr.ct_cmd != FC_FS_ACC) return; - /* paranoia */ - if (gid_pn_req->gid_pn.fn_wwpn != port->wwpn) - return; /* looks like a valid d_id */ - port->d_id = ntoh24(gid_pn_resp->gid_pn.fp_fid); + ct_els->port->d_id = ntoh24(gid_pn_rsp->gid_pn.fp_fid); } static void zfcp_fc_complete(void *data) @@ -230,69 +303,73 @@ static void zfcp_fc_complete(void *data) complete(data); } +static void zfcp_fc_ct_ns_init(struct fc_ct_hdr *ct_hdr, u16 cmd, u16 mr_size) +{ + ct_hdr->ct_rev = FC_CT_REV; + ct_hdr->ct_fs_type = FC_FST_DIR; + ct_hdr->ct_fs_subtype = FC_NS_SUBTYPE; + ct_hdr->ct_cmd = cmd; + ct_hdr->ct_mr_size = mr_size / 4; +} + static int zfcp_fc_ns_gid_pn_request(struct zfcp_port *port, - struct 
zfcp_fc_gid_pn *gid_pn) + struct zfcp_fc_req *fc_req) { struct zfcp_adapter *adapter = port->adapter; DECLARE_COMPLETION_ONSTACK(completion); + struct zfcp_fc_gid_pn_req *gid_pn_req = &fc_req->u.gid_pn.req; + struct zfcp_fc_gid_pn_rsp *gid_pn_rsp = &fc_req->u.gid_pn.rsp; int ret; /* setup parameters for send generic command */ - gid_pn->port = port; - gid_pn->ct.handler = zfcp_fc_complete; - gid_pn->ct.handler_data = &completion; - gid_pn->ct.req = &gid_pn->sg_req; - gid_pn->ct.resp = &gid_pn->sg_resp; - sg_init_one(&gid_pn->sg_req, &gid_pn->gid_pn_req, - sizeof(struct zfcp_fc_gid_pn_req)); - sg_init_one(&gid_pn->sg_resp, &gid_pn->gid_pn_resp, - sizeof(struct zfcp_fc_gid_pn_resp)); - - /* setup nameserver request */ - gid_pn->gid_pn_req.ct_hdr.ct_rev = FC_CT_REV; - gid_pn->gid_pn_req.ct_hdr.ct_fs_type = FC_FST_DIR; - gid_pn->gid_pn_req.ct_hdr.ct_fs_subtype = FC_NS_SUBTYPE; - gid_pn->gid_pn_req.ct_hdr.ct_options = 0; - gid_pn->gid_pn_req.ct_hdr.ct_cmd = FC_NS_GID_PN; - gid_pn->gid_pn_req.ct_hdr.ct_mr_size = ZFCP_FC_CT_SIZE_PAGE / 4; - gid_pn->gid_pn_req.gid_pn.fn_wwpn = port->wwpn; - - ret = zfcp_fsf_send_ct(&adapter->gs->ds, &gid_pn->ct, + fc_req->ct_els.port = port; + fc_req->ct_els.handler = zfcp_fc_complete; + fc_req->ct_els.handler_data = &completion; + fc_req->ct_els.req = &fc_req->sg_req; + fc_req->ct_els.resp = &fc_req->sg_rsp; + sg_init_one(&fc_req->sg_req, gid_pn_req, sizeof(*gid_pn_req)); + sg_init_one(&fc_req->sg_rsp, gid_pn_rsp, sizeof(*gid_pn_rsp)); + + zfcp_fc_ct_ns_init(&gid_pn_req->ct_hdr, + FC_NS_GID_PN, ZFCP_FC_CT_SIZE_PAGE); + gid_pn_req->gid_pn.fn_wwpn = port->wwpn; + + ret = zfcp_fsf_send_ct(&adapter->gs->ds, &fc_req->ct_els, adapter->pool.gid_pn_req, ZFCP_FC_CTELS_TMO); if (!ret) { wait_for_completion(&completion); - zfcp_fc_ns_gid_pn_eval(gid_pn); + zfcp_fc_ns_gid_pn_eval(fc_req); } return ret; } /** - * zfcp_fc_ns_gid_pn_request - initiate GID_PN nameserver request + * zfcp_fc_ns_gid_pn - initiate GID_PN nameserver request * @port: port where GID_PN request is needed * return: -ENOMEM on error, 0 otherwise */ static int zfcp_fc_ns_gid_pn(struct zfcp_port *port) { int ret; - struct zfcp_fc_gid_pn *gid_pn; + struct zfcp_fc_req *fc_req; struct zfcp_adapter *adapter = port->adapter; - gid_pn = mempool_alloc(adapter->pool.gid_pn, GFP_ATOMIC); - if (!gid_pn) + fc_req = mempool_alloc(adapter->pool.gid_pn, GFP_ATOMIC); + if (!fc_req) return -ENOMEM; - memset(gid_pn, 0, sizeof(*gid_pn)); + memset(fc_req, 0, sizeof(*fc_req)); ret = zfcp_fc_wka_port_get(&adapter->gs->ds); if (ret) goto out; - ret = zfcp_fc_ns_gid_pn_request(port, gid_pn); + ret = zfcp_fc_ns_gid_pn_request(port, fc_req); zfcp_fc_wka_port_put(&adapter->gs->ds); out: - mempool_free(gid_pn, adapter->pool.gid_pn); + mempool_free(fc_req, adapter->pool.gid_pn); return ret; } @@ -305,16 +382,16 @@ void zfcp_fc_port_did_lookup(struct work_struct *work) ret = zfcp_fc_ns_gid_pn(port); if (ret) { /* could not issue gid_pn for some reason */ - zfcp_erp_adapter_reopen(port->adapter, 0, "fcgpn_1", NULL); + zfcp_erp_adapter_reopen(port->adapter, 0, "fcgpn_1"); goto out; } if (!port->d_id) { - zfcp_erp_port_failed(port, "fcgpn_2", NULL); + zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ERP_FAILED); goto out; } - zfcp_erp_port_reopen(port, 0, "fcgpn_3", NULL); + zfcp_erp_port_reopen(port, 0, "fcgpn_3"); out: put_device(&port->dev); } @@ -364,14 +441,14 @@ void zfcp_fc_plogi_evaluate(struct zfcp_port *port, struct fc_els_flogi *plogi) static void zfcp_fc_adisc_handler(void *data) { - struct zfcp_fc_els_adisc *adisc = data; - 
struct zfcp_port *port = adisc->els.port; - struct fc_els_adisc *adisc_resp = &adisc->adisc_resp; + struct zfcp_fc_req *fc_req = data; + struct zfcp_port *port = fc_req->ct_els.port; + struct fc_els_adisc *adisc_resp = &fc_req->u.adisc.rsp; - if (adisc->els.status) { + if (fc_req->ct_els.status) { /* request rejected or timed out */ zfcp_erp_port_forced_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, - "fcadh_1", NULL); + "fcadh_1"); goto out; } @@ -381,7 +458,7 @@ static void zfcp_fc_adisc_handler(void *data) if ((port->wwpn != adisc_resp->adisc_wwpn) || !(atomic_read(&port->status) & ZFCP_STATUS_COMMON_OPEN)) { zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, - "fcadh_2", NULL); + "fcadh_2"); goto out; } @@ -390,42 +467,42 @@ static void zfcp_fc_adisc_handler(void *data) out: atomic_clear_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status); put_device(&port->dev); - kmem_cache_free(zfcp_data.adisc_cache, adisc); + kmem_cache_free(zfcp_fc_req_cache, fc_req); } static int zfcp_fc_adisc(struct zfcp_port *port) { - struct zfcp_fc_els_adisc *adisc; + struct zfcp_fc_req *fc_req; struct zfcp_adapter *adapter = port->adapter; + struct Scsi_Host *shost = adapter->scsi_host; int ret; - adisc = kmem_cache_alloc(zfcp_data.adisc_cache, GFP_ATOMIC); - if (!adisc) + fc_req = kmem_cache_zalloc(zfcp_fc_req_cache, GFP_ATOMIC); + if (!fc_req) return -ENOMEM; - adisc->els.port = port; - adisc->els.req = &adisc->req; - adisc->els.resp = &adisc->resp; - sg_init_one(adisc->els.req, &adisc->adisc_req, + fc_req->ct_els.port = port; + fc_req->ct_els.req = &fc_req->sg_req; + fc_req->ct_els.resp = &fc_req->sg_rsp; + sg_init_one(&fc_req->sg_req, &fc_req->u.adisc.req, sizeof(struct fc_els_adisc)); - sg_init_one(adisc->els.resp, &adisc->adisc_resp, + sg_init_one(&fc_req->sg_rsp, &fc_req->u.adisc.rsp, sizeof(struct fc_els_adisc)); - adisc->els.handler = zfcp_fc_adisc_handler; - adisc->els.handler_data = adisc; + fc_req->ct_els.handler = zfcp_fc_adisc_handler; + fc_req->ct_els.handler_data = fc_req; /* acc. 
to FC-FS, hard_nport_id in ADISC should not be set for ports without FC-AL-2 capability, so we don't set it */ - adisc->adisc_req.adisc_wwpn = fc_host_port_name(adapter->scsi_host); - adisc->adisc_req.adisc_wwnn = fc_host_node_name(adapter->scsi_host); - adisc->adisc_req.adisc_cmd = ELS_ADISC; - hton24(adisc->adisc_req.adisc_port_id, - fc_host_port_id(adapter->scsi_host)); + fc_req->u.adisc.req.adisc_wwpn = fc_host_port_name(shost); + fc_req->u.adisc.req.adisc_wwnn = fc_host_node_name(shost); + fc_req->u.adisc.req.adisc_cmd = ELS_ADISC; + hton24(fc_req->u.adisc.req.adisc_port_id, fc_host_port_id(shost)); - ret = zfcp_fsf_send_els(adapter, port->d_id, &adisc->els, + ret = zfcp_fsf_send_els(adapter, port->d_id, &fc_req->ct_els, ZFCP_FC_CTELS_TMO); if (ret) - kmem_cache_free(zfcp_data.adisc_cache, adisc); + kmem_cache_free(zfcp_fc_req_cache, fc_req); return ret; } @@ -452,7 +529,7 @@ void zfcp_fc_link_test_work(struct work_struct *work) /* send of ADISC was not possible */ atomic_clear_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status); - zfcp_erp_port_forced_reopen(port, 0, "fcltwk1", NULL); + zfcp_erp_port_forced_reopen(port, 0, "fcltwk1"); out: put_device(&port->dev); @@ -473,68 +550,42 @@ void zfcp_fc_test_link(struct zfcp_port *port) put_device(&port->dev); } -static void zfcp_free_sg_env(struct zfcp_fc_gpn_ft *gpn_ft, int buf_num) +static struct zfcp_fc_req *zfcp_alloc_sg_env(int buf_num) { - struct scatterlist *sg = &gpn_ft->sg_req; - - kmem_cache_free(zfcp_data.gpn_ft_cache, sg_virt(sg)); - zfcp_sg_free_table(gpn_ft->sg_resp, buf_num); + struct zfcp_fc_req *fc_req; - kfree(gpn_ft); -} - -static struct zfcp_fc_gpn_ft *zfcp_alloc_sg_env(int buf_num) -{ - struct zfcp_fc_gpn_ft *gpn_ft; - struct zfcp_fc_gpn_ft_req *req; - - gpn_ft = kzalloc(sizeof(*gpn_ft), GFP_KERNEL); - if (!gpn_ft) + fc_req = kmem_cache_zalloc(zfcp_fc_req_cache, GFP_KERNEL); + if (!fc_req) return NULL; - req = kmem_cache_alloc(zfcp_data.gpn_ft_cache, GFP_KERNEL); - if (!req) { - kfree(gpn_ft); - gpn_ft = NULL; - goto out; + if (zfcp_sg_setup_table(&fc_req->sg_rsp, buf_num)) { + kmem_cache_free(zfcp_fc_req_cache, fc_req); + return NULL; } - sg_init_one(&gpn_ft->sg_req, req, sizeof(*req)); - if (zfcp_sg_setup_table(gpn_ft->sg_resp, buf_num)) { - zfcp_free_sg_env(gpn_ft, buf_num); - gpn_ft = NULL; - } -out: - return gpn_ft; -} + sg_init_one(&fc_req->sg_req, &fc_req->u.gpn_ft.req, + sizeof(struct zfcp_fc_gpn_ft_req)); + return fc_req; +} -static int zfcp_fc_send_gpn_ft(struct zfcp_fc_gpn_ft *gpn_ft, +static int zfcp_fc_send_gpn_ft(struct zfcp_fc_req *fc_req, struct zfcp_adapter *adapter, int max_bytes) { - struct zfcp_fsf_ct_els *ct = &gpn_ft->ct; - struct zfcp_fc_gpn_ft_req *req = sg_virt(&gpn_ft->sg_req); + struct zfcp_fsf_ct_els *ct_els = &fc_req->ct_els; + struct zfcp_fc_gpn_ft_req *req = &fc_req->u.gpn_ft.req; DECLARE_COMPLETION_ONSTACK(completion); int ret; - /* prepare CT IU for GPN_FT */ - req->ct_hdr.ct_rev = FC_CT_REV; - req->ct_hdr.ct_fs_type = FC_FST_DIR; - req->ct_hdr.ct_fs_subtype = FC_NS_SUBTYPE; - req->ct_hdr.ct_options = 0; - req->ct_hdr.ct_cmd = FC_NS_GPN_FT; - req->ct_hdr.ct_mr_size = max_bytes / 4; - req->gpn_ft.fn_domain_id_scope = 0; - req->gpn_ft.fn_area_id_scope = 0; + zfcp_fc_ct_ns_init(&req->ct_hdr, FC_NS_GPN_FT, max_bytes); req->gpn_ft.fn_fc4_type = FC_TYPE_FCP; - /* prepare zfcp_send_ct */ - ct->handler = zfcp_fc_complete; - ct->handler_data = &completion; - ct->req = &gpn_ft->sg_req; - ct->resp = gpn_ft->sg_resp; + ct_els->handler = zfcp_fc_complete; + ct_els->handler_data = &completion; + 
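	/*
	 * Editorial sketch, not patch lines: zfcp_fc_complete(), defined
	 * earlier in this file, simply calls complete() on its argument.
	 * The assignments above therefore set up the driver's synchronous
	 * CT idiom -- roughly, with the names used in this function:
	 *
	 *	DECLARE_COMPLETION_ONSTACK(completion);
	 *	ct_els->handler = zfcp_fc_complete;
	 *	ct_els->handler_data = &completion;
	 *	if (!zfcp_fsf_send_ct(&adapter->gs->ds, ct_els, NULL,
	 *			      ZFCP_FC_CTELS_TMO))
	 *		wait_for_completion(&completion);
	 */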
ct_els->req = &fc_req->sg_req; + ct_els->resp = &fc_req->sg_rsp; - ret = zfcp_fsf_send_ct(&adapter->gs->ds, ct, NULL, + ret = zfcp_fsf_send_ct(&adapter->gs->ds, ct_els, NULL, ZFCP_FC_CTELS_TMO); if (!ret) wait_for_completion(&completion); @@ -555,11 +606,11 @@ static void zfcp_fc_validate_port(struct zfcp_port *port, struct list_head *lh) list_move_tail(&port->list, lh); } -static int zfcp_fc_eval_gpn_ft(struct zfcp_fc_gpn_ft *gpn_ft, +static int zfcp_fc_eval_gpn_ft(struct zfcp_fc_req *fc_req, struct zfcp_adapter *adapter, int max_entries) { - struct zfcp_fsf_ct_els *ct = &gpn_ft->ct; - struct scatterlist *sg = gpn_ft->sg_resp; + struct zfcp_fsf_ct_els *ct_els = &fc_req->ct_els; + struct scatterlist *sg = &fc_req->sg_rsp; struct fc_ct_hdr *hdr = sg_virt(sg); struct fc_gpn_ft_resp *acc = sg_virt(sg); struct zfcp_port *port, *tmp; @@ -568,7 +619,7 @@ static int zfcp_fc_eval_gpn_ft(struct zfcp_fc_gpn_ft *gpn_ft, u32 d_id; int ret = 0, x, last = 0; - if (ct->status) + if (ct_els->status) return -EIO; if (hdr->ct_cmd != FC_FS_ACC) { @@ -604,7 +655,7 @@ static int zfcp_fc_eval_gpn_ft(struct zfcp_fc_gpn_ft *gpn_ft, port = zfcp_port_enqueue(adapter, acc->fp_wwpn, ZFCP_STATUS_COMMON_NOESC, d_id); if (!IS_ERR(port)) - zfcp_erp_port_reopen(port, 0, "fcegpf1", NULL); + zfcp_erp_port_reopen(port, 0, "fcegpf1"); else if (PTR_ERR(port) != -EEXIST) ret = PTR_ERR(port); } @@ -616,8 +667,8 @@ static int zfcp_fc_eval_gpn_ft(struct zfcp_fc_gpn_ft *gpn_ft, write_unlock_irqrestore(&adapter->port_list_lock, flags); list_for_each_entry_safe(port, tmp, &remove_lh, list) { - zfcp_erp_port_shutdown(port, 0, "fcegpf2", NULL); - zfcp_device_unregister(&port->dev, &zfcp_sysfs_port_attrs); + zfcp_erp_port_shutdown(port, 0, "fcegpf2"); + device_unregister(&port->dev); } return ret; @@ -632,7 +683,7 @@ void zfcp_fc_scan_ports(struct work_struct *work) struct zfcp_adapter *adapter = container_of(work, struct zfcp_adapter, scan_work); int ret, i; - struct zfcp_fc_gpn_ft *gpn_ft; + struct zfcp_fc_req *fc_req; int chain, max_entries, buf_num, max_bytes; chain = adapter->adapter_features & FSF_FEATURE_ELS_CT_CHAINED_SBALS; @@ -647,25 +698,145 @@ void zfcp_fc_scan_ports(struct work_struct *work) if (zfcp_fc_wka_port_get(&adapter->gs->ds)) return; - gpn_ft = zfcp_alloc_sg_env(buf_num); - if (!gpn_ft) + fc_req = zfcp_alloc_sg_env(buf_num); + if (!fc_req) goto out; for (i = 0; i < 3; i++) { - ret = zfcp_fc_send_gpn_ft(gpn_ft, adapter, max_bytes); + ret = zfcp_fc_send_gpn_ft(fc_req, adapter, max_bytes); if (!ret) { - ret = zfcp_fc_eval_gpn_ft(gpn_ft, adapter, max_entries); + ret = zfcp_fc_eval_gpn_ft(fc_req, adapter, max_entries); if (ret == -EAGAIN) ssleep(1); else break; } } - zfcp_free_sg_env(gpn_ft, buf_num); + zfcp_sg_free_table(&fc_req->sg_rsp, buf_num); + kmem_cache_free(zfcp_fc_req_cache, fc_req); out: zfcp_fc_wka_port_put(&adapter->gs->ds); } +static int zfcp_fc_gspn(struct zfcp_adapter *adapter, + struct zfcp_fc_req *fc_req) +{ + DECLARE_COMPLETION_ONSTACK(completion); + char devno[] = "DEVNO:"; + struct zfcp_fsf_ct_els *ct_els = &fc_req->ct_els; + struct zfcp_fc_gspn_req *gspn_req = &fc_req->u.gspn.req; + struct zfcp_fc_gspn_rsp *gspn_rsp = &fc_req->u.gspn.rsp; + int ret; + + zfcp_fc_ct_ns_init(&gspn_req->ct_hdr, FC_NS_GSPN_ID, + FC_SYMBOLIC_NAME_SIZE); + hton24(gspn_req->gspn.fp_fid, fc_host_port_id(adapter->scsi_host)); + + sg_init_one(&fc_req->sg_req, gspn_req, sizeof(*gspn_req)); + sg_init_one(&fc_req->sg_rsp, gspn_rsp, sizeof(*gspn_rsp)); + + ct_els->handler = zfcp_fc_complete; + ct_els->handler_data = &completion; 
+ ct_els->req = &fc_req->sg_req; + ct_els->resp = &fc_req->sg_rsp; + + ret = zfcp_fsf_send_ct(&adapter->gs->ds, ct_els, NULL, + ZFCP_FC_CTELS_TMO); + if (ret) + return ret; + + wait_for_completion(&completion); + if (ct_els->status) + return ct_els->status; + + if (fc_host_port_type(adapter->scsi_host) == FC_PORTTYPE_NPIV && + !(strstr(gspn_rsp->gspn.fp_name, devno))) + snprintf(fc_host_symbolic_name(adapter->scsi_host), + FC_SYMBOLIC_NAME_SIZE, "%s%s %s NAME: %s", + gspn_rsp->gspn.fp_name, devno, + dev_name(&adapter->ccw_device->dev), + init_utsname()->nodename); + else + strlcpy(fc_host_symbolic_name(adapter->scsi_host), + gspn_rsp->gspn.fp_name, FC_SYMBOLIC_NAME_SIZE); + + return 0; +} + +static void zfcp_fc_rspn(struct zfcp_adapter *adapter, + struct zfcp_fc_req *fc_req) +{ + DECLARE_COMPLETION_ONSTACK(completion); + struct Scsi_Host *shost = adapter->scsi_host; + struct zfcp_fsf_ct_els *ct_els = &fc_req->ct_els; + struct zfcp_fc_rspn_req *rspn_req = &fc_req->u.rspn.req; + struct fc_ct_hdr *rspn_rsp = &fc_req->u.rspn.rsp; + int ret, len; + + zfcp_fc_ct_ns_init(&rspn_req->ct_hdr, FC_NS_RSPN_ID, + FC_SYMBOLIC_NAME_SIZE); + hton24(rspn_req->rspn.fr_fid.fp_fid, fc_host_port_id(shost)); + len = strlcpy(rspn_req->rspn.fr_name, fc_host_symbolic_name(shost), + FC_SYMBOLIC_NAME_SIZE); + rspn_req->rspn.fr_name_len = len; + + sg_init_one(&fc_req->sg_req, rspn_req, sizeof(*rspn_req)); + sg_init_one(&fc_req->sg_rsp, rspn_rsp, sizeof(*rspn_rsp)); + + ct_els->handler = zfcp_fc_complete; + ct_els->handler_data = &completion; + ct_els->req = &fc_req->sg_req; + ct_els->resp = &fc_req->sg_rsp; + + ret = zfcp_fsf_send_ct(&adapter->gs->ds, ct_els, NULL, + ZFCP_FC_CTELS_TMO); + if (!ret) + wait_for_completion(&completion); +} + +/** + * zfcp_fc_sym_name_update - Retrieve and update the symbolic port name + * @work: ns_up_work of the adapter where to update the symbolic port name + * + * Retrieve the current symbolic port name that may have been set by + * the hardware using the GSPN request and update the fc_host + * symbolic_name sysfs attribute. When running in NPIV mode (and hence + * the port name is unique for this system), update the symbolic port + * name to add Linux specific information and update the FC nameserver + * using the RSPN request. + */ +void zfcp_fc_sym_name_update(struct work_struct *work) +{ + struct zfcp_adapter *adapter = container_of(work, struct zfcp_adapter, + ns_up_work); + int ret; + struct zfcp_fc_req *fc_req; + + if (fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPORT && + fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPIV) + return; + + fc_req = kmem_cache_zalloc(zfcp_fc_req_cache, GFP_KERNEL); + if (!fc_req) + return; + + ret = zfcp_fc_wka_port_get(&adapter->gs->ds); + if (ret) + goto out_free; + + ret = zfcp_fc_gspn(adapter, fc_req); + if (ret || fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPIV) + goto out_ds_put; + + memset(fc_req, 0, sizeof(*fc_req)); + zfcp_fc_rspn(adapter, fc_req); + +out_ds_put: + zfcp_fc_wka_port_put(&adapter->gs->ds); +out_free: + kmem_cache_free(zfcp_fc_req_cache, fc_req); +} + static void zfcp_fc_ct_els_job_handler(void *data) { struct fc_bsg_job *job = data; diff --git a/drivers/s390/scsi/zfcp_fc.h b/drivers/s390/scsi/zfcp_fc.h index 0747b087390..b1d2024ed51 100644 --- a/drivers/s390/scsi/zfcp_fc.h +++ b/drivers/s390/scsi/zfcp_fc.h @@ -4,7 +4,7 @@ * Fibre Channel related definitions and inline functions for the zfcp * device driver * - * Copyright IBM Corporation 2009 + * Copyright IBM Corp. 
2009 */ #ifndef ZFCP_FC_H @@ -30,6 +30,30 @@ #define ZFCP_FC_CTELS_TMO (2 * FC_DEF_R_A_TOV / 1000) /** + * struct zfcp_fc_event - FC HBAAPI event for internal queueing from irq context + * @code: Event code + * @data: Event data + * @list: list_head for zfcp_fc_events list + */ +struct zfcp_fc_event { + enum fc_host_event_code code; + u32 data; + struct list_head list; +}; + +/** + * struct zfcp_fc_events - Infrastructure for posting FC events from irq context + * @list: List for queueing of events from irq context to workqueue + * @list_lock: Lock for event list + * @work: work_struct for forwarding events in workqueue +*/ +struct zfcp_fc_events { + struct list_head list; + spinlock_t list_lock; + struct work_struct work; +}; + +/** * struct zfcp_fc_gid_pn_req - container for ct header plus gid_pn request * @ct_hdr: FC GS common transport header * @gid_pn: GID_PN request @@ -40,33 +64,16 @@ struct zfcp_fc_gid_pn_req { } __packed; /** - * struct zfcp_fc_gid_pn_resp - container for ct header plus gid_pn response + * struct zfcp_fc_gid_pn_rsp - container for ct header plus gid_pn response * @ct_hdr: FC GS common transport header * @gid_pn: GID_PN response */ -struct zfcp_fc_gid_pn_resp { +struct zfcp_fc_gid_pn_rsp { struct fc_ct_hdr ct_hdr; struct fc_gid_pn_resp gid_pn; } __packed; /** - * struct zfcp_fc_gid_pn - everything required in zfcp for gid_pn request - * @ct: data passed to zfcp_fsf for issuing fsf request - * @sg_req: scatterlist entry for request data - * @sg_resp: scatterlist entry for response data - * @gid_pn_req: GID_PN request data - * @gid_pn_resp: GID_PN response data - */ -struct zfcp_fc_gid_pn { - struct zfcp_fsf_ct_els ct; - struct scatterlist sg_req; - struct scatterlist sg_resp; - struct zfcp_fc_gid_pn_req gid_pn_req; - struct zfcp_fc_gid_pn_resp gid_pn_resp; - struct zfcp_port *port; -}; - -/** * struct zfcp_fc_gpn_ft - container for ct header plus gpn_ft request * @ct_hdr: FC GS common transport header * @gpn_ft: GPN_FT request @@ -77,41 +84,72 @@ struct zfcp_fc_gpn_ft_req { } __packed; /** - * struct zfcp_fc_gpn_ft_resp - container for ct header plus gpn_ft response + * struct zfcp_fc_gspn_req - container for ct header plus GSPN_ID request * @ct_hdr: FC GS common transport header - * @gpn_ft: Array of gpn_ft response data to fill one memory page + * @gspn: GSPN_ID request */ -struct zfcp_fc_gpn_ft_resp { +struct zfcp_fc_gspn_req { struct fc_ct_hdr ct_hdr; - struct fc_gpn_ft_resp gpn_ft[ZFCP_FC_GPN_FT_ENT_PAGE]; + struct fc_gid_pn_resp gspn; } __packed; /** - * struct zfcp_fc_gpn_ft - zfcp data for gpn_ft request - * @ct: data passed to zfcp_fsf for issuing fsf request - * @sg_req: scatter list entry for gpn_ft request - * @sg_resp: scatter list entries for gpn_ft responses (per memory page) + * struct zfcp_fc_gspn_rsp - container for ct header plus GSPN_ID response + * @ct_hdr: FC GS common transport header + * @gspn: GSPN_ID response + * @name: The name string of the GSPN_ID response */ -struct zfcp_fc_gpn_ft { - struct zfcp_fsf_ct_els ct; - struct scatterlist sg_req; - struct scatterlist sg_resp[ZFCP_FC_GPN_FT_NUM_BUFS]; -}; +struct zfcp_fc_gspn_rsp { + struct fc_ct_hdr ct_hdr; + struct fc_gspn_resp gspn; + char name[FC_SYMBOLIC_NAME_SIZE]; +} __packed; /** - * struct zfcp_fc_els_adisc - everything required in zfcp for issuing ELS ADISC - * @els: data required for issuing els fsf command - * @req: scatterlist entry for ELS ADISC request - * @resp: scatterlist entry for ELS ADISC response - * @adisc_req: ELS ADISC request data - * @adisc_resp: ELS ADISC response 
data + * struct zfcp_fc_rspn_req - container for ct header plus RSPN_ID request + * @ct_hdr: FC GS common transport header + * @rspn: RSPN_ID request + * @name: The name string of the RSPN_ID request + */ +struct zfcp_fc_rspn_req { + struct fc_ct_hdr ct_hdr; + struct fc_ns_rspn rspn; + char name[FC_SYMBOLIC_NAME_SIZE]; +} __packed; + +/** + * struct zfcp_fc_req - Container for FC ELS and CT requests sent from zfcp + * @ct_els: data required for issuing fsf command + * @sg_req: scatterlist entry for request data + * @sg_rsp: scatterlist entry for response data + * @u: request specific data */ -struct zfcp_fc_els_adisc { - struct zfcp_fsf_ct_els els; - struct scatterlist req; - struct scatterlist resp; - struct fc_els_adisc adisc_req; - struct fc_els_adisc adisc_resp; +struct zfcp_fc_req { + struct zfcp_fsf_ct_els ct_els; + struct scatterlist sg_req; + struct scatterlist sg_rsp; + union { + struct { + struct fc_els_adisc req; + struct fc_els_adisc rsp; + } adisc; + struct { + struct zfcp_fc_gid_pn_req req; + struct zfcp_fc_gid_pn_rsp rsp; + } gid_pn; + struct { + struct scatterlist sg_rsp2[ZFCP_FC_GPN_FT_NUM_BUFS - 1]; + struct zfcp_fc_gpn_ft_req req; + } gpn_ft; + struct { + struct zfcp_fc_gspn_req req; + struct zfcp_fc_gspn_rsp rsp; + } gspn; + struct { + struct zfcp_fc_rspn_req req; + struct fc_ct_hdr rsp; + } rspn; + } u; }; /** @@ -168,14 +206,21 @@ struct zfcp_fc_wka_ports { * zfcp_fc_scsi_to_fcp - setup FCP command with data from scsi_cmnd * @fcp: fcp_cmnd to setup * @scsi: scsi_cmnd where to get LUN, task attributes/flags and CDB + * @tm_flags: task management flags to setup task management command */ static inline -void zfcp_fc_scsi_to_fcp(struct fcp_cmnd *fcp, struct scsi_cmnd *scsi) +void zfcp_fc_scsi_to_fcp(struct fcp_cmnd *fcp, struct scsi_cmnd *scsi, + u8 tm_flags) { char tag[2]; int_to_scsilun(scsi->device->lun, (struct scsi_lun *) &fcp->fc_lun); + if (unlikely(tm_flags)) { + fcp->fc_tm_flags = tm_flags; + return; + } + if (scsi_populate_tag_msg(scsi, tag)) { switch (tag[0]) { case MSG_ORDERED_TAG: @@ -196,19 +241,9 @@ void zfcp_fc_scsi_to_fcp(struct fcp_cmnd *fcp, struct scsi_cmnd *scsi) memcpy(fcp->fc_cdb, scsi->cmnd, scsi->cmd_len); fcp->fc_dl = scsi_bufflen(scsi); -} -/** - * zfcp_fc_fcp_tm - setup FCP command as task management command - * @fcp: fcp_cmnd to setup - * @dev: scsi_device where to send the task management command - * @tm: task management flags to setup tm command - */ -static inline -void zfcp_fc_fcp_tm(struct fcp_cmnd *fcp, struct scsi_device *dev, u8 tm_flags) -{ - int_to_scsilun(dev->lun, (struct scsi_lun *) &fcp->fc_lun); - fcp->fc_tm_flags |= tm_flags; + if (scsi_get_prot_type(scsi) == SCSI_PROT_DIF_TYPE1) + fcp->fc_dl += fcp->fc_dl / scsi->device->sector_size * 8; } /** @@ -243,7 +278,7 @@ void zfcp_fc_eval_fcp_rsp(struct fcp_resp_with_ext *fcp_rsp, if (unlikely(rsp_flags & FCP_SNS_LEN_VAL)) { sense = (char *) &fcp_rsp[1]; if (rsp_flags & FCP_RSP_LEN_VAL) - sense += fcp_rsp->ext.fr_sns_len; + sense += fcp_rsp->ext.fr_rsp_len; sense_len = min(fcp_rsp->ext.fr_sns_len, (u32) SCSI_SENSE_BUFFERSIZE); memcpy(scsi->sense_buffer, sense, sense_len); diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index 6538742b421..0fe8d5d9511 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c @@ -3,13 +3,14 @@ * * Implementation of FSF commands. * - * Copyright IBM Corporation 2002, 2010 + * Copyright IBM Corp. 
2002, 2013 */ #define KMSG_COMPONENT "zfcp" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt #include <linux/blktrace_api.h> +#include <linux/slab.h> #include <scsi/fc/fc_els.h> #include "zfcp_ext.h" #include "zfcp_fc.h" @@ -17,11 +18,14 @@ #include "zfcp_qdio.h" #include "zfcp_reqlist.h" +struct kmem_cache *zfcp_fsf_qtcb_cache; + static void zfcp_fsf_request_timeout_handler(unsigned long data) { struct zfcp_adapter *adapter = (struct zfcp_adapter *) data; + zfcp_qdio_siosl(adapter); zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, - "fsrth_1", NULL); + "fsrth_1"); } static void zfcp_fsf_start_timer(struct zfcp_fsf_req *fsf_req, @@ -59,50 +63,11 @@ static u32 fsf_qtcb_type[] = { [FSF_QTCB_UPLOAD_CONTROL_FILE] = FSF_SUPPORT_COMMAND }; -static void zfcp_act_eval_err(struct zfcp_adapter *adapter, u32 table) -{ - u16 subtable = table >> 16; - u16 rule = table & 0xffff; - const char *act_type[] = { "unknown", "OS", "WWPN", "DID", "LUN" }; - - if (subtable && subtable < ARRAY_SIZE(act_type)) - dev_warn(&adapter->ccw_device->dev, - "Access denied according to ACT rule type %s, " - "rule %d\n", act_type[subtable], rule); -} - -static void zfcp_fsf_access_denied_port(struct zfcp_fsf_req *req, - struct zfcp_port *port) -{ - struct fsf_qtcb_header *header = &req->qtcb->header; - dev_warn(&req->adapter->ccw_device->dev, - "Access denied to port 0x%016Lx\n", - (unsigned long long)port->wwpn); - zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[0]); - zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[1]); - zfcp_erp_port_access_denied(port, "fspad_1", req); - req->status |= ZFCP_STATUS_FSFREQ_ERROR; -} - -static void zfcp_fsf_access_denied_unit(struct zfcp_fsf_req *req, - struct zfcp_unit *unit) -{ - struct fsf_qtcb_header *header = &req->qtcb->header; - dev_warn(&req->adapter->ccw_device->dev, - "Access denied to unit 0x%016Lx on port 0x%016Lx\n", - (unsigned long long)unit->fcp_lun, - (unsigned long long)unit->port->wwpn); - zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[0]); - zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[1]); - zfcp_erp_unit_access_denied(unit, "fsuad_1", req); - req->status |= ZFCP_STATUS_FSFREQ_ERROR; -} - static void zfcp_fsf_class_not_supp(struct zfcp_fsf_req *req) { dev_err(&req->adapter->ccw_device->dev, "FCP device not " "operational because of an unsupported FC class\n"); - zfcp_erp_adapter_shutdown(req->adapter, 0, "fscns_1", req); + zfcp_erp_adapter_shutdown(req->adapter, 0, "fscns_1"); req->status |= ZFCP_STATUS_FSFREQ_ERROR; } @@ -120,7 +85,7 @@ void zfcp_fsf_req_free(struct zfcp_fsf_req *req) } if (likely(req->qtcb)) - kmem_cache_free(zfcp_data.qtcb_cache, req->qtcb); + kmem_cache_free(zfcp_fsf_qtcb_cache, req->qtcb); kfree(req); } @@ -135,13 +100,13 @@ static void zfcp_fsf_status_read_port_closed(struct zfcp_fsf_req *req) read_lock_irqsave(&adapter->port_list_lock, flags); list_for_each_entry(port, &adapter->port_list, list) if (port->d_id == d_id) { - zfcp_erp_port_reopen(port, 0, "fssrpc1", req); + zfcp_erp_port_reopen(port, 0, "fssrpc1"); break; } read_unlock_irqrestore(&adapter->port_list_lock, flags); } -static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *req, char *id, +static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *req, struct fsf_link_down_info *link_down) { struct zfcp_adapter *adapter = req->adapter; @@ -221,7 +186,7 @@ static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *req, char *id, "the FC fabric is down\n"); } out: - 
zfcp_erp_adapter_failed(adapter, id, req); + zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_ERP_FAILED); } static void zfcp_fsf_status_read_link_down(struct zfcp_fsf_req *req) @@ -232,13 +197,13 @@ static void zfcp_fsf_status_read_link_down(struct zfcp_fsf_req *req) switch (sr_buf->status_subtype) { case FSF_STATUS_READ_SUB_NO_PHYSICAL_LINK: - zfcp_fsf_link_down_info_eval(req, "fssrld1", ldi); + zfcp_fsf_link_down_info_eval(req, ldi); break; case FSF_STATUS_READ_SUB_FDISC_FAILED: - zfcp_fsf_link_down_info_eval(req, "fssrld2", ldi); + zfcp_fsf_link_down_info_eval(req, ldi); break; case FSF_STATUS_READ_SUB_FIRMWARE_UPDATE: - zfcp_fsf_link_down_info_eval(req, "fssrld3", NULL); + zfcp_fsf_link_down_info_eval(req, NULL); }; } @@ -248,13 +213,13 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req) struct fsf_status_read_buffer *sr_buf = req->data; if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) { - zfcp_dbf_hba_fsf_unsol("dism", adapter->dbf, sr_buf); - mempool_free(sr_buf, adapter->pool.status_read_data); + zfcp_dbf_hba_fsf_uss("fssrh_1", req); + mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data); zfcp_fsf_req_free(req); return; } - zfcp_dbf_hba_fsf_unsol("read", adapter->dbf, sr_buf); + zfcp_dbf_hba_fsf_uss("fssrh_4", req); switch (sr_buf->status_type) { case FSF_STATUS_READ_PORT_CLOSED: @@ -269,39 +234,35 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req) dev_warn(&adapter->ccw_device->dev, "The error threshold for checksum statistics " "has been exceeded\n"); - zfcp_dbf_hba_berr(adapter->dbf, req); + zfcp_dbf_hba_bit_err("fssrh_3", req); break; case FSF_STATUS_READ_LINK_DOWN: zfcp_fsf_status_read_link_down(req); + zfcp_fc_enqueue_event(adapter, FCH_EVT_LINKDOWN, 0); break; case FSF_STATUS_READ_LINK_UP: dev_info(&adapter->ccw_device->dev, "The local link has been restored\n"); /* All ports should be marked as ready to run again */ - zfcp_erp_modify_adapter_status(adapter, "fssrh_1", NULL, - ZFCP_STATUS_COMMON_RUNNING, - ZFCP_SET); + zfcp_erp_set_adapter_status(adapter, + ZFCP_STATUS_COMMON_RUNNING); zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED | ZFCP_STATUS_COMMON_ERP_FAILED, - "fssrh_2", req); + "fssrh_2"); + zfcp_fc_enqueue_event(adapter, FCH_EVT_LINKUP, 0); + break; case FSF_STATUS_READ_NOTIFICATION_LOST: - if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_ACT_UPDATED) - zfcp_erp_adapter_access_changed(adapter, "fssrh_3", - req); if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_INCOMING_ELS) - queue_work(adapter->work_queue, &adapter->scan_work); - break; - case FSF_STATUS_READ_CFDC_UPDATED: - zfcp_erp_adapter_access_changed(adapter, "fssrh_4", req); + zfcp_fc_conditional_port_scan(adapter); break; case FSF_STATUS_READ_FEATURE_UPDATE_ALERT: adapter->adapter_features = sr_buf->payload.word[0]; break; } - mempool_free(sr_buf, adapter->pool.status_read_data); + mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data); zfcp_fsf_req_free(req); atomic_inc(&adapter->stat_miss); @@ -322,7 +283,8 @@ static void zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *req) dev_err(&req->adapter->ccw_device->dev, "The FCP adapter reported a problem " "that cannot be recovered\n"); - zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfsqe1", req); + zfcp_qdio_siosl(req->adapter); + zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfsqe1"); break; } /* all non-return stats set FSFREQ_ERROR*/ @@ -339,7 +301,7 @@ static void zfcp_fsf_fsfstatus_eval(struct zfcp_fsf_req *req) dev_err(&req->adapter->ccw_device->dev, "The FCP adapter does not 
recognize the command 0x%x\n", req->qtcb->header.fsf_command); - zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfse_1", req); + zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfse_1"); req->status |= ZFCP_STATUS_FSFREQ_ERROR; break; case FSF_ADAPTER_STATUS_AVAILABLE: @@ -370,17 +332,17 @@ static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req) "QTCB version 0x%x not supported by FCP adapter " "(0x%x to 0x%x)\n", FSF_QTCB_CURRENT_VERSION, psq->word[0], psq->word[1]); - zfcp_erp_adapter_shutdown(adapter, 0, "fspse_1", req); + zfcp_erp_adapter_shutdown(adapter, 0, "fspse_1"); break; case FSF_PROT_ERROR_STATE: case FSF_PROT_SEQ_NUMB_ERROR: - zfcp_erp_adapter_reopen(adapter, 0, "fspse_2", req); + zfcp_erp_adapter_reopen(adapter, 0, "fspse_2"); req->status |= ZFCP_STATUS_FSFREQ_ERROR; break; case FSF_PROT_UNSUPP_QTCB_TYPE: dev_err(&adapter->ccw_device->dev, "The QTCB type is not supported by the FCP adapter\n"); - zfcp_erp_adapter_shutdown(adapter, 0, "fspse_3", req); + zfcp_erp_adapter_shutdown(adapter, 0, "fspse_3"); break; case FSF_PROT_HOST_CONNECTION_INITIALIZING: atomic_set_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT, @@ -390,29 +352,28 @@ static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req) dev_err(&adapter->ccw_device->dev, "0x%Lx is an ambiguous request identifier\n", (unsigned long long)qtcb->bottom.support.req_handle); - zfcp_erp_adapter_shutdown(adapter, 0, "fspse_4", req); + zfcp_erp_adapter_shutdown(adapter, 0, "fspse_4"); break; case FSF_PROT_LINK_DOWN: - zfcp_fsf_link_down_info_eval(req, "fspse_5", - &psq->link_down_info); + zfcp_fsf_link_down_info_eval(req, &psq->link_down_info); /* go through reopen to flush pending requests */ - zfcp_erp_adapter_reopen(adapter, 0, "fspse_6", req); + zfcp_erp_adapter_reopen(adapter, 0, "fspse_6"); break; case FSF_PROT_REEST_QUEUE: /* All ports should be marked as ready to run again */ - zfcp_erp_modify_adapter_status(adapter, "fspse_7", NULL, - ZFCP_STATUS_COMMON_RUNNING, - ZFCP_SET); + zfcp_erp_set_adapter_status(adapter, + ZFCP_STATUS_COMMON_RUNNING); zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED | ZFCP_STATUS_COMMON_ERP_FAILED, - "fspse_8", req); + "fspse_8"); break; default: dev_err(&adapter->ccw_device->dev, "0x%x is not a valid transfer protocol status\n", qtcb->prefix.prot_status); - zfcp_erp_adapter_shutdown(adapter, 0, "fspse_9", req); + zfcp_qdio_siosl(adapter); + zfcp_erp_adapter_shutdown(adapter, 0, "fspse_9"); } req->status |= ZFCP_STATUS_FSFREQ_ERROR; } @@ -471,6 +432,34 @@ void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter) } } +#define ZFCP_FSF_PORTSPEED_1GBIT (1 << 0) +#define ZFCP_FSF_PORTSPEED_2GBIT (1 << 1) +#define ZFCP_FSF_PORTSPEED_4GBIT (1 << 2) +#define ZFCP_FSF_PORTSPEED_10GBIT (1 << 3) +#define ZFCP_FSF_PORTSPEED_8GBIT (1 << 4) +#define ZFCP_FSF_PORTSPEED_16GBIT (1 << 5) +#define ZFCP_FSF_PORTSPEED_NOT_NEGOTIATED (1 << 15) + +static u32 zfcp_fsf_convert_portspeed(u32 fsf_speed) +{ + u32 fdmi_speed = 0; + if (fsf_speed & ZFCP_FSF_PORTSPEED_1GBIT) + fdmi_speed |= FC_PORTSPEED_1GBIT; + if (fsf_speed & ZFCP_FSF_PORTSPEED_2GBIT) + fdmi_speed |= FC_PORTSPEED_2GBIT; + if (fsf_speed & ZFCP_FSF_PORTSPEED_4GBIT) + fdmi_speed |= FC_PORTSPEED_4GBIT; + if (fsf_speed & ZFCP_FSF_PORTSPEED_10GBIT) + fdmi_speed |= FC_PORTSPEED_10GBIT; + if (fsf_speed & ZFCP_FSF_PORTSPEED_8GBIT) + fdmi_speed |= FC_PORTSPEED_8GBIT; + if (fsf_speed & ZFCP_FSF_PORTSPEED_16GBIT) + fdmi_speed |= FC_PORTSPEED_16GBIT; + if (fsf_speed & ZFCP_FSF_PORTSPEED_NOT_NEGOTIATED) + fdmi_speed |= FC_PORTSPEED_NOT_NEGOTIATED; + 
return fdmi_speed; +} + static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req) { struct fsf_qtcb_bottom_config *bottom = &req->qtcb->bottom.config; @@ -489,16 +478,28 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req) fc_host_port_name(shost) = nsp->fl_wwpn; fc_host_node_name(shost) = nsp->fl_wwnn; - fc_host_port_id(shost) = ntoh24(bottom->s_id); - fc_host_speed(shost) = bottom->fc_link_speed; fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3; - adapter->hydra_version = bottom->adapter_type; - adapter->timer_ticks = bottom->timer_interval; + adapter->timer_ticks = bottom->timer_interval & ZFCP_FSF_TIMER_INT_MASK; + adapter->stat_read_buf_num = max(bottom->status_read_buf_num, + (u16)FSF_STATUS_READS_RECOM); if (fc_host_permanent_port_name(shost) == -1) fc_host_permanent_port_name(shost) = fc_host_port_name(shost); + zfcp_scsi_set_prot(adapter); + + /* no error return above here, otherwise must fix call chains */ + /* do not evaluate invalid fields */ + if (req->qtcb->header.fsf_status == FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE) + return 0; + + fc_host_port_id(shost) = ntoh24(bottom->s_id); + fc_host_speed(shost) = + zfcp_fsf_convert_portspeed(bottom->fc_link_speed); + + adapter->hydra_version = bottom->adapter_type; + switch (bottom->fc_topology) { case FSF_TOPO_P2P: adapter->peer_d_id = ntoh24(bottom->peer_d_id); @@ -516,7 +517,7 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req) dev_err(&adapter->ccw_device->dev, "Unknown or unsupported arbitrated loop " "fibre channel topology detected\n"); - zfcp_erp_adapter_shutdown(adapter, 0, "fsece_1", req); + zfcp_erp_adapter_shutdown(adapter, 0, "fsece_1"); return -EIO; } @@ -550,7 +551,7 @@ static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req) "FCP adapter maximum QTCB size (%d bytes) " "is too small\n", bottom->max_qtcb_size); - zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh1", req); + zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh1"); return; } atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK, @@ -564,14 +565,17 @@ static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req) fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN; adapter->hydra_version = 0; + /* avoids adapter shutdown to be able to recognize + * events such as LINK UP */ atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK, &adapter->status); - - zfcp_fsf_link_down_info_eval(req, "fsecdh2", + zfcp_fsf_link_down_info_eval(req, &qtcb->header.fsf_status_qual.link_down_info); + if (zfcp_fsf_exchange_config_evaluate(req)) + return; break; default: - zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh3", req); + zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh3"); return; } @@ -587,14 +591,14 @@ static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req) dev_err(&adapter->ccw_device->dev, "The FCP adapter only supports newer " "control block versions\n"); - zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh4", req); + zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh4"); return; } if (FSF_QTCB_CURRENT_VERSION > bottom->high_qtcb_version) { dev_err(&adapter->ccw_device->dev, "The FCP adapter only supports older " "control block versions\n"); - zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh5", req); + zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh5"); } } @@ -613,7 +617,8 @@ static void zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *req) } else fc_host_permanent_port_name(shost) = fc_host_port_name(shost); fc_host_maxframe_size(shost) = bottom->maximum_frame_size; - 
fc_host_supported_speeds(shost) = bottom->supported_speed; + fc_host_supported_speeds(shost) = + zfcp_fsf_convert_portspeed(bottom->supported_speed); memcpy(fc_host_supported_fc4s(shost), bottom->supported_fc4_types, FC_FC4_LIST_SIZE); memcpy(fc_host_active_fc4s(shost), bottom->active_fc4_types, @@ -633,43 +638,12 @@ static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req) break; case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE: zfcp_fsf_exchange_port_evaluate(req); - zfcp_fsf_link_down_info_eval(req, "fsepdh1", + zfcp_fsf_link_down_info_eval(req, &qtcb->header.fsf_status_qual.link_down_info); break; } } -static int zfcp_fsf_sbal_check(struct zfcp_qdio *qdio) -{ - struct zfcp_qdio_queue *req_q = &qdio->req_q; - - spin_lock_bh(&qdio->req_q_lock); - if (atomic_read(&req_q->count)) - return 1; - spin_unlock_bh(&qdio->req_q_lock); - return 0; -} - -static int zfcp_fsf_req_sbal_get(struct zfcp_qdio *qdio) -{ - struct zfcp_adapter *adapter = qdio->adapter; - long ret; - - spin_unlock_bh(&qdio->req_q_lock); - ret = wait_event_interruptible_timeout(qdio->req_q_wq, - zfcp_fsf_sbal_check(qdio), 5 * HZ); - if (ret > 0) - return 0; - if (!ret) { - atomic_inc(&qdio->req_q_full); - /* assume hanging outbound queue, try queue recovery */ - zfcp_erp_adapter_reopen(adapter, 0, "fsrsg_1", NULL); - } - - spin_lock_bh(&qdio->req_q_lock); - return -EIO; -} - static struct zfcp_fsf_req *zfcp_fsf_alloc(mempool_t *pool) { struct zfcp_fsf_req *req; @@ -694,7 +668,7 @@ static struct fsf_qtcb *zfcp_qtcb_alloc(mempool_t *pool) if (likely(pool)) qtcb = mempool_alloc(pool, GFP_ATOMIC); else - qtcb = kmem_cache_alloc(zfcp_data.qtcb_cache, GFP_ATOMIC); + qtcb = kmem_cache_alloc(zfcp_fsf_qtcb_cache, GFP_ATOMIC); if (unlikely(!qtcb)) return NULL; @@ -704,10 +678,9 @@ static struct fsf_qtcb *zfcp_qtcb_alloc(mempool_t *pool) } static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio, - u32 fsf_cmd, mempool_t *pool) + u32 fsf_cmd, u8 sbtype, + mempool_t *pool) { - struct qdio_buffer_element *sbale; - struct zfcp_qdio_queue *req_q = &qdio->req_q; struct zfcp_adapter *adapter = qdio->adapter; struct zfcp_fsf_req *req = zfcp_fsf_alloc(pool); @@ -724,14 +697,6 @@ static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio, req->adapter = adapter; req->fsf_command = fsf_cmd; req->req_id = adapter->req_no; - req->qdio_req.sbal_number = 1; - req->qdio_req.sbal_first = req_q->first; - req->qdio_req.sbal_last = req_q->first; - req->qdio_req.sbale_curr = 1; - - sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req); - sbale[0].addr = (void *) req->req_id; - sbale[0].flags |= SBAL_FLAGS0_COMMAND; if (likely(fsf_cmd != FSF_QTCB_UNSOLICITED_STATUS)) { if (likely(pool)) @@ -752,14 +717,10 @@ static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio, req->qtcb->prefix.qtcb_version = FSF_QTCB_CURRENT_VERSION; req->qtcb->header.req_handle = req->req_id; req->qtcb->header.fsf_command = req->fsf_command; - sbale[1].addr = (void *) req->qtcb; - sbale[1].length = sizeof(struct fsf_qtcb); } - if (!(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)) { - zfcp_fsf_req_free(req); - return ERR_PTR(-EIO); - } + zfcp_qdio_req_init(adapter->qdio, &req->qdio_req, req->req_id, sbtype, + req->qtcb, sizeof(struct fsf_qtcb)); return req; } @@ -773,13 +734,13 @@ static int zfcp_fsf_req_send(struct zfcp_fsf_req *req) zfcp_reqlist_add(adapter->req_list, req); - req->qdio_req.qdio_outb_usage = atomic_read(&qdio->req_q.count); - req->issued = get_clock(); + req->qdio_req.qdio_outb_usage = 
atomic_read(&qdio->req_q_free); + req->issued = get_tod_clock(); if (zfcp_qdio_send(qdio, &req->qdio_req)) { del_timer(&req->timer); /* lookup request again, list might have changed */ zfcp_reqlist_find_rm(adapter->req_list, req_id); - zfcp_erp_adapter_reopen(adapter, 0, "fsrs__1", req); + zfcp_erp_adapter_reopen(adapter, 0, "fsrs__1"); return -EIO; } @@ -802,34 +763,32 @@ int zfcp_fsf_status_read(struct zfcp_qdio *qdio) struct zfcp_adapter *adapter = qdio->adapter; struct zfcp_fsf_req *req; struct fsf_status_read_buffer *sr_buf; - struct qdio_buffer_element *sbale; + struct page *page; int retval = -EIO; - spin_lock_bh(&qdio->req_q_lock); - if (zfcp_fsf_req_sbal_get(qdio)) + spin_lock_irq(&qdio->req_q_lock); + if (zfcp_qdio_sbal_get(qdio)) goto out; req = zfcp_fsf_req_create(qdio, FSF_QTCB_UNSOLICITED_STATUS, + SBAL_SFLAGS0_TYPE_STATUS, adapter->pool.status_read_req); if (IS_ERR(req)) { retval = PTR_ERR(req); goto out; } - sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req); - sbale[2].flags |= SBAL_FLAGS_LAST_ENTRY; - req->qdio_req.sbale_curr = 2; - - sr_buf = mempool_alloc(adapter->pool.status_read_data, GFP_ATOMIC); - if (!sr_buf) { + page = mempool_alloc(adapter->pool.sr_data, GFP_ATOMIC); + if (!page) { retval = -ENOMEM; goto failed_buf; } + sr_buf = page_address(page); memset(sr_buf, 0, sizeof(*sr_buf)); req->data = sr_buf; - sbale = zfcp_qdio_sbale_curr(qdio, &req->qdio_req); - sbale->addr = (void *) sr_buf; - sbale->length = sizeof(*sr_buf); + + zfcp_qdio_fill_next(qdio, &req->qdio_req, sr_buf, sizeof(*sr_buf)); + zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); retval = zfcp_fsf_req_send(req); if (retval) @@ -838,34 +797,38 @@ int zfcp_fsf_status_read(struct zfcp_qdio *qdio) goto out; failed_req_send: - mempool_free(sr_buf, adapter->pool.status_read_data); + req->data = NULL; + mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data); failed_buf: + zfcp_dbf_hba_fsf_uss("fssr__1", req); zfcp_fsf_req_free(req); - zfcp_dbf_hba_fsf_unsol("fail", adapter->dbf, NULL); out: - spin_unlock_bh(&qdio->req_q_lock); + spin_unlock_irq(&qdio->req_q_lock); return retval; } static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req) { - struct zfcp_unit *unit = req->data; + struct scsi_device *sdev = req->data; + struct zfcp_scsi_dev *zfcp_sdev; union fsf_status_qual *fsq = &req->qtcb->header.fsf_status_qual; if (req->status & ZFCP_STATUS_FSFREQ_ERROR) return; + zfcp_sdev = sdev_to_zfcp(sdev); + switch (req->qtcb->header.fsf_status) { case FSF_PORT_HANDLE_NOT_VALID: if (fsq->word[0] == fsq->word[1]) { - zfcp_erp_adapter_reopen(unit->port->adapter, 0, - "fsafch1", req); + zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0, + "fsafch1"); req->status |= ZFCP_STATUS_FSFREQ_ERROR; } break; case FSF_LUN_HANDLE_NOT_VALID: if (fsq->word[0] == fsq->word[1]) { - zfcp_erp_port_reopen(unit->port, 0, "fsafch2", req); + zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fsafch2"); req->status |= ZFCP_STATUS_FSFREQ_ERROR; } break; @@ -873,17 +836,22 @@ static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req) req->status |= ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED; break; case FSF_PORT_BOXED: - zfcp_erp_port_boxed(unit->port, "fsafch3", req); + zfcp_erp_set_port_status(zfcp_sdev->port, + ZFCP_STATUS_COMMON_ACCESS_BOXED); + zfcp_erp_port_reopen(zfcp_sdev->port, + ZFCP_STATUS_COMMON_ERP_FAILED, "fsafch3"); req->status |= ZFCP_STATUS_FSFREQ_ERROR; break; case FSF_LUN_BOXED: - zfcp_erp_unit_boxed(unit, "fsafch4", req); + zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ACCESS_BOXED); + 
zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED, + "fsafch4"); req->status |= ZFCP_STATUS_FSFREQ_ERROR; break; case FSF_ADAPTER_STATUS_AVAILABLE: switch (fsq->word[0]) { case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: - zfcp_fc_test_link(unit->port); + zfcp_fc_test_link(zfcp_sdev->port); /* fall through */ case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: req->status |= ZFCP_STATUS_FSFREQ_ERROR; @@ -897,41 +865,40 @@ static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req) } /** - * zfcp_fsf_abort_fcp_command - abort running SCSI command - * @old_req_id: unsigned long - * @unit: pointer to struct zfcp_unit + * zfcp_fsf_abort_fcp_cmnd - abort running SCSI command + * @scmnd: The SCSI command to abort * Returns: pointer to struct zfcp_fsf_req */ -struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(unsigned long old_req_id, - struct zfcp_unit *unit) +struct zfcp_fsf_req *zfcp_fsf_abort_fcp_cmnd(struct scsi_cmnd *scmnd) { - struct qdio_buffer_element *sbale; struct zfcp_fsf_req *req = NULL; - struct zfcp_qdio *qdio = unit->port->adapter->qdio; + struct scsi_device *sdev = scmnd->device; + struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); + struct zfcp_qdio *qdio = zfcp_sdev->port->adapter->qdio; + unsigned long old_req_id = (unsigned long) scmnd->host_scribble; - spin_lock_bh(&qdio->req_q_lock); - if (zfcp_fsf_req_sbal_get(qdio)) + spin_lock_irq(&qdio->req_q_lock); + if (zfcp_qdio_sbal_get(qdio)) goto out; req = zfcp_fsf_req_create(qdio, FSF_QTCB_ABORT_FCP_CMND, + SBAL_SFLAGS0_TYPE_READ, qdio->adapter->pool.scsi_abort); if (IS_ERR(req)) { req = NULL; goto out; } - if (unlikely(!(atomic_read(&unit->status) & + if (unlikely(!(atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_UNBLOCKED))) goto out_error_free; - sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req); - sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; - sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; + zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); - req->data = unit; + req->data = sdev; req->handler = zfcp_fsf_abort_fcp_command_handler; - req->qtcb->header.lun_handle = unit->handle; - req->qtcb->header.port_handle = unit->port->handle; + req->qtcb->header.lun_handle = zfcp_sdev->lun_handle; + req->qtcb->header.port_handle = zfcp_sdev->port->handle; req->qtcb->bottom.support.req_handle = (u64) old_req_id; zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT); @@ -942,7 +909,7 @@ out_error_free: zfcp_fsf_req_free(req); req = NULL; out: - spin_unlock_bh(&qdio->req_q_lock); + spin_unlock_irq(&qdio->req_q_lock); return req; } @@ -959,7 +926,7 @@ static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req) switch (header->fsf_status) { case FSF_GOOD: - zfcp_dbf_san_ct_response(req); + zfcp_dbf_san_res("fsscth2", req); ct->status = 0; break; case FSF_SERVICE_CLASS_NOT_SUPPORTED: @@ -973,13 +940,11 @@ static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req) break; } break; - case FSF_ACCESS_DENIED: - break; case FSF_PORT_BOXED: req->status |= ZFCP_STATUS_FSFREQ_ERROR; break; case FSF_PORT_HANDLE_NOT_VALID: - zfcp_erp_adapter_reopen(adapter, 0, "fsscth1", req); + zfcp_erp_adapter_reopen(adapter, 0, "fsscth1"); /* fall through */ case FSF_GENERIC_COMMAND_REJECTED: case FSF_PAYLOAD_SIZE_MISMATCH: @@ -995,74 +960,74 @@ skip_fsfstatus: ct->handler(ct->handler_data); } -static void zfcp_fsf_setup_ct_els_unchained(struct qdio_buffer_element *sbale, +static void zfcp_fsf_setup_ct_els_unchained(struct zfcp_qdio *qdio, + struct zfcp_qdio_req *q_req, struct scatterlist *sg_req, struct scatterlist *sg_resp) { - sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE_READ; 
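/*
 * Illustrative sketch, not part of the patch: with zfcp_unit gone from the
 * I/O path, zfcp_fsf_abort_fcp_cmnd() above derives everything it needs from
 * the scsi_cmnd.  The id of the request to abort is assumed to have been
 * stashed in scmnd->host_scribble by the queuecommand path, so the round
 * trip is simply:
 */
static void sketch_store_req_id(struct scsi_cmnd *scmnd, unsigned long req_id)
{
	/* host_scribble is a driver-private cookie in struct scsi_cmnd */
	scmnd->host_scribble = (unsigned char *) req_id;
}

static unsigned long sketch_load_req_id(struct scsi_cmnd *scmnd)
{
	/* recovered at abort time, see zfcp_fsf_abort_fcp_cmnd() above */
	return (unsigned long) scmnd->host_scribble;
}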
- sbale[2].addr = sg_virt(sg_req); - sbale[2].length = sg_req->length; - sbale[3].addr = sg_virt(sg_resp); - sbale[3].length = sg_resp->length; - sbale[3].flags |= SBAL_FLAGS_LAST_ENTRY; -} - -static int zfcp_fsf_one_sbal(struct scatterlist *sg) -{ - return sg_is_last(sg) && sg->length <= PAGE_SIZE; + zfcp_qdio_fill_next(qdio, q_req, sg_virt(sg_req), sg_req->length); + zfcp_qdio_fill_next(qdio, q_req, sg_virt(sg_resp), sg_resp->length); + zfcp_qdio_set_sbale_last(qdio, q_req); } static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req, struct scatterlist *sg_req, - struct scatterlist *sg_resp, - int max_sbals) + struct scatterlist *sg_resp) { struct zfcp_adapter *adapter = req->adapter; - struct qdio_buffer_element *sbale = zfcp_qdio_sbale_req(adapter->qdio, - &req->qdio_req); + struct zfcp_qdio *qdio = adapter->qdio; + struct fsf_qtcb *qtcb = req->qtcb; u32 feat = adapter->adapter_features; - int bytes; - if (!(feat & FSF_FEATURE_ELS_CT_CHAINED_SBALS)) { - if (!zfcp_fsf_one_sbal(sg_req) || !zfcp_fsf_one_sbal(sg_resp)) - return -EOPNOTSUPP; + if (zfcp_adapter_multi_buffer_active(adapter)) { + if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_req)) + return -EIO; + if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_resp)) + return -EIO; - zfcp_fsf_setup_ct_els_unchained(sbale, sg_req, sg_resp); + zfcp_qdio_set_data_div(qdio, &req->qdio_req, + zfcp_qdio_sbale_count(sg_req)); + zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); + zfcp_qdio_set_scount(qdio, &req->qdio_req); return 0; } /* use single, unchained SBAL if it can hold the request */ - if (zfcp_fsf_one_sbal(sg_req) && zfcp_fsf_one_sbal(sg_resp)) { - zfcp_fsf_setup_ct_els_unchained(sbale, sg_req, sg_resp); + if (zfcp_qdio_sg_one_sbale(sg_req) && zfcp_qdio_sg_one_sbale(sg_resp)) { + zfcp_fsf_setup_ct_els_unchained(qdio, &req->qdio_req, + sg_req, sg_resp); return 0; } - bytes = zfcp_qdio_sbals_from_sg(adapter->qdio, &req->qdio_req, - SBAL_FLAGS0_TYPE_WRITE_READ, - sg_req, max_sbals); - if (bytes <= 0) + if (!(feat & FSF_FEATURE_ELS_CT_CHAINED_SBALS)) + return -EOPNOTSUPP; + + if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_req)) return -EIO; - req->qtcb->bottom.support.req_buf_length = bytes; - req->qdio_req.sbale_curr = ZFCP_LAST_SBALE_PER_SBAL; - - bytes = zfcp_qdio_sbals_from_sg(adapter->qdio, &req->qdio_req, - SBAL_FLAGS0_TYPE_WRITE_READ, - sg_resp, max_sbals); - req->qtcb->bottom.support.resp_buf_length = bytes; - if (bytes <= 0) + + qtcb->bottom.support.req_buf_length = zfcp_qdio_real_bytes(sg_req); + + zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); + zfcp_qdio_skip_to_last_sbale(qdio, &req->qdio_req); + + if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_resp)) return -EIO; + qtcb->bottom.support.resp_buf_length = zfcp_qdio_real_bytes(sg_resp); + + zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); + return 0; } static int zfcp_fsf_setup_ct_els(struct zfcp_fsf_req *req, struct scatterlist *sg_req, struct scatterlist *sg_resp, - int max_sbals, unsigned int timeout) + unsigned int timeout) { int ret; - ret = zfcp_fsf_setup_ct_els_sbals(req, sg_req, sg_resp, max_sbals); + ret = zfcp_fsf_setup_ct_els_sbals(req, sg_req, sg_resp); if (ret) return ret; @@ -1089,11 +1054,12 @@ int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port, struct zfcp_fsf_req *req; int ret = -EIO; - spin_lock_bh(&qdio->req_q_lock); - if (zfcp_fsf_req_sbal_get(qdio)) + spin_lock_irq(&qdio->req_q_lock); + if (zfcp_qdio_sbal_get(qdio)) goto out; - req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_GENERIC, pool); + req = zfcp_fsf_req_create(qdio, 
FSF_QTCB_SEND_GENERIC, + SBAL_SFLAGS0_TYPE_WRITE_READ, pool); if (IS_ERR(req)) { ret = PTR_ERR(req); @@ -1101,8 +1067,7 @@ int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port, } req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; - ret = zfcp_fsf_setup_ct_els(req, ct->req, ct->resp, - FSF_MAX_SBALS_PER_REQ, timeout); + ret = zfcp_fsf_setup_ct_els(req, ct->req, ct->resp, timeout); if (ret) goto failed_send; @@ -1110,7 +1075,7 @@ int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port, req->qtcb->header.port_handle = wka_port->handle; req->data = ct; - zfcp_dbf_san_ct_request(req, wka_port->d_id); + zfcp_dbf_san_req("fssct_1", req, wka_port->d_id); ret = zfcp_fsf_req_send(req); if (ret) @@ -1121,14 +1086,13 @@ int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port, failed_send: zfcp_fsf_req_free(req); out: - spin_unlock_bh(&qdio->req_q_lock); + spin_unlock_irq(&qdio->req_q_lock); return ret; } static void zfcp_fsf_send_els_handler(struct zfcp_fsf_req *req) { struct zfcp_fsf_ct_els *send_els = req->data; - struct zfcp_port *port = send_els->port; struct fsf_qtcb_header *header = &req->qtcb->header; send_els->status = -EINVAL; @@ -1138,7 +1102,7 @@ static void zfcp_fsf_send_els_handler(struct zfcp_fsf_req *req) switch (header->fsf_status) { case FSF_GOOD: - zfcp_dbf_san_els_response(req); + zfcp_dbf_san_res("fsselh1", req); send_els->status = 0; break; case FSF_SERVICE_CLASS_NOT_SUPPORTED: @@ -1158,12 +1122,8 @@ static void zfcp_fsf_send_els_handler(struct zfcp_fsf_req *req) case FSF_REQUEST_SIZE_TOO_LARGE: case FSF_RESPONSE_SIZE_TOO_LARGE: break; - case FSF_ACCESS_DENIED: - if (port) - zfcp_fsf_access_denied_port(req, port); - break; case FSF_SBAL_MISMATCH: - /* should never occure, avoided in zfcp_fsf_send_els */ + /* should never occur, avoided in zfcp_fsf_send_els */ /* fall through */ default: req->status |= ZFCP_STATUS_FSFREQ_ERROR; @@ -1185,11 +1145,12 @@ int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id, struct zfcp_qdio *qdio = adapter->qdio; int ret = -EIO; - spin_lock_bh(&qdio->req_q_lock); - if (zfcp_fsf_req_sbal_get(qdio)) + spin_lock_irq(&qdio->req_q_lock); + if (zfcp_qdio_sbal_get(qdio)) goto out; - req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_ELS, NULL); + req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_ELS, + SBAL_SFLAGS0_TYPE_WRITE_READ, NULL); if (IS_ERR(req)) { ret = PTR_ERR(req); @@ -1197,7 +1158,11 @@ int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id, } req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; - ret = zfcp_fsf_setup_ct_els(req, els->req, els->resp, 2, timeout); + + if (!zfcp_adapter_multi_buffer_active(adapter)) + zfcp_qdio_sbal_limit(qdio, &req->qdio_req, 2); + + ret = zfcp_fsf_setup_ct_els(req, els->req, els->resp, timeout); if (ret) goto failed_send; @@ -1206,7 +1171,7 @@ int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id, req->handler = zfcp_fsf_send_els_handler; req->data = els; - zfcp_dbf_san_els_request(req); + zfcp_dbf_san_req("fssels1", req, d_id); ret = zfcp_fsf_req_send(req); if (ret) @@ -1217,22 +1182,22 @@ int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id, failed_send: zfcp_fsf_req_free(req); out: - spin_unlock_bh(&qdio->req_q_lock); + spin_unlock_irq(&qdio->req_q_lock); return ret; } int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action) { - struct qdio_buffer_element *sbale; struct zfcp_fsf_req *req; struct zfcp_qdio *qdio = erp_action->adapter->qdio; int retval = -EIO; - spin_lock_bh(&qdio->req_q_lock); - if (zfcp_fsf_req_sbal_get(qdio)) + spin_lock_irq(&qdio->req_q_lock); + if (zfcp_qdio_sbal_get(qdio)) 
goto out; req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA, + SBAL_SFLAGS0_TYPE_READ, qdio->adapter->pool.erp_req); if (IS_ERR(req)) { @@ -1241,13 +1206,9 @@ int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action) } req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; - sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req); - sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; - sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; + zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); req->qtcb->bottom.config.feature_selection = - FSF_FEATURE_CFDC | - FSF_FEATURE_LUN_SHARING | FSF_FEATURE_NOTIFICATION_LOST | FSF_FEATURE_UPDATE_ALERT; req->erp_action = erp_action; @@ -1261,36 +1222,32 @@ int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action) erp_action->fsf_req_id = 0; } out: - spin_unlock_bh(&qdio->req_q_lock); + spin_unlock_irq(&qdio->req_q_lock); return retval; } int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *qdio, struct fsf_qtcb_bottom_config *data) { - struct qdio_buffer_element *sbale; struct zfcp_fsf_req *req = NULL; int retval = -EIO; - spin_lock_bh(&qdio->req_q_lock); - if (zfcp_fsf_req_sbal_get(qdio)) + spin_lock_irq(&qdio->req_q_lock); + if (zfcp_qdio_sbal_get(qdio)) goto out_unlock; - req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA, NULL); + req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA, + SBAL_SFLAGS0_TYPE_READ, NULL); if (IS_ERR(req)) { retval = PTR_ERR(req); goto out_unlock; } - sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req); - sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; - sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; + zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); req->handler = zfcp_fsf_exchange_config_data_handler; req->qtcb->bottom.config.feature_selection = - FSF_FEATURE_CFDC | - FSF_FEATURE_LUN_SHARING | FSF_FEATURE_NOTIFICATION_LOST | FSF_FEATURE_UPDATE_ALERT; @@ -1299,7 +1256,7 @@ int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *qdio, zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT); retval = zfcp_fsf_req_send(req); - spin_unlock_bh(&qdio->req_q_lock); + spin_unlock_irq(&qdio->req_q_lock); if (!retval) wait_for_completion(&req->completion); @@ -1307,7 +1264,7 @@ int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *qdio, return retval; out_unlock: - spin_unlock_bh(&qdio->req_q_lock); + spin_unlock_irq(&qdio->req_q_lock); return retval; } @@ -1319,18 +1276,18 @@ out_unlock: int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action) { struct zfcp_qdio *qdio = erp_action->adapter->qdio; - struct qdio_buffer_element *sbale; struct zfcp_fsf_req *req; int retval = -EIO; if (!(qdio->adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT)) return -EOPNOTSUPP; - spin_lock_bh(&qdio->req_q_lock); - if (zfcp_fsf_req_sbal_get(qdio)) + spin_lock_irq(&qdio->req_q_lock); + if (zfcp_qdio_sbal_get(qdio)) goto out; req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA, + SBAL_SFLAGS0_TYPE_READ, qdio->adapter->pool.erp_req); if (IS_ERR(req)) { @@ -1339,9 +1296,7 @@ int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action) } req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; - sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req); - sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; - sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; + zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); req->handler = zfcp_fsf_exchange_port_data_handler; req->erp_action = erp_action; @@ -1354,7 +1309,7 @@ int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action) erp_action->fsf_req_id = 0; } out: - spin_unlock_bh(&qdio->req_q_lock); + 
spin_unlock_irq(&qdio->req_q_lock); return retval; } @@ -1367,18 +1322,18 @@ out: int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *qdio, struct fsf_qtcb_bottom_port *data) { - struct qdio_buffer_element *sbale; struct zfcp_fsf_req *req = NULL; int retval = -EIO; if (!(qdio->adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT)) return -EOPNOTSUPP; - spin_lock_bh(&qdio->req_q_lock); - if (zfcp_fsf_req_sbal_get(qdio)) + spin_lock_irq(&qdio->req_q_lock); + if (zfcp_qdio_sbal_get(qdio)) goto out_unlock; - req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA, NULL); + req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA, + SBAL_SFLAGS0_TYPE_READ, NULL); if (IS_ERR(req)) { retval = PTR_ERR(req); @@ -1388,14 +1343,12 @@ int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *qdio, if (data) req->data = data; - sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req); - sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; - sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; + zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); req->handler = zfcp_fsf_exchange_port_data_handler; zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT); retval = zfcp_fsf_req_send(req); - spin_unlock_bh(&qdio->req_q_lock); + spin_unlock_irq(&qdio->req_q_lock); if (!retval) wait_for_completion(&req->completion); @@ -1405,7 +1358,7 @@ int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *qdio, return retval; out_unlock: - spin_unlock_bh(&qdio->req_q_lock); + spin_unlock_irq(&qdio->req_q_lock); return retval; } @@ -1421,15 +1374,13 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req) switch (header->fsf_status) { case FSF_PORT_ALREADY_OPEN: break; - case FSF_ACCESS_DENIED: - zfcp_fsf_access_denied_port(req, port); - break; case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED: dev_warn(&req->adapter->ccw_device->dev, "Not enough FCP adapter resources to open " "remote port 0x%016Lx\n", (unsigned long long)port->wwpn); - zfcp_erp_port_failed(port, "fsoph_1", req); + zfcp_erp_set_port_status(port, + ZFCP_STATUS_COMMON_ERP_FAILED); req->status |= ZFCP_STATUS_FSFREQ_ERROR; break; case FSF_ADAPTER_STATUS_AVAILABLE: @@ -1484,17 +1435,17 @@ out: */ int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action) { - struct qdio_buffer_element *sbale; struct zfcp_qdio *qdio = erp_action->adapter->qdio; struct zfcp_port *port = erp_action->port; struct zfcp_fsf_req *req; int retval = -EIO; - spin_lock_bh(&qdio->req_q_lock); - if (zfcp_fsf_req_sbal_get(qdio)) + spin_lock_irq(&qdio->req_q_lock); + if (zfcp_qdio_sbal_get(qdio)) goto out; req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID, + SBAL_SFLAGS0_TYPE_READ, qdio->adapter->pool.erp_req); if (IS_ERR(req)) { @@ -1503,9 +1454,7 @@ int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action) } req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; - sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req); - sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; - sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; + zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); req->handler = zfcp_fsf_open_port_handler; hton24(req->qtcb->bottom.support.d_id, port->d_id); @@ -1522,7 +1471,7 @@ int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action) put_device(&port->dev); } out: - spin_unlock_bh(&qdio->req_q_lock); + spin_unlock_irq(&qdio->req_q_lock); return retval; } @@ -1535,15 +1484,13 @@ static void zfcp_fsf_close_port_handler(struct zfcp_fsf_req *req) switch (req->qtcb->header.fsf_status) { case FSF_PORT_HANDLE_NOT_VALID: - zfcp_erp_adapter_reopen(port->adapter, 0, "fscph_1", req); + zfcp_erp_adapter_reopen(port->adapter, 0, "fscph_1"); 
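/*
 * Illustrative sketch, not part of the patch: the old
 * zfcp_erp_modify_port_status(port, id, req, mask, ZFCP_SET/ZFCP_CLEAR)
 * calls are replaced throughout this series by dedicated set/clear helpers,
 * e.g. zfcp_erp_clear_port_status() used in the handler above.  A minimal
 * sketch of the pair (the real helpers additionally propagate common status
 * flags to the LUNs attached to the port):
 */
static void sketch_set_port_status(struct zfcp_port *port, u32 mask)
{
	atomic_set_mask(mask, &port->status);	/* no trace id needed anymore */
}

static void sketch_clear_port_status(struct zfcp_port *port, u32 mask)
{
	atomic_clear_mask(mask, &port->status);
}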
req->status |= ZFCP_STATUS_FSFREQ_ERROR; break; case FSF_ADAPTER_STATUS_AVAILABLE: break; case FSF_GOOD: - zfcp_erp_modify_port_status(port, "fscph_2", req, - ZFCP_STATUS_COMMON_OPEN, - ZFCP_CLEAR); + zfcp_erp_clear_port_status(port, ZFCP_STATUS_COMMON_OPEN); break; } } @@ -1555,16 +1502,16 @@ static void zfcp_fsf_close_port_handler(struct zfcp_fsf_req *req) */ int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action) { - struct qdio_buffer_element *sbale; struct zfcp_qdio *qdio = erp_action->adapter->qdio; struct zfcp_fsf_req *req; int retval = -EIO; - spin_lock_bh(&qdio->req_q_lock); - if (zfcp_fsf_req_sbal_get(qdio)) + spin_lock_irq(&qdio->req_q_lock); + if (zfcp_qdio_sbal_get(qdio)) goto out; req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT, + SBAL_SFLAGS0_TYPE_READ, qdio->adapter->pool.erp_req); if (IS_ERR(req)) { @@ -1573,9 +1520,7 @@ int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action) } req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; - sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req); - sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; - sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; + zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); req->handler = zfcp_fsf_close_port_handler; req->data = erp_action->port; @@ -1590,7 +1535,7 @@ int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action) erp_action->fsf_req_id = 0; } out: - spin_unlock_bh(&qdio->req_q_lock); + spin_unlock_irq(&qdio->req_q_lock); return retval; } @@ -1611,8 +1556,6 @@ static void zfcp_fsf_open_wka_port_handler(struct zfcp_fsf_req *req) /* fall through */ case FSF_ADAPTER_STATUS_AVAILABLE: req->status |= ZFCP_STATUS_FSFREQ_ERROR; - /* fall through */ - case FSF_ACCESS_DENIED: wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE; break; case FSF_GOOD: @@ -1632,27 +1575,25 @@ out: */ int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port) { - struct qdio_buffer_element *sbale; struct zfcp_qdio *qdio = wka_port->adapter->qdio; struct zfcp_fsf_req *req; int retval = -EIO; - spin_lock_bh(&qdio->req_q_lock); - if (zfcp_fsf_req_sbal_get(qdio)) + spin_lock_irq(&qdio->req_q_lock); + if (zfcp_qdio_sbal_get(qdio)) goto out; req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID, + SBAL_SFLAGS0_TYPE_READ, qdio->adapter->pool.erp_req); - if (unlikely(IS_ERR(req))) { + if (IS_ERR(req)) { retval = PTR_ERR(req); goto out; } req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; - sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req); - sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; - sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; + zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); req->handler = zfcp_fsf_open_wka_port_handler; hton24(req->qtcb->bottom.support.d_id, wka_port->d_id); @@ -1663,7 +1604,7 @@ int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port) if (retval) zfcp_fsf_req_free(req); out: - spin_unlock_bh(&qdio->req_q_lock); + spin_unlock_irq(&qdio->req_q_lock); return retval; } @@ -1673,7 +1614,7 @@ static void zfcp_fsf_close_wka_port_handler(struct zfcp_fsf_req *req) if (req->qtcb->header.fsf_status == FSF_PORT_HANDLE_NOT_VALID) { req->status |= ZFCP_STATUS_FSFREQ_ERROR; - zfcp_erp_adapter_reopen(wka_port->adapter, 0, "fscwph1", req); + zfcp_erp_adapter_reopen(wka_port->adapter, 0, "fscwph1"); } wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE; @@ -1687,27 +1628,25 @@ static void zfcp_fsf_close_wka_port_handler(struct zfcp_fsf_req *req) */ int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port) { - struct qdio_buffer_element *sbale; struct zfcp_qdio *qdio = wka_port->adapter->qdio; struct zfcp_fsf_req *req; int retval = -EIO; - 
spin_lock_bh(&qdio->req_q_lock); - if (zfcp_fsf_req_sbal_get(qdio)) + spin_lock_irq(&qdio->req_q_lock); + if (zfcp_qdio_sbal_get(qdio)) goto out; req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT, + SBAL_SFLAGS0_TYPE_READ, qdio->adapter->pool.erp_req); - if (unlikely(IS_ERR(req))) { + if (IS_ERR(req)) { retval = PTR_ERR(req); goto out; } req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; - sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req); - sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; - sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; + zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); req->handler = zfcp_fsf_close_wka_port_handler; req->data = wka_port; @@ -1718,7 +1657,7 @@ int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port) if (retval) zfcp_fsf_req_free(req); out: - spin_unlock_bh(&qdio->req_q_lock); + spin_unlock_irq(&qdio->req_q_lock); return retval; } @@ -1726,29 +1665,27 @@ static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req) { struct zfcp_port *port = req->data; struct fsf_qtcb_header *header = &req->qtcb->header; - struct zfcp_unit *unit; + struct scsi_device *sdev; if (req->status & ZFCP_STATUS_FSFREQ_ERROR) return; switch (header->fsf_status) { case FSF_PORT_HANDLE_NOT_VALID: - zfcp_erp_adapter_reopen(port->adapter, 0, "fscpph1", req); + zfcp_erp_adapter_reopen(port->adapter, 0, "fscpph1"); req->status |= ZFCP_STATUS_FSFREQ_ERROR; break; - case FSF_ACCESS_DENIED: - zfcp_fsf_access_denied_port(req, port); - break; case FSF_PORT_BOXED: /* can't use generic zfcp_erp_modify_port_status because * ZFCP_STATUS_COMMON_OPEN must not be reset for the port */ atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status); - read_lock(&port->unit_list_lock); - list_for_each_entry(unit, &port->unit_list, list) - atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN, - &unit->status); - read_unlock(&port->unit_list_lock); - zfcp_erp_port_boxed(port, "fscpph2", req); + shost_for_each_device(sdev, port->adapter->scsi_host) + if (sdev_to_zfcp(sdev)->port == port) + atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN, + &sdev_to_zfcp(sdev)->status); + zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ACCESS_BOXED); + zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, + "fscpph2"); req->status |= ZFCP_STATUS_FSFREQ_ERROR; break; case FSF_ADAPTER_STATUS_AVAILABLE: @@ -1765,11 +1702,10 @@ static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req) * ZFCP_STATUS_COMMON_OPEN must not be reset for the port */ atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status); - read_lock(&port->unit_list_lock); - list_for_each_entry(unit, &port->unit_list, list) - atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN, - &unit->status); - read_unlock(&port->unit_list_lock); + shost_for_each_device(sdev, port->adapter->scsi_host) + if (sdev_to_zfcp(sdev)->port == port) + atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN, + &sdev_to_zfcp(sdev)->status); break; } } @@ -1781,16 +1717,16 @@ static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req) */ int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action) { - struct qdio_buffer_element *sbale; struct zfcp_qdio *qdio = erp_action->adapter->qdio; struct zfcp_fsf_req *req; int retval = -EIO; - spin_lock_bh(&qdio->req_q_lock); - if (zfcp_fsf_req_sbal_get(qdio)) + spin_lock_irq(&qdio->req_q_lock); + if (zfcp_qdio_sbal_get(qdio)) goto out; req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PHYSICAL_PORT, + SBAL_SFLAGS0_TYPE_READ, qdio->adapter->pool.erp_req); if (IS_ERR(req)) { @@ -1799,9 +1735,7 @@ int zfcp_fsf_close_physical_port(struct 
zfcp_erp_action *erp_action) } req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; - sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req); - sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; - sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; + zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); req->data = erp_action->port; req->qtcb->header.port_handle = erp_action->port->handle; @@ -1816,69 +1750,62 @@ int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action) erp_action->fsf_req_id = 0; } out: - spin_unlock_bh(&qdio->req_q_lock); + spin_unlock_irq(&qdio->req_q_lock); return retval; } -static void zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *req) +static void zfcp_fsf_open_lun_handler(struct zfcp_fsf_req *req) { struct zfcp_adapter *adapter = req->adapter; - struct zfcp_unit *unit = req->data; + struct scsi_device *sdev = req->data; + struct zfcp_scsi_dev *zfcp_sdev; struct fsf_qtcb_header *header = &req->qtcb->header; - struct fsf_qtcb_bottom_support *bottom = &req->qtcb->bottom.support; - struct fsf_queue_designator *queue_designator = - &header->fsf_status_qual.fsf_queue_designator; - int exclusive, readwrite; + union fsf_status_qual *qual = &header->fsf_status_qual; if (req->status & ZFCP_STATUS_FSFREQ_ERROR) return; + zfcp_sdev = sdev_to_zfcp(sdev); + atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED | - ZFCP_STATUS_COMMON_ACCESS_BOXED | - ZFCP_STATUS_UNIT_SHARED | - ZFCP_STATUS_UNIT_READONLY, - &unit->status); + ZFCP_STATUS_COMMON_ACCESS_BOXED, + &zfcp_sdev->status); switch (header->fsf_status) { case FSF_PORT_HANDLE_NOT_VALID: - zfcp_erp_adapter_reopen(unit->port->adapter, 0, "fsouh_1", req); + zfcp_erp_adapter_reopen(adapter, 0, "fsouh_1"); /* fall through */ case FSF_LUN_ALREADY_OPEN: break; - case FSF_ACCESS_DENIED: - zfcp_fsf_access_denied_unit(req, unit); - atomic_clear_mask(ZFCP_STATUS_UNIT_SHARED, &unit->status); - atomic_clear_mask(ZFCP_STATUS_UNIT_READONLY, &unit->status); - break; case FSF_PORT_BOXED: - zfcp_erp_port_boxed(unit->port, "fsouh_2", req); + zfcp_erp_set_port_status(zfcp_sdev->port, + ZFCP_STATUS_COMMON_ACCESS_BOXED); + zfcp_erp_port_reopen(zfcp_sdev->port, + ZFCP_STATUS_COMMON_ERP_FAILED, "fsouh_2"); req->status |= ZFCP_STATUS_FSFREQ_ERROR; break; case FSF_LUN_SHARING_VIOLATION: - if (header->fsf_status_qual.word[0]) - dev_warn(&adapter->ccw_device->dev, + if (qual->word[0]) + dev_warn(&zfcp_sdev->port->adapter->ccw_device->dev, "LUN 0x%Lx on port 0x%Lx is already in " "use by CSS%d, MIF Image ID %x\n", - (unsigned long long)unit->fcp_lun, - (unsigned long long)unit->port->wwpn, - queue_designator->cssid, - queue_designator->hla); - else - zfcp_act_eval_err(adapter, - header->fsf_status_qual.word[2]); - zfcp_erp_unit_access_denied(unit, "fsouh_3", req); - atomic_clear_mask(ZFCP_STATUS_UNIT_SHARED, &unit->status); - atomic_clear_mask(ZFCP_STATUS_UNIT_READONLY, &unit->status); + zfcp_scsi_dev_lun(sdev), + (unsigned long long)zfcp_sdev->port->wwpn, + qual->fsf_queue_designator.cssid, + qual->fsf_queue_designator.hla); + zfcp_erp_set_lun_status(sdev, + ZFCP_STATUS_COMMON_ERP_FAILED | + ZFCP_STATUS_COMMON_ACCESS_DENIED); req->status |= ZFCP_STATUS_FSFREQ_ERROR; break; case FSF_MAXIMUM_NUMBER_OF_LUNS_EXCEEDED: dev_warn(&adapter->ccw_device->dev, "No handle is available for LUN " "0x%016Lx on port 0x%016Lx\n", - (unsigned long long)unit->fcp_lun, - (unsigned long long)unit->port->wwpn); - zfcp_erp_unit_failed(unit, "fsouh_4", req); + (unsigned long long)zfcp_scsi_dev_lun(sdev), + (unsigned long long)zfcp_sdev->port->wwpn); + zfcp_erp_set_lun_status(sdev, 
ZFCP_STATUS_COMMON_ERP_FAILED); /* fall through */ case FSF_INVALID_COMMAND_OPTION: req->status |= ZFCP_STATUS_FSFREQ_ERROR; @@ -1886,7 +1813,7 @@ static void zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *req) case FSF_ADAPTER_STATUS_AVAILABLE: switch (header->fsf_status_qual.word[0]) { case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: - zfcp_fc_test_link(unit->port); + zfcp_fc_test_link(zfcp_sdev->port); /* fall through */ case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: req->status |= ZFCP_STATUS_FSFREQ_ERROR; @@ -1895,75 +1822,30 @@ static void zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *req) break; case FSF_GOOD: - unit->handle = header->lun_handle; - atomic_set_mask(ZFCP_STATUS_COMMON_OPEN, &unit->status); - - if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE) && - (adapter->adapter_features & FSF_FEATURE_LUN_SHARING) && - !zfcp_ccw_priv_sch(adapter)) { - exclusive = (bottom->lun_access_info & - FSF_UNIT_ACCESS_EXCLUSIVE); - readwrite = (bottom->lun_access_info & - FSF_UNIT_ACCESS_OUTBOUND_TRANSFER); - - if (!exclusive) - atomic_set_mask(ZFCP_STATUS_UNIT_SHARED, - &unit->status); - - if (!readwrite) { - atomic_set_mask(ZFCP_STATUS_UNIT_READONLY, - &unit->status); - dev_info(&adapter->ccw_device->dev, - "SCSI device at LUN 0x%016Lx on port " - "0x%016Lx opened read-only\n", - (unsigned long long)unit->fcp_lun, - (unsigned long long)unit->port->wwpn); - } - - if (exclusive && !readwrite) { - dev_err(&adapter->ccw_device->dev, - "Exclusive read-only access not " - "supported (unit 0x%016Lx, " - "port 0x%016Lx)\n", - (unsigned long long)unit->fcp_lun, - (unsigned long long)unit->port->wwpn); - zfcp_erp_unit_failed(unit, "fsouh_5", req); - req->status |= ZFCP_STATUS_FSFREQ_ERROR; - zfcp_erp_unit_shutdown(unit, 0, "fsouh_6", req); - } else if (!exclusive && readwrite) { - dev_err(&adapter->ccw_device->dev, - "Shared read-write access not " - "supported (unit 0x%016Lx, port " - "0x%016Lx)\n", - (unsigned long long)unit->fcp_lun, - (unsigned long long)unit->port->wwpn); - zfcp_erp_unit_failed(unit, "fsouh_7", req); - req->status |= ZFCP_STATUS_FSFREQ_ERROR; - zfcp_erp_unit_shutdown(unit, 0, "fsouh_8", req); - } - } + zfcp_sdev->lun_handle = header->lun_handle; + atomic_set_mask(ZFCP_STATUS_COMMON_OPEN, &zfcp_sdev->status); break; } } /** - * zfcp_fsf_open_unit - open unit + * zfcp_fsf_open_lun - open LUN * @erp_action: pointer to struct zfcp_erp_action * Returns: 0 on success, error otherwise */ -int zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action) +int zfcp_fsf_open_lun(struct zfcp_erp_action *erp_action) { - struct qdio_buffer_element *sbale; struct zfcp_adapter *adapter = erp_action->adapter; struct zfcp_qdio *qdio = adapter->qdio; struct zfcp_fsf_req *req; int retval = -EIO; - spin_lock_bh(&qdio->req_q_lock); - if (zfcp_fsf_req_sbal_get(qdio)) + spin_lock_irq(&qdio->req_q_lock); + if (zfcp_qdio_sbal_get(qdio)) goto out; req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_LUN, + SBAL_SFLAGS0_TYPE_READ, adapter->pool.erp_req); if (IS_ERR(req)) { @@ -1972,14 +1854,12 @@ int zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action) } req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; - sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req); - sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; - sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; + zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); req->qtcb->header.port_handle = erp_action->port->handle; - req->qtcb->bottom.support.fcp_lun = erp_action->unit->fcp_lun; - req->handler = zfcp_fsf_open_unit_handler; - req->data = erp_action->unit; + req->qtcb->bottom.support.fcp_lun = 
zfcp_scsi_dev_lun(erp_action->sdev); + req->handler = zfcp_fsf_open_lun_handler; + req->data = erp_action->sdev; req->erp_action = erp_action; erp_action->fsf_req_id = req->req_id; @@ -1993,34 +1873,40 @@ int zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action) erp_action->fsf_req_id = 0; } out: - spin_unlock_bh(&qdio->req_q_lock); + spin_unlock_irq(&qdio->req_q_lock); return retval; } -static void zfcp_fsf_close_unit_handler(struct zfcp_fsf_req *req) +static void zfcp_fsf_close_lun_handler(struct zfcp_fsf_req *req) { - struct zfcp_unit *unit = req->data; + struct scsi_device *sdev = req->data; + struct zfcp_scsi_dev *zfcp_sdev; if (req->status & ZFCP_STATUS_FSFREQ_ERROR) return; + zfcp_sdev = sdev_to_zfcp(sdev); + switch (req->qtcb->header.fsf_status) { case FSF_PORT_HANDLE_NOT_VALID: - zfcp_erp_adapter_reopen(unit->port->adapter, 0, "fscuh_1", req); + zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0, "fscuh_1"); req->status |= ZFCP_STATUS_FSFREQ_ERROR; break; case FSF_LUN_HANDLE_NOT_VALID: - zfcp_erp_port_reopen(unit->port, 0, "fscuh_2", req); + zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fscuh_2"); req->status |= ZFCP_STATUS_FSFREQ_ERROR; break; case FSF_PORT_BOXED: - zfcp_erp_port_boxed(unit->port, "fscuh_3", req); + zfcp_erp_set_port_status(zfcp_sdev->port, + ZFCP_STATUS_COMMON_ACCESS_BOXED); + zfcp_erp_port_reopen(zfcp_sdev->port, + ZFCP_STATUS_COMMON_ERP_FAILED, "fscuh_3"); req->status |= ZFCP_STATUS_FSFREQ_ERROR; break; case FSF_ADAPTER_STATUS_AVAILABLE: switch (req->qtcb->header.fsf_status_qual.word[0]) { case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: - zfcp_fc_test_link(unit->port); + zfcp_fc_test_link(zfcp_sdev->port); /* fall through */ case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: req->status |= ZFCP_STATUS_FSFREQ_ERROR; @@ -2028,28 +1914,29 @@ static void zfcp_fsf_close_unit_handler(struct zfcp_fsf_req *req) } break; case FSF_GOOD: - atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN, &unit->status); + atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN, &zfcp_sdev->status); break; } } /** - * zfcp_fsf_close_unit - close zfcp unit - * @erp_action: pointer to struct zfcp_unit + * zfcp_fsf_close_LUN - close LUN + * @erp_action: pointer to erp_action triggering the "close LUN" * Returns: 0 on success, error otherwise */ -int zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action) +int zfcp_fsf_close_lun(struct zfcp_erp_action *erp_action) { - struct qdio_buffer_element *sbale; struct zfcp_qdio *qdio = erp_action->adapter->qdio; + struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(erp_action->sdev); struct zfcp_fsf_req *req; int retval = -EIO; - spin_lock_bh(&qdio->req_q_lock); - if (zfcp_fsf_req_sbal_get(qdio)) + spin_lock_irq(&qdio->req_q_lock); + if (zfcp_qdio_sbal_get(qdio)) goto out; req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_LUN, + SBAL_SFLAGS0_TYPE_READ, qdio->adapter->pool.erp_req); if (IS_ERR(req)) { @@ -2058,14 +1945,12 @@ int zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action) } req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; - sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req); - sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; - sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; + zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); req->qtcb->header.port_handle = erp_action->port->handle; - req->qtcb->header.lun_handle = erp_action->unit->handle; - req->handler = zfcp_fsf_close_unit_handler; - req->data = erp_action->unit; + req->qtcb->header.lun_handle = zfcp_sdev->lun_handle; + req->handler = zfcp_fsf_close_lun_handler; + req->data = erp_action->sdev; req->erp_action = erp_action; erp_action->fsf_req_id = 
req->req_id; @@ -2076,7 +1961,7 @@ int zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action) erp_action->fsf_req_id = 0; } out: - spin_unlock_bh(&qdio->req_q_lock); + spin_unlock_irq(&qdio->req_q_lock); return retval; } @@ -2091,7 +1976,7 @@ static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi) { struct fsf_qual_latency_info *lat_in; struct latency_cont *lat = NULL; - struct zfcp_unit *unit = req->unit; + struct zfcp_scsi_dev *zfcp_sdev; struct zfcp_blk_drv_data blktrc; int ticks = req->adapter->timer_ticks; @@ -2101,32 +1986,38 @@ static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi) blktrc.magic = ZFCP_BLK_DRV_DATA_MAGIC; if (req->status & ZFCP_STATUS_FSFREQ_ERROR) blktrc.flags |= ZFCP_BLK_REQ_ERROR; - blktrc.inb_usage = req->qdio_req.qdio_inb_usage; + blktrc.inb_usage = 0; blktrc.outb_usage = req->qdio_req.qdio_outb_usage; - if (req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA) { + if (req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA && + !(req->status & ZFCP_STATUS_FSFREQ_ERROR)) { + zfcp_sdev = sdev_to_zfcp(scsi->device); blktrc.flags |= ZFCP_BLK_LAT_VALID; blktrc.channel_lat = lat_in->channel_lat * ticks; blktrc.fabric_lat = lat_in->fabric_lat * ticks; switch (req->qtcb->bottom.io.data_direction) { + case FSF_DATADIR_DIF_READ_STRIP: + case FSF_DATADIR_DIF_READ_CONVERT: case FSF_DATADIR_READ: - lat = &unit->latencies.read; + lat = &zfcp_sdev->latencies.read; break; + case FSF_DATADIR_DIF_WRITE_INSERT: + case FSF_DATADIR_DIF_WRITE_CONVERT: case FSF_DATADIR_WRITE: - lat = &unit->latencies.write; + lat = &zfcp_sdev->latencies.write; break; case FSF_DATADIR_CMND: - lat = &unit->latencies.cmd; + lat = &zfcp_sdev->latencies.cmd; break; } if (lat) { - spin_lock(&unit->latencies.lock); + spin_lock(&zfcp_sdev->latencies.lock); zfcp_fsf_update_lat(&lat->channel, lat_in->channel_lat); zfcp_fsf_update_lat(&lat->fabric, lat_in->fabric_lat); lat->counter++; - spin_unlock(&unit->latencies.lock); + spin_unlock(&zfcp_sdev->latencies.lock); } } @@ -2134,219 +2025,248 @@ static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi) sizeof(blktrc)); } -static void zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *req) +static void zfcp_fsf_fcp_handler_common(struct zfcp_fsf_req *req) { - struct scsi_cmnd *scpnt; - struct fcp_resp_with_ext *fcp_rsp; - unsigned long flags; - - read_lock_irqsave(&req->adapter->abort_lock, flags); - - scpnt = req->data; - if (unlikely(!scpnt)) { - read_unlock_irqrestore(&req->adapter->abort_lock, flags); - return; - } - - if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR)) { - set_host_byte(scpnt, DID_TRANSPORT_DISRUPTED); - goto skip_fsfstatus; - } - - fcp_rsp = (struct fcp_resp_with_ext *) &req->qtcb->bottom.io.fcp_rsp; - zfcp_fc_eval_fcp_rsp(fcp_rsp, scpnt); - - zfcp_fsf_req_trace(req, scpnt); - -skip_fsfstatus: - zfcp_dbf_scsi_result(req->adapter->dbf, scpnt, req); - - scpnt->host_scribble = NULL; - (scpnt->scsi_done) (scpnt); - /* - * We must hold this lock until scsi_done has been called. - * Otherwise we may call scsi_done after abort regarding this - * command has completed. - * Note: scsi_done must not block! 
- */ - read_unlock_irqrestore(&req->adapter->abort_lock, flags); -} - -static void zfcp_fsf_send_fcp_ctm_handler(struct zfcp_fsf_req *req) -{ - struct fcp_resp_with_ext *fcp_rsp; - struct fcp_resp_rsp_info *rsp_info; - - fcp_rsp = (struct fcp_resp_with_ext *) &req->qtcb->bottom.io.fcp_rsp; - rsp_info = (struct fcp_resp_rsp_info *) &fcp_rsp[1]; - - if ((rsp_info->rsp_code != FCP_TMF_CMPL) || - (req->status & ZFCP_STATUS_FSFREQ_ERROR)) - req->status |= ZFCP_STATUS_FSFREQ_TMFUNCFAILED; -} - - -static void zfcp_fsf_send_fcp_command_handler(struct zfcp_fsf_req *req) -{ - struct zfcp_unit *unit; + struct scsi_cmnd *scmnd = req->data; + struct scsi_device *sdev = scmnd->device; + struct zfcp_scsi_dev *zfcp_sdev; struct fsf_qtcb_header *header = &req->qtcb->header; - if (unlikely(req->status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT)) - unit = req->data; - else - unit = req->unit; - if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR)) - goto skip_fsfstatus; + return; + + zfcp_sdev = sdev_to_zfcp(sdev); switch (header->fsf_status) { case FSF_HANDLE_MISMATCH: case FSF_PORT_HANDLE_NOT_VALID: - zfcp_erp_adapter_reopen(unit->port->adapter, 0, "fssfch1", req); + zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0, "fssfch1"); req->status |= ZFCP_STATUS_FSFREQ_ERROR; break; case FSF_FCPLUN_NOT_VALID: case FSF_LUN_HANDLE_NOT_VALID: - zfcp_erp_port_reopen(unit->port, 0, "fssfch2", req); + zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fssfch2"); req->status |= ZFCP_STATUS_FSFREQ_ERROR; break; case FSF_SERVICE_CLASS_NOT_SUPPORTED: zfcp_fsf_class_not_supp(req); break; - case FSF_ACCESS_DENIED: - zfcp_fsf_access_denied_unit(req, unit); - break; case FSF_DIRECTION_INDICATOR_NOT_VALID: dev_err(&req->adapter->ccw_device->dev, - "Incorrect direction %d, unit 0x%016Lx on port " + "Incorrect direction %d, LUN 0x%016Lx on port " "0x%016Lx closed\n", req->qtcb->bottom.io.data_direction, - (unsigned long long)unit->fcp_lun, - (unsigned long long)unit->port->wwpn); - zfcp_erp_adapter_shutdown(unit->port->adapter, 0, "fssfch3", - req); + (unsigned long long)zfcp_scsi_dev_lun(sdev), + (unsigned long long)zfcp_sdev->port->wwpn); + zfcp_erp_adapter_shutdown(zfcp_sdev->port->adapter, 0, + "fssfch3"); req->status |= ZFCP_STATUS_FSFREQ_ERROR; break; case FSF_CMND_LENGTH_NOT_VALID: dev_err(&req->adapter->ccw_device->dev, - "Incorrect CDB length %d, unit 0x%016Lx on " + "Incorrect CDB length %d, LUN 0x%016Lx on " "port 0x%016Lx closed\n", req->qtcb->bottom.io.fcp_cmnd_length, - (unsigned long long)unit->fcp_lun, - (unsigned long long)unit->port->wwpn); - zfcp_erp_adapter_shutdown(unit->port->adapter, 0, "fssfch4", - req); + (unsigned long long)zfcp_scsi_dev_lun(sdev), + (unsigned long long)zfcp_sdev->port->wwpn); + zfcp_erp_adapter_shutdown(zfcp_sdev->port->adapter, 0, + "fssfch4"); req->status |= ZFCP_STATUS_FSFREQ_ERROR; break; case FSF_PORT_BOXED: - zfcp_erp_port_boxed(unit->port, "fssfch5", req); + zfcp_erp_set_port_status(zfcp_sdev->port, + ZFCP_STATUS_COMMON_ACCESS_BOXED); + zfcp_erp_port_reopen(zfcp_sdev->port, + ZFCP_STATUS_COMMON_ERP_FAILED, "fssfch5"); req->status |= ZFCP_STATUS_FSFREQ_ERROR; break; case FSF_LUN_BOXED: - zfcp_erp_unit_boxed(unit, "fssfch6", req); + zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ACCESS_BOXED); + zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED, + "fssfch6"); req->status |= ZFCP_STATUS_FSFREQ_ERROR; break; case FSF_ADAPTER_STATUS_AVAILABLE: if (header->fsf_status_qual.word[0] == FSF_SQ_INVOKE_LINK_TEST_PROCEDURE) - zfcp_fc_test_link(unit->port); + zfcp_fc_test_link(zfcp_sdev->port); 
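		/*
		 * Annotation (not part of the original patch): for the
		 * "adapter status available" qualifier the common handler
		 * at most triggers a link test on the port; the request is
		 * still marked failed below, so the SCSI midlayer retries
		 * the command instead of completing it with stale data.
		 */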
req->status |= ZFCP_STATUS_FSFREQ_ERROR; break; } +} + +static void zfcp_fsf_fcp_cmnd_handler(struct zfcp_fsf_req *req) +{ + struct scsi_cmnd *scpnt; + struct fcp_resp_with_ext *fcp_rsp; + unsigned long flags; + + read_lock_irqsave(&req->adapter->abort_lock, flags); + + scpnt = req->data; + if (unlikely(!scpnt)) { + read_unlock_irqrestore(&req->adapter->abort_lock, flags); + return; + } + + zfcp_fsf_fcp_handler_common(req); + + if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR)) { + set_host_byte(scpnt, DID_TRANSPORT_DISRUPTED); + goto skip_fsfstatus; + } + + switch (req->qtcb->header.fsf_status) { + case FSF_INCONSISTENT_PROT_DATA: + case FSF_INVALID_PROT_PARM: + set_host_byte(scpnt, DID_ERROR); + goto skip_fsfstatus; + case FSF_BLOCK_GUARD_CHECK_FAILURE: + zfcp_scsi_dif_sense_error(scpnt, 0x1); + goto skip_fsfstatus; + case FSF_APP_TAG_CHECK_FAILURE: + zfcp_scsi_dif_sense_error(scpnt, 0x2); + goto skip_fsfstatus; + case FSF_REF_TAG_CHECK_FAILURE: + zfcp_scsi_dif_sense_error(scpnt, 0x3); + goto skip_fsfstatus; + } + fcp_rsp = (struct fcp_resp_with_ext *) &req->qtcb->bottom.io.fcp_rsp; + zfcp_fc_eval_fcp_rsp(fcp_rsp, scpnt); + skip_fsfstatus: - if (req->status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT) - zfcp_fsf_send_fcp_ctm_handler(req); - else { - zfcp_fsf_send_fcp_command_task_handler(req); - req->unit = NULL; - put_device(&unit->dev); + zfcp_fsf_req_trace(req, scpnt); + zfcp_dbf_scsi_result(scpnt, req); + + scpnt->host_scribble = NULL; + (scpnt->scsi_done) (scpnt); + /* + * We must hold this lock until scsi_done has been called. + * Otherwise we may call scsi_done after abort regarding this + * command has completed. + * Note: scsi_done must not block! + */ + read_unlock_irqrestore(&req->adapter->abort_lock, flags); +} + +static int zfcp_fsf_set_data_dir(struct scsi_cmnd *scsi_cmnd, u32 *data_dir) +{ + switch (scsi_get_prot_op(scsi_cmnd)) { + case SCSI_PROT_NORMAL: + switch (scsi_cmnd->sc_data_direction) { + case DMA_NONE: + *data_dir = FSF_DATADIR_CMND; + break; + case DMA_FROM_DEVICE: + *data_dir = FSF_DATADIR_READ; + break; + case DMA_TO_DEVICE: + *data_dir = FSF_DATADIR_WRITE; + break; + case DMA_BIDIRECTIONAL: + return -EINVAL; + } + break; + + case SCSI_PROT_READ_STRIP: + *data_dir = FSF_DATADIR_DIF_READ_STRIP; + break; + case SCSI_PROT_WRITE_INSERT: + *data_dir = FSF_DATADIR_DIF_WRITE_INSERT; + break; + case SCSI_PROT_READ_PASS: + *data_dir = FSF_DATADIR_DIF_READ_CONVERT; + break; + case SCSI_PROT_WRITE_PASS: + *data_dir = FSF_DATADIR_DIF_WRITE_CONVERT; + break; + default: + return -EINVAL; } + + return 0; } /** - * zfcp_fsf_send_fcp_command_task - initiate an FCP command (for a SCSI command) - * @unit: unit where command is sent to + * zfcp_fsf_fcp_cmnd - initiate an FCP command (for a SCSI command) * @scsi_cmnd: scsi command to be sent */ -int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit, - struct scsi_cmnd *scsi_cmnd) +int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *scsi_cmnd) { struct zfcp_fsf_req *req; struct fcp_cmnd *fcp_cmnd; - unsigned int sbtype = SBAL_FLAGS0_TYPE_READ; - int real_bytes, retval = -EIO; - struct zfcp_adapter *adapter = unit->port->adapter; + u8 sbtype = SBAL_SFLAGS0_TYPE_READ; + int retval = -EIO; + struct scsi_device *sdev = scsi_cmnd->device; + struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); + struct zfcp_adapter *adapter = zfcp_sdev->port->adapter; struct zfcp_qdio *qdio = adapter->qdio; + struct fsf_qtcb_bottom_io *io; + unsigned long flags; - if (unlikely(!(atomic_read(&unit->status) & + if (unlikely(!(atomic_read(&zfcp_sdev->status) & 
ZFCP_STATUS_COMMON_UNBLOCKED))) return -EBUSY; - spin_lock(&qdio->req_q_lock); - if (atomic_read(&qdio->req_q.count) <= 0) { + spin_lock_irqsave(&qdio->req_q_lock, flags); + if (atomic_read(&qdio->req_q_free) <= 0) { atomic_inc(&qdio->req_q_full); goto out; } + if (scsi_cmnd->sc_data_direction == DMA_TO_DEVICE) + sbtype = SBAL_SFLAGS0_TYPE_WRITE; + req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND, - adapter->pool.scsi_req); + sbtype, adapter->pool.scsi_req); if (IS_ERR(req)) { retval = PTR_ERR(req); goto out; } + scsi_cmnd->host_scribble = (unsigned char *) req->req_id; + + io = &req->qtcb->bottom.io; req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; - get_device(&unit->dev); - req->unit = unit; req->data = scsi_cmnd; - req->handler = zfcp_fsf_send_fcp_command_handler; - req->qtcb->header.lun_handle = unit->handle; - req->qtcb->header.port_handle = unit->port->handle; - req->qtcb->bottom.io.service_class = FSF_CLASS_3; - req->qtcb->bottom.io.fcp_cmnd_length = FCP_CMND_LEN; + req->handler = zfcp_fsf_fcp_cmnd_handler; + req->qtcb->header.lun_handle = zfcp_sdev->lun_handle; + req->qtcb->header.port_handle = zfcp_sdev->port->handle; + io->service_class = FSF_CLASS_3; + io->fcp_cmnd_length = FCP_CMND_LEN; - scsi_cmnd->host_scribble = (unsigned char *) req->req_id; + if (scsi_get_prot_op(scsi_cmnd) != SCSI_PROT_NORMAL) { + io->data_block_length = scsi_cmnd->device->sector_size; + io->ref_tag_value = scsi_get_lba(scsi_cmnd) & 0xFFFFFFFF; + } - /* - * set depending on data direction: - * data direction bits in SBALE (SB Type) - * data direction bits in QTCB - */ - switch (scsi_cmnd->sc_data_direction) { - case DMA_NONE: - req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND; - break; - case DMA_FROM_DEVICE: - req->qtcb->bottom.io.data_direction = FSF_DATADIR_READ; - break; - case DMA_TO_DEVICE: - req->qtcb->bottom.io.data_direction = FSF_DATADIR_WRITE; - sbtype = SBAL_FLAGS0_TYPE_WRITE; - break; - case DMA_BIDIRECTIONAL: + if (zfcp_fsf_set_data_dir(scsi_cmnd, &io->data_direction)) goto failed_scsi_cmnd; - } fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd; - zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd); + zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd, 0); - real_bytes = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sbtype, - scsi_sglist(scsi_cmnd), - FSF_MAX_SBALS_PER_REQ); - if (unlikely(real_bytes < 0)) { - if (req->qdio_req.sbal_number >= FSF_MAX_SBALS_PER_REQ) { - dev_err(&adapter->ccw_device->dev, - "Oversize data package, unit 0x%016Lx " - "on port 0x%016Lx closed\n", - (unsigned long long)unit->fcp_lun, - (unsigned long long)unit->port->wwpn); - zfcp_erp_unit_shutdown(unit, 0, "fssfct1", req); - retval = -EINVAL; - } - goto failed_scsi_cmnd; + if (scsi_prot_sg_count(scsi_cmnd)) { + zfcp_qdio_set_data_div(qdio, &req->qdio_req, + scsi_prot_sg_count(scsi_cmnd)); + retval = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, + scsi_prot_sglist(scsi_cmnd)); + if (retval) + goto failed_scsi_cmnd; + io->prot_data_length = zfcp_qdio_real_bytes( + scsi_prot_sglist(scsi_cmnd)); } + retval = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, + scsi_sglist(scsi_cmnd)); + if (unlikely(retval)) + goto failed_scsi_cmnd; + + zfcp_qdio_set_sbale_last(adapter->qdio, &req->qdio_req); + if (zfcp_adapter_multi_buffer_active(adapter)) + zfcp_qdio_set_scount(qdio, &req->qdio_req); + retval = zfcp_fsf_req_send(req); if (unlikely(retval)) goto failed_scsi_cmnd; @@ -2354,36 +2274,52 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit, goto out; failed_scsi_cmnd: - put_device(&unit->dev); zfcp_fsf_req_free(req); 
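	/*
	 * Annotation (not part of the original patch): on the error path
	 * the FSF request is freed first, then the req_id reference kept
	 * in host_scribble is cleared so a later abort handler cannot
	 * look up the stale request.
	 */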
scsi_cmnd->host_scribble = NULL; out: - spin_unlock(&qdio->req_q_lock); + spin_unlock_irqrestore(&qdio->req_q_lock, flags); return retval; } +static void zfcp_fsf_fcp_task_mgmt_handler(struct zfcp_fsf_req *req) +{ + struct fcp_resp_with_ext *fcp_rsp; + struct fcp_resp_rsp_info *rsp_info; + + zfcp_fsf_fcp_handler_common(req); + + fcp_rsp = (struct fcp_resp_with_ext *) &req->qtcb->bottom.io.fcp_rsp; + rsp_info = (struct fcp_resp_rsp_info *) &fcp_rsp[1]; + + if ((rsp_info->rsp_code != FCP_TMF_CMPL) || + (req->status & ZFCP_STATUS_FSFREQ_ERROR)) + req->status |= ZFCP_STATUS_FSFREQ_TMFUNCFAILED; +} + /** - * zfcp_fsf_send_fcp_ctm - send SCSI task management command - * @unit: pointer to struct zfcp_unit + * zfcp_fsf_fcp_task_mgmt - send SCSI task management command + * @scmnd: SCSI command to send the task management command for * @tm_flags: unsigned byte for task management flags * Returns: on success pointer to struct fsf_req, NULL otherwise */ -struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *unit, u8 tm_flags) +struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_cmnd *scmnd, + u8 tm_flags) { - struct qdio_buffer_element *sbale; struct zfcp_fsf_req *req = NULL; struct fcp_cmnd *fcp_cmnd; - struct zfcp_qdio *qdio = unit->port->adapter->qdio; + struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scmnd->device); + struct zfcp_qdio *qdio = zfcp_sdev->port->adapter->qdio; - if (unlikely(!(atomic_read(&unit->status) & + if (unlikely(!(atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_UNBLOCKED))) return NULL; - spin_lock_bh(&qdio->req_q_lock); - if (zfcp_fsf_req_sbal_get(qdio)) + spin_lock_irq(&qdio->req_q_lock); + if (zfcp_qdio_sbal_get(qdio)) goto out; req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND, + SBAL_SFLAGS0_TYPE_WRITE, qdio->adapter->pool.scsi_req); if (IS_ERR(req)) { @@ -2391,21 +2327,18 @@ struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *unit, u8 tm_flags) goto out; } - req->status |= ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT; - req->data = unit; - req->handler = zfcp_fsf_send_fcp_command_handler; - req->qtcb->header.lun_handle = unit->handle; - req->qtcb->header.port_handle = unit->port->handle; + req->data = scmnd; + req->handler = zfcp_fsf_fcp_task_mgmt_handler; + req->qtcb->header.lun_handle = zfcp_sdev->lun_handle; + req->qtcb->header.port_handle = zfcp_sdev->port->handle; req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND; req->qtcb->bottom.io.service_class = FSF_CLASS_3; req->qtcb->bottom.io.fcp_cmnd_length = FCP_CMND_LEN; - sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req); - sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE; - sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; + zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd; - zfcp_fc_fcp_tm(fcp_cmnd, unit->device, tm_flags); + zfcp_fc_scsi_to_fcp(fcp_cmnd, scmnd, tm_flags); zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT); if (!zfcp_fsf_req_send(req)) @@ -2414,82 +2347,10 @@ struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *unit, u8 tm_flags) zfcp_fsf_req_free(req); req = NULL; out: - spin_unlock_bh(&qdio->req_q_lock); + spin_unlock_irq(&qdio->req_q_lock); return req; } -static void zfcp_fsf_control_file_handler(struct zfcp_fsf_req *req) -{ -} - -/** - * zfcp_fsf_control_file - control file upload/download - * @adapter: pointer to struct zfcp_adapter - * @fsf_cfdc: pointer to struct zfcp_fsf_cfdc - * Returns: on success pointer to struct zfcp_fsf_req, NULL otherwise - */ -struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter, - 
struct zfcp_fsf_cfdc *fsf_cfdc) -{ - struct qdio_buffer_element *sbale; - struct zfcp_qdio *qdio = adapter->qdio; - struct zfcp_fsf_req *req = NULL; - struct fsf_qtcb_bottom_support *bottom; - int direction, retval = -EIO, bytes; - - if (!(adapter->adapter_features & FSF_FEATURE_CFDC)) - return ERR_PTR(-EOPNOTSUPP); - - switch (fsf_cfdc->command) { - case FSF_QTCB_DOWNLOAD_CONTROL_FILE: - direction = SBAL_FLAGS0_TYPE_WRITE; - break; - case FSF_QTCB_UPLOAD_CONTROL_FILE: - direction = SBAL_FLAGS0_TYPE_READ; - break; - default: - return ERR_PTR(-EINVAL); - } - - spin_lock_bh(&qdio->req_q_lock); - if (zfcp_fsf_req_sbal_get(qdio)) - goto out; - - req = zfcp_fsf_req_create(qdio, fsf_cfdc->command, NULL); - if (IS_ERR(req)) { - retval = -EPERM; - goto out; - } - - req->handler = zfcp_fsf_control_file_handler; - - sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req); - sbale[0].flags |= direction; - - bottom = &req->qtcb->bottom.support; - bottom->operation_subtype = FSF_CFDC_OPERATION_SUBTYPE; - bottom->option = fsf_cfdc->option; - - bytes = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, - direction, fsf_cfdc->sg, - FSF_MAX_SBALS_PER_REQ); - if (bytes != ZFCP_CFDC_MAX_SIZE) { - zfcp_fsf_req_free(req); - goto out; - } - - zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT); - retval = zfcp_fsf_req_send(req); -out: - spin_unlock_bh(&qdio->req_q_lock); - - if (!retval) { - wait_for_completion(&req->completion); - return req; - } - return ERR_PTR(retval); -} - /** * zfcp_fsf_reqid_check - validate req_id contained in SBAL returned by QDIO * @adapter: pointer to struct zfcp_adapter @@ -2498,7 +2359,7 @@ out: void zfcp_fsf_reqid_check(struct zfcp_qdio *qdio, int sbal_idx) { struct zfcp_adapter *adapter = qdio->adapter; - struct qdio_buffer *sbal = qdio->resp_q.sbal[sbal_idx]; + struct qdio_buffer *sbal = qdio->res_q[sbal_idx]; struct qdio_buffer_element *sbale; struct zfcp_fsf_req *fsf_req; unsigned long req_id; @@ -2510,20 +2371,20 @@ void zfcp_fsf_reqid_check(struct zfcp_qdio *qdio, int sbal_idx) req_id = (unsigned long) sbale->addr; fsf_req = zfcp_reqlist_find_rm(adapter->req_list, req_id); - if (!fsf_req) + if (!fsf_req) { /* * Unknown request means that we have potentially memory * corruption and must stop the machine immediately. */ + zfcp_qdio_siosl(adapter); panic("error: unknown req_id (%lx) on adapter %s.\n", req_id, dev_name(&adapter->ccw_device->dev)); + } fsf_req->qdio_req.sbal_response = sbal_idx; - fsf_req->qdio_req.qdio_inb_usage = - atomic_read(&qdio->resp_q.count); zfcp_fsf_req_complete(fsf_req); - if (likely(sbale->flags & SBAL_FLAGS_LAST_ENTRY)) + if (likely(sbale->eflags & SBAL_EFLAGS_LAST_ENTRY)) break; } } diff --git a/drivers/s390/scsi/zfcp_fsf.h b/drivers/s390/scsi/zfcp_fsf.h index b3de682b64c..57ae3ae1046 100644 --- a/drivers/s390/scsi/zfcp_fsf.h +++ b/drivers/s390/scsi/zfcp_fsf.h @@ -3,7 +3,7 @@ * * Interface to the FSF support functions. * - * Copyright IBM Corporation 2002, 2009 + * Copyright IBM Corp. 
2002, 2010 */ #ifndef FSF_H @@ -36,13 +36,6 @@ #define FSF_CONFIG_COMMAND 0x00000003 #define FSF_PORT_COMMAND 0x00000004 -/* FSF control file upload/download operations' subtype and options */ -#define FSF_CFDC_OPERATION_SUBTYPE 0x00020001 -#define FSF_CFDC_OPTION_NORMAL_MODE 0x00000000 -#define FSF_CFDC_OPTION_FORCE 0x00000001 -#define FSF_CFDC_OPTION_FULL_ACCESS 0x00000002 -#define FSF_CFDC_OPTION_RESTRICTED_ACCESS 0x00000004 - /* FSF protocol states */ #define FSF_PROT_GOOD 0x00000001 #define FSF_PROT_QTCB_VERSION_ERROR 0x00000010 @@ -64,7 +57,6 @@ #define FSF_HANDLE_MISMATCH 0x00000005 #define FSF_SERVICE_CLASS_NOT_SUPPORTED 0x00000006 #define FSF_FCPLUN_NOT_VALID 0x00000009 -#define FSF_ACCESS_DENIED 0x00000010 #define FSF_LUN_SHARING_VIOLATION 0x00000012 #define FSF_FCP_COMMAND_DOES_NOT_EXIST 0x00000022 #define FSF_DIRECTION_INDICATOR_NOT_VALID 0x00000030 @@ -80,11 +72,15 @@ #define FSF_REQUEST_SIZE_TOO_LARGE 0x00000061 #define FSF_RESPONSE_SIZE_TOO_LARGE 0x00000062 #define FSF_SBAL_MISMATCH 0x00000063 +#define FSF_INCONSISTENT_PROT_DATA 0x00000070 +#define FSF_INVALID_PROT_PARM 0x00000071 +#define FSF_BLOCK_GUARD_CHECK_FAILURE 0x00000081 +#define FSF_APP_TAG_CHECK_FAILURE 0x00000082 +#define FSF_REF_TAG_CHECK_FAILURE 0x00000083 #define FSF_ADAPTER_STATUS_AVAILABLE 0x000000AD #define FSF_UNKNOWN_COMMAND 0x000000E2 #define FSF_UNKNOWN_OP_SUBTYPE 0x000000E3 #define FSF_INVALID_COMMAND_OPTION 0x000000E5 -/* #define FSF_ERROR 0x000000FF */ #define FSF_PROT_STATUS_QUAL_SIZE 16 #define FSF_STATUS_QUALIFIER_SIZE 16 @@ -126,7 +122,6 @@ #define FSF_STATUS_READ_LINK_DOWN 0x00000005 #define FSF_STATUS_READ_LINK_UP 0x00000006 #define FSF_STATUS_READ_NOTIFICATION_LOST 0x00000009 -#define FSF_STATUS_READ_CFDC_UPDATED 0x0000000A #define FSF_STATUS_READ_FEATURE_UPDATE_ALERT 0x0000000C /* status subtypes for link down */ @@ -136,7 +131,6 @@ /* status subtypes for unsolicited status notification lost */ #define FSF_STATUS_READ_SUB_INCOMING_ELS 0x00000001 -#define FSF_STATUS_READ_SUB_ACT_UPDATED 0x00000020 /* topologie that is detected by the adapter */ #define FSF_TOPO_P2P 0x00000001 @@ -147,24 +141,28 @@ #define FSF_DATADIR_WRITE 0x00000001 #define FSF_DATADIR_READ 0x00000002 #define FSF_DATADIR_CMND 0x00000004 +#define FSF_DATADIR_DIF_WRITE_INSERT 0x00000009 +#define FSF_DATADIR_DIF_READ_STRIP 0x0000000a +#define FSF_DATADIR_DIF_WRITE_CONVERT 0x0000000b +#define FSF_DATADIR_DIF_READ_CONVERT 0X0000000c + +/* data protection control flags */ +#define FSF_APP_TAG_CHECK_ENABLE 0x10 /* fc service class */ #define FSF_CLASS_3 0x00000003 -/* SBAL chaining */ -#define FSF_MAX_SBALS_PER_REQ 36 - /* logging space behind QTCB */ #define FSF_QTCB_LOG_SIZE 1024 /* channel features */ -#define FSF_FEATURE_CFDC 0x00000002 -#define FSF_FEATURE_LUN_SHARING 0x00000004 #define FSF_FEATURE_NOTIFICATION_LOST 0x00000008 #define FSF_FEATURE_HBAAPI_MANAGEMENT 0x00000010 #define FSF_FEATURE_ELS_CT_CHAINED_SBALS 0x00000020 #define FSF_FEATURE_UPDATE_ALERT 0x00000100 #define FSF_FEATURE_MEASUREMENT_DATA 0x00000200 +#define FSF_FEATURE_DIF_PROT_TYPE1 0x00010000 +#define FSF_FEATURE_DIX_PROT_TCPIP 0x00020000 /* host connection features */ #define FSF_FEATURE_NPIV_MODE 0x00000001 @@ -172,20 +170,6 @@ /* option */ #define FSF_OPEN_LUN_SUPPRESS_BOXING 0x00000001 -/* open LUN access flags*/ -#define FSF_UNIT_ACCESS_EXCLUSIVE 0x02000000 -#define FSF_UNIT_ACCESS_OUTBOUND_TRANSFER 0x10000000 - -/* FSF interface for CFDC */ -#define ZFCP_CFDC_MAX_SIZE 127 * 1024 -#define ZFCP_CFDC_PAGES PFN_UP(ZFCP_CFDC_MAX_SIZE) - -struct 
zfcp_fsf_cfdc { - struct scatterlist sg[ZFCP_CFDC_PAGES]; - u32 command; - u32 option; -}; - struct fsf_queue_designator { u8 cssid; u8 chpid; @@ -319,9 +303,14 @@ struct fsf_qtcb_header { struct fsf_qtcb_bottom_io { u32 data_direction; u32 service_class; - u8 res1[8]; + u8 res1; + u8 data_prot_flags; + u16 app_tag_value; + u32 ref_tag_value; u32 fcp_cmnd_length; - u8 res2[12]; + u32 data_block_length; + u32 prot_data_length; + u8 res2[4]; u8 fcp_cmnd[FSF_FCP_CMND_SIZE]; u8 fcp_rsp[FSF_FCP_RSP_SIZE]; u8 res3[64]; @@ -347,6 +336,8 @@ struct fsf_qtcb_bottom_support { u8 els[256]; } __attribute__ ((packed)); +#define ZFCP_FSF_TIMER_INT_MASK 0x3FFF + struct fsf_qtcb_bottom_config { u32 lic_version; u32 feature_selection; @@ -361,7 +352,7 @@ struct fsf_qtcb_bottom_config { u32 adapter_type; u8 res0; u8 peer_d_id[3]; - u8 res1[2]; + u16 status_read_buf_num; u16 timer_interval; u8 res2[9]; u8 s_id[3]; diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c index 6479273a309..06025cdaa4a 100644 --- a/drivers/s390/scsi/zfcp_qdio.c +++ b/drivers/s390/scsi/zfcp_qdio.c @@ -3,17 +3,23 @@ * * Setup and helper functions to access QDIO. * - * Copyright IBM Corporation 2002, 2009 + * Copyright IBM Corp. 2002, 2010 */ #define KMSG_COMPONENT "zfcp" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt +#include <linux/slab.h> +#include <linux/module.h> #include "zfcp_ext.h" #include "zfcp_qdio.h" #define QBUFF_PER_PAGE (PAGE_SIZE / sizeof(struct qdio_buffer)) +static bool enable_multibuffer = 1; +module_param_named(datarouter, enable_multibuffer, bool, 0400); +MODULE_PARM_DESC(datarouter, "Enable hardware data router support (default on)"); + static int zfcp_qdio_buffers_enqueue(struct qdio_buffer **sbal) { int pos; @@ -29,15 +35,21 @@ static int zfcp_qdio_buffers_enqueue(struct qdio_buffer **sbal) return 0; } -static void zfcp_qdio_handler_error(struct zfcp_qdio *qdio, char *id) +static void zfcp_qdio_handler_error(struct zfcp_qdio *qdio, char *id, + unsigned int qdio_err) { struct zfcp_adapter *adapter = qdio->adapter; dev_warn(&adapter->ccw_device->dev, "A QDIO problem occurred\n"); + if (qdio_err & QDIO_ERROR_SLSB_STATE) { + zfcp_qdio_siosl(adapter); + zfcp_erp_adapter_shutdown(adapter, 0, id); + return; + } zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED | - ZFCP_STATUS_COMMON_ERP_FAILED, id, NULL); + ZFCP_STATUS_COMMON_ERP_FAILED, id); } static void zfcp_qdio_zero_sbals(struct qdio_buffer *sbal[], int first, int cnt) @@ -54,72 +66,67 @@ static void zfcp_qdio_zero_sbals(struct qdio_buffer *sbal[], int first, int cnt) static inline void zfcp_qdio_account(struct zfcp_qdio *qdio) { unsigned long long now, span; - int free, used; + int used; - spin_lock(&qdio->stat_lock); - now = get_clock_monotonic(); + now = get_tod_clock_monotonic(); span = (now - qdio->req_q_time) >> 12; - free = atomic_read(&qdio->req_q.count); - used = QDIO_MAX_BUFFERS_PER_Q - free; + used = QDIO_MAX_BUFFERS_PER_Q - atomic_read(&qdio->req_q_free); qdio->req_q_util += used * span; qdio->req_q_time = now; - spin_unlock(&qdio->stat_lock); } static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int qdio_err, - int queue_no, int first, int count, + int queue_no, int idx, int count, unsigned long parm) { struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm; - struct zfcp_qdio_queue *queue = &qdio->req_q; if (unlikely(qdio_err)) { - zfcp_dbf_hba_qdio(qdio->adapter->dbf, qdio_err, first, - count); - zfcp_qdio_handler_error(qdio, "qdireq1"); + zfcp_qdio_handler_error(qdio, "qdireq1", qdio_err); return; } /* 
cleanup all SBALs being program-owned now */ - zfcp_qdio_zero_sbals(queue->sbal, first, count); + zfcp_qdio_zero_sbals(qdio->req_q, idx, count); + spin_lock_irq(&qdio->stat_lock); zfcp_qdio_account(qdio); - atomic_add(count, &queue->count); + spin_unlock_irq(&qdio->stat_lock); + atomic_add(count, &qdio->req_q_free); wake_up(&qdio->req_q_wq); } -static void zfcp_qdio_resp_put_back(struct zfcp_qdio *qdio, int processed) -{ - struct zfcp_qdio_queue *queue = &qdio->resp_q; - struct ccw_device *cdev = qdio->adapter->ccw_device; - u8 count, start = queue->first; - unsigned int retval; - - count = atomic_read(&queue->count) + processed; - - retval = do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, start, count); - - if (unlikely(retval)) { - atomic_set(&queue->count, count); - zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdrpb_1", NULL); - } else { - queue->first += count; - queue->first %= QDIO_MAX_BUFFERS_PER_Q; - atomic_set(&queue->count, 0); - } -} - static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err, - int queue_no, int first, int count, + int queue_no, int idx, int count, unsigned long parm) { struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm; - int sbal_idx, sbal_no; + struct zfcp_adapter *adapter = qdio->adapter; + int sbal_no, sbal_idx; if (unlikely(qdio_err)) { - zfcp_dbf_hba_qdio(qdio->adapter->dbf, qdio_err, first, - count); - zfcp_qdio_handler_error(qdio, "qdires1"); + if (zfcp_adapter_multi_buffer_active(adapter)) { + void *pl[ZFCP_QDIO_MAX_SBALS_PER_REQ + 1]; + struct qdio_buffer_element *sbale; + u64 req_id; + u8 scount; + + memset(pl, 0, + ZFCP_QDIO_MAX_SBALS_PER_REQ * sizeof(void *)); + sbale = qdio->res_q[idx]->element; + req_id = (u64) sbale->addr; + scount = min(sbale->scount + 1, + ZFCP_QDIO_MAX_SBALS_PER_REQ + 1); + /* incl. 
signaling SBAL */ + + for (sbal_no = 0; sbal_no < scount; sbal_no++) { + sbal_idx = (idx + sbal_no) % + QDIO_MAX_BUFFERS_PER_Q; + pl[sbal_no] = qdio->res_q[sbal_idx]; + } + zfcp_dbf_hba_def_err(adapter, req_id, scount, pl); + } + zfcp_qdio_handler_error(qdio, "qdires1", qdio_err); return; } @@ -128,36 +135,26 @@ static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err, * returned by QDIO layer */ for (sbal_no = 0; sbal_no < count; sbal_no++) { - sbal_idx = (first + sbal_no) % QDIO_MAX_BUFFERS_PER_Q; + sbal_idx = (idx + sbal_no) % QDIO_MAX_BUFFERS_PER_Q; /* go through all SBALEs of SBAL */ zfcp_fsf_reqid_check(qdio, sbal_idx); } /* - * put range of SBALs back to response queue - * (including SBALs which have already been free before) + * put SBALs back to response queue */ - zfcp_qdio_resp_put_back(qdio, count); -} - -static void zfcp_qdio_sbal_limit(struct zfcp_qdio *qdio, - struct zfcp_qdio_req *q_req, int max_sbals) -{ - int count = atomic_read(&qdio->req_q.count); - count = min(count, max_sbals); - q_req->sbal_limit = (q_req->sbal_first + count - 1) - % QDIO_MAX_BUFFERS_PER_Q; + if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, idx, count)) + zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdires2"); } static struct qdio_buffer_element * -zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req, - unsigned long sbtype) +zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req) { struct qdio_buffer_element *sbale; /* set last entry flag in current SBALE of current SBAL */ sbale = zfcp_qdio_sbale_curr(qdio, q_req); - sbale->flags |= SBAL_FLAGS_LAST_ENTRY; + sbale->eflags |= SBAL_EFLAGS_LAST_ENTRY; /* don't exceed last allowed SBAL */ if (q_req->sbal_last == q_req->sbal_limit) @@ -165,7 +162,7 @@ zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req, /* set chaining flag in first SBALE of current SBAL */ sbale = zfcp_qdio_sbale_req(qdio, q_req); - sbale->flags |= SBAL_FLAGS0_MORE_SBALS; + sbale->sflags |= SBAL_SFLAGS0_MORE_SBALS; /* calculate index of next SBAL */ q_req->sbal_last++; @@ -173,102 +170,96 @@ zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req, /* keep this requests number of SBALs up-to-date */ q_req->sbal_number++; + BUG_ON(q_req->sbal_number > ZFCP_QDIO_MAX_SBALS_PER_REQ); /* start at first SBALE of new SBAL */ q_req->sbale_curr = 0; /* set storage-block type for new SBAL */ sbale = zfcp_qdio_sbale_curr(qdio, q_req); - sbale->flags |= sbtype; + sbale->sflags |= q_req->sbtype; return sbale; } static struct qdio_buffer_element * -zfcp_qdio_sbale_next(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req, - unsigned int sbtype) +zfcp_qdio_sbale_next(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req) { - if (q_req->sbale_curr == ZFCP_LAST_SBALE_PER_SBAL) - return zfcp_qdio_sbal_chain(qdio, q_req, sbtype); + if (q_req->sbale_curr == qdio->max_sbale_per_sbal - 1) + return zfcp_qdio_sbal_chain(qdio, q_req); q_req->sbale_curr++; return zfcp_qdio_sbale_curr(qdio, q_req); } -static void zfcp_qdio_undo_sbals(struct zfcp_qdio *qdio, - struct zfcp_qdio_req *q_req) -{ - struct qdio_buffer **sbal = qdio->req_q.sbal; - int first = q_req->sbal_first; - int last = q_req->sbal_last; - int count = (last - first + QDIO_MAX_BUFFERS_PER_Q) % - QDIO_MAX_BUFFERS_PER_Q + 1; - zfcp_qdio_zero_sbals(sbal, first, count); -} - -static int zfcp_qdio_fill_sbals(struct zfcp_qdio *qdio, - struct zfcp_qdio_req *q_req, - unsigned int sbtype, void *start_addr, - unsigned int total_length) +/** + * zfcp_qdio_sbals_from_sg - fill 
SBALs from scatter-gather list + * @qdio: pointer to struct zfcp_qdio + * @q_req: pointer to struct zfcp_qdio_req + * @sg: scatter-gather list + * @max_sbals: upper bound for number of SBALs to be used + * Returns: zero or -EINVAL on error + */ +int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req, + struct scatterlist *sg) { struct qdio_buffer_element *sbale; - unsigned long remaining, length; - void *addr; - /* split segment up */ - for (addr = start_addr, remaining = total_length; remaining > 0; - addr += length, remaining -= length) { - sbale = zfcp_qdio_sbale_next(qdio, q_req, sbtype); + /* set storage-block type for this request */ + sbale = zfcp_qdio_sbale_req(qdio, q_req); + sbale->sflags |= q_req->sbtype; + + for (; sg; sg = sg_next(sg)) { + sbale = zfcp_qdio_sbale_next(qdio, q_req); if (!sbale) { atomic_inc(&qdio->req_q_full); - zfcp_qdio_undo_sbals(qdio, q_req); + zfcp_qdio_zero_sbals(qdio->req_q, q_req->sbal_first, + q_req->sbal_number); return -EINVAL; } - - /* new piece must not exceed next page boundary */ - length = min(remaining, - (PAGE_SIZE - ((unsigned long)addr & - (PAGE_SIZE - 1)))); - sbale->addr = addr; - sbale->length = length; + sbale->addr = sg_virt(sg); + sbale->length = sg->length; } return 0; } +static int zfcp_qdio_sbal_check(struct zfcp_qdio *qdio) +{ + if (atomic_read(&qdio->req_q_free) || + !(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)) + return 1; + return 0; +} + /** - * zfcp_qdio_sbals_from_sg - fill SBALs from scatter-gather list - * @fsf_req: request to be processed - * @sbtype: SBALE flags - * @sg: scatter-gather list - * @max_sbals: upper bound for number of SBALs to be used - * Returns: number of bytes, or error (negativ) + * zfcp_qdio_sbal_get - get free sbal in request queue, wait if necessary + * @qdio: pointer to struct zfcp_qdio + * + * The req_q_lock must be held by the caller of this function, and + * this function may only be called from process context; it will + * sleep when waiting for a free sbal. + * + * Returns: 0 on success, -EIO if there is no free sbal after waiting. 
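+ *
+ * Illustrative caller pattern (annotation, not part of this patch),
+ * matching how the ERP request senders in zfcp_fsf.c use it:
+ *
+ *	spin_lock_irq(&qdio->req_q_lock);
+ *	if (zfcp_qdio_sbal_get(qdio))
+ *		goto out;
+ *	req = zfcp_fsf_req_create(qdio, ...);
+ *	...
+ * out:
+ *	spin_unlock_irq(&qdio->req_q_lock);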
*/ -int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req, - unsigned long sbtype, struct scatterlist *sg, - int max_sbals) +int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio) { - struct qdio_buffer_element *sbale; - int retval, bytes = 0; + long ret; - /* figure out last allowed SBAL */ - zfcp_qdio_sbal_limit(qdio, q_req, max_sbals); + ret = wait_event_interruptible_lock_irq_timeout(qdio->req_q_wq, + zfcp_qdio_sbal_check(qdio), qdio->req_q_lock, 5 * HZ); - /* set storage-block type for this request */ - sbale = zfcp_qdio_sbale_req(qdio, q_req); - sbale->flags |= sbtype; + if (!(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)) + return -EIO; - for (; sg; sg = sg_next(sg)) { - retval = zfcp_qdio_fill_sbals(qdio, q_req, sbtype, - sg_virt(sg), sg->length); - if (retval < 0) - return retval; - bytes += sg->length; - } + if (ret > 0) + return 0; - /* assume that no other SBALEs are to follow in the same SBAL */ - sbale = zfcp_qdio_sbale_curr(qdio, q_req); - sbale->flags |= SBAL_FLAGS_LAST_ENTRY; + if (!ret) { + atomic_inc(&qdio->req_q_full); + /* assume hanging outbound queue, try queue recovery */ + zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdsbg_1"); + } - return bytes; + return -EIO; } /** @@ -279,25 +270,27 @@ int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req, */ int zfcp_qdio_send(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req) { - struct zfcp_qdio_queue *req_q = &qdio->req_q; - int first = q_req->sbal_first; - int count = q_req->sbal_number; int retval; - unsigned int qdio_flags = QDIO_FLAG_SYNC_OUTPUT; + u8 sbal_number = q_req->sbal_number; + spin_lock(&qdio->stat_lock); zfcp_qdio_account(qdio); + spin_unlock(&qdio->stat_lock); + + retval = do_QDIO(qdio->adapter->ccw_device, QDIO_FLAG_SYNC_OUTPUT, 0, + q_req->sbal_first, sbal_number); - retval = do_QDIO(qdio->adapter->ccw_device, qdio_flags, 0, first, - count); if (unlikely(retval)) { - zfcp_qdio_zero_sbals(req_q->sbal, first, count); + zfcp_qdio_zero_sbals(qdio->req_q, q_req->sbal_first, + sbal_number); return retval; } /* account for transferred buffers */ - atomic_sub(count, &req_q->count); - req_q->first += count; - req_q->first %= QDIO_MAX_BUFFERS_PER_Q; + atomic_sub(sbal_number, &qdio->req_q_free); + qdio->req_q_idx += sbal_number; + qdio->req_q_idx %= QDIO_MAX_BUFFERS_PER_Q; + return 0; } @@ -305,24 +298,25 @@ int zfcp_qdio_send(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req) static void zfcp_qdio_setup_init_data(struct qdio_initialize *id, struct zfcp_qdio *qdio) { - + memset(id, 0, sizeof(*id)); id->cdev = qdio->adapter->ccw_device; id->q_format = QDIO_ZFCP_QFMT; memcpy(id->adapter_name, dev_name(&id->cdev->dev), 8); ASCEBC(id->adapter_name, 8); - id->qib_param_field_format = 0; - id->qib_param_field = NULL; - id->input_slib_elements = NULL; - id->output_slib_elements = NULL; + id->qib_rflags = QIB_RFLAGS_ENABLE_DATA_DIV; + if (enable_multibuffer) + id->qdr_ac |= QDR_AC_MULTI_BUFFER_ENABLE; id->no_input_qs = 1; id->no_output_qs = 1; id->input_handler = zfcp_qdio_int_resp; id->output_handler = zfcp_qdio_int_req; id->int_parm = (unsigned long) qdio; - id->input_sbal_addr_array = (void **) (qdio->resp_q.sbal); - id->output_sbal_addr_array = (void **) (qdio->req_q.sbal); - + id->input_sbal_addr_array = (void **) (qdio->res_q); + id->output_sbal_addr_array = (void **) (qdio->req_q); + id->scan_threshold = + QDIO_MAX_BUFFERS_PER_Q - ZFCP_QDIO_MAX_SBALS_PER_REQ * 2; } + /** * zfcp_qdio_allocate - allocate queue memory and initialize QDIO data * @adapter: 
pointer to struct zfcp_adapter @@ -333,11 +327,12 @@ static int zfcp_qdio_allocate(struct zfcp_qdio *qdio) { struct qdio_initialize init_data; - if (zfcp_qdio_buffers_enqueue(qdio->req_q.sbal) || - zfcp_qdio_buffers_enqueue(qdio->resp_q.sbal)) + if (zfcp_qdio_buffers_enqueue(qdio->req_q) || + zfcp_qdio_buffers_enqueue(qdio->res_q)) return -ENOMEM; zfcp_qdio_setup_init_data(&init_data, qdio); + init_waitqueue_head(&qdio->req_q_wq); return qdio_allocate(&init_data); } @@ -348,32 +343,30 @@ static int zfcp_qdio_allocate(struct zfcp_qdio *qdio) */ void zfcp_qdio_close(struct zfcp_qdio *qdio) { - struct zfcp_qdio_queue *req_q; - int first, count; + struct zfcp_adapter *adapter = qdio->adapter; + int idx, count; - if (!(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)) + if (!(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)) return; /* clear QDIOUP flag, thus do_QDIO is not called during qdio_shutdown */ - req_q = &qdio->req_q; - spin_lock_bh(&qdio->req_q_lock); - atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &qdio->adapter->status); - spin_unlock_bh(&qdio->req_q_lock); + spin_lock_irq(&qdio->req_q_lock); + atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status); + spin_unlock_irq(&qdio->req_q_lock); - qdio_shutdown(qdio->adapter->ccw_device, - QDIO_FLAG_CLEANUP_USING_CLEAR); + wake_up(&qdio->req_q_wq); + + qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR); /* cleanup used outbound sbals */ - count = atomic_read(&req_q->count); + count = atomic_read(&qdio->req_q_free); if (count < QDIO_MAX_BUFFERS_PER_Q) { - first = (req_q->first + count) % QDIO_MAX_BUFFERS_PER_Q; + idx = (qdio->req_q_idx + count) % QDIO_MAX_BUFFERS_PER_Q; count = QDIO_MAX_BUFFERS_PER_Q - count; - zfcp_qdio_zero_sbals(req_q->sbal, first, count); + zfcp_qdio_zero_sbals(qdio->req_q, idx, count); } - req_q->first = 0; - atomic_set(&req_q->count, 0); - qdio->resp_q.first = 0; - atomic_set(&qdio->resp_q.count, 0); + qdio->req_q_idx = 0; + atomic_set(&qdio->req_q_free, 0); } /** @@ -385,34 +378,63 @@ int zfcp_qdio_open(struct zfcp_qdio *qdio) { struct qdio_buffer_element *sbale; struct qdio_initialize init_data; - struct ccw_device *cdev = qdio->adapter->ccw_device; + struct zfcp_adapter *adapter = qdio->adapter; + struct ccw_device *cdev = adapter->ccw_device; + struct qdio_ssqd_desc ssqd; int cc; - if (atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP) + if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP) return -EIO; + atomic_clear_mask(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED, + &qdio->adapter->status); + zfcp_qdio_setup_init_data(&init_data, qdio); if (qdio_establish(&init_data)) goto failed_establish; + if (qdio_get_ssqd_desc(init_data.cdev, &ssqd)) + goto failed_qdio; + + if (ssqd.qdioac2 & CHSC_AC2_DATA_DIV_ENABLED) + atomic_set_mask(ZFCP_STATUS_ADAPTER_DATA_DIV_ENABLED, + &qdio->adapter->status); + + if (ssqd.qdioac2 & CHSC_AC2_MULTI_BUFFER_ENABLED) { + atomic_set_mask(ZFCP_STATUS_ADAPTER_MB_ACT, &adapter->status); + qdio->max_sbale_per_sbal = QDIO_MAX_ELEMENTS_PER_BUFFER; + } else { + atomic_clear_mask(ZFCP_STATUS_ADAPTER_MB_ACT, &adapter->status); + qdio->max_sbale_per_sbal = QDIO_MAX_ELEMENTS_PER_BUFFER - 1; + } + + qdio->max_sbale_per_req = + ZFCP_QDIO_MAX_SBALS_PER_REQ * qdio->max_sbale_per_sbal + - 2; if (qdio_activate(cdev)) goto failed_qdio; for (cc = 0; cc < QDIO_MAX_BUFFERS_PER_Q; cc++) { - sbale = &(qdio->resp_q.sbal[cc]->element[0]); + sbale = &(qdio->res_q[cc]->element[0]); sbale->length = 0; - sbale->flags = SBAL_FLAGS_LAST_ENTRY; + 
sbale->eflags = SBAL_EFLAGS_LAST_ENTRY; + sbale->sflags = 0; sbale->addr = NULL; } - if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, 0, - QDIO_MAX_BUFFERS_PER_Q)) + if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, 0, QDIO_MAX_BUFFERS_PER_Q)) goto failed_qdio; - /* set index of first avalable SBALS / number of available SBALS */ - qdio->req_q.first = 0; - atomic_set(&qdio->req_q.count, QDIO_MAX_BUFFERS_PER_Q); + /* set index of first available SBALS / number of available SBALS */ + qdio->req_q_idx = 0; + atomic_set(&qdio->req_q_free, QDIO_MAX_BUFFERS_PER_Q); + atomic_set_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &qdio->adapter->status); + + if (adapter->scsi_host) { + adapter->scsi_host->sg_tablesize = qdio->max_sbale_per_req; + adapter->scsi_host->max_sectors = qdio->max_sbale_per_req * 8; + } return 0; @@ -426,7 +448,6 @@ failed_establish: void zfcp_qdio_destroy(struct zfcp_qdio *qdio) { - struct qdio_buffer **sbal_req, **sbal_resp; int p; if (!qdio) @@ -435,12 +456,9 @@ void zfcp_qdio_destroy(struct zfcp_qdio *qdio) if (qdio->adapter->ccw_device) qdio_free(qdio->adapter->ccw_device); - sbal_req = qdio->req_q.sbal; - sbal_resp = qdio->resp_q.sbal; - for (p = 0; p < QDIO_MAX_BUFFERS_PER_Q; p += QBUFF_PER_PAGE) { - free_page((unsigned long) sbal_req[p]); - free_page((unsigned long) sbal_resp[p]); + free_page((unsigned long) qdio->req_q[p]); + free_page((unsigned long) qdio->res_q[p]); } kfree(qdio); @@ -468,3 +486,26 @@ int zfcp_qdio_setup(struct zfcp_adapter *adapter) return 0; } +/** + * zfcp_qdio_siosl - Trigger logging in FCP channel + * @adapter: The zfcp_adapter where to trigger logging + * + * Call the cio siosl function to trigger hardware logging. This + * wrapper function sets a flag to ensure hardware logging is only + * triggered once before going through qdio shutdown. + * + * The triggers are always run from qdio tasklet context, so no + * additional synchronization is necessary. + */ +void zfcp_qdio_siosl(struct zfcp_adapter *adapter) +{ + int rc; + + if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_SIOSL_ISSUED) + return; + + rc = ccw_device_siosl(adapter->ccw_device); + if (!rc) + atomic_set_mask(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED, + &adapter->status); +} diff --git a/drivers/s390/scsi/zfcp_qdio.h b/drivers/s390/scsi/zfcp_qdio.h index 8cca54631e1..497cd379b0d 100644 --- a/drivers/s390/scsi/zfcp_qdio.h +++ b/drivers/s390/scsi/zfcp_qdio.h @@ -3,7 +3,7 @@ * * Header file for zfcp qdio interface * - * Copyright IBM Corporation 2010 + * Copyright IBM Corp. 
2010 */ #ifndef ZFCP_QDIO_H @@ -11,22 +11,17 @@ #include <asm/qdio.h> -/** - * struct zfcp_qdio_queue - qdio queue buffer, zfcp index and free count - * @sbal: qdio buffers - * @first: index of next free buffer in queue - * @count: number of free buffers in queue - */ -struct zfcp_qdio_queue { - struct qdio_buffer *sbal[QDIO_MAX_BUFFERS_PER_Q]; - u8 first; - atomic_t count; -}; +#define ZFCP_QDIO_SBALE_LEN PAGE_SIZE + +/* Max SBALS for chaining */ +#define ZFCP_QDIO_MAX_SBALS_PER_REQ 36 /** * struct zfcp_qdio - basic qdio data structure - * @resp_q: response queue + * @res_q: response queue * @req_q: request queue + * @req_q_idx: index of next free buffer + * @req_q_free: number of free buffers in queue * @stat_lock: lock to protect req_q_util and req_q_time * @req_q_lock: lock to serialize access to request queue * @req_q_time: time of last fill level change @@ -36,8 +31,10 @@ struct zfcp_qdio_queue { * @adapter: adapter used in conjunction with this qdio structure */ struct zfcp_qdio { - struct zfcp_qdio_queue resp_q; - struct zfcp_qdio_queue req_q; + struct qdio_buffer *res_q[QDIO_MAX_BUFFERS_PER_Q]; + struct qdio_buffer *req_q[QDIO_MAX_BUFFERS_PER_Q]; + u8 req_q_idx; + atomic_t req_q_free; spinlock_t stat_lock; spinlock_t req_q_lock; unsigned long long req_q_time; @@ -45,10 +42,13 @@ struct zfcp_qdio { atomic_t req_q_full; wait_queue_head_t req_q_wq; struct zfcp_adapter *adapter; + u16 max_sbale_per_sbal; + u16 max_sbale_per_req; }; /** * struct zfcp_qdio_req - qdio queue related values for a request + * @sbtype: sbal type flags for sbale 0 * @sbal_number: number of free sbals * @sbal_first: first sbal for this request * @sbal_last: last sbal for this request @@ -56,9 +56,9 @@ struct zfcp_qdio { * @sbale_curr: current sbale at creation of this request * @sbal_response: sbal used in interrupt * @qdio_outb_usage: usage of outbound queue - * @qdio_inb_usage: usage of inbound queue */ struct zfcp_qdio_req { + u8 sbtype; u8 sbal_number; u8 sbal_first; u8 sbal_last; @@ -66,22 +66,9 @@ struct zfcp_qdio_req { u8 sbale_curr; u8 sbal_response; u16 qdio_outb_usage; - u16 qdio_inb_usage; }; /** - * zfcp_qdio_sbale - return pointer to sbale in qdio queue - * @q: queue where to find sbal - * @sbal_idx: sbal index in queue - * @sbale_idx: sbale index in sbal - */ -static inline struct qdio_buffer_element * -zfcp_qdio_sbale(struct zfcp_qdio_queue *q, int sbal_idx, int sbale_idx) -{ - return &q->sbal[sbal_idx]->element[sbale_idx]; -} - -/** * zfcp_qdio_sbale_req - return pointer to sbale on req_q for a request * @qdio: pointer to struct zfcp_qdio * @q_rec: pointer to struct zfcp_qdio_req @@ -90,7 +77,7 @@ zfcp_qdio_sbale(struct zfcp_qdio_queue *q, int sbal_idx, int sbale_idx) static inline struct qdio_buffer_element * zfcp_qdio_sbale_req(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req) { - return zfcp_qdio_sbale(&qdio->req_q, q_req->sbal_last, 0); + return &qdio->req_q[q_req->sbal_last]->element[0]; } /** @@ -102,8 +89,183 @@ zfcp_qdio_sbale_req(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req) static inline struct qdio_buffer_element * zfcp_qdio_sbale_curr(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req) { - return zfcp_qdio_sbale(&qdio->req_q, q_req->sbal_last, - q_req->sbale_curr); + return &qdio->req_q[q_req->sbal_last]->element[q_req->sbale_curr]; +} + +/** + * zfcp_qdio_req_init - initialize qdio request + * @qdio: request queue where to start putting the request + * @q_req: the qdio request to start + * @req_id: The request id + * @sbtype: type flags to set for all sbals + * @data: 
First data block + * @len: Length of first data block + * + * This is the start of putting the request into the queue, the last + * step is passing the request to zfcp_qdio_send. The request queue + * lock must be held during the whole process from init to send. + */ +static inline +void zfcp_qdio_req_init(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req, + unsigned long req_id, u8 sbtype, void *data, u32 len) +{ + struct qdio_buffer_element *sbale; + int count = min(atomic_read(&qdio->req_q_free), + ZFCP_QDIO_MAX_SBALS_PER_REQ); + + q_req->sbal_first = q_req->sbal_last = qdio->req_q_idx; + q_req->sbal_number = 1; + q_req->sbtype = sbtype; + q_req->sbale_curr = 1; + q_req->sbal_limit = (q_req->sbal_first + count - 1) + % QDIO_MAX_BUFFERS_PER_Q; + + sbale = zfcp_qdio_sbale_req(qdio, q_req); + sbale->addr = (void *) req_id; + sbale->eflags = 0; + sbale->sflags = SBAL_SFLAGS0_COMMAND | sbtype; + + if (unlikely(!data)) + return; + sbale++; + sbale->addr = data; + sbale->length = len; +} + +/** + * zfcp_qdio_fill_next - Fill next sbale, only for single sbal requests + * @qdio: pointer to struct zfcp_qdio + * @q_req: pointer to struct zfcp_queue_req + * + * This is only required for single sbal requests, calling it when + * wrapping around to the next sbal is a bug. + */ +static inline +void zfcp_qdio_fill_next(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req, + void *data, u32 len) +{ + struct qdio_buffer_element *sbale; + + BUG_ON(q_req->sbale_curr == qdio->max_sbale_per_sbal - 1); + q_req->sbale_curr++; + sbale = zfcp_qdio_sbale_curr(qdio, q_req); + sbale->addr = data; + sbale->length = len; +} + +/** + * zfcp_qdio_set_sbale_last - set last entry flag in current sbale + * @qdio: pointer to struct zfcp_qdio + * @q_req: pointer to struct zfcp_queue_req + */ +static inline +void zfcp_qdio_set_sbale_last(struct zfcp_qdio *qdio, + struct zfcp_qdio_req *q_req) +{ + struct qdio_buffer_element *sbale; + + sbale = zfcp_qdio_sbale_curr(qdio, q_req); + sbale->eflags |= SBAL_EFLAGS_LAST_ENTRY; +} + +/** + * zfcp_qdio_sg_one_sbal - check if one sbale is enough for sg data + * @sg: The scatterlist where to check the data size + * + * Returns: 1 when one sbale is enough for the data in the scatterlist, + * 0 if not. 
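+ *
+ * Annotation (not part of this patch): callers can use this check to
+ * place a small single-entry buffer directly with zfcp_qdio_fill_next()
+ * instead of mapping the whole list via zfcp_qdio_sbals_from_sg().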
+ */ +static inline +int zfcp_qdio_sg_one_sbale(struct scatterlist *sg) +{ + return sg_is_last(sg) && sg->length <= ZFCP_QDIO_SBALE_LEN; +} + +/** + * zfcp_qdio_skip_to_last_sbale - skip to last sbale in sbal + * @q_req: The current zfcp_qdio_req + */ +static inline +void zfcp_qdio_skip_to_last_sbale(struct zfcp_qdio *qdio, + struct zfcp_qdio_req *q_req) +{ + q_req->sbale_curr = qdio->max_sbale_per_sbal - 1; +} + +/** + * zfcp_qdio_sbal_limit - set the sbal limit for a request in q_req + * @qdio: pointer to struct zfcp_qdio + * @q_req: The current zfcp_qdio_req + * @max_sbals: maximum number of SBALs allowed + */ +static inline +void zfcp_qdio_sbal_limit(struct zfcp_qdio *qdio, + struct zfcp_qdio_req *q_req, int max_sbals) +{ + int count = min(atomic_read(&qdio->req_q_free), max_sbals); + + q_req->sbal_limit = (q_req->sbal_first + count - 1) % + QDIO_MAX_BUFFERS_PER_Q; +} + +/** + * zfcp_qdio_set_data_div - set data division count + * @qdio: pointer to struct zfcp_qdio + * @q_req: The current zfcp_qdio_req + * @count: The data division count + */ +static inline +void zfcp_qdio_set_data_div(struct zfcp_qdio *qdio, + struct zfcp_qdio_req *q_req, u32 count) +{ + struct qdio_buffer_element *sbale; + + sbale = qdio->req_q[q_req->sbal_first]->element; + sbale->length = count; +} + +/** + * zfcp_qdio_sbale_count - count sbale used + * @sg: pointer to struct scatterlist + */ +static inline +unsigned int zfcp_qdio_sbale_count(struct scatterlist *sg) +{ + unsigned int count = 0; + + for (; sg; sg = sg_next(sg)) + count++; + + return count; +} + +/** + * zfcp_qdio_real_bytes - count bytes used + * @sg: pointer to struct scatterlist + */ +static inline +unsigned int zfcp_qdio_real_bytes(struct scatterlist *sg) +{ + unsigned int real_bytes = 0; + + for (; sg; sg = sg_next(sg)) + real_bytes += sg->length; + + return real_bytes; +} + +/** + * zfcp_qdio_set_scount - set SBAL count value + * @qdio: pointer to struct zfcp_qdio + * @q_req: The current zfcp_qdio_req + */ +static inline +void zfcp_qdio_set_scount(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req) +{ + struct qdio_buffer_element *sbale; + + sbale = qdio->req_q[q_req->sbal_first]->element; + sbale->scount = q_req->sbal_number - 1; } #endif /* ZFCP_QDIO_H */ diff --git a/drivers/s390/scsi/zfcp_reqlist.h b/drivers/s390/scsi/zfcp_reqlist.h index a72d1b730ab..7c2c6194dfc 100644 --- a/drivers/s390/scsi/zfcp_reqlist.h +++ b/drivers/s390/scsi/zfcp_reqlist.h @@ -4,7 +4,7 @@ * Data structure and helper functions for tracking pending FSF * requests. * - * Copyright IBM Corporation 2009 + * Copyright IBM Corp. 2009 */ #ifndef ZFCP_REQLIST_H diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c index c3c4178888a..7b353647cb9 100644 --- a/drivers/s390/scsi/zfcp_scsi.c +++ b/drivers/s390/scsi/zfcp_scsi.c @@ -3,15 +3,18 @@ * * Interface to Linux SCSI midlayer. * - * Copyright IBM Corporation 2002, 2010 + * Copyright IBM Corp. 
2002, 2013 */ #define KMSG_COMPONENT "zfcp" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt +#include <linux/module.h> #include <linux/types.h> +#include <linux/slab.h> #include <scsi/fc/fc_fcp.h> -#include <asm/atomic.h> +#include <scsi/scsi_eh.h> +#include <linux/atomic.h> #include "zfcp_ext.h" #include "zfcp_dbf.h" #include "zfcp_fc.h" @@ -21,6 +24,14 @@ static unsigned int default_depth = 32; module_param_named(queue_depth, default_depth, uint, 0600); MODULE_PARM_DESC(queue_depth, "Default queue depth for new SCSI devices"); +static bool enable_dif; +module_param_named(dif, enable_dif, bool, 0400); +MODULE_PARM_DESC(dif, "Enable DIF/DIX data integrity support"); + +static bool allow_lun_scan = 1; +module_param(allow_lun_scan, bool, 0600); +MODULE_PARM_DESC(allow_lun_scan, "For NPIV, scan and attach all storage LUNs"); + static int zfcp_scsi_change_queue_depth(struct scsi_device *sdev, int depth, int reason) { @@ -40,11 +51,16 @@ static int zfcp_scsi_change_queue_depth(struct scsi_device *sdev, int depth, return sdev->queue_depth; } -static void zfcp_scsi_slave_destroy(struct scsi_device *sdpnt) +static void zfcp_scsi_slave_destroy(struct scsi_device *sdev) { - struct zfcp_unit *unit = (struct zfcp_unit *) sdpnt->hostdata; - unit->device = NULL; - put_device(&unit->dev); + struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); + + /* if previous slave_alloc returned early, there is nothing to do */ + if (!zfcp_sdev->port) + return; + + zfcp_erp_lun_shutdown_wait(sdev, "scssd_1"); + put_device(&zfcp_sdev->port->dev); } static int zfcp_scsi_slave_configure(struct scsi_device *sdp) @@ -58,47 +74,35 @@ static int zfcp_scsi_slave_configure(struct scsi_device *sdp) static void zfcp_scsi_command_fail(struct scsi_cmnd *scpnt, int result) { - struct zfcp_adapter *adapter = - (struct zfcp_adapter *) scpnt->device->host->hostdata[0]; - set_host_byte(scpnt, result); - zfcp_dbf_scsi_fail_send(adapter->dbf, scpnt); + zfcp_dbf_scsi_fail_send(scpnt); scpnt->scsi_done(scpnt); } -static int zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt, - void (*done) (struct scsi_cmnd *)) +static +int zfcp_scsi_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scpnt) { - struct zfcp_unit *unit; - struct zfcp_adapter *adapter; - int status, scsi_result, ret; + struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device); struct fc_rport *rport = starget_to_rport(scsi_target(scpnt->device)); + int status, scsi_result, ret; /* reset the status for this request */ scpnt->result = 0; scpnt->host_scribble = NULL; - scpnt->scsi_done = done; - - /* - * figure out adapter and target device - * (stored there by zfcp_scsi_slave_alloc) - */ - adapter = (struct zfcp_adapter *) scpnt->device->host->hostdata[0]; - unit = scpnt->device->hostdata; scsi_result = fc_remote_port_chkready(rport); if (unlikely(scsi_result)) { scpnt->result = scsi_result; - zfcp_dbf_scsi_fail_send(adapter->dbf, scpnt); + zfcp_dbf_scsi_fail_send(scpnt); scpnt->scsi_done(scpnt); return 0; } - status = atomic_read(&unit->status); + status = atomic_read(&zfcp_sdev->status); if (unlikely(status & ZFCP_STATUS_COMMON_ERP_FAILED) && - !(atomic_read(&unit->port->status) & + !(atomic_read(&zfcp_sdev->port->status) & ZFCP_STATUS_COMMON_ERP_FAILED)) { - /* only unit access denied, but port is good + /* only LUN access denied, but port is good * not covered by FC transport, have to fail here */ zfcp_scsi_command_fail(scpnt, DID_ERROR); return 0; @@ -106,8 +110,8 @@ static int zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt, if (unlikely(!(status & 
ZFCP_STATUS_COMMON_UNBLOCKED))) { /* This could be either - * open unit pending: this is temporary, will result in - * open unit or ERP_FAILED, so retry command + * open LUN pending: this is temporary, will result in + * open LUN or ERP_FAILED, so retry command * call to rport_delete pending: mimic retry from * fc_remote_port_chkready until rport is BLOCKED */ @@ -115,7 +119,7 @@ static int zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt, return 0; } - ret = zfcp_fsf_send_fcp_command_task(unit, scpnt); + ret = zfcp_fsf_fcp_cmnd(scpnt); if (unlikely(ret == -EBUSY)) return SCSI_MLQUEUE_DEVICE_BUSY; else if (unlikely(ret < 0)) @@ -124,45 +128,43 @@ static int zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt, return ret; } -static struct zfcp_unit *zfcp_unit_lookup(struct zfcp_adapter *adapter, - unsigned int id, u64 lun) +static int zfcp_scsi_slave_alloc(struct scsi_device *sdev) { - unsigned long flags; + struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); + struct zfcp_adapter *adapter = + (struct zfcp_adapter *) sdev->host->hostdata[0]; + struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); struct zfcp_port *port; - struct zfcp_unit *unit = NULL; + struct zfcp_unit *unit; + int npiv = adapter->connection_features & FSF_FEATURE_NPIV_MODE; - read_lock_irqsave(&adapter->port_list_lock, flags); - list_for_each_entry(port, &adapter->port_list, list) { - if (!port->rport || (id != port->rport->scsi_target_id)) - continue; - unit = zfcp_get_unit_by_lun(port, lun); - if (unit) - break; - } - read_unlock_irqrestore(&adapter->port_list_lock, flags); + port = zfcp_get_port_by_wwpn(adapter, rport->port_name); + if (!port) + return -ENXIO; - return unit; -} + unit = zfcp_unit_find(port, zfcp_scsi_dev_lun(sdev)); + if (unit) + put_device(&unit->dev); -static int zfcp_scsi_slave_alloc(struct scsi_device *sdp) -{ - struct zfcp_adapter *adapter; - struct zfcp_unit *unit; - u64 lun; + if (!unit && !(allow_lun_scan && npiv)) { + put_device(&port->dev); + return -ENXIO; + } - adapter = (struct zfcp_adapter *) sdp->host->hostdata[0]; - if (!adapter) - goto out; + zfcp_sdev->port = port; + zfcp_sdev->latencies.write.channel.min = 0xFFFFFFFF; + zfcp_sdev->latencies.write.fabric.min = 0xFFFFFFFF; + zfcp_sdev->latencies.read.channel.min = 0xFFFFFFFF; + zfcp_sdev->latencies.read.fabric.min = 0xFFFFFFFF; + zfcp_sdev->latencies.cmd.channel.min = 0xFFFFFFFF; + zfcp_sdev->latencies.cmd.fabric.min = 0xFFFFFFFF; + spin_lock_init(&zfcp_sdev->latencies.lock); - int_to_scsilun(sdp->lun, (struct scsi_lun *)&lun); - unit = zfcp_unit_lookup(adapter, sdp->id, lun); - if (unit) { - sdp->hostdata = unit; - unit->device = sdp; - return 0; - } -out: - return -ENXIO; + zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_RUNNING); + zfcp_erp_lun_reopen(sdev, 0, "scsla_1"); + zfcp_erp_wait(port->adapter); + + return 0; } static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt) @@ -170,11 +172,10 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt) struct Scsi_Host *scsi_host = scpnt->device->host; struct zfcp_adapter *adapter = (struct zfcp_adapter *) scsi_host->hostdata[0]; - struct zfcp_unit *unit = scpnt->device->hostdata; struct zfcp_fsf_req *old_req, *abrt_req; unsigned long flags; unsigned long old_reqid = (unsigned long) scpnt->host_scribble; - int retval = SUCCESS; + int retval = SUCCESS, ret; int retry = 3; char *dbf_tag; @@ -184,8 +185,7 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt) old_req = zfcp_reqlist_find(adapter->req_list, old_reqid); if (!old_req) { 
write_unlock_irqrestore(&adapter->abort_lock, flags); - zfcp_dbf_scsi_abort("lte1", adapter->dbf, scpnt, NULL, - old_reqid); + zfcp_dbf_scsi_abort("abrt_or", scpnt, NULL); return FAILED; /* completion could be in progress */ } old_req->data = NULL; @@ -194,55 +194,63 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt) write_unlock_irqrestore(&adapter->abort_lock, flags); while (retry--) { - abrt_req = zfcp_fsf_abort_fcp_command(old_reqid, unit); + abrt_req = zfcp_fsf_abort_fcp_cmnd(scpnt); if (abrt_req) break; zfcp_erp_wait(adapter); - fc_block_scsi_eh(scpnt); + ret = fc_block_scsi_eh(scpnt); + if (ret) { + zfcp_dbf_scsi_abort("abrt_bl", scpnt, NULL); + return ret; + } if (!(atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_RUNNING)) { - zfcp_dbf_scsi_abort("nres", adapter->dbf, scpnt, NULL, - old_reqid); + zfcp_dbf_scsi_abort("abrt_ru", scpnt, NULL); return SUCCESS; } } - if (!abrt_req) + if (!abrt_req) { + zfcp_dbf_scsi_abort("abrt_ar", scpnt, NULL); return FAILED; + } wait_for_completion(&abrt_req->completion); if (abrt_req->status & ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED) - dbf_tag = "okay"; + dbf_tag = "abrt_ok"; else if (abrt_req->status & ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED) - dbf_tag = "lte2"; + dbf_tag = "abrt_nn"; else { - dbf_tag = "fail"; + dbf_tag = "abrt_fa"; retval = FAILED; } - zfcp_dbf_scsi_abort(dbf_tag, adapter->dbf, scpnt, abrt_req, old_reqid); + zfcp_dbf_scsi_abort(dbf_tag, scpnt, abrt_req); zfcp_fsf_req_free(abrt_req); return retval; } static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags) { - struct zfcp_unit *unit = scpnt->device->hostdata; - struct zfcp_adapter *adapter = unit->port->adapter; + struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device); + struct zfcp_adapter *adapter = zfcp_sdev->port->adapter; struct zfcp_fsf_req *fsf_req = NULL; - int retval = SUCCESS; + int retval = SUCCESS, ret; int retry = 3; while (retry--) { - fsf_req = zfcp_fsf_send_fcp_ctm(unit, tm_flags); + fsf_req = zfcp_fsf_fcp_task_mgmt(scpnt, tm_flags); if (fsf_req) break; zfcp_erp_wait(adapter); - fc_block_scsi_eh(scpnt); + ret = fc_block_scsi_eh(scpnt); + if (ret) + return ret; + if (!(atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_RUNNING)) { - zfcp_dbf_scsi_devreset("nres", tm_flags, unit, scpnt); + zfcp_dbf_scsi_devreset("nres", scpnt, tm_flags); return SUCCESS; } } @@ -252,10 +260,10 @@ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags) wait_for_completion(&fsf_req->completion); if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED) { - zfcp_dbf_scsi_devreset("fail", tm_flags, unit, scpnt); + zfcp_dbf_scsi_devreset("fail", scpnt, tm_flags); retval = FAILED; } else - zfcp_dbf_scsi_devreset("okay", tm_flags, unit, scpnt); + zfcp_dbf_scsi_devreset("okay", scpnt, tm_flags); zfcp_fsf_req_free(fsf_req); return retval; @@ -273,17 +281,54 @@ static int zfcp_scsi_eh_target_reset_handler(struct scsi_cmnd *scpnt) static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt) { - struct zfcp_unit *unit = scpnt->device->hostdata; - struct zfcp_adapter *adapter = unit->port->adapter; + struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device); + struct zfcp_adapter *adapter = zfcp_sdev->port->adapter; + int ret; - zfcp_erp_adapter_reopen(adapter, 0, "schrh_1", scpnt); + zfcp_erp_adapter_reopen(adapter, 0, "schrh_1"); zfcp_erp_wait(adapter); - fc_block_scsi_eh(scpnt); + ret = fc_block_scsi_eh(scpnt); + if (ret) + return ret; return SUCCESS; } -int zfcp_adapter_scsi_register(struct zfcp_adapter *adapter) +struct 
scsi_transport_template *zfcp_scsi_transport_template; + +static struct scsi_host_template zfcp_scsi_host_template = { + .module = THIS_MODULE, + .name = "zfcp", + .queuecommand = zfcp_scsi_queuecommand, + .eh_abort_handler = zfcp_scsi_eh_abort_handler, + .eh_device_reset_handler = zfcp_scsi_eh_device_reset_handler, + .eh_target_reset_handler = zfcp_scsi_eh_target_reset_handler, + .eh_host_reset_handler = zfcp_scsi_eh_host_reset_handler, + .slave_alloc = zfcp_scsi_slave_alloc, + .slave_configure = zfcp_scsi_slave_configure, + .slave_destroy = zfcp_scsi_slave_destroy, + .change_queue_depth = zfcp_scsi_change_queue_depth, + .proc_name = "zfcp", + .can_queue = 4096, + .this_id = -1, + .sg_tablesize = (((QDIO_MAX_ELEMENTS_PER_BUFFER - 1) + * ZFCP_QDIO_MAX_SBALS_PER_REQ) - 2), + /* GCD, adjusted later */ + .max_sectors = (((QDIO_MAX_ELEMENTS_PER_BUFFER - 1) + * ZFCP_QDIO_MAX_SBALS_PER_REQ) - 2) * 8, + /* GCD, adjusted later */ + .dma_boundary = ZFCP_QDIO_SBALE_LEN - 1, + .cmd_per_lun = 1, + .use_clustering = 1, + .shost_attrs = zfcp_sysfs_shost_attrs, + .sdev_attrs = zfcp_sysfs_sdev_attrs, +}; + +/** + * zfcp_scsi_adapter_register - Register SCSI and FC host with SCSI midlayer + * @adapter: The zfcp adapter to register with the SCSI midlayer + */ +int zfcp_scsi_adapter_register(struct zfcp_adapter *adapter) { struct ccw_dev_id dev_id; @@ -292,7 +337,7 @@ int zfcp_adapter_scsi_register(struct zfcp_adapter *adapter) ccw_device_get_id(adapter->ccw_device, &dev_id); /* register adapter as SCSI host with mid layer of SCSI stack */ - adapter->scsi_host = scsi_host_alloc(&zfcp_data.scsi_host_template, + adapter->scsi_host = scsi_host_alloc(&zfcp_scsi_host_template, sizeof (struct zfcp_adapter *)); if (!adapter->scsi_host) { dev_err(&adapter->ccw_device->dev, @@ -302,12 +347,12 @@ int zfcp_adapter_scsi_register(struct zfcp_adapter *adapter) } /* tell the SCSI stack some characteristics of this adapter */ - adapter->scsi_host->max_id = 1; - adapter->scsi_host->max_lun = 1; + adapter->scsi_host->max_id = 511; + adapter->scsi_host->max_lun = 0xFFFFFFFF; adapter->scsi_host->max_channel = 0; adapter->scsi_host->unique_id = dev_id.devno; adapter->scsi_host->max_cmd_len = 16; /* in struct fcp_cmnd */ - adapter->scsi_host->transportt = zfcp_data.scsi_transport_template; + adapter->scsi_host->transportt = zfcp_scsi_transport_template; adapter->scsi_host->hostdata[0] = (unsigned long) adapter; @@ -319,7 +364,11 @@ int zfcp_adapter_scsi_register(struct zfcp_adapter *adapter) return 0; } -void zfcp_adapter_scsi_unregister(struct zfcp_adapter *adapter) +/** + * zfcp_scsi_adapter_unregister - Unregister SCSI and FC host from SCSI midlayer + * @adapter: The zfcp adapter to unregister. + */ +void zfcp_scsi_adapter_unregister(struct zfcp_adapter *adapter) { struct Scsi_Host *shost; struct zfcp_port *port; @@ -337,8 +386,6 @@ void zfcp_adapter_scsi_unregister(struct zfcp_adapter *adapter) scsi_remove_host(shost); scsi_host_put(shost); adapter->scsi_host = NULL; - - return; } static struct fc_host_statistics* @@ -497,8 +544,10 @@ static void zfcp_set_rport_dev_loss_tmo(struct fc_rport *rport, u32 timeout) * @rport: The FC rport where to terminate I/O * * Abort all pending SCSI commands for a port by closing the - * port. Using a reopen avoiding a conflict with a shutdown - * overwriting a reopen. + * port. Using a reopen avoids a conflict with a shutdown + * overwriting a reopen. The "forced" ensures that a disappeared port + * is not opened again as valid due to the cached plogi data in + * non-NPIV mode. 
*/ static void zfcp_scsi_terminate_rport_io(struct fc_rport *rport) { @@ -510,7 +559,7 @@ static void zfcp_scsi_terminate_rport_io(struct fc_rport *rport) port = zfcp_get_port_by_wwpn(adapter, rport->port_name); if (port) { - zfcp_erp_port_reopen(port, 0, "sctrpi1", NULL); + zfcp_erp_port_forced_reopen(port, 0, "sctrpi1"); put_device(&port->dev); } } @@ -539,6 +588,9 @@ static void zfcp_scsi_rport_register(struct zfcp_port *port) rport->maxframe_size = port->maxframe_size; rport->supported_classes = port->supported_classes; port->rport = rport; + port->starget_id = rport->scsi_target_id; + + zfcp_unit_queue_scsi_scan(port); } static void zfcp_scsi_rport_block(struct zfcp_port *port) @@ -601,22 +653,50 @@ void zfcp_scsi_rport_work(struct work_struct *work) put_device(&port->dev); } - -void zfcp_scsi_scan(struct work_struct *work) +/** + * zfcp_scsi_set_prot - Configure DIF/DIX support in scsi_host + * @adapter: The adapter where to configure DIF/DIX for the SCSI host + */ +void zfcp_scsi_set_prot(struct zfcp_adapter *adapter) { - struct zfcp_unit *unit = container_of(work, struct zfcp_unit, - scsi_work); - struct fc_rport *rport; - - flush_work(&unit->port->rport_work); - rport = unit->port->rport; + unsigned int mask = 0; + unsigned int data_div; + struct Scsi_Host *shost = adapter->scsi_host; + + data_div = atomic_read(&adapter->status) & + ZFCP_STATUS_ADAPTER_DATA_DIV_ENABLED; + + if (enable_dif && + adapter->adapter_features & FSF_FEATURE_DIF_PROT_TYPE1) + mask |= SHOST_DIF_TYPE1_PROTECTION; + + if (enable_dif && data_div && + adapter->adapter_features & FSF_FEATURE_DIX_PROT_TCPIP) { + mask |= SHOST_DIX_TYPE1_PROTECTION; + scsi_host_set_guard(shost, SHOST_DIX_GUARD_IP); + shost->sg_prot_tablesize = adapter->qdio->max_sbale_per_req / 2; + shost->sg_tablesize = adapter->qdio->max_sbale_per_req / 2; + shost->max_sectors = shost->sg_tablesize * 8; + } - if (rport && rport->port_state == FC_PORTSTATE_ONLINE) - scsi_scan_target(&rport->dev, 0, rport->scsi_target_id, - scsilun_to_int((struct scsi_lun *) - &unit->fcp_lun), 0); + scsi_host_set_prot(shost, mask); +} - put_device(&unit->dev); +/** + * zfcp_scsi_dif_sense_error - Report DIF/DIX error as driver sense error + * @scmd: The SCSI command to report the error for + * @ascq: The ASCQ to put in the sense buffer + * + * See the error handling in sd_done for the sense codes used here. + * Set DID_SOFT_ERROR to retry the request, if possible. 
+ */ +void zfcp_scsi_dif_sense_error(struct scsi_cmnd *scmd, int ascq) +{ + scsi_build_sense_buffer(1, scmd->sense_buffer, + ILLEGAL_REQUEST, 0x10, ascq); + set_driver_byte(scmd, DRIVER_SENSE); + scmd->result |= SAM_STAT_CHECK_CONDITION; + set_host_byte(scmd, DID_SOFT_ERROR); } struct fc_function_template zfcp_transport_functions = { @@ -646,33 +726,8 @@ struct fc_function_template zfcp_transport_functions = { /* no functions registered for following dynamic attributes but directly set by LLDD */ .show_host_port_type = 1, + .show_host_symbolic_name = 1, .show_host_speed = 1, .show_host_port_id = 1, - .disable_target_scan = 1, .dd_bsg_size = sizeof(struct zfcp_fsf_ct_els), }; - -struct zfcp_data zfcp_data = { - .scsi_host_template = { - .name = "zfcp", - .module = THIS_MODULE, - .proc_name = "zfcp", - .change_queue_depth = zfcp_scsi_change_queue_depth, - .slave_alloc = zfcp_scsi_slave_alloc, - .slave_configure = zfcp_scsi_slave_configure, - .slave_destroy = zfcp_scsi_slave_destroy, - .queuecommand = zfcp_scsi_queuecommand, - .eh_abort_handler = zfcp_scsi_eh_abort_handler, - .eh_device_reset_handler = zfcp_scsi_eh_device_reset_handler, - .eh_target_reset_handler = zfcp_scsi_eh_target_reset_handler, - .eh_host_reset_handler = zfcp_scsi_eh_host_reset_handler, - .can_queue = 4096, - .this_id = -1, - .sg_tablesize = ZFCP_MAX_SBALES_PER_REQ, - .cmd_per_lun = 1, - .use_clustering = 1, - .sdev_attrs = zfcp_sysfs_sdev_attrs, - .max_sectors = (ZFCP_MAX_SBALES_PER_REQ * 8), - .shost_attrs = zfcp_sysfs_shost_attrs, - }, -}; diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c index a43035d4bd7..672b57219e1 100644 --- a/drivers/s390/scsi/zfcp_sysfs.c +++ b/drivers/s390/scsi/zfcp_sysfs.c @@ -3,12 +3,13 @@ * * sysfs attributes. * - * Copyright IBM Corporation 2008, 2010 + * Copyright IBM Corp. 
2008, 2010 */ #define KMSG_COMPONENT "zfcp" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt +#include <linux/slab.h> #include "zfcp_ext.h" #define ZFCP_DEV_ATTR(_feat, _name, _mode, _show, _store) \ @@ -26,6 +27,16 @@ static ssize_t zfcp_sysfs_##_feat##_##_name##_show(struct device *dev, \ static ZFCP_DEV_ATTR(_feat, _name, S_IRUGO, \ zfcp_sysfs_##_feat##_##_name##_show, NULL); +#define ZFCP_DEFINE_ATTR_CONST(_feat, _name, _format, _value) \ +static ssize_t zfcp_sysfs_##_feat##_##_name##_show(struct device *dev, \ + struct device_attribute *at,\ + char *buf) \ +{ \ + return sprintf(buf, _format, _value); \ +} \ +static ZFCP_DEV_ATTR(_feat, _name, S_IRUGO, \ + zfcp_sysfs_##_feat##_##_name##_show, NULL); + #define ZFCP_DEFINE_A_ATTR(_name, _format, _value) \ static ssize_t zfcp_sysfs_adapter_##_name##_show(struct device *dev, \ struct device_attribute *at,\ @@ -67,63 +78,91 @@ ZFCP_DEFINE_ATTR(zfcp_port, port, access_denied, "%d\n", ZFCP_STATUS_COMMON_ACCESS_DENIED) != 0); ZFCP_DEFINE_ATTR(zfcp_unit, unit, status, "0x%08x\n", - atomic_read(&unit->status)); + zfcp_unit_sdev_status(unit)); ZFCP_DEFINE_ATTR(zfcp_unit, unit, in_recovery, "%d\n", - (atomic_read(&unit->status) & + (zfcp_unit_sdev_status(unit) & ZFCP_STATUS_COMMON_ERP_INUSE) != 0); ZFCP_DEFINE_ATTR(zfcp_unit, unit, access_denied, "%d\n", - (atomic_read(&unit->status) & + (zfcp_unit_sdev_status(unit) & ZFCP_STATUS_COMMON_ACCESS_DENIED) != 0); -ZFCP_DEFINE_ATTR(zfcp_unit, unit, access_shared, "%d\n", - (atomic_read(&unit->status) & - ZFCP_STATUS_UNIT_SHARED) != 0); -ZFCP_DEFINE_ATTR(zfcp_unit, unit, access_readonly, "%d\n", - (atomic_read(&unit->status) & - ZFCP_STATUS_UNIT_READONLY) != 0); - -#define ZFCP_SYSFS_FAILED(_feat_def, _feat, _adapter, _mod_id, _reopen_id) \ -static ssize_t zfcp_sysfs_##_feat##_failed_show(struct device *dev, \ - struct device_attribute *attr, \ - char *buf) \ -{ \ - struct _feat_def *_feat = container_of(dev, struct _feat_def, dev); \ - \ - if (atomic_read(&_feat->status) & ZFCP_STATUS_COMMON_ERP_FAILED) \ - return sprintf(buf, "1\n"); \ - else \ - return sprintf(buf, "0\n"); \ -} \ -static ssize_t zfcp_sysfs_##_feat##_failed_store(struct device *dev, \ - struct device_attribute *attr,\ - const char *buf, size_t count)\ -{ \ - struct _feat_def *_feat = container_of(dev, struct _feat_def, dev); \ - unsigned long val; \ - int retval = 0; \ - \ - if (!(_feat && get_device(&_feat->dev))) \ - return -EBUSY; \ - \ - if (strict_strtoul(buf, 0, &val) || val != 0) { \ - retval = -EINVAL; \ - goto out; \ - } \ - \ - zfcp_erp_modify_##_feat##_status(_feat, _mod_id, NULL, \ - ZFCP_STATUS_COMMON_RUNNING, ZFCP_SET);\ - zfcp_erp_##_feat##_reopen(_feat, ZFCP_STATUS_COMMON_ERP_FAILED, \ - _reopen_id, NULL); \ - zfcp_erp_wait(_adapter); \ -out: \ - put_device(&_feat->dev); \ - return retval ? 
retval : (ssize_t) count; \ -} \ -static ZFCP_DEV_ATTR(_feat, failed, S_IWUSR | S_IRUGO, \ - zfcp_sysfs_##_feat##_failed_show, \ - zfcp_sysfs_##_feat##_failed_store); +ZFCP_DEFINE_ATTR_CONST(unit, access_shared, "%d\n", 0); +ZFCP_DEFINE_ATTR_CONST(unit, access_readonly, "%d\n", 0); + +static ssize_t zfcp_sysfs_port_failed_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct zfcp_port *port = container_of(dev, struct zfcp_port, dev); + + if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_FAILED) + return sprintf(buf, "1\n"); + + return sprintf(buf, "0\n"); +} + +static ssize_t zfcp_sysfs_port_failed_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct zfcp_port *port = container_of(dev, struct zfcp_port, dev); + unsigned long val; + + if (kstrtoul(buf, 0, &val) || val != 0) + return -EINVAL; + + zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_RUNNING); + zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, "sypfai2"); + zfcp_erp_wait(port->adapter); + + return count; +} +static ZFCP_DEV_ATTR(port, failed, S_IWUSR | S_IRUGO, + zfcp_sysfs_port_failed_show, + zfcp_sysfs_port_failed_store); + +static ssize_t zfcp_sysfs_unit_failed_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct zfcp_unit *unit = container_of(dev, struct zfcp_unit, dev); + struct scsi_device *sdev; + unsigned int status, failed = 1; + + sdev = zfcp_unit_sdev(unit); + if (sdev) { + status = atomic_read(&sdev_to_zfcp(sdev)->status); + failed = status & ZFCP_STATUS_COMMON_ERP_FAILED ? 1 : 0; + scsi_device_put(sdev); + } + + return sprintf(buf, "%d\n", failed); +} + +static ssize_t zfcp_sysfs_unit_failed_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct zfcp_unit *unit = container_of(dev, struct zfcp_unit, dev); + unsigned long val; + struct scsi_device *sdev; -ZFCP_SYSFS_FAILED(zfcp_port, port, port->adapter, "sypfai1", "sypfai2"); -ZFCP_SYSFS_FAILED(zfcp_unit, unit, unit->port->adapter, "syufai1", "syufai2"); + if (kstrtoul(buf, 0, &val) || val != 0) + return -EINVAL; + + sdev = zfcp_unit_sdev(unit); + if (sdev) { + zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_RUNNING); + zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED, + "syufai2"); + zfcp_erp_wait(unit->port->adapter); + } else + zfcp_unit_scsi_scan(unit); + + return count; +} +static ZFCP_DEV_ATTR(unit, failed, S_IWUSR | S_IRUGO, + zfcp_sysfs_unit_failed_show, + zfcp_sysfs_unit_failed_store); static ssize_t zfcp_sysfs_adapter_failed_show(struct device *dev, struct device_attribute *attr, @@ -157,15 +196,14 @@ static ssize_t zfcp_sysfs_adapter_failed_store(struct device *dev, if (!adapter) return -ENODEV; - if (strict_strtoul(buf, 0, &val) || val != 0) { + if (kstrtoul(buf, 0, &val) || val != 0) { retval = -EINVAL; goto out; } - zfcp_erp_modify_adapter_status(adapter, "syafai1", NULL, - ZFCP_STATUS_COMMON_RUNNING, ZFCP_SET); + zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING); zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, - "syafai2", NULL); + "syafai2"); zfcp_erp_wait(adapter); out: zfcp_ccw_adapter_put(adapter); @@ -195,6 +233,8 @@ static ssize_t zfcp_sysfs_port_rescan_store(struct device *dev, static ZFCP_DEV_ATTR(adapter, port_rescan, S_IWUSR, NULL, zfcp_sysfs_port_rescan_store); +DEFINE_MUTEX(zfcp_sysfs_port_units_mutex); + static ssize_t zfcp_sysfs_port_remove_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) @@ 
-208,7 +248,7 @@ static ssize_t zfcp_sysfs_port_remove_store(struct device *dev, if (!adapter) return -ENODEV; - if (strict_strtoull(buf, 0, (unsigned long long *) &wwpn)) + if (kstrtoull(buf, 0, (unsigned long long *) &wwpn)) goto out; port = zfcp_get_port_by_wwpn(adapter, wwpn); @@ -217,14 +257,24 @@ static ssize_t zfcp_sysfs_port_remove_store(struct device *dev, else retval = 0; + mutex_lock(&zfcp_sysfs_port_units_mutex); + if (atomic_read(&port->units) > 0) { + retval = -EBUSY; + mutex_unlock(&zfcp_sysfs_port_units_mutex); + goto out; + } + /* port is about to be removed, so no more unit_add */ + atomic_set(&port->units, -1); + mutex_unlock(&zfcp_sysfs_port_units_mutex); + write_lock_irq(&adapter->port_list_lock); list_del(&port->list); write_unlock_irq(&adapter->port_list_lock); put_device(&port->dev); - zfcp_erp_port_shutdown(port, 0, "syprs_1", NULL); - zfcp_device_unregister(&port->dev, &zfcp_sysfs_port_attrs); + zfcp_erp_port_shutdown(port, 0, "syprs_1"); + device_unregister(&port->dev); out: zfcp_ccw_adapter_put(adapter); return retval ? retval : (ssize_t) count; @@ -256,28 +306,17 @@ static ssize_t zfcp_sysfs_unit_add_store(struct device *dev, const char *buf, size_t count) { struct zfcp_port *port = container_of(dev, struct zfcp_port, dev); - struct zfcp_unit *unit; u64 fcp_lun; - int retval = -EINVAL; - - if (!(port && get_device(&port->dev))) - return -EBUSY; + int retval; - if (strict_strtoull(buf, 0, (unsigned long long *) &fcp_lun)) - goto out; + if (kstrtoull(buf, 0, (unsigned long long *) &fcp_lun)) + return -EINVAL; - unit = zfcp_unit_enqueue(port, fcp_lun); - if (IS_ERR(unit)) - goto out; - else - retval = 0; + retval = zfcp_unit_add(port, fcp_lun); + if (retval) + return retval; - zfcp_erp_unit_reopen(unit, 0, "syuas_1", NULL); - zfcp_erp_wait(unit->port->adapter); - flush_work(&unit->scsi_work); -out: - put_device(&port->dev); - return retval ? retval : (ssize_t) count; + return count; } static DEVICE_ATTR(unit_add, S_IWUSR, NULL, zfcp_sysfs_unit_add_store); @@ -286,36 +325,15 @@ static ssize_t zfcp_sysfs_unit_remove_store(struct device *dev, const char *buf, size_t count) { struct zfcp_port *port = container_of(dev, struct zfcp_port, dev); - struct zfcp_unit *unit; u64 fcp_lun; - int retval = -EINVAL; - if (!(port && get_device(&port->dev))) - return -EBUSY; + if (kstrtoull(buf, 0, (unsigned long long *) &fcp_lun)) + return -EINVAL; - if (strict_strtoull(buf, 0, (unsigned long long *) &fcp_lun)) - goto out; + if (zfcp_unit_remove(port, fcp_lun)) + return -EINVAL; - unit = zfcp_get_unit_by_lun(port, fcp_lun); - if (!unit) - goto out; - else - retval = 0; - - /* wait for possible timeout during SCSI probe */ - flush_work(&unit->scsi_work); - - write_lock_irq(&port->unit_list_lock); - list_del(&unit->list); - write_unlock_irq(&port->unit_list_lock); - - put_device(&unit->dev); - - zfcp_erp_unit_shutdown(unit, 0, "syurs_1", NULL); - zfcp_device_unregister(&unit->dev, &zfcp_sysfs_unit_attrs); -out: - put_device(&port->dev); - return retval ? 
retval : (ssize_t) count; + return count; } static DEVICE_ATTR(unit_remove, S_IWUSR, NULL, zfcp_sysfs_unit_remove_store); @@ -328,13 +346,13 @@ static struct attribute *zfcp_port_attrs[] = { &dev_attr_port_access_denied.attr, NULL }; - -/** - * zfcp_sysfs_port_attrs - sysfs attributes for all other ports - */ -struct attribute_group zfcp_sysfs_port_attrs = { +static struct attribute_group zfcp_port_attr_group = { .attrs = zfcp_port_attrs, }; +const struct attribute_group *zfcp_port_attr_groups[] = { + &zfcp_port_attr_group, + NULL, +}; static struct attribute *zfcp_unit_attrs[] = { &dev_attr_unit_failed.attr, @@ -345,10 +363,13 @@ static struct attribute *zfcp_unit_attrs[] = { &dev_attr_unit_access_readonly.attr, NULL }; - -struct attribute_group zfcp_sysfs_unit_attrs = { +static struct attribute_group zfcp_unit_attr_group = { .attrs = zfcp_unit_attrs, }; +const struct attribute_group *zfcp_unit_attr_groups[] = { + &zfcp_unit_attr_group, + NULL, +}; #define ZFCP_DEFINE_LATENCY_ATTR(_name) \ static ssize_t \ @@ -356,9 +377,9 @@ zfcp_sysfs_unit_##_name##_latency_show(struct device *dev, \ struct device_attribute *attr, \ char *buf) { \ struct scsi_device *sdev = to_scsi_device(dev); \ - struct zfcp_unit *unit = sdev->hostdata; \ - struct zfcp_latencies *lat = &unit->latencies; \ - struct zfcp_adapter *adapter = unit->port->adapter; \ + struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); \ + struct zfcp_latencies *lat = &zfcp_sdev->latencies; \ + struct zfcp_adapter *adapter = zfcp_sdev->port->adapter; \ unsigned long long fsum, fmin, fmax, csum, cmin, cmax, cc; \ \ spin_lock_bh(&lat->lock); \ @@ -387,8 +408,8 @@ zfcp_sysfs_unit_##_name##_latency_store(struct device *dev, \ const char *buf, size_t count) \ { \ struct scsi_device *sdev = to_scsi_device(dev); \ - struct zfcp_unit *unit = sdev->hostdata; \ - struct zfcp_latencies *lat = &unit->latencies; \ + struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); \ + struct zfcp_latencies *lat = &zfcp_sdev->latencies; \ unsigned long flags; \ \ spin_lock_irqsave(&lat->lock, flags); \ @@ -416,19 +437,28 @@ static ssize_t zfcp_sysfs_scsi_##_name##_show(struct device *dev, \ struct device_attribute *attr,\ char *buf) \ { \ - struct scsi_device *sdev = to_scsi_device(dev); \ - struct zfcp_unit *unit = sdev->hostdata; \ + struct scsi_device *sdev = to_scsi_device(dev); \ + struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); \ + struct zfcp_port *port = zfcp_sdev->port; \ \ return sprintf(buf, _format, _value); \ } \ static DEVICE_ATTR(_name, S_IRUGO, zfcp_sysfs_scsi_##_name##_show, NULL); ZFCP_DEFINE_SCSI_ATTR(hba_id, "%s\n", - dev_name(&unit->port->adapter->ccw_device->dev)); + dev_name(&port->adapter->ccw_device->dev)); ZFCP_DEFINE_SCSI_ATTR(wwpn, "0x%016llx\n", - (unsigned long long) unit->port->wwpn); -ZFCP_DEFINE_SCSI_ATTR(fcp_lun, "0x%016llx\n", - (unsigned long long) unit->fcp_lun); + (unsigned long long) port->wwpn); + +static ssize_t zfcp_sysfs_scsi_fcp_lun_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct scsi_device *sdev = to_scsi_device(dev); + + return sprintf(buf, "0x%016llx\n", zfcp_scsi_dev_lun(sdev)); +} +static DEVICE_ATTR(fcp_lun, S_IRUGO, zfcp_sysfs_scsi_fcp_lun_show, NULL); struct device_attribute *zfcp_sysfs_sdev_attrs[] = { &dev_attr_fcp_lun, diff --git a/drivers/s390/scsi/zfcp_unit.c b/drivers/s390/scsi/zfcp_unit.c new file mode 100644 index 00000000000..39f5446f721 --- /dev/null +++ b/drivers/s390/scsi/zfcp_unit.c @@ -0,0 +1,255 @@ +/* + * zfcp device driver + * + * Tracking of manually 
configured LUNs and helper functions to + * register the LUNs with the SCSI midlayer. + * + * Copyright IBM Corp. 2010 + */ + +#include "zfcp_def.h" +#include "zfcp_ext.h" + +/** + * zfcp_unit_scsi_scan - Register LUN with SCSI midlayer + * @unit: The zfcp LUN/unit to register + * + * When the SCSI midlayer is not allowed to automatically scan and + * attach SCSI devices, zfcp has to register the single devices with + * the SCSI midlayer. + */ +void zfcp_unit_scsi_scan(struct zfcp_unit *unit) +{ + struct fc_rport *rport = unit->port->rport; + unsigned int lun; + + lun = scsilun_to_int((struct scsi_lun *) &unit->fcp_lun); + + if (rport && rport->port_state == FC_PORTSTATE_ONLINE) + scsi_scan_target(&rport->dev, 0, rport->scsi_target_id, lun, 1); +} + +static void zfcp_unit_scsi_scan_work(struct work_struct *work) +{ + struct zfcp_unit *unit = container_of(work, struct zfcp_unit, + scsi_work); + + zfcp_unit_scsi_scan(unit); + put_device(&unit->dev); +} + +/** + * zfcp_unit_queue_scsi_scan - Register configured units on port + * @port: The zfcp_port where to register units + * + * After opening a port, all units configured on this port have to be + * registered with the SCSI midlayer. This function should be called + * after calling fc_remote_port_add, so that the fc_rport is already + * ONLINE and the call to scsi_scan_target runs the same way as the + * call in the FC transport class. + */ +void zfcp_unit_queue_scsi_scan(struct zfcp_port *port) +{ + struct zfcp_unit *unit; + + read_lock_irq(&port->unit_list_lock); + list_for_each_entry(unit, &port->unit_list, list) { + get_device(&unit->dev); + if (scsi_queue_work(port->adapter->scsi_host, + &unit->scsi_work) <= 0) + put_device(&unit->dev); + } + read_unlock_irq(&port->unit_list_lock); +} + +static struct zfcp_unit *_zfcp_unit_find(struct zfcp_port *port, u64 fcp_lun) +{ + struct zfcp_unit *unit; + + list_for_each_entry(unit, &port->unit_list, list) + if (unit->fcp_lun == fcp_lun) { + get_device(&unit->dev); + return unit; + } + + return NULL; +} + +/** + * zfcp_unit_find - Find and return zfcp_unit with specified FCP LUN + * @port: zfcp_port where to look for the unit + * @fcp_lun: 64 Bit FCP LUN used to identify the zfcp_unit + * + * If zfcp_unit is found, a reference is acquired that has to be + * released later. + * + * Returns: Pointer to the zfcp_unit, or NULL if there is no zfcp_unit + * with the specified FCP LUN. + */ +struct zfcp_unit *zfcp_unit_find(struct zfcp_port *port, u64 fcp_lun) +{ + struct zfcp_unit *unit; + + read_lock_irq(&port->unit_list_lock); + unit = _zfcp_unit_find(port, fcp_lun); + read_unlock_irq(&port->unit_list_lock); + return unit; +} + +/** + * zfcp_unit_release - Decrement the unit counter of the port and free memory of zfcp_unit. + * @dev: pointer to device in zfcp_unit + */ +static void zfcp_unit_release(struct device *dev) +{ + struct zfcp_unit *unit = container_of(dev, struct zfcp_unit, dev); + + atomic_dec(&unit->port->units); + kfree(unit); +} + +/** + * zfcp_unit_add - add unit to the unit list of a port. + * @port: pointer to port where unit is added + * @fcp_lun: FCP LUN of unit to be added + * Returns: 0 on success, negative error code otherwise + * + * Sets up some unit internal structures and creates sysfs entry. 
+ */ +int zfcp_unit_add(struct zfcp_port *port, u64 fcp_lun) +{ + struct zfcp_unit *unit; + int retval = 0; + + mutex_lock(&zfcp_sysfs_port_units_mutex); + if (atomic_read(&port->units) == -1) { + /* port is already gone */ + retval = -ENODEV; + goto out; + } + + unit = zfcp_unit_find(port, fcp_lun); + if (unit) { + put_device(&unit->dev); + retval = -EEXIST; + goto out; + } + + unit = kzalloc(sizeof(struct zfcp_unit), GFP_KERNEL); + if (!unit) { + retval = -ENOMEM; + goto out; + } + + unit->port = port; + unit->fcp_lun = fcp_lun; + unit->dev.parent = &port->dev; + unit->dev.release = zfcp_unit_release; + unit->dev.groups = zfcp_unit_attr_groups; + INIT_WORK(&unit->scsi_work, zfcp_unit_scsi_scan_work); + + if (dev_set_name(&unit->dev, "0x%016llx", + (unsigned long long) fcp_lun)) { + kfree(unit); + retval = -ENOMEM; + goto out; + } + + if (device_register(&unit->dev)) { + put_device(&unit->dev); + retval = -ENOMEM; + goto out; + } + + atomic_inc(&port->units); /* under zfcp_sysfs_port_units_mutex ! */ + + write_lock_irq(&port->unit_list_lock); + list_add_tail(&unit->list, &port->unit_list); + write_unlock_irq(&port->unit_list_lock); + + zfcp_unit_scsi_scan(unit); + +out: + mutex_unlock(&zfcp_sysfs_port_units_mutex); + return retval; +} + +/** + * zfcp_unit_sdev - Return SCSI device for zfcp_unit + * @unit: The zfcp_unit where to get the SCSI device for + * + * Returns: scsi_device pointer on success, NULL if there is no SCSI + * device for this zfcp_unit + * + * On success, the caller also holds a reference to the SCSI device + * that must be released with scsi_device_put. + */ +struct scsi_device *zfcp_unit_sdev(struct zfcp_unit *unit) +{ + struct Scsi_Host *shost; + struct zfcp_port *port; + unsigned int lun; + + lun = scsilun_to_int((struct scsi_lun *) &unit->fcp_lun); + port = unit->port; + shost = port->adapter->scsi_host; + return scsi_device_lookup(shost, 0, port->starget_id, lun); +} + +/** + * zfcp_unit_sdev_status - Return zfcp LUN status for SCSI device + * @unit: The unit to lookup the SCSI device for + * + * Returns the zfcp LUN status field of the SCSI device if the SCSI device + * for the zfcp_unit exists, 0 otherwise. + */ +unsigned int zfcp_unit_sdev_status(struct zfcp_unit *unit) +{ + unsigned int status = 0; + struct scsi_device *sdev; + struct zfcp_scsi_dev *zfcp_sdev; + + sdev = zfcp_unit_sdev(unit); + if (sdev) { + zfcp_sdev = sdev_to_zfcp(sdev); + status = atomic_read(&zfcp_sdev->status); + scsi_device_put(sdev); + } + + return status; +} + +/** + * zfcp_unit_remove - Remove entry from list of configured units + * @port: The port where to remove the unit from the configuration + * @fcp_lun: The 64 bit LUN of the unit to remove + * + * Returns: -EINVAL if a unit with the specified LUN does not exist, + * 0 on success. + */ +int zfcp_unit_remove(struct zfcp_port *port, u64 fcp_lun) +{ + struct zfcp_unit *unit; + struct scsi_device *sdev; + + write_lock_irq(&port->unit_list_lock); + unit = _zfcp_unit_find(port, fcp_lun); + if (unit) + list_del(&unit->list); + write_unlock_irq(&port->unit_list_lock); + + if (!unit) + return -EINVAL; + + sdev = zfcp_unit_sdev(unit); + if (sdev) { + scsi_remove_device(sdev); + scsi_device_put(sdev); + } + + put_device(&unit->dev); + + device_unregister(&unit->dev); + + return 0; +}
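The store routines above coordinate unit creation with port removal through zfcp_sysfs_port_units_mutex and the port->units counter: zfcp_sysfs_port_remove_store() refuses to remove a port while units are still configured and otherwise sets the counter to -1, and zfcp_unit_add() treats -1 as "port is already gone". The following is a minimal, self-contained user-space sketch of that tombstone-counter pattern, assuming pthreads; the identifiers (port_stub, unit_add_stub, port_remove_stub) are invented for illustration and are not part of the zfcp driver.

/*
 * Tombstone-counter sketch: a mutex serializes add against remove,
 * and a count of -1 marks an object that is going away, so no
 * further additions are accepted (mirrors zfcp_unit_add() and
 * zfcp_sysfs_port_remove_store() above).
 */
#include <pthread.h>
#include <stdio.h>

struct port_stub {
	pthread_mutex_t units_mutex;
	int units;		/* >= 0: configured units, -1: port removed */
};

static int unit_add_stub(struct port_stub *port)
{
	int ret = 0;

	pthread_mutex_lock(&port->units_mutex);
	if (port->units == -1)		/* port is already gone */
		ret = -1;
	else
		port->units++;		/* only under units_mutex */
	pthread_mutex_unlock(&port->units_mutex);
	return ret;
}

static int port_remove_stub(struct port_stub *port)
{
	int ret = 0;

	pthread_mutex_lock(&port->units_mutex);
	if (port->units > 0)		/* still in use, like -EBUSY */
		ret = -1;
	else
		port->units = -1;	/* tombstone: no more unit_add */
	pthread_mutex_unlock(&port->units_mutex);
	return ret;
}

int main(void)
{
	struct port_stub port = { PTHREAD_MUTEX_INITIALIZER, 0 };

	printf("add: %d\n", unit_add_stub(&port));		/* 0: added */
	printf("remove: %d\n", port_remove_stub(&port));	/* -1: busy */
	return 0;
}

In the driver itself the decrement happens in zfcp_unit_release() via atomic_dec(), while the add and remove paths serialize on zfcp_sysfs_port_units_mutex.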
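From user space, the unit_add and unit_remove attributes implemented by zfcp_sysfs_unit_add_store() and zfcp_sysfs_unit_remove_store() above are driven by writing an FCP LUN string into the port's sysfs directory. Below is a short sketch of such a writer; it assumes the conventional zfcp sysfs layout /sys/bus/ccw/drivers/zfcp/<bus-ID>/<WWPN>/unit_add, and the bus ID, WWPN, and LUN values are placeholders rather than values taken from this patch.

/*
 * User-space writer for the unit_add attribute handled by
 * zfcp_sysfs_unit_add_store() above. The path and the values are
 * illustrative placeholders only.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *attr =
		"/sys/bus/ccw/drivers/zfcp/0.0.1900/0x5005076300c18154/unit_add";
	const char *lun = "0x4010403200000000\n";
	ssize_t n;
	int fd;

	fd = open(attr, O_WRONLY);
	if (fd < 0) {
		perror("open unit_add");
		return 1;
	}
	n = write(fd, lun, strlen(lun));	/* one write per sysfs store */
	if (n < 0)
		perror("write LUN");
	close(fd);
	return n < 0 ? 1 : 0;
}

On a successful write, the store routine calls zfcp_unit_add(), which registers the new unit in sysfs and triggers zfcp_unit_scsi_scan() so the LUN is attached without a full target scan.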
