Diffstat (limited to 'drivers/s390/cio/css.c')
| -rw-r--r-- | drivers/s390/cio/css.c | 282 | 
1 file changed, 164 insertions, 118 deletions
```diff
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index a5050e21715..0268e5fd59b 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -35,6 +35,7 @@
 int css_init_done = 0;
 int max_ssid;
 struct channel_subsystem *channel_subsystems[__MAX_CSSID + 1];
+static struct bus_type css_bus_type;
 
 int
 for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data)
@@ -68,7 +69,8 @@ static int call_fn_known_sch(struct device *dev, void *data)
 	struct cb_data *cb = data;
 	int rc = 0;
 
-	idset_sch_del(cb->set, sch->schid);
+	if (cb->set)
+		idset_sch_del(cb->set, sch->schid);
 	if (cb->fn_known_sch)
 		rc = cb->fn_known_sch(sch, cb->data);
 	return rc;
@@ -114,6 +116,13 @@ int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
 	cb.fn_known_sch = fn_known;
 	cb.fn_unknown_sch = fn_unknown;
 
+	if (fn_known && !fn_unknown) {
+		/* Skip idset allocation in case of known-only loop. */
+		cb.set = NULL;
+		return bus_for_each_dev(&css_bus_type, NULL, &cb,
+					call_fn_known_sch);
+	}
+
 	cb.set = idset_sch_new();
 	if (!cb.set)
 		/* fall back to brute force scanning in case of oom */
@@ -136,37 +145,53 @@ out:
 
 static void css_sch_todo(struct work_struct *work);
 
-static struct subchannel *
-css_alloc_subchannel(struct subchannel_id schid)
+static int css_sch_create_locks(struct subchannel *sch)
+{
+	sch->lock = kmalloc(sizeof(*sch->lock), GFP_KERNEL);
+	if (!sch->lock)
+		return -ENOMEM;
+
+	spin_lock_init(sch->lock);
+	mutex_init(&sch->reg_mutex);
+
+	return 0;
+}
+
+static void css_subchannel_release(struct device *dev)
+{
+	struct subchannel *sch = to_subchannel(dev);
+
+	sch->config.intparm = 0;
+	cio_commit_config(sch);
+	kfree(sch->lock);
+	kfree(sch);
+}
+
+struct subchannel *css_alloc_subchannel(struct subchannel_id schid)
 {
 	struct subchannel *sch;
 	int ret;
 
-	sch = kmalloc (sizeof (*sch), GFP_KERNEL | GFP_DMA);
-	if (sch == NULL)
+	sch = kzalloc(sizeof(*sch), GFP_KERNEL | GFP_DMA);
+	if (!sch)
 		return ERR_PTR(-ENOMEM);
-	ret = cio_validate_subchannel (sch, schid);
-	if (ret < 0) {
-		kfree(sch);
-		return ERR_PTR(ret);
-	}
+
+	ret = cio_validate_subchannel(sch, schid);
+	if (ret < 0)
+		goto err;
+
+	ret = css_sch_create_locks(sch);
+	if (ret)
+		goto err;
+
 	INIT_WORK(&sch->todo_work, css_sch_todo);
+	sch->dev.release = &css_subchannel_release;
+	device_initialize(&sch->dev);
 	return sch;
-}
 
-static void
-css_subchannel_release(struct device *dev)
-{
-	struct subchannel *sch;
-
-	sch = to_subchannel(dev);
-	if (!cio_is_console(sch->schid)) {
-		/* Reset intparm to zeroes. */
-		sch->config.intparm = 0;
-		cio_commit_config(sch);
-		kfree(sch->lock);
-		kfree(sch);
-	}
+err:
+	kfree(sch);
+	return ERR_PTR(ret);
 }
 
 static int css_sch_device_register(struct subchannel *sch)
@@ -176,7 +201,7 @@ static int css_sch_device_register(struct subchannel *sch)
 	mutex_lock(&sch->reg_mutex);
 	dev_set_name(&sch->dev, "0.%x.%04x", sch->schid.ssid,
 		     sch->schid.sch_no);
-	ret = device_register(&sch->dev);
+	ret = device_add(&sch->dev);
 	mutex_unlock(&sch->reg_mutex);
 	return ret;
 }
@@ -194,51 +219,6 @@ void css_sch_device_unregister(struct subchannel *sch)
 }
 EXPORT_SYMBOL_GPL(css_sch_device_unregister);
 
-static void css_sch_todo(struct work_struct *work)
-{
-	struct subchannel *sch;
-	enum sch_todo todo;
-
-	sch = container_of(work, struct subchannel, todo_work);
-	/* Find out todo. */
-	spin_lock_irq(sch->lock);
-	todo = sch->todo;
-	CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid,
-		      sch->schid.sch_no, todo);
-	sch->todo = SCH_TODO_NOTHING;
-	spin_unlock_irq(sch->lock);
-	/* Perform todo. */
-	if (todo == SCH_TODO_UNREG)
-		css_sch_device_unregister(sch);
-	/* Release workqueue ref. */
-	put_device(&sch->dev);
-}
-
-/**
- * css_sched_sch_todo - schedule a subchannel operation
- * @sch: subchannel
- * @todo: todo
- *
- * Schedule the operation identified by @todo to be performed on the slow path
- * workqueue. Do nothing if another operation with higher priority is already
- * scheduled. Needs to be called with subchannel lock held.
- */
-void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
-{
-	CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n",
-		      sch->schid.ssid, sch->schid.sch_no, todo);
-	if (sch->todo >= todo)
-		return;
-	/* Get workqueue ref. */
-	if (!get_device(&sch->dev))
-		return;
-	sch->todo = todo;
-	if (!queue_work(cio_work_q, &sch->todo_work)) {
-		/* Already queued, release workqueue ref. */
-		put_device(&sch->dev);
-	}
-}
-
 static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
 {
 	int i;
@@ -272,16 +252,11 @@ void css_update_ssd_info(struct subchannel *sch)
 {
 	int ret;
 
-	if (cio_is_console(sch->schid)) {
-		/* Console is initialized too early for functions requiring
-		 * memory allocation. */
+	ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info);
+	if (ret)
 		ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
-	} else {
-		ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info);
-		if (ret)
-			ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
-		ssd_register_chpids(&sch->ssd_info);
-	}
+
+	ssd_register_chpids(&sch->ssd_info);
 }
 
 static ssize_t type_show(struct device *dev, struct device_attribute *attr,
@@ -319,14 +294,13 @@ static const struct attribute_group *default_subch_attr_groups[] = {
 	NULL,
 };
 
-static int css_register_subchannel(struct subchannel *sch)
+int css_register_subchannel(struct subchannel *sch)
 {
 	int ret;
 
 	/* Initialize the subchannel structure */
 	sch->dev.parent = &channel_subsystems[0]->device;
 	sch->dev.bus = &css_bus_type;
-	sch->dev.release = &css_subchannel_release;
 	sch->dev.groups = default_subch_attr_groups;
 	/*
 	 * We don't want to generate uevents for I/O subchannels that don't
@@ -358,23 +332,19 @@ static int css_register_subchannel(struct subchannel *sch)
 	return ret;
 }
 
-int css_probe_device(struct subchannel_id schid)
+static int css_probe_device(struct subchannel_id schid)
 {
-	int ret;
 	struct subchannel *sch;
+	int ret;
+
+	sch = css_alloc_subchannel(schid);
+	if (IS_ERR(sch))
+		return PTR_ERR(sch);
 
-	if (cio_is_console(schid))
-		sch = cio_get_console_subchannel();
-	else {
-		sch = css_alloc_subchannel(schid);
-		if (IS_ERR(sch))
-			return PTR_ERR(sch);
-	}
 	ret = css_register_subchannel(sch);
-	if (ret) {
-		if (!cio_is_console(schid))
-			put_device(&sch->dev);
-	}
+	if (ret)
+		put_device(&sch->dev);
+
 	return ret;
 }
 
@@ -421,7 +391,11 @@ static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
 		/* Will be done on the slow path. */
 		return -EAGAIN;
 	}
-	if (stsch_err(schid, &schib) || !css_sch_is_valid(&schib)) {
+	if (stsch_err(schid, &schib)) {
+		/* Subchannel is not provided. */
+		return -ENXIO;
+	}
+	if (!css_sch_is_valid(&schib)) {
 		/* Unusable - ignore. */
 		return 0;
 	}
@@ -465,6 +439,66 @@ static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
 		css_schedule_eval(schid);
 }
 
+/**
+ * css_sched_sch_todo - schedule a subchannel operation
+ * @sch: subchannel
+ * @todo: todo
+ *
+ * Schedule the operation identified by @todo to be performed on the slow path
+ * workqueue. Do nothing if another operation with higher priority is already
+ * scheduled. Needs to be called with subchannel lock held.
+ */
+void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
+{
+	CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n",
+		      sch->schid.ssid, sch->schid.sch_no, todo);
+	if (sch->todo >= todo)
+		return;
+	/* Get workqueue ref. */
+	if (!get_device(&sch->dev))
+		return;
+	sch->todo = todo;
+	if (!queue_work(cio_work_q, &sch->todo_work)) {
+		/* Already queued, release workqueue ref. */
+		put_device(&sch->dev);
+	}
+}
+EXPORT_SYMBOL_GPL(css_sched_sch_todo);
+
+static void css_sch_todo(struct work_struct *work)
+{
+	struct subchannel *sch;
+	enum sch_todo todo;
+	int ret;
+
+	sch = container_of(work, struct subchannel, todo_work);
+	/* Find out todo. */
+	spin_lock_irq(sch->lock);
+	todo = sch->todo;
+	CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid,
+		      sch->schid.sch_no, todo);
+	sch->todo = SCH_TODO_NOTHING;
+	spin_unlock_irq(sch->lock);
+	/* Perform todo. */
+	switch (todo) {
+	case SCH_TODO_NOTHING:
+		break;
+	case SCH_TODO_EVAL:
+		ret = css_evaluate_known_subchannel(sch, 1);
+		if (ret == -EAGAIN) {
+			spin_lock_irq(sch->lock);
+			css_sched_sch_todo(sch, todo);
+			spin_unlock_irq(sch->lock);
+		}
+		break;
+	case SCH_TODO_UNREG:
+		css_sch_device_unregister(sch);
+		break;
+	}
+	/* Release workqueue ref. */
+	put_device(&sch->dev);
+}
+
 static struct idset *slow_subchannel_set;
 static spinlock_t slow_subchannel_lock;
 static wait_queue_head_t css_eval_wq;
@@ -520,10 +554,16 @@ static int slow_eval_unknown_fn(struct subchannel_id schid, void *data)
 		case -ENOMEM:
 		case -EIO:
 			/* These should abort looping */
+			spin_lock_irq(&slow_subchannel_lock);
+			idset_sch_del_subseq(slow_subchannel_set, schid);
+			spin_unlock_irq(&slow_subchannel_lock);
 			break;
 		default:
 			rc = 0;
 		}
+		/* Allow scheduling here since the containing loop might
+		 * take a while.  */
+		cond_resched();
 	}
 	return rc;
 }
@@ -543,7 +583,7 @@ static void css_slow_path_func(struct work_struct *unused)
 	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
 }
 
-static DECLARE_WORK(slow_path_work, css_slow_path_func);
+static DECLARE_DELAYED_WORK(slow_path_work, css_slow_path_func);
 struct workqueue_struct *cio_work_q;
 
 void css_schedule_eval(struct subchannel_id schid)
@@ -553,7 +593,7 @@ void css_schedule_eval(struct subchannel_id schid)
 	spin_lock_irqsave(&slow_subchannel_lock, flags);
 	idset_sch_add(slow_subchannel_set, schid);
 	atomic_set(&css_eval_scheduled, 1);
-	queue_work(cio_work_q, &slow_path_work);
+	queue_delayed_work(cio_work_q, &slow_path_work, 0);
 	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
 }
 
@@ -564,7 +604,7 @@ void css_schedule_eval_all(void)
 	spin_lock_irqsave(&slow_subchannel_lock, flags);
 	idset_fill(slow_subchannel_set);
 	atomic_set(&css_eval_scheduled, 1);
-	queue_work(cio_work_q, &slow_path_work);
+	queue_delayed_work(cio_work_q, &slow_path_work, 0);
 	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
 }
 
@@ -577,7 +617,7 @@ static int __unset_registered(struct device *dev, void *data)
 	return 0;
 }
 
-static void css_schedule_eval_all_unreg(void)
+void css_schedule_eval_all_unreg(unsigned long delay)
 {
 	unsigned long flags;
 	struct idset *unreg_set;
@@ -595,7 +635,7 @@ static void css_schedule_eval_all_unreg(void)
 	spin_lock_irqsave(&slow_subchannel_lock, flags);
 	idset_add_set(slow_subchannel_set, unreg_set);
 	atomic_set(&css_eval_scheduled, 1);
-	queue_work(cio_work_q, &slow_path_work);
+	queue_delayed_work(cio_work_q, &slow_path_work, delay);
 	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
 	idset_free(unreg_set);
 }
@@ -608,7 +648,8 @@ void css_wait_for_slow_path(void)
 /* Schedule reprobing of all unregistered subchannels. */
 void css_schedule_reprobe(void)
 {
-	css_schedule_eval_all_unreg();
+	/* Schedule with a delay to allow merging of subsequent calls. */
+	css_schedule_eval_all_unreg(1 * HZ);
 }
 EXPORT_SYMBOL_GPL(css_schedule_reprobe);
 
@@ -618,6 +659,7 @@ EXPORT_SYMBOL_GPL(css_schedule_reprobe);
 static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
 {
 	struct subchannel_id mchk_schid;
+	struct subchannel *sch;
 
 	if (overflow) {
 		css_schedule_eval_all();
@@ -635,8 +677,15 @@ static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
 	init_subchannel_id(&mchk_schid);
 	mchk_schid.sch_no = crw0->rsid;
 	if (crw1)
-		mchk_schid.ssid = (crw1->rsid >> 8) & 3;
+		mchk_schid.ssid = (crw1->rsid >> 4) & 3;
+	if (crw0->erc == CRW_ERC_PMOD) {
+		sch = get_subchannel_by_schid(mchk_schid);
+		if (sch) {
+			css_update_ssd_info(sch);
+			put_device(&sch->dev);
+		}
+	}
 
 	/*
 	 * Since we are always presented with IPI in the CRW, we have to
 	 * use stsch() to find out if the subchannel in question has come
@@ -705,7 +754,7 @@ css_cm_enable_store(struct device *dev, struct device_attribute *attr,
 	int ret;
 	unsigned long val;
 
-	ret = strict_strtoul(buf, 16, &val);
+	ret = kstrtoul(buf, 16, &val);
 	if (ret)
 		return ret;
 	mutex_lock(&css->mutex);
@@ -741,7 +790,7 @@ static int __init setup_css(int nr)
 	css->pseudo_subchannel->dev.release = css_subchannel_release;
 	dev_set_name(&css->pseudo_subchannel->dev, "defunct");
 	mutex_init(&css->pseudo_subchannel->reg_mutex);
-	ret = cio_create_sch_lock(css->pseudo_subchannel);
+	ret = css_sch_create_locks(css->pseudo_subchannel);
 	if (ret) {
 		kfree(css->pseudo_subchannel);
 		return ret;
@@ -751,7 +800,7 @@ static int __init setup_css(int nr)
 	css->cssid = nr;
 	dev_set_name(&css->device, "css%x", nr);
 	css->device.release = channel_subsystem_release;
-	tod_high = (u32) (get_clock() >> 32);
+	tod_high = (u32) (get_tod_clock() >> 32);
 	css_generate_pgid(css, tod_high);
 	return 0;
 }
@@ -805,8 +854,8 @@ static int css_power_event(struct notifier_block *this, unsigned long event,
 				mutex_unlock(&css->mutex);
 				continue;
 			}
-			if (__chsc_do_secm(css, 0))
-				ret = NOTIFY_BAD;
+			ret = __chsc_do_secm(css, 0);
+			ret = notifier_from_errno(ret);
 			mutex_unlock(&css->mutex);
 		}
 		break;
@@ -822,8 +871,8 @@ static int css_power_event(struct notifier_block *this, unsigned long event,
 				mutex_unlock(&css->mutex);
 				continue;
 			}
-			if (__chsc_do_secm(css, 1))
-				ret = NOTIFY_BAD;
+			ret = __chsc_do_secm(css, 1);
+			ret = notifier_from_errno(ret);
 			mutex_unlock(&css->mutex);
 		}
 		/* search for subchannels, which appeared during hibernation */
@@ -841,8 +890,7 @@ static struct notifier_block css_power_notifier = {
 
 /*
  * Now that the driver core is running, we can setup our channel subsystem.
- * The struct subchannel's are created during probing (except for the
- * static console subchannel).
+ * The struct subchannel's are created during probing.
 */
 static int __init css_bus_init(void)
 {
@@ -1021,6 +1069,8 @@ int css_complete_work(void)
 */
 static int __init channel_subsystem_init_sync(void)
 {
+	/* Register subchannels which are already in use. */
+	cio_register_early_subchannels();
 	/* Start initial subchannel evaluation. */
 	css_schedule_eval_all();
 	css_complete_work();
@@ -1036,9 +1086,8 @@ void channel_subsystem_reinit(void)
 	chsc_enable_facility(CHSC_SDA_OC_MSS);
 	chp_id_for_each(&chpid) {
 		chp = chpid_to_chp(chpid);
-		if (!chp)
-			continue;
-		chsc_determine_base_channel_path_desc(chpid, &chp->desc);
+		if (chp)
+			chp_update_desc(chp);
 	}
 }
 
@@ -1206,7 +1255,7 @@ static const struct dev_pm_ops css_pm_ops = {
 	.restore = css_pm_restore,
 };
 
-struct bus_type css_bus_type = {
+static struct bus_type css_bus_type = {
 	.name     = "css",
 	.match    = css_bus_match,
 	.probe    = css_probe,
@@ -1225,9 +1274,7 @@ struct bus_type css_bus_type = {
 */
 int css_driver_register(struct css_driver *cdrv)
 {
-	cdrv->drv.name = cdrv->name;
 	cdrv->drv.bus = &css_bus_type;
-	cdrv->drv.owner = cdrv->owner;
 	return driver_register(&cdrv->drv);
 }
 EXPORT_SYMBOL_GPL(css_driver_register);
@@ -1245,4 +1292,3 @@ void css_driver_unregister(struct css_driver *cdrv)
 EXPORT_SYMBOL_GPL(css_driver_unregister);
 
 MODULE_LICENSE("GPL");
-EXPORT_SYMBOL(css_bus_type);
```
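
Two patterns in this diff are worth a closer look. First, the slow-path work item changes from `DECLARE_WORK` to `DECLARE_DELAYED_WORK`, and `css_schedule_reprobe()` now passes a one-second delay: `queue_delayed_work()` returns false without requeueing while the work is still pending, so a burst of reprobe requests collapses into a single slow-path scan. Below is a minimal, self-contained sketch of that coalescing pattern; the `demo_*` names and the use of the generic `system_wq` (rather than the driver's `cio_work_q`) are illustrative assumptions, not code from the patch.

```c
#include <linux/module.h>
#include <linux/workqueue.h>

static void demo_scan_fn(struct work_struct *work);
static DECLARE_DELAYED_WORK(demo_scan_work, demo_scan_fn);

/* Runs once per burst of requests; stands in for the slow-path scan. */
static void demo_scan_fn(struct work_struct *work)
{
	pr_info("demo: running one coalesced scan\n");
}

/*
 * Request a scan. queue_delayed_work() is a no-op (returns false) while
 * the work is already pending, so any number of calls within the
 * one-second window result in a single execution of demo_scan_fn().
 */
static void demo_request_scan(void)
{
	queue_delayed_work(system_wq, &demo_scan_work, 1 * HZ);
}

static int __init demo_init(void)
{
	demo_request_scan();
	demo_request_scan();	/* merged with the request above */
	return 0;
}

static void __exit demo_exit(void)
{
	cancel_delayed_work_sync(&demo_scan_work);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
```

Note that `css_schedule_eval()` and `css_schedule_eval_all()` pass a delay of 0, so ordinary evaluation keeps its old latency; only `css_schedule_reprobe()` opts into the merge window.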
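
Second, subchannel allocation now uses the driver-core two-step `device_initialize()`/`device_add()` instead of `device_register()`: the release callback and the initial reference are set up at allocation time, so every failure after `css_alloc_subchannel()` succeeds, including a failing `device_add()`, unwinds with a plain `put_device()`. A sketch of that lifetime pattern, with a hypothetical `demo_dev` standing in for `struct subchannel`:

```c
#include <linux/device.h>
#include <linux/slab.h>

/* Hypothetical wrapper object, standing in for struct subchannel. */
struct demo_dev {
	struct device dev;
};

/* Called by the driver core when the last reference is dropped,
 * whether or not device_add() ever ran or succeeded. */
static void demo_release(struct device *dev)
{
	kfree(container_of(dev, struct demo_dev, dev));
}

static struct demo_dev *demo_alloc(void)
{
	struct demo_dev *d = kzalloc(sizeof(*d), GFP_KERNEL);

	if (!d)
		return NULL;
	d->dev.release = demo_release;
	device_initialize(&d->dev);	/* refcount = 1, release armed */
	return d;
}

static int demo_register(struct demo_dev *d)
{
	int ret;

	dev_set_name(&d->dev, "demo0");
	ret = device_add(&d->dev);	/* second half of device_register() */
	if (ret)
		put_device(&d->dev);	/* demo_release() frees d */
	return ret;
}
```

Because the release callback owns all freeing, callers never `kfree()` the object directly once `device_initialize()` has run, which is why `css_probe_device()` can drop its `cio_is_console()` special cases and use a bare `put_device()` on failure.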
