/*
 *  drivers/s390/cio/css.c
 *  driver for channel subsystem
 *
 *    Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
 *                       IBM Corporation
 *    Author(s): Arnd Bergmann (arndb@de.ibm.com)
 *               Cornelia Huck (cornelia.huck@de.ibm.com)
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/reboot.h>

#include "css.h"
#include "cio.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chsc.h"
#include "device.h"
#include "idset.h"
#include "chp.h"

int css_init_done = 0;
static int need_reprobe = 0;
static int max_ssid = 0;

struct channel_subsystem *channel_subsystems[__MAX_CSSID + 1];

int css_characteristics_avail = 0;

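/*
 * Invoke fn for every possible subchannel id: all subchannel numbers
 * within each subchannel set up to max_ssid. A non-zero return value
 * from fn ends the walk of the current subchannel set early; the value
 * returned by the last invocation of fn is passed back to the caller.
 */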
int
for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data)
{
	struct subchannel_id schid;
	int ret;

	init_subchannel_id(&schid);
	ret = -ENODEV;
	do {
		do {
			ret = fn(schid, data);
			if (ret)
				break;
		} while (schid.sch_no++ < __MAX_SUBCHANNEL);
		schid.sch_no = 0;
	} while (schid.ssid++ < max_ssid);
	return ret;
}

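/*
 * Allocate and validate a subchannel structure for the given subchannel
 * id. Only I/O subchannels are handled here; intparm is set to the
 * subchannel address so the interrupt handler can find the structure.
 */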
static struct subchannel *
css_alloc_subchannel(struct subchannel_id schid)
{
	struct subchannel *sch;
	int ret;

	sch = kmalloc (sizeof (*sch), GFP_KERNEL | GFP_DMA);
	if (sch == NULL)
		return ERR_PTR(-ENOMEM);
	ret = cio_validate_subchannel (sch, schid);
	if (ret < 0) {
		kfree(sch);
		return ERR_PTR(ret);
	}

	if (sch->st != SUBCHANNEL_TYPE_IO) {
		/* For now we ignore all non-io subchannels. */
		kfree(sch);
		return ERR_PTR(-EINVAL);
	}

	/*
	 * Set intparm to subchannel address.
	 * This is fine even on 64bit since the subchannel is always located
	 * under 2G.
	 */
	sch->schib.pmcw.intparm = (u32)(addr_t)sch;
	ret = cio_modify(sch);
	if (ret) {
		kfree(sch->lock);
		kfree(sch);
		return ERR_PTR(ret);
	}
	return sch;
}

static void
css_free_subchannel(struct subchannel *sch)
{
	if (sch) {
		/* Reset intparm to zeroes. */
		sch->schib.pmcw.intparm = 0;
		cio_modify(sch);
		kfree(sch->lock);
		kfree(sch);
	}
}

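/*
 * Release callback for the subchannel's struct device. The console
 * subchannel is not dynamically allocated (it is set up before the
 * allocator is usable), so it must not be freed here.
 */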
static void
css_subchannel_release(struct device *dev)
{
	struct subchannel *sch;

	sch = to_subchannel(dev);
	if (!cio_is_console(sch->schid)) {
		kfree(sch->lock);
		kfree(sch);
	}
}

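/*
 * Register/unregister a subchannel's struct device with the driver core.
 * reg_mutex serializes registration and unregistration of the same
 * subchannel.
 */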
static int css_sch_device_register(struct subchannel *sch)
{
	int ret;

	mutex_lock(&sch->reg_mutex);
	ret = device_register(&sch->dev);
	mutex_unlock(&sch->reg_mutex);
	return ret;
}

void css_sch_device_unregister(struct subchannel *sch)
{
	mutex_lock(&sch->reg_mutex);
	device_unregister(&sch->dev);
	mutex_unlock(&sch->reg_mutex);
}

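/*
 * Derive ssd info from the PMCW: the path mask and channel-path ids are
 * taken from the path installed mask (pim). Used when ssd data cannot be
 * obtained via CHSC.
 */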
static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
{
	int i;
	int mask;

	memset(ssd, 0, sizeof(struct chsc_ssd_info));
	ssd->path_mask = pmcw->pim;
	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (pmcw->pim & mask) {
			chp_id_init(&ssd->chpid[i]);
			ssd->chpid[i].id = pmcw->chpid[i];
		}
	}
}

static void ssd_register_chpids(struct chsc_ssd_info *ssd)
{
	int i;
	int mask;

	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (ssd->path_mask & mask)
			if (!chp_is_registered(ssd->chpid[i]))
				chp_new(ssd->chpid[i]);
	}
}

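/*
 * Refresh the subchannel's ssd info. The console subchannel is set up too
 * early for CHSC calls and uses the PMCW data only. For all other
 * subchannels, CHSC data is preferred with the PMCW as a fallback, and
 * any channel paths not yet known are registered.
 */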
void css_update_ssd_info(struct subchannel *sch)
{
	int ret;

	if (cio_is_console(sch->schid)) {
		/* Console is initialized too early for functions requiring
		 * memory allocation. */
		ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
	} else {
		ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info);
		if (ret)
			ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
		ssd_register_chpids(&sch->ssd_info);
	}
}

static int css_register_subchannel(struct subchannel *sch)
{
	int ret;

	/* Initialize the subchannel structure */
sch->