Diffstat (limited to 'drivers/s390/cio/chsc.c')
-rw-r--r--	drivers/s390/cio/chsc.c	729
1 file changed, 503 insertions(+), 226 deletions(-)
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 29826fdd47b..e3bf885f4a6 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -1,23 +1,27 @@
/*
- * drivers/s390/cio/chsc.c
* S/390 common I/O routines -- channel subsystem call
*
- * Copyright IBM Corp. 1999,2008
+ * Copyright IBM Corp. 1999,2012
* Author(s): Ingo Adlung (adlung@de.ibm.com)
* Cornelia Huck (cornelia.huck@de.ibm.com)
* Arnd Bergmann (arndb@de.ibm.com)
*/
+#define KMSG_COMPONENT "cio"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/device.h>
+#include <linux/pci.h>
#include <asm/cio.h>
#include <asm/chpid.h>
#include <asm/chsc.h>
+#include <asm/crw.h>
+#include <asm/isc.h>
-#include "../s390mach.h"
#include "css.h"
#include "cio.h"
#include "cio_debug.h"
@@ -26,6 +30,8 @@
#include "chsc.h"
static void *sei_page;
+static void *chsc_page;
+static DEFINE_SPINLOCK(chsc_page_lock);
/**
* chsc_error_from_response() - convert a chsc response to an error
@@ -44,9 +50,16 @@ int chsc_error_from_response(int response)
case 0x0007:
case 0x0008:
case 0x000a:
+ case 0x0104:
return -EINVAL;
case 0x0004:
return -EOPNOTSUPP;
+ case 0x000b:
+ case 0x0107: /* "Channel busy" for the op 0x003d */
+ return -EBUSY;
+ case 0x0100:
+ case 0x0102:
+ return -ENOMEM;
default:
return -EIO;
}
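
The new response codes slot into the calling convention used throughout this file: issue the chsc instruction, treat a nonzero condition code as the command not being accepted, then translate the response code. A minimal sketch of that pattern (the helper name is illustrative, not part of the patch):

	/* Typical call sequence for a CHSC command; 'area' is a
	 * DMA-addressable request/response block that starts with a
	 * struct chsc_header. */
	static int chsc_issue(void *area, struct chsc_header *response)
	{
		if (chsc(area))		/* cc != 0: command not accepted */
			return -EIO;
		return chsc_error_from_response(response->code);
	}
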
@@ -79,17 +92,15 @@ struct chsc_ssd_area {
int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
{
- unsigned long page;
struct chsc_ssd_area *ssd_area;
int ccode;
int ret;
int i;
int mask;
- page = get_zeroed_page(GFP_KERNEL | GFP_DMA);
- if (!page)
- return -ENOMEM;
- ssd_area = (struct chsc_ssd_area *) page;
+ spin_lock_irq(&chsc_page_lock);
+ memset(chsc_page, 0, PAGE_SIZE);
+ ssd_area = chsc_page;
ssd_area->request.length = 0x0010;
ssd_area->request.code = 0x0004;
ssd_area->ssid = schid.ssid;
@@ -100,25 +111,25 @@ int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
/* Check response. */
if (ccode > 0) {
ret = (ccode == 3) ? -ENODEV : -EBUSY;
- goto out_free;
+ goto out;
}
ret = chsc_error_from_response(ssd_area->response.code);
if (ret != 0) {
CIO_MSG_EVENT(2, "chsc: ssd failed for 0.%x.%04x (rc=%04x)\n",
schid.ssid, schid.sch_no,
ssd_area->response.code);
- goto out_free;
+ goto out;
}
if (!ssd_area->sch_valid) {
ret = -ENODEV;
- goto out_free;
+ goto out;
}
/* Copy data */
ret = 0;
memset(ssd, 0, sizeof(struct chsc_ssd_info));
if ((ssd_area->st != SUBCHANNEL_TYPE_IO) &&
(ssd_area->st != SUBCHANNEL_TYPE_MSG))
- goto out_free;
+ goto out;
ssd->path_mask = ssd_area->path_mask;
ssd->fla_valid_mask = ssd_area->fla_valid_mask;
for (i = 0; i < 8; i++) {
@@ -130,11 +141,70 @@ int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
if (ssd_area->fla_valid_mask & mask)
ssd->fla[i] = ssd_area->fla[i];
}
-out_free:
- free_page(page);
+out:
+ spin_unlock_irq(&chsc_page_lock);
return ret;
}
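
This is the first of several conversions from a per-call get_zeroed_page() to the single, statically allocated chsc_page, serialized by chsc_page_lock. The same shape recurs in the hunks below; schematically:

	spin_lock_irq(&chsc_page_lock);	/* _irqsave where callers may run
					 * with interrupts disabled */
	memset(chsc_page, 0, PAGE_SIZE);
	area = chsc_page;
	/* build the request, issue chsc, evaluate the response */
	spin_unlock_irq(&chsc_page_lock);
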
+/**
+ * chsc_ssqd() - store subchannel QDIO data (SSQD)
+ * @schid: id of the subchannel on which SSQD is performed
+ * @ssqd: request and response block for SSQD
+ *
+ * Returns 0 on success.
+ */
+int chsc_ssqd(struct subchannel_id schid, struct chsc_ssqd_area *ssqd)
+{
+ memset(ssqd, 0, sizeof(*ssqd));
+ ssqd->request.length = 0x0010;
+ ssqd->request.code = 0x0024;
+ ssqd->first_sch = schid.sch_no;
+ ssqd->last_sch = schid.sch_no;
+ ssqd->ssid = schid.ssid;
+
+ if (chsc(ssqd))
+ return -EIO;
+
+ return chsc_error_from_response(ssqd->response.code);
+}
+EXPORT_SYMBOL_GPL(chsc_ssqd);
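
chsc_ssqd deliberately leaves buffer management to the caller, which must supply a block the chsc instruction can address. A hedged usage sketch along the lines of the qdio caller (allocation strategy and the qdio_ssqd field name are assumptions, not part of this patch):

	struct chsc_ssqd_area *ssqd;
	int rc;

	ssqd = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!ssqd)
		return -ENOMEM;
	rc = chsc_ssqd(schid, ssqd);
	if (!rc)
		/* qdio-specific data follows the response header */
		memcpy(&ssqd_desc, &ssqd->qdio_ssqd, sizeof(ssqd_desc));
	free_page((unsigned long)ssqd);
	return rc;
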
+
+/**
+ * chsc_sadc() - set adapter device controls (SADC)
+ * @schid: id of the subchannel on which SADC is performed
+ * @scssc: request and response block for SADC
+ * @summary_indicator_addr: summary indicator address
+ * @subchannel_indicator_addr: subchannel indicator address
+ *
+ * Returns 0 on success.
+ */
+int chsc_sadc(struct subchannel_id schid, struct chsc_scssc_area *scssc,
+ u64 summary_indicator_addr, u64 subchannel_indicator_addr)
+{
+ memset(scssc, 0, sizeof(*scssc));
+ scssc->request.length = 0x0fe0;
+ scssc->request.code = 0x0021;
+ scssc->operation_code = 0;
+
+ scssc->summary_indicator_addr = summary_indicator_addr;
+ scssc->subchannel_indicator_addr = subchannel_indicator_addr;
+
+ scssc->ks = PAGE_DEFAULT_KEY >> 4;
+ scssc->kc = PAGE_DEFAULT_KEY >> 4;
+ scssc->isc = QDIO_AIRQ_ISC;
+ scssc->schid = schid;
+
+ /* enable the time delay disablement facility */
+ if (css_general_characteristics.aif_tdd)
+ scssc->word_with_d_bit = 0x10000000;
+
+ if (chsc(scssc))
+ return -EIO;
+
+ return chsc_error_from_response(scssc->response.code);
+}
+EXPORT_SYMBOL_GPL(chsc_sadc);
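
chsc_sadc follows the same caller-allocates convention; the qdio adapter-interrupt setup would pass the addresses of its summary and subchannel indicators. A sketch under those assumptions:

	struct chsc_scssc_area *scssc;
	int rc;

	scssc = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scssc)
		return -ENOMEM;
	rc = chsc_sadc(schid, scssc,
		       (u64)(unsigned long)summary_indicator,
		       (u64)(unsigned long)subchannel_indicator);
	free_page((unsigned long)scssc);
	return rc;
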
+
static int s390_subchannel_remove_chpid(struct subchannel *sch, void *data)
{
spin_lock_irq(sch->lock);
@@ -168,26 +238,6 @@ void chsc_chp_offline(struct chp_id chpid)
for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &link);
}
-static int s390_process_res_acc_new_sch(struct subchannel_id schid, void *data)
-{
- struct schib schib;
- /*
- * We don't know the device yet, but since a path
- * may be available now to the device we'll have
- * to do recognition again.
- * Since we don't have any idea about which chpid
- * that beast may be on we'll have to do a stsch
- * on all devices, grr...
- */
- if (stsch_err(schid, &schib))
- /* We're through */
- return -ENXIO;
-
- /* Put it on the slow path. */
- css_schedule_eval(schid);
- return 0;
-}
-
static int __s390_process_res_acc(struct subchannel *sch, void *data)
{
spin_lock_irq(sch->lock);
@@ -218,8 +268,8 @@ static void s390_process_res_acc(struct chp_link *link)
* The more information we have (info), the less scanning
* we will have to do.
*/
- for_each_subchannel_staged(__s390_process_res_acc,
- s390_process_res_acc_new_sch, link);
+ for_each_subchannel_staged(__s390_process_res_acc, NULL, link);
+ css_schedule_reprobe();
}
static int
@@ -252,26 +302,46 @@ __get_chpid_from_lir(void *data)
return (u16) (lir->indesc[0]&0x000000ff);
}
-struct chsc_sei_area {
- struct chsc_header request;
+struct chsc_sei_nt0_area {
+ u8 flags;
+ u8 vf; /* validity flags */
+ u8 rs; /* reporting source */
+ u8 cc; /* content code */
+ u16 fla; /* full link address */
+ u16 rsid; /* reporting source id */
u32 reserved1;
u32 reserved2;
- u32 reserved3;
- struct chsc_header response;
- u32 reserved4;
- u8 flags;
- u8 vf; /* validity flags */
- u8 rs; /* reporting source */
- u8 cc; /* content code */
- u16 fla; /* full link address */
- u16 rsid; /* reporting source id */
- u32 reserved5;
- u32 reserved6;
- u8 ccdf[4096 - 16 - 24]; /* content-code dependent field */
/* ccdf has to be big enough for a link-incident record */
-} __attribute__ ((packed));
-
-static void chsc_process_sei_link_incident(struct chsc_sei_area *sei_area)
+ u8 ccdf[PAGE_SIZE - 24 - 16]; /* content-code dependent field */
+} __packed;
+
+struct chsc_sei_nt2_area {
+ u8 flags; /* p and v bit */
+ u8 reserved1;
+ u8 reserved2;
+ u8 cc; /* content code */
+ u32 reserved3[13];
+ u8 ccdf[PAGE_SIZE - 24 - 56]; /* content-code dependent field */
+} __packed;
+
+#define CHSC_SEI_NT0 (1ULL << 63)
+#define CHSC_SEI_NT2 (1ULL << 61)
+
+struct chsc_sei {
+ struct chsc_header request;
+ u32 reserved1;
+ u64 ntsm; /* notification type mask */
+ struct chsc_header response;
+ u32 :24;
+ u8 nt;
+ union {
+ struct chsc_sei_nt0_area nt0_area;
+ struct chsc_sei_nt2_area nt2_area;
+ u8 nt_area[PAGE_SIZE - 24];
+ } u;
+} __packed;
+
+static void chsc_process_sei_link_incident(struct chsc_sei_nt0_area *sei_area)
{
struct chp_id chpid;
int id;
@@ -290,7 +360,7 @@ static void chsc_process_sei_link_incident(struct chsc_sei_area *sei_area)
}
}
-static void chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
+static void chsc_process_sei_res_acc(struct chsc_sei_nt0_area *sei_area)
{
struct chp_link link;
struct chp_id chpid;
@@ -322,17 +392,48 @@ static void chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
s390_process_res_acc(&link);
}
+static void chsc_process_sei_chp_avail(struct chsc_sei_nt0_area *sei_area)
+{
+ struct channel_path *chp;
+ struct chp_id chpid;
+ u8 *data;
+ int num;
+
+ CIO_CRW_EVENT(4, "chsc: channel path availability information\n");
+ if (sei_area->rs != 0)
+ return;
+ data = sei_area->ccdf;
+ chp_id_init(&chpid);
+ for (num = 0; num <= __MAX_CHPID; num++) {
+ if (!chp_test_bit(data, num))
+ continue;
+ chpid.id = num;
+
+ CIO_CRW_EVENT(4, "Update information for channel path "
+ "%x.%02x\n", chpid.cssid, chpid.id);
+ chp = chpid_to_chp(chpid);
+ if (!chp) {
+ chp_new(chpid);
+ continue;
+ }
+ mutex_lock(&chp->lock);
+ chp_update_desc(chp);
+ mutex_unlock(&chp->lock);
+ }
+}
+
struct chp_config_data {
u8 map[32];
u8 op;
u8 pc;
};
-static void chsc_process_sei_chp_config(struct chsc_sei_area *sei_area)
+static void chsc_process_sei_chp_config(struct chsc_sei_nt0_area *sei_area)
{
struct chp_config_data *data;
struct chp_id chpid;
int num;
+ char *events[3] = {"configure", "deconfigure", "cancel deconfigure"};
CIO_CRW_EVENT(4, "chsc: channel-path-configuration notification\n");
if (sei_area->rs != 0)
@@ -343,8 +444,8 @@ static void chsc_process_sei_chp_config(struct chsc_sei_area *sei_area)
if (!chp_test_bit(data->map, num))
continue;
chpid.id = num;
- printk(KERN_WARNING "cio: processing configure event %d for "
- "chpid %x.%02x\n", data->op, chpid.cssid, chpid.id);
+ pr_notice("Processing %s for channel path %x.%02x\n",
+ events[data->op], chpid.cssid, chpid.id);
switch (data->op) {
case 0:
chp_cfg_schedule(chpid, 1);
@@ -359,34 +460,139 @@ static void chsc_process_sei_chp_config(struct chsc_sei_area *sei_area)
}
}
-static void chsc_process_sei(struct chsc_sei_area *sei_area)
+static void chsc_process_sei_scm_change(struct chsc_sei_nt0_area *sei_area)
{
- /* Check if we might have lost some information. */
- if (sei_area->flags & 0x40) {
- CIO_CRW_EVENT(2, "chsc: event overflow\n");
- css_schedule_eval_all();
+ int ret;
+
+ CIO_CRW_EVENT(4, "chsc: scm change notification\n");
+ if (sei_area->rs != 7)
+ return;
+
+ ret = scm_update_information();
+ if (ret)
+ CIO_CRW_EVENT(0, "chsc: updating change notification"
+ " failed (rc=%d).\n", ret);
+}
+
+static void chsc_process_sei_scm_avail(struct chsc_sei_nt0_area *sei_area)
+{
+ int ret;
+
+ CIO_CRW_EVENT(4, "chsc: scm available information\n");
+ if (sei_area->rs != 7)
+ return;
+
+ ret = scm_process_availability_information();
+ if (ret)
+ CIO_CRW_EVENT(0, "chsc: process availability information"
+ " failed (rc=%d).\n", ret);
+}
+
+static void chsc_process_sei_nt2(struct chsc_sei_nt2_area *sei_area)
+{
+ switch (sei_area->cc) {
+ case 1:
+ zpci_event_error(sei_area->ccdf);
+ break;
+ case 2:
+ zpci_event_availability(sei_area->ccdf);
+ break;
+ default:
+ CIO_CRW_EVENT(2, "chsc: sei nt2 unhandled cc=%d\n",
+ sei_area->cc);
+ break;
}
+}
+
+static void chsc_process_sei_nt0(struct chsc_sei_nt0_area *sei_area)
+{
/* which kind of information was stored? */
switch (sei_area->cc) {
case 1: /* link incident*/
chsc_process_sei_link_incident(sei_area);
break;
- case 2: /* i/o resource accessibiliy */
+ case 2: /* i/o resource accessibility */
chsc_process_sei_res_acc(sei_area);
break;
+ case 7: /* channel-path-availability information */
+ chsc_process_sei_chp_avail(sei_area);
+ break;
case 8: /* channel-path-configuration notification */
chsc_process_sei_chp_config(sei_area);
break;
+ case 12: /* scm change notification */
+ chsc_process_sei_scm_change(sei_area);
+ break;
+ case 14: /* scm available notification */
+ chsc_process_sei_scm_avail(sei_area);
+ break;
default: /* other stuff */
- CIO_CRW_EVENT(4, "chsc: unhandled sei content code %d\n",
+ CIO_CRW_EVENT(2, "chsc: sei nt0 unhandled cc=%d\n",
sei_area->cc);
break;
}
+
+ /* Check if we might have lost some information. */
+ if (sei_area->flags & 0x40) {
+ CIO_CRW_EVENT(2, "chsc: event overflow\n");
+ css_schedule_eval_all();
+ }
+}
+
+static void chsc_process_event_information(struct chsc_sei *sei, u64 ntsm)
+{
+ static int ntsm_unsupported;
+
+ while (true) {
+ memset(sei, 0, sizeof(*sei));
+ sei->request.length = 0x0010;
+ sei->request.code = 0x000e;
+ if (!ntsm_unsupported)
+ sei->ntsm = ntsm;
+
+ if (chsc(sei))
+ break;
+
+ if (sei->response.code != 0x0001) {
+ CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x, ntsm=%llx)\n",
+ sei->response.code, sei->ntsm);
+
+ if (sei->response.code == 3 && sei->ntsm) {
+ /* Fallback for old firmware. */
+ ntsm_unsupported = 1;
+ continue;
+ }
+ break;
+ }
+
+ CIO_CRW_EVENT(2, "chsc: sei successful (nt=%d)\n", sei->nt);
+ switch (sei->nt) {
+ case 0:
+ chsc_process_sei_nt0(&sei->u.nt0_area);
+ break;
+ case 2:
+ chsc_process_sei_nt2(&sei->u.nt2_area);
+ break;
+ default:
+ CIO_CRW_EVENT(2, "chsc: unhandled nt: %d\n", sei->nt);
+ break;
+ }
+
+ if (!(sei->u.nt0_area.flags & 0x80))
+ break;
+ }
}
+/*
+ * Handle channel subsystem related CRWs.
+ * Use store event information to find out what's going on.
+ *
+ * Note: Access to sei_page is serialized through machine check handler
+ * thread, so no need for locking.
+ */
static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
{
- struct chsc_sei_area *sei_area;
+ struct chsc_sei *sei = sei_page;
if (overflow) {
css_schedule_eval_all();
@@ -396,29 +602,9 @@ static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
"chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
crw0->erc, crw0->rsid);
- if (!sei_page)
- return;
- /* Access to sei_page is serialized through machine check handler
- * thread, so no need for locking. */
- sei_area = sei_page;
CIO_TRACE_EVENT(2, "prcss");
- do {
- memset(sei_area, 0, sizeof(*sei_area));
- sei_area->request.length = 0x0010;
- sei_area->request.code = 0x000e;
- if (chsc(sei_area))
- break;
-
- if (sei_area->response.code == 0x0001) {
- CIO_CRW_EVENT(4, "chsc: sei successful\n");
- chsc_process_sei(sei_area);
- } else {
- CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n",
- sei_area->response.code);
- break;
- }
- } while (sei_area->flags & 0x80);
+ chsc_process_event_information(sei, CHSC_SEI_NT0 | CHSC_SEI_NT2);
}
void chsc_chp_online(struct chp_id chpid)
@@ -436,6 +622,7 @@ void chsc_chp_online(struct chp_id chpid)
css_wait_for_slow_path();
for_each_subchannel_staged(__s390_process_res_acc, NULL,
&link);
+ css_schedule_reprobe();
}
}
@@ -470,19 +657,6 @@ static int s390_subchannel_vary_chpid_on(struct subchannel *sch, void *data)
return 0;
}
-static int
-__s390_vary_chpid_on(struct subchannel_id schid, void *data)
-{
- struct schib schib;
-
- if (stsch_err(schid, &schib))
- /* We're through */
- return -ENXIO;
- /* Put it on the slow path. */
- css_schedule_eval(schid);
- return 0;
-}
-
/**
* chsc_chp_vary - propagate channel-path vary operation to subchannels
* @chpid: channel-path ID
@@ -490,22 +664,22 @@ __s390_vary_chpid_on(struct subchannel_id schid, void *data)
*/
int chsc_chp_vary(struct chp_id chpid, int on)
{
- struct chp_link link;
+ struct channel_path *chp = chpid_to_chp(chpid);
- memset(&link, 0, sizeof(struct chp_link));
- link.chpid = chpid;
/* Wait until previous actions have settled. */
css_wait_for_slow_path();
/*
* Redo PathVerification on the devices the chpid connects to
*/
-
- if (on)
+ if (on) {
+ /* Try to update the channel path description. */
+ chp_update_desc(chp);
for_each_subchannel_staged(s390_subchannel_vary_chpid_on,
- __s390_vary_chpid_on, &link);
- else
+ NULL, &chpid);
+ css_schedule_reprobe();
+ } else
for_each_subchannel_staged(s390_subchannel_vary_chpid_off,
- NULL, &link);
+ NULL, &chpid);
return 0;
}
@@ -545,8 +719,7 @@ cleanup:
return ret;
}
-static int
-__chsc_do_secm(struct channel_subsystem *css, int enable, void *page)
+int __chsc_do_secm(struct channel_subsystem *css, int enable)
{
struct {
struct chsc_header request;
@@ -567,61 +740,61 @@ __chsc_do_secm(struct channel_subsystem *css, int enable, void *page)
} __attribute__ ((packed)) *secm_area;
int ret, ccode;
- secm_area = page;
+ spin_lock_irq(&chsc_page_lock);
+ memset(chsc_page, 0, PAGE_SIZE);
+ secm_area = chsc_page;
secm_area->request.length = 0x0050;
secm_area->request.code = 0x0016;
- secm_area->key = PAGE_DEFAULT_KEY;
+ secm_area->key = PAGE_DEFAULT_KEY >> 4;
secm_area->cub_addr1 = (u64)(unsigned long)css->cub_addr1;
secm_area->cub_addr2 = (u64)(unsigned long)css->cub_addr2;
secm_area->operation_code = enable ? 0 : 1;
ccode = chsc(secm_area);
- if (ccode > 0)
- return (ccode == 3) ? -ENODEV : -EBUSY;
+ if (ccode > 0) {
+ ret = (ccode == 3) ? -ENODEV : -EBUSY;
+ goto out;
+ }
switch (secm_area->response.code) {
case 0x0102:
case 0x0103:
ret = -EINVAL;
+ break;
default:
ret = chsc_error_from_response(secm_area->response.code);
}
if (ret != 0)
CIO_CRW_EVENT(2, "chsc: secm failed (rc=%04x)\n",
secm_area->response.code);
+out:
+ spin_unlock_irq(&chsc_page_lock);
return ret;
}
int
chsc_secm(struct channel_subsystem *css, int enable)
{
- void *secm_area;
int ret;
- secm_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
- if (!secm_area)
- return -ENOMEM;
-
if (enable && !css->cm_enabled) {
css->cub_addr1 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
css->cub_addr2 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!css->cub_addr1 || !css->cub_addr2) {
free_page((unsigned long)css->cub_addr1);
free_page((unsigned long)css->cub_addr2);
- free_page((unsigned long)secm_area);
return -ENOMEM;
}
}
- ret = __chsc_do_secm(css, enable, secm_area);
+ ret = __chsc_do_secm(css, enable);
if (!ret) {
css->cm_enabled = enable;
if (css->cm_enabled) {
ret = chsc_add_cmg_attr(css);
if (ret) {
- memset(secm_area, 0, PAGE_SIZE);
- __chsc_do_secm(css, 0, secm_area);
+ __chsc_do_secm(css, 0);
css->cm_enabled = 0;
}
} else
@@ -631,44 +804,24 @@ chsc_secm(struct channel_subsystem *css, int enable)
free_page((unsigned long)css->cub_addr1);
free_page((unsigned long)css->cub_addr2);
}
- free_page((unsigned long)secm_area);
return ret;
}
int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt,
- int c, int m,
- struct chsc_response_struct *resp)
+ int c, int m, void *page)
{
+ struct chsc_scpd *scpd_area;
int ccode, ret;
- struct {
- struct chsc_header request;
- u32 : 2;
- u32 m : 1;
- u32 c : 1;
- u32 fmt : 4;
- u32 cssid : 8;
- u32 : 4;
- u32 rfmt : 4;
- u32 first_chpid : 8;
- u32 : 24;
- u32 last_chpid : 8;
- u32 zeroes1;
- struct chsc_header response;
- u8 data[PAGE_SIZE - 20];
- } __attribute__ ((packed)) *scpd_area;
-
if ((rfmt == 1) && !css_general_characteristics.fcs)
return -EINVAL;
if ((rfmt == 2) && !css_general_characteristics.cib)
return -EINVAL;
- scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
- if (!scpd_area)
- return -ENOMEM;
+ memset(page, 0, PAGE_SIZE);
+ scpd_area = page;
scpd_area->request.length = 0x0010;
scpd_area->request.code = 0x0002;
-
scpd_area->cssid = chpid.cssid;
scpd_area->first_chpid = chpid.id;
scpd_area->last_chpid = chpid.id;
@@ -678,20 +831,13 @@ int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt,
scpd_area->rfmt = rfmt;
ccode = chsc(scpd_area);
- if (ccode > 0) {
- ret = (ccode == 3) ? -ENODEV : -EBUSY;
- goto out;
- }
+ if (ccode > 0)
+ return (ccode == 3) ? -ENODEV : -EBUSY;
ret = chsc_error_from_response(scpd_area->response.code);
- if (ret == 0)
- /* Success. */
- memcpy(resp, &scpd_area->response, scpd_area->response.length);
- else
+ if (ret)
CIO_CRW_EVENT(2, "chsc: scpd failed (rc=%04x)\n",
scpd_area->response.code);
-out:
- free_page((unsigned long)scpd_area);
return ret;
}
EXPORT_SYMBOL_GPL(chsc_determine_channel_path_desc);
@@ -700,17 +846,39 @@ int chsc_determine_base_channel_path_desc(struct chp_id chpid,
struct channel_path_desc *desc)
{
struct chsc_response_struct *chsc_resp;
+ struct chsc_scpd *scpd_area;
+ unsigned long flags;
int ret;
- chsc_resp = kzalloc(sizeof(*chsc_resp), GFP_KERNEL);
- if (!chsc_resp)
- return -ENOMEM;
- ret = chsc_determine_channel_path_desc(chpid, 0, 0, 0, 0, chsc_resp);
+ spin_lock_irqsave(&chsc_page_lock, flags);
+ scpd_area = chsc_page;
+ ret = chsc_determine_channel_path_desc(chpid, 0, 0, 0, 0, scpd_area);
+ if (ret)
+ goto out;
+ chsc_resp = (void *)&scpd_area->response;
+ memcpy(desc, &chsc_resp->data, sizeof(*desc));
+out:
+ spin_unlock_irqrestore(&chsc_page_lock, flags);
+ return ret;
+}
+
+int chsc_determine_fmt1_channel_path_desc(struct chp_id chpid,
+ struct channel_path_desc_fmt1 *desc)
+{
+ struct chsc_response_struct *chsc_resp;
+ struct chsc_scpd *scpd_area;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&chsc_page_lock, flags);
+ scpd_area = chsc_page;
+ ret = chsc_determine_channel_path_desc(chpid, 0, 0, 1, 0, scpd_area);
if (ret)
- goto out_free;
- memcpy(desc, &chsc_resp->data, chsc_resp->length);
-out_free:
- kfree(chsc_resp);
+ goto out;
+ chsc_resp = (void *)&scpd_area->response;
+ memcpy(desc, &chsc_resp->data, sizeof(*desc));
+out:
+ spin_unlock_irqrestore(&chsc_page_lock, flags);
return ret;
}
@@ -718,33 +886,22 @@ static void
chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
struct cmg_chars *chars)
{
- switch (chp->cmg) {
- case 2:
- case 3:
- chp->cmg_chars = kmalloc(sizeof(struct cmg_chars),
- GFP_KERNEL);
- if (chp->cmg_chars) {
- int i, mask;
- struct cmg_chars *cmg_chars;
-
- cmg_chars = chp->cmg_chars;
- for (i = 0; i < NR_MEASUREMENT_CHARS; i++) {
- mask = 0x80 >> (i + 3);
- if (cmcv & mask)
- cmg_chars->values[i] = chars->values[i];
- else
- cmg_chars->values[i] = 0;
- }
- }
- break;
- default:
- /* No cmg-dependent data. */
- break;
+ struct cmg_chars *cmg_chars;
+ int i, mask;
+
+ cmg_chars = chp->cmg_chars;
+ for (i = 0; i < NR_MEASUREMENT_CHARS; i++) {
+ mask = 0x80 >> (i + 3);
+ if (cmcv & mask)
+ cmg_chars->values[i] = chars->values[i];
+ else
+ cmg_chars->values[i] = 0;
}
}
int chsc_get_channel_measurement_chars(struct channel_path *chp)
{
+ struct cmg_chars *cmg_chars;
int ccode, ret;
struct {
@@ -768,13 +925,16 @@ int chsc_get_channel_measurement_chars(struct channel_path *chp)
u32 data[NR_MEASUREMENT_CHARS];
} __attribute__ ((packed)) *scmc_area;
- scmc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
- if (!scmc_area)
+ chp->cmg_chars = NULL;
+ cmg_chars = kmalloc(sizeof(*cmg_chars), GFP_KERNEL);
+ if (!cmg_chars)
return -ENOMEM;
+ spin_lock_irq(&chsc_page_lock);
+ memset(chsc_page, 0, PAGE_SIZE);
+ scmc_area = chsc_page;
scmc_area->request.length = 0x0010;
scmc_area->request.code = 0x0022;
-
scmc_area->first_chpid = chp->chpid.id;
scmc_area->last_chpid = chp->chpid.id;
@@ -785,52 +945,63 @@ int chsc_get_channel_measurement_chars(struct channel_path *chp)
}
ret = chsc_error_from_response(scmc_area->response.code);
- if (ret == 0) {
- /* Success. */
- if (!scmc_area->not_valid) {
- chp->cmg = scmc_area->cmg;
- chp->shared = scmc_area->shared;
- chsc_initialize_cmg_chars(chp, scmc_area->cmcv,
- (struct cmg_chars *)
- &scmc_area->data);
- } else {
- chp->cmg = -1;
- chp->shared = -1;
- }
- } else {
+ if (ret) {
CIO_CRW_EVENT(2, "chsc: scmc failed (rc=%04x)\n",
scmc_area->response.code);
+ goto out;
+ }
+ if (scmc_area->not_valid) {
+ chp->cmg = -1;
+ chp->shared = -1;
+ goto out;
+ }
+ chp->cmg = scmc_area->cmg;
+ chp->shared = scmc_area->shared;
+ if (chp->cmg != 2 && chp->cmg != 3) {
+ /* No cmg-dependent data. */
+ goto out;
}
+ chp->cmg_chars = cmg_chars;
+ chsc_initialize_cmg_chars(chp, scmc_area->cmcv,
+ (struct cmg_chars *) &scmc_area->data);
out:
- free_page((unsigned long)scmc_area);
+ spin_unlock_irq(&chsc_page_lock);
+ if (!chp->cmg_chars)
+ kfree(cmg_chars);
+
return ret;
}
-int __init chsc_alloc_sei_area(void)
+int __init chsc_init(void)
{
int ret;
sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
- if (!sei_page) {
- CIO_MSG_EVENT(0, "Can't allocate page for processing of "
- "chsc machine checks!\n");
- return -ENOMEM;
+ chsc_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ if (!sei_page || !chsc_page) {
+ ret = -ENOMEM;
+ goto out_err;
}
- ret = s390_register_crw_handler(CRW_RSC_CSS, chsc_process_crw);
+ ret = crw_register_handler(CRW_RSC_CSS, chsc_process_crw);
if (ret)
- kfree(sei_page);
+ goto out_err;
+ return ret;
+out_err:
+ free_page((unsigned long)chsc_page);
+ free_page((unsigned long)sei_page);
return ret;
}
-void __init chsc_free_sei_area(void)
+void __init chsc_init_cleanup(void)
{
- s390_unregister_crw_handler(CRW_RSC_CSS);
- kfree(sei_page);
+ crw_unregister_handler(CRW_RSC_CSS);
+ free_page((unsigned long)chsc_page);
+ free_page((unsigned long)sei_page);
}
-int __init
-chsc_enable_facility(int operation_code)
+int chsc_enable_facility(int operation_code)
{
+ unsigned long flags;
int ret;
struct {
struct chsc_header request;
@@ -847,9 +1018,9 @@ chsc_enable_facility(int operation_code)
u32 reserved6:24;
} __attribute__ ((packed)) *sda_area;
- sda_area = (void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
- if (!sda_area)
- return -ENOMEM;
+ spin_lock_irqsave(&chsc_page_lock, flags);
+ memset(chsc_page, 0, PAGE_SIZE);
+ sda_area = chsc_page;
sda_area->request.length = 0x0400;
sda_area->request.code = 0x0031;
sda_area->operation_code = operation_code;
@@ -870,8 +1041,8 @@ chsc_enable_facility(int operation_code)
if (ret != 0)
CIO_CRW_EVENT(2, "chsc: sda (oc=%x) failed (rc=%04x)\n",
operation_code, sda_area->response.code);
- out:
- free_page((unsigned long)sda_area);
+out:
+ spin_unlock_irqrestore(&chsc_page_lock, flags);
return ret;
}
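
Dropping the __init annotation (see the hunk above at the function definition) allows the facility to be enabled again outside early boot, e.g. on resume from hibernation. A sketch of such a call site (the constant name is an assumption taken from chsc.h):

	/* e.g. from the channel-subsystem resume path (illustrative) */
	ret = chsc_enable_facility(CHSC_SDA_OC_MSS);
	if (ret)
		pr_warn("MSS facility could not be re-enabled (%d)\n", ret);
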
@@ -890,13 +1061,12 @@ chsc_determine_css_characteristics(void)
struct chsc_header response;
u32 reserved4;
u32 general_char[510];
- u32 chsc_char[518];
+ u32 chsc_char[508];
} __attribute__ ((packed)) *scsc_area;
- scsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
- if (!scsc_area)
- return -ENOMEM;
-
+ spin_lock_irq(&chsc_page_lock);
+ memset(chsc_page, 0, PAGE_SIZE);
+ scsc_area = chsc_page;
scsc_area->request.length = 0x0010;
scsc_area->request.code = 0x0010;
@@ -916,7 +1086,7 @@ chsc_determine_css_characteristics(void)
CIO_CRW_EVENT(2, "chsc: scsc failed (rc=%04x)\n",
scsc_area->response.code);
exit:
- free_page ((unsigned long) scsc_area);
+ spin_unlock_irq(&chsc_page_lock);
return result;
}
@@ -971,3 +1141,110 @@ int chsc_sstpi(void *page, void *result, size_t size)
return (rr->response.code == 0x0001) ? 0 : -EIO;
}
+int chsc_siosl(struct subchannel_id schid)
+{
+ struct {
+ struct chsc_header request;
+ u32 word1;
+ struct subchannel_id sid;
+ u32 word3;
+ struct chsc_header response;
+ u32 word[11];
+ } __attribute__ ((packed)) *siosl_area;
+ unsigned long flags;
+ int ccode;
+ int rc;
+
+ spin_lock_irqsave(&chsc_page_lock, flags);
+ memset(chsc_page, 0, PAGE_SIZE);
+ siosl_area = chsc_page;
+ siosl_area->request.length = 0x0010;
+ siosl_area->request.code = 0x0046;
+ siosl_area->word1 = 0x80000000;
+ siosl_area->sid = schid;
+
+ ccode = chsc(siosl_area);
+ if (ccode > 0) {
+ if (ccode == 3)
+ rc = -ENODEV;
+ else
+ rc = -EBUSY;
+ CIO_MSG_EVENT(2, "chsc: chsc failed for 0.%x.%04x (ccode=%d)\n",
+ schid.ssid, schid.sch_no, ccode);
+ goto out;
+ }
+ rc = chsc_error_from_response(siosl_area->response.code);
+ if (rc)
+ CIO_MSG_EVENT(2, "chsc: siosl failed for 0.%x.%04x (rc=%04x)\n",
+ schid.ssid, schid.sch_no,
+ siosl_area->response.code);
+ else
+ CIO_MSG_EVENT(4, "chsc: siosl succeeded for 0.%x.%04x\n",
+ schid.ssid, schid.sch_no);
+out:
+ spin_unlock_irqrestore(&chsc_page_lock, flags);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(chsc_siosl);
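
chsc_siosl asks the channel subsystem to initiate logging for one subchannel and is exported so that device drivers can trigger it from their error handlers. A hedged sketch of a call site (the surrounding helper names are illustrative):

	/* request logging for the subchannel behind a ccw device */
	struct subchannel *sch = to_subchannel(cdev->dev.parent);

	if (chsc_siosl(sch->schid))
		dev_warn(&cdev->dev, "could not initiate logging\n");
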
+
+/**
+ * chsc_scm_info() - store SCM information (SSI)
+ * @scm_area: request and response block for SSI
+ * @token: continuation token
+ *
+ * Returns 0 on success.
+ */
+int chsc_scm_info(struct chsc_scm_info *scm_area, u64 token)
+{
+ int ccode, ret;
+
+ memset(scm_area, 0, sizeof(*scm_area));
+ scm_area->request.length = 0x0020;
+ scm_area->request.code = 0x004C;
+ scm_area->reqtok = token;
+
+ ccode = chsc(scm_area);
+ if (ccode > 0) {
+ ret = (ccode == 3) ? -ENODEV : -EBUSY;
+ goto out;
+ }
+ ret = chsc_error_from_response(scm_area->response.code);
+ if (ret != 0)
+ CIO_MSG_EVENT(2, "chsc: scm info failed (rc=%04x)\n",
+ scm_area->response.code);
+out:
+ return ret;
+}
+EXPORT_SYMBOL_GPL(chsc_scm_info);
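
An SSI response may not fit into a single block, so callers loop on the continuation token until none is returned. A sketch, assuming the response exposes the returned token as restok (as the SCM bus driver expects):

	u64 token = 0;

	do {
		ret = chsc_scm_info(scm_area, token);
		if (ret)
			break;
		/* consume the increments reported in this block */
		token = scm_area->restok;
	} while (token);
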
+
+/**
+ * chsc_pnso_brinfo() - Perform Network-Subchannel Operation, Bridge Info.
+ * @schid: id of the subchannel on which PNSO is performed
+ * @brinfo_area: request and response block for the operation
+ * @resume_token: resume token for multiblock response
+ * @cnc: Boolean change-notification control
+ *
+ * brinfo_area must be allocated by the caller with get_zeroed_page(GFP_KERNEL)
+ *
+ * Returns 0 on success.
+ */
+int chsc_pnso_brinfo(struct subchannel_id schid,
+ struct chsc_pnso_area *brinfo_area,
+ struct chsc_brinfo_resume_token resume_token,
+ int cnc)
+{
+ memset(brinfo_area, 0, sizeof(*brinfo_area));
+ brinfo_area->request.length = 0x0030;
+ brinfo_area->request.code = 0x003d; /* network-subchannel operation */
+ brinfo_area->m = schid.m;
+ brinfo_area->ssid = schid.ssid;
+ brinfo_area->sch = schid.sch_no;
+ brinfo_area->cssid = schid.cssid;
+ brinfo_area->oc = 0; /* Store-network-bridging-information list */
+ brinfo_area->resume_token = resume_token;
+ brinfo_area->n = (cnc != 0);
+ if (chsc(brinfo_area))
+ return -EIO;
+ return chsc_error_from_response(brinfo_area->response.code);
+}
+EXPORT_SYMBOL_GPL(chsc_pnso_brinfo);
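
Bridge information can likewise span several responses; the caller re-issues the request with the resume token returned by the previous call until it comes back zero. A sketch (the response_token field name is hypothetical; check the chsc_pnso_area layout in chsc.h):

	struct chsc_brinfo_resume_token rtok = {};
	int rc;

	do {
		rc = chsc_pnso_brinfo(schid, brinfo_area, rtok, 1);
		if (rc)
			break;
		/* process the entries in this response block */
		rtok = brinfo_area->response_token; /* assumed field name */
	} while (memchr_inv(&rtok, 0, sizeof(rtok)));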