Diffstat (limited to 'drivers/s390/cio/qdio_main.c')
-rw-r--r--  drivers/s390/cio/qdio_main.c  225
1 file changed, 120 insertions(+), 105 deletions(-)
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index e06fa03ea1e..848e3b64ea6 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -129,7 +129,6 @@ static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
int rc, tmp_count = count, tmp_start = start, nr = q->nr, retried = 0;
unsigned int ccq = 0;
- BUG_ON(!q->irq_ptr->sch_token);
qperf_inc(q, eqbs);
if (!q->is_input_q)
@@ -147,7 +146,6 @@ again:
}
if (rc == 2) {
- BUG_ON(tmp_count == count);
qperf_inc(q, eqbs_partial);
DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS part:%02x",
tmp_count);
@@ -189,8 +187,6 @@ static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start,
if (!count)
return 0;
-
- BUG_ON(!q->irq_ptr->sch_token);
qperf_inc(q, sqbs);
if (!q->is_input_q)
@@ -199,7 +195,7 @@ again:
ccq = do_sqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count);
rc = qdio_check_ccq(q, ccq);
if (!rc) {
- WARN_ON(tmp_count);
+ WARN_ON_ONCE(tmp_count);
return count - tmp_count;
}
@@ -224,9 +220,6 @@ static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
unsigned char __state = 0;
int i;
- BUG_ON(bufnr > QDIO_MAX_BUFFERS_MASK);
- BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q);
-
if (is_qebsm(q))
return qdio_do_eqbs(q, state, bufnr, count, auto_ack);
@@ -258,9 +251,6 @@ static inline int set_buf_states(struct qdio_q *q, int bufnr,
{
int i;
- BUG_ON(bufnr > QDIO_MAX_BUFFERS_MASK);
- BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q);
-
if (is_qebsm(q))
return qdio_do_sqbs(q, state, bufnr, count);
@@ -345,14 +335,13 @@ again:
/* hipersocket busy condition */
if (unlikely(*busy_bit)) {
- WARN_ON(queue_type(q) != QDIO_IQDIO_QFMT || cc != 2);
retries++;
if (!start_time) {
- start_time = get_clock();
+ start_time = get_tod_clock_fast();
goto again;
}
- if ((get_clock() - start_time) < QDIO_BUSY_BIT_PATIENCE)
+ if (get_tod_clock_fast() - start_time < QDIO_BUSY_BIT_PATIENCE)
goto again;
}
if (retries) {
@@ -420,17 +409,16 @@ static inline void qdio_stop_polling(struct qdio_q *q)
set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);
}
-static inline void account_sbals(struct qdio_q *q, int count)
+static inline void account_sbals(struct qdio_q *q, unsigned int count)
{
- int pos = 0;
+ int pos;
q->q_stats.nr_sbal_total += count;
if (count == QDIO_MAX_BUFFERS_MASK) {
q->q_stats.nr_sbals[7]++;
return;
}
- while (count >>= 1)
- pos++;
+ pos = ilog2(count);
q->q_stats.nr_sbals[pos]++;
}
@@ -515,7 +503,7 @@ static int get_inbound_buffer_frontier(struct qdio_q *q)
int count, stop;
unsigned char state = 0;
- q->timestamp = get_clock();
+ q->timestamp = get_tod_clock_fast();
/*
* Don't check 128 buffers, as otherwise qdio_inbound_q_moved
@@ -539,7 +527,7 @@ static int get_inbound_buffer_frontier(struct qdio_q *q)
case SLSB_P_INPUT_PRIMED:
inbound_primed(q, count);
q->first_to_check = add_buf(q->first_to_check, count);
- if (atomic_sub(count, &q->nr_buf_used) == 0)
+ if (atomic_sub_return(count, &q->nr_buf_used) == 0)
qperf_inc(q, inbound_queue_full);
if (q->irq_ptr->perf_stat_enabled)
account_sbals(q, count);
@@ -559,7 +547,7 @@ static int get_inbound_buffer_frontier(struct qdio_q *q)
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop");
break;
default:
- BUG();
+ WARN_ON_ONCE(1);
}
out:
return q->first_to_check;
@@ -574,7 +562,7 @@ static int qdio_inbound_q_moved(struct qdio_q *q)
if (bufnr != q->last_move) {
q->last_move = bufnr;
if (!is_thinint_irq(q->irq_ptr) && MACHINE_IS_LPAR)
- q->u.in.timestamp = get_clock();
+ q->u.in.timestamp = get_tod_clock();
return 1;
} else
return 0;
@@ -606,7 +594,7 @@ static inline int qdio_inbound_q_done(struct qdio_q *q)
* At this point we know, that inbound first_to_check
* has (probably) not moved (see qdio_inbound_processing).
*/
- if (get_clock() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
+ if (get_tod_clock_fast() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%02x",
q->first_to_check);
return 1;
@@ -619,50 +607,6 @@ static inline int contains_aobs(struct qdio_q *q)
return !q->is_input_q && q->u.out.use_cq;
}
-static inline void qdio_trace_aob(struct qdio_irq *irq, struct qdio_q *q,
- int i, struct qaob *aob)
-{
- int tmp;
-
- DBF_DEV_EVENT(DBF_INFO, irq, "AOB%d:%lx", i,
- (unsigned long) virt_to_phys(aob));
- DBF_DEV_EVENT(DBF_INFO, irq, "RES00:%lx",
- (unsigned long) aob->res0[0]);
- DBF_DEV_EVENT(DBF_INFO, irq, "RES01:%lx",
- (unsigned long) aob->res0[1]);
- DBF_DEV_EVENT(DBF_INFO, irq, "RES02:%lx",
- (unsigned long) aob->res0[2]);
- DBF_DEV_EVENT(DBF_INFO, irq, "RES03:%lx",
- (unsigned long) aob->res0[3]);
- DBF_DEV_EVENT(DBF_INFO, irq, "RES04:%lx",
- (unsigned long) aob->res0[4]);
- DBF_DEV_EVENT(DBF_INFO, irq, "RES05:%lx",
- (unsigned long) aob->res0[5]);
- DBF_DEV_EVENT(DBF_INFO, irq, "RES1:%x", aob->res1);
- DBF_DEV_EVENT(DBF_INFO, irq, "RES2:%x", aob->res2);
- DBF_DEV_EVENT(DBF_INFO, irq, "RES3:%x", aob->res3);
- DBF_DEV_EVENT(DBF_INFO, irq, "AORC:%u", aob->aorc);
- DBF_DEV_EVENT(DBF_INFO, irq, "FLAGS:%u", aob->flags);
- DBF_DEV_EVENT(DBF_INFO, irq, "CBTBS:%u", aob->cbtbs);
- DBF_DEV_EVENT(DBF_INFO, irq, "SBC:%u", aob->sb_count);
- for (tmp = 0; tmp < QDIO_MAX_ELEMENTS_PER_BUFFER; ++tmp) {
- DBF_DEV_EVENT(DBF_INFO, irq, "SBA%d:%lx", tmp,
- (unsigned long) aob->sba[tmp]);
- DBF_DEV_EVENT(DBF_INFO, irq, "rSBA%d:%lx", tmp,
- (unsigned long) q->sbal[i]->element[tmp].addr);
- DBF_DEV_EVENT(DBF_INFO, irq, "DC%d:%u", tmp, aob->dcount[tmp]);
- DBF_DEV_EVENT(DBF_INFO, irq, "rDC%d:%u", tmp,
- q->sbal[i]->element[tmp].length);
- }
- DBF_DEV_EVENT(DBF_INFO, irq, "USER0:%lx", (unsigned long) aob->user0);
- for (tmp = 0; tmp < 2; ++tmp) {
- DBF_DEV_EVENT(DBF_INFO, irq, "RES4%d:%lx", tmp,
- (unsigned long) aob->res4[tmp]);
- }
- DBF_DEV_EVENT(DBF_INFO, irq, "USER1:%lx", (unsigned long) aob->user1);
- DBF_DEV_EVENT(DBF_INFO, irq, "USER2:%lx", (unsigned long) aob->user2);
-}
-
static inline void qdio_handle_aobs(struct qdio_q *q, int start, int count)
{
unsigned char state = 0;
@@ -678,12 +622,10 @@ static inline void qdio_handle_aobs(struct qdio_q *q, int start, int count)
if (aob == NULL)
continue;
- BUG_ON(q->u.out.sbal_state == NULL);
q->u.out.sbal_state[b].flags |=
QDIO_OUTBUF_STATE_FLAG_PENDING;
q->u.out.aobs[b] = NULL;
} else if (state == SLSB_P_OUTPUT_EMPTY) {
- BUG_ON(q->u.out.sbal_state == NULL);
q->u.out.sbal_state[b].aob = NULL;
}
b = next_buf(b);
@@ -703,12 +645,11 @@ static inline unsigned long qdio_aob_for_buffer(struct qdio_output_q *q,
q->aobs[bufnr] = aob;
}
if (q->aobs[bufnr]) {
- BUG_ON(q->sbal_state == NULL);
q->sbal_state[bufnr].flags = QDIO_OUTBUF_STATE_FLAG_NONE;
q->sbal_state[bufnr].aob = q->aobs[bufnr];
q->aobs[bufnr]->user1 = (u64) q->sbal_state[bufnr].user;
phys_aob = virt_to_phys(q->aobs[bufnr]);
- BUG_ON(phys_aob & 0xFF);
+ WARN_ON_ONCE(phys_aob & 0xFF);
}
out:
@@ -786,7 +727,7 @@ static int get_outbound_buffer_frontier(struct qdio_q *q)
int count, stop;
unsigned char state = 0;
- q->timestamp = get_clock();
+ q->timestamp = get_tod_clock_fast();
if (need_siga_sync(q))
if (((queue_type(q) != QDIO_IQDIO_QFMT) &&
@@ -809,8 +750,6 @@ static int get_outbound_buffer_frontier(struct qdio_q *q)
goto out;
switch (state) {
- case SLSB_P_OUTPUT_PENDING:
- BUG();
case SLSB_P_OUTPUT_EMPTY:
/* the adapter got it */
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr,
@@ -840,7 +779,7 @@ static int get_outbound_buffer_frontier(struct qdio_q *q)
case SLSB_P_OUTPUT_HALTED:
break;
default:
- BUG();
+ WARN_ON_ONCE(1);
}
out:
@@ -912,7 +851,7 @@ retry:
static void __qdio_outbound_processing(struct qdio_q *q)
{
qperf_inc(q, tasklet_outbound);
- BUG_ON(atomic_read(&q->nr_buf_used) < 0);
+ WARN_ON_ONCE(atomic_read(&q->nr_buf_used) < 0);
if (qdio_outbound_q_moved(q))
qdio_kick_handler(q);
@@ -1056,7 +995,7 @@ static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
}
}
- if (!pci_out_supported(q))
+ if (!(irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED))
return;
for_each_output_queue(irq_ptr, q, i) {
@@ -1138,16 +1077,10 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
irq_ptr->perf_stat.qdio_int++;
if (IS_ERR(irb)) {
- switch (PTR_ERR(irb)) {
- case -EIO:
- DBF_ERROR("%4x IO error", irq_ptr->schid.sch_no);
- qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
- wake_up(&cdev->private->wait_q);
- return;
- default:
- WARN_ON(1);
- return;
- }
+ DBF_ERROR("%4x IO error", irq_ptr->schid.sch_no);
+ qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
+ wake_up(&cdev->private->wait_q);
+ return;
}
qdio_irq_check_sense(irq_ptr, irb);
cstat = irb->scsw.cmd.cstat;
@@ -1173,7 +1106,7 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
case QDIO_IRQ_STATE_STOPPED:
break;
default:
- WARN_ON(1);
+ WARN_ON_ONCE(1);
}
wake_up(&cdev->private->wait_q);
}
@@ -1227,7 +1160,7 @@ int qdio_shutdown(struct ccw_device *cdev, int how)
if (!irq_ptr)
return -ENODEV;
- BUG_ON(irqs_disabled());
+ WARN_ON_ONCE(irqs_disabled());
DBF_EVENT("qshutdown:%4x", cdev->private->schid.sch_no);
mutex_lock(&irq_ptr->setup_mutex);
@@ -1248,7 +1181,7 @@ int qdio_shutdown(struct ccw_device *cdev, int how)
tiqdio_remove_input_queues(irq_ptr);
qdio_shutdown_queues(cdev);
- qdio_shutdown_debug_entries(irq_ptr, cdev);
+ qdio_shutdown_debug_entries(irq_ptr);
/* cleanup subchannel */
spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
@@ -1300,12 +1233,10 @@ int qdio_free(struct ccw_device *cdev)
return -ENODEV;
DBF_EVENT("qfree:%4x", cdev->private->schid.sch_no);
+ DBF_DEV_EVENT(DBF_ERR, irq_ptr, "dbf abandoned");
mutex_lock(&irq_ptr->setup_mutex);
- if (irq_ptr->debug_area != NULL) {
- debug_unregister(irq_ptr->debug_area);
- irq_ptr->debug_area = NULL;
- }
+ irq_ptr->debug_area = NULL;
cdev->private->qdio_data = NULL;
mutex_unlock(&irq_ptr->setup_mutex);
@@ -1342,7 +1273,8 @@ int qdio_allocate(struct qdio_initialize *init_data)
goto out_err;
mutex_init(&irq_ptr->setup_mutex);
- qdio_allocate_dbf(init_data, irq_ptr);
+ if (qdio_allocate_dbf(init_data, irq_ptr))
+ goto out_rel;
/*
* Allocate a page for the chsc calls in qdio_establish.
@@ -1358,7 +1290,6 @@ int qdio_allocate(struct qdio_initialize *init_data)
irq_ptr->qdr = (struct qdr *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!irq_ptr->qdr)
goto out_rel;
- WARN_ON((unsigned long)irq_ptr->qdr & 0xfff);
if (qdio_allocate_qs(irq_ptr, init_data->no_input_qs,
init_data->no_output_qs))
@@ -1564,7 +1495,7 @@ static inline int buf_in_between(int bufnr, int start, int count)
static int handle_inbound(struct qdio_q *q, unsigned int callflags,
int bufnr, int count)
{
- int used, diff;
+ int diff;
qperf_inc(q, inbound_call);
@@ -1597,9 +1528,7 @@ static int handle_inbound(struct qdio_q *q, unsigned int callflags,
set:
count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count);
-
- used = atomic_add_return(count, &q->nr_buf_used) - count;
- BUG_ON(used + count > QDIO_MAX_BUFFERS_PER_Q);
+ atomic_add(count, &q->nr_buf_used);
if (need_siga_in(q))
return qdio_siga_input(q);
@@ -1624,7 +1553,6 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags,
count = set_buf_states(q, bufnr, SLSB_CU_OUTPUT_PRIMED, count);
used = atomic_add_return(count, &q->nr_buf_used);
- BUG_ON(used > QDIO_MAX_BUFFERS_PER_Q);
if (used == QDIO_MAX_BUFFERS_PER_Q)
qperf_inc(q, outbound_queue_full);
@@ -1678,7 +1606,6 @@ int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
{
struct qdio_irq *irq_ptr;
-
if (bufnr >= QDIO_MAX_BUFFERS_PER_Q || count > QDIO_MAX_BUFFERS_PER_Q)
return -EINVAL;
@@ -1721,8 +1648,6 @@ int qdio_start_irq(struct ccw_device *cdev, int nr)
return -ENODEV;
q = irq_ptr->input_qs[nr];
- WARN_ON(queue_irqs_enabled(q));
-
clear_nonshared_ind(irq_ptr);
qdio_stop_polling(q);
clear_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state);
@@ -1769,7 +1694,6 @@ int qdio_get_next_buffers(struct ccw_device *cdev, int nr, int *bufnr,
if (!irq_ptr)
return -ENODEV;
q = irq_ptr->input_qs[nr];
- WARN_ON(queue_irqs_enabled(q));
/*
* Cannot rely on automatic sync after interrupt since queues may
@@ -1826,6 +1750,97 @@ int qdio_stop_irq(struct ccw_device *cdev, int nr)
}
EXPORT_SYMBOL(qdio_stop_irq);
+/**
+ * qdio_pnso_brinfo() - perform network subchannel op #0 - bridge info.
+ * @schid: Subchannel ID.
+ * @cnc: Boolean Change-Notification Control
+ * @response: Response code will be stored at this address
+ * @cb: Callback function will be executed for each element
+ * of the address list
+ * @priv: Pointer passed from the caller to qdio_pnso_brinfo(); it is
+ *        handed unchanged to each invocation of @cb
+ * @type: Type of the address entry passed to the callback
+ * @entry: Entry containing the address of the specified type
+ *
+ * Performs the "Store-network-bridging-information list" operation and
+ * calls the callback function for every entry in the list. If "change-
+ * notification-control" is set, further changes in the address list
+ * will be reported via the IPA command.
+ */
+int qdio_pnso_brinfo(struct subchannel_id schid,
+ int cnc, u16 *response,
+ void (*cb)(void *priv, enum qdio_brinfo_entry_type type,
+ void *entry),
+ void *priv)
+{
+ struct chsc_pnso_area *rr;
+ int rc;
+ u32 prev_instance = 0;
+ int isfirstblock = 1;
+ int i, size, elems;
+
+ rr = (struct chsc_pnso_area *)get_zeroed_page(GFP_KERNEL);
+ if (rr == NULL)
+ return -ENOMEM;
+ do {
+ /* on the first iteration, naihdr.resume_token will be zero */
+ rc = chsc_pnso_brinfo(schid, rr, rr->naihdr.resume_token, cnc);
+ if (rc != 0 && rc != -EBUSY)
+ goto out;
+ if (rr->response.code != 1) {
+ rc = -EIO;
+ continue;
+ } else
+ rc = 0;
+
+ if (cb == NULL)
+ continue;
+
+ size = rr->naihdr.naids;
+ elems = (rr->response.length -
+ sizeof(struct chsc_header) -
+ sizeof(struct chsc_brinfo_naihdr)) /
+ size;
+
+ if (!isfirstblock && (rr->naihdr.instance != prev_instance)) {
+ /* Inform the caller that they need to scrap */
+ /* the data that was already reported via cb */
+ rc = -EAGAIN;
+ break;
+ }
+ isfirstblock = 0;
+ prev_instance = rr->naihdr.instance;
+ for (i = 0; i < elems; i++)
+ switch (size) {
+ case sizeof(struct qdio_brinfo_entry_l3_ipv6):
+ (*cb)(priv, l3_ipv6_addr,
+ &rr->entries.l3_ipv6[i]);
+ break;
+ case sizeof(struct qdio_brinfo_entry_l3_ipv4):
+ (*cb)(priv, l3_ipv4_addr,
+ &rr->entries.l3_ipv4[i]);
+ break;
+ case sizeof(struct qdio_brinfo_entry_l2):
+ (*cb)(priv, l2_addr_lnid,
+ &rr->entries.l2[i]);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ rc = -EIO;
+ goto out;
+ }
+ } while (rr->response.code == 0x0107 || /* channel busy */
+ (rr->response.code == 1 && /* list stored */
+ /* resume token is non-zero => list incomplete */
+ (rr->naihdr.resume_token.t1 || rr->naihdr.resume_token.t2)));
+ (*response) = rr->response.code;
+
+out:
+ free_page((unsigned long)rr);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(qdio_pnso_brinfo);
+
static int __init init_QDIO(void)
{
int rc;
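
For reference, a minimal usage sketch of the qdio_pnso_brinfo() interface added above. This is an illustration only, assuming the prototype and entry-type declarations that this patch series is expected to make visible to callers (presumably via asm/qdio.h); the callback and wrapper names brinfo_cb and query_bridge_info are hypothetical and not part of the patch.

/*
 * Hypothetical caller of qdio_pnso_brinfo(); a sketch only, not part of
 * the patch. Assumes the prototype and the qdio_brinfo_entry_* types are
 * visible to the caller.
 */
#include <linux/printk.h>
#include <asm/qdio.h>

static void brinfo_cb(void *priv, enum qdio_brinfo_entry_type type,
		      void *entry)
{
	/* Invoked once per address-list entry; cast entry according to type. */
	switch (type) {
	case l2_addr_lnid:
		/* entry points to a struct qdio_brinfo_entry_l2 */
		break;
	case l3_ipv4_addr:
		/* entry points to a struct qdio_brinfo_entry_l3_ipv4 */
		break;
	case l3_ipv6_addr:
		/* entry points to a struct qdio_brinfo_entry_l3_ipv6 */
		break;
	default:
		break;
	}
}

static int query_bridge_info(struct subchannel_id schid)
{
	u16 response = 0;
	int rc;

	/* cnc=1 additionally requests notification of later address changes */
	rc = qdio_pnso_brinfo(schid, 1, &response, brinfo_cb, NULL);
	if (rc)
		pr_warn("PNSO brinfo failed: rc %d response 0x%04x\n",
			rc, response);
	return rc;
}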