author     Linus Torvalds <torvalds@linux-foundation.org>   2009-09-17 20:53:52 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2009-09-17 20:53:52 -0700
commit     f205ce83a766c08965ec78342f138cdc00631fba (patch)
tree       7a9d2db6c16594ef7c730ca93a87131cf0abca41 /drivers/net
parent     3dc95666df0e1ae5b7381a8ec97a583bb3ce4306 (diff)
parent     b31c50a7f9e93a61d14740dedcbbf2c376998bc7 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (66 commits)
be2net: fix some cmds to use mccq instead of mbox
atl1e: fix 2.6.31-git4 -- ATL1E 0000:03:00.0: DMA-API: device driver frees DMA
pkt_sched: Fix qstats.qlen updating in dump_stats
ipv6: Log the affected address when DAD failure occurs
wl12xx: Fix print_mac() conversion.
af_iucv: fix race when queueing skbs on the backlog queue
af_iucv: do not call iucv_sock_kill() twice
af_iucv: handle non-accepted sockets after resuming from suspend
af_iucv: fix race in __iucv_sock_wait()
iucv: use correct output register in iucv_query_maxconn()
iucv: fix iucv_buffer_cpumask check when calling IUCV functions
  iucv: suspend/resume error msg for left over paths
wl12xx: switch to %pM to print the mac address
b44: the poll handler b44_poll must not enable IRQ unconditionally
ipv6: Ignore route option with ROUTER_PREF_INVALID
bonding: make ab_arp select active slaves as other modes
cfg80211: fix SME connect
rc80211_minstrel: fix contention window calculation
ssb/sdio: fix printk format warnings
p54usb: add Zcomax XG-705A usbid
...
Diffstat (limited to 'drivers/net')
37 files changed, 545 insertions, 479 deletions
diff --git a/drivers/net/atl1e/atl1e.h b/drivers/net/atl1e/atl1e.h
index ba48220df16..490d3b38e0c 100644
--- a/drivers/net/atl1e/atl1e.h
+++ b/drivers/net/atl1e/atl1e.h
@@ -377,10 +377,19 @@ struct atl1e_hw {
  */
 struct atl1e_tx_buffer {
 	struct sk_buff *skb;
+	u16 flags;
+#define ATL1E_TX_PCIMAP_SINGLE		0x0001
+#define ATL1E_TX_PCIMAP_PAGE		0x0002
+#define ATL1E_TX_PCIMAP_TYPE_MASK	0x0003
 	u16 length;
 	dma_addr_t dma;
 };
 
+#define ATL1E_SET_PCIMAP_TYPE(tx_buff, type) do {		\
+	((tx_buff)->flags) &= ~ATL1E_TX_PCIMAP_TYPE_MASK;	\
+	((tx_buff)->flags) |= (type);				\
+	} while (0)
+
 struct atl1e_rx_page {
 	dma_addr_t	dma;    /* receive rage DMA address */
 	u8		*addr;   /* receive rage virtual address */
diff --git a/drivers/net/atl1e/atl1e_main.c b/drivers/net/atl1e/atl1e_main.c
index 69b830f4b68..955da733c2a 100644
--- a/drivers/net/atl1e/atl1e_main.c
+++ b/drivers/net/atl1e/atl1e_main.c
@@ -635,7 +635,11 @@ static void atl1e_clean_tx_ring(struct atl1e_adapter *adapter)
 	for (index = 0; index < ring_count; index++) {
 		tx_buffer = &tx_ring->tx_buffer[index];
 		if (tx_buffer->dma) {
-			pci_unmap_page(pdev, tx_buffer->dma,
+			if (tx_buffer->flags & ATL1E_TX_PCIMAP_SINGLE)
+				pci_unmap_single(pdev, tx_buffer->dma,
+					tx_buffer->length, PCI_DMA_TODEVICE);
+			else if (tx_buffer->flags & ATL1E_TX_PCIMAP_PAGE)
+				pci_unmap_page(pdev, tx_buffer->dma,
 					tx_buffer->length, PCI_DMA_TODEVICE);
 			tx_buffer->dma = 0;
 		}
@@ -1220,7 +1224,11 @@ static bool atl1e_clean_tx_irq(struct atl1e_adapter *adapter)
 	while (next_to_clean != hw_next_to_clean) {
 		tx_buffer = &tx_ring->tx_buffer[next_to_clean];
 		if (tx_buffer->dma) {
-			pci_unmap_page(adapter->pdev, tx_buffer->dma,
+			if (tx_buffer->flags & ATL1E_TX_PCIMAP_SINGLE)
+				pci_unmap_single(adapter->pdev, tx_buffer->dma,
+					tx_buffer->length, PCI_DMA_TODEVICE);
+			else if (tx_buffer->flags & ATL1E_TX_PCIMAP_PAGE)
+				pci_unmap_page(adapter->pdev, tx_buffer->dma,
 					tx_buffer->length, PCI_DMA_TODEVICE);
 			tx_buffer->dma = 0;
 		}
@@ -1741,6 +1749,7 @@ static void atl1e_tx_map(struct atl1e_adapter *adapter,
 		tx_buffer->length = map_len;
 		tx_buffer->dma = pci_map_single(adapter->pdev,
 					skb->data, hdr_len, PCI_DMA_TODEVICE);
+		ATL1E_SET_PCIMAP_TYPE(tx_buffer, ATL1E_TX_PCIMAP_SINGLE);
 		mapped_len += map_len;
 		use_tpd->buffer_addr = cpu_to_le64(tx_buffer->dma);
 		use_tpd->word2 = (use_tpd->word2 & (~TPD_BUFLEN_MASK)) |
@@ -1766,6 +1775,7 @@ static void atl1e_tx_map(struct atl1e_adapter *adapter,
 		tx_buffer->dma = pci_map_single(adapter->pdev,
 					skb->data + mapped_len,
 					map_len, PCI_DMA_TODEVICE);
+		ATL1E_SET_PCIMAP_TYPE(tx_buffer, ATL1E_TX_PCIMAP_SINGLE);
 		mapped_len += map_len;
 		use_tpd->buffer_addr = cpu_to_le64(tx_buffer->dma);
 		use_tpd->word2 = (use_tpd->word2 & (~TPD_BUFLEN_MASK)) |
@@ -1801,6 +1811,7 @@ static void atl1e_tx_map(struct atl1e_adapter *adapter,
 						(i * MAX_TX_BUF_LEN),
 						tx_buffer->length,
 						PCI_DMA_TODEVICE);
+		ATL1E_SET_PCIMAP_TYPE(tx_buffer, ATL1E_TX_PCIMAP_PAGE);
 		use_tpd->buffer_addr = cpu_to_le64(tx_buffer->dma);
 		use_tpd->word2 = (use_tpd->word2 & (~TPD_BUFLEN_MASK)) |
 				((cpu_to_le32(tx_buffer->length) &
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index 0189dcd36f3..e046943ef29 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -847,23 +847,22 @@
 static int b44_poll(struct napi_struct *napi, int budget)
 {
 	struct b44 *bp = container_of(napi, struct b44, napi);
 	int work_done;
+	unsigned long flags;
 
-	spin_lock_irq(&bp->lock);
+	spin_lock_irqsave(&bp->lock, flags);
 	if (bp->istat & (ISTAT_TX | ISTAT_TO)) {
 		/* spin_lock(&bp->tx_lock); */
 		b44_tx(bp);
 		/* spin_unlock(&bp->tx_lock); */
 	}
-	spin_unlock_irq(&bp->lock);
+	spin_unlock_irqrestore(&bp->lock, flags);
 
 	work_done = 0;
 	if (bp->istat & ISTAT_RX)
 		work_done += b44_rx(bp, budget);
 
 	if (bp->istat & ISTAT_ERRORS) {
-		unsigned long flags;
-
 		spin_lock_irqsave(&bp->lock, flags);
 		b44_halt(bp);
 		b44_init_rings(bp);
diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h
index 13b72ce870d..684c6fe24c8 100644
--- a/drivers/net/benet/be.h
+++ b/drivers/net/benet/be.h
@@ -362,5 +362,6 @@ static inline u8 is_udp_pkt(struct sk_buff *skb)
 extern void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm,
 		u16 num_popped);
 extern void be_link_status_update(struct be_adapter *adapter, bool link_up);
+extern void netdev_stats_update(struct be_adapter *adapter);
 extern int be_load_fw(struct be_adapter *adapter, u8 *func);
 #endif		/* BE_H */
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index 1db09249830..3dd76c4170b 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -59,15 +59,22 @@ static int be_mcc_compl_process(struct be_adapter *adapter,
 
 	compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
 				CQE_STATUS_COMPL_MASK;
-	if (compl_status != MCC_STATUS_SUCCESS) {
+	if (compl_status == MCC_STATUS_SUCCESS) {
+		if (compl->tag0 == OPCODE_ETH_GET_STATISTICS) {
+			struct be_cmd_resp_get_stats *resp =
+						adapter->stats.cmd.va;
+			be_dws_le_to_cpu(&resp->hw_stats,
+						sizeof(resp->hw_stats));
+			netdev_stats_update(adapter);
+		}
+	} else if (compl_status != MCC_STATUS_NOT_SUPPORTED) {
 		extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
 				CQE_STATUS_EXTD_MASK;
 		dev_warn(&adapter->pdev->dev,
 			"Error in cmd completion: status(compl/extd)=%d/%d\n",
 			compl_status, extd_status);
-		return -1;
 	}
-	return 0;
+	return compl_status;
 }
 
 /* Link state evt is a string of bytes; no need for endian swapping */
@@ -97,10 +104,10 @@ static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
 	return NULL;
 }
 
-void be_process_mcc(struct be_adapter *adapter)
+int be_process_mcc(struct be_adapter *adapter)
 {
 	struct be_mcc_compl *compl;
-	int num = 0;
+	int num = 0, status = 0;
 
 	spin_lock_bh(&adapter->mcc_cq_lock);
 	while ((compl = be_mcc_compl_get(adapter))) {
@@ -111,38 +118,47 @@ void be_process_mcc(struct be_adapter *adapter)
 			/* Interpret compl as a async link evt */
 			be_async_link_state_process(adapter,
 				(struct be_async_event_link_state *) compl);
-		} else {
-			be_mcc_compl_process(adapter, compl);
-			atomic_dec(&adapter->mcc_obj.q.used);
+		} else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
+			status = be_mcc_compl_process(adapter, compl);
+			atomic_dec(&adapter->mcc_obj.q.used);
 		}
 		be_mcc_compl_use(compl);
 		num++;
 	}
+
 	if (num)
 		be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, num);
+
 	spin_unlock_bh(&adapter->mcc_cq_lock);
+	return status;
 }
 
 /* Wait till no more pending mcc requests are present */
-static void be_mcc_wait_compl(struct be_adapter *adapter)
+static int be_mcc_wait_compl(struct be_adapter *adapter)
 {
-#define mcc_timeout	50000 /* 5s timeout */
-	int i;
+#define mcc_timeout	120000 /* 12s timeout */
+	int i, status;
 	for (i = 0; i < mcc_timeout; i++) {
-		be_process_mcc(adapter);
+		status = be_process_mcc(adapter);
+		if (status)
+			return status;
+
 		if (atomic_read(&adapter->mcc_obj.q.used) == 0)
 			break;
 		udelay(100);
 	}
-	if (i == mcc_timeout)
+	if (i == mcc_timeout) {
 		dev_err(&adapter->pdev->dev, "mccq poll timed out\n");
+		return -1;
+	}
+	return 0;
 }
 
 /* Notify MCC requests and wait for completion */
-static void be_mcc_notify_wait(struct be_adapter *adapter)
+static int be_mcc_notify_wait(struct be_adapter *adapter)
 {
 	be_mcc_notify(adapter);
-	be_mcc_wait_compl(adapter);
+	return be_mcc_wait_compl(adapter);
 }
 
 static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
@@ -173,7 +189,7 @@ static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
  * Insert the mailbox address into the doorbell in two steps
 * Polls on the mbox doorbell till a command completion (or a timeout) occurs
  */
-static int be_mbox_notify(struct be_adapter *adapter)
+static int be_mbox_notify_wait(struct be_adapter *adapter)
 {
 	int status;
 	u32 val = 0;
@@ -182,8 +198,6 @@ static int be_mbox_notify(struct be_adapter *adapter)
 	struct be_mcc_mailbox *mbox = mbox_mem->va;
 	struct be_mcc_compl *compl = &mbox->compl;
 
-	memset(compl, 0, sizeof(*compl));
-
 	val |= MPU_MAILBOX_DB_HI_MASK;
 	/* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
 	val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
@@ -310,34 +324,40 @@ static u32 eq_delay_to_mult(u32 usec_delay)
 	return multiplier;
 }
 
-static inline struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem)
+static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter)
 {
-	return &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
+	struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
+	struct be_mcc_wrb *wrb
+		= &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
+	memset(wrb, 0, sizeof(*wrb));
+	return wrb;
 }
 
-static inline struct be_mcc_wrb *wrb_from_mcc(struct be_queue_info *mccq)
+static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
 {
-	struct be_mcc_wrb *wrb = NULL;
-	if (atomic_read(&mccq->used) < mccq->len) {
-		wrb = queue_head_node(mccq);
-		queue_head_inc(mccq);
-		atomic_inc(&mccq->used);
-		memset(wrb, 0, sizeof(*wrb));
-	}
+	struct be_queue_info *mccq = &adapter->mcc_obj.q;
+	struct be_mcc_wrb *wrb;
+
+	BUG_ON(atomic_read(&mccq->used) >= mccq->len);
+	wrb = queue_head_node(mccq);
+	queue_head_inc(mccq);
+	atomic_inc(&mccq->used);
+	memset(wrb, 0, sizeof(*wrb));
 	return wrb;
 }
 
 int be_cmd_eq_create(struct be_adapter *adapter,
 		struct be_queue_info *eq, int eq_delay)
 {
-	struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem);
-	struct be_cmd_req_eq_create *req = embedded_payload(wrb);
-	struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
+	struct be_mcc_wrb *wrb;
+	struct be_cmd_req_eq_create *req;
 	struct be_dma_mem *q_mem = &eq->dma_mem;
 	int status;
 
 	spin_lock(&adapter->mbox_lock);
-	memset(wrb, 0, sizeof(*wrb));
+
+	wrb = wrb_from_mbox(adapter);
+	req = embedded_payload(wrb);
 
 	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
 
@@ -359,25 +379,29 @@ int be_cmd_eq_create(struct be_adapter *adapter,
 
 	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
 
-	status = be_mbox_notify(adapter);
+	status = be_mbox_notify_wait(adapter);
 	if (!status) {
+		struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
 		eq->id = le16_to_cpu(resp->eq_id);
 		eq->created = true;
 	}
+
 	spin_unlock(&adapter->mbox_lock);
 	return status;
 }
 
+/* Uses mbox */
 int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
 			u8 type, bool permanent, u32 if_handle)
 {
-	struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem);
-	struct be_cmd_req_mac_query *req = embedded_payload(wrb);
-	struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);
+	struct be_mcc_wrb *wrb;
+	struct be_cmd_req_mac_query *req;
 	int status;
 
 	spin_lock(&adapter->mbox_lock);
-	memset(wrb, 0, sizeof(*wrb));
+
+	wrb = wrb_from_mbox(adapter);
+	req = embedded_payload(wrb);
 
 	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
 
@@ -388,27 +412,32 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
 	if (permanent) {
 		req->permanent = 1;
 	} else {
-		req->if_id = cpu_to_le16((u16)if_handle);
+		req->if_id = cpu_to_le16((u16) if_handle);
 		req->permanent = 0;
 	}
 
-	status = be_mbox_notify(adapter);
-	if (!status)
+	status = be_mbox_notify_wait(adapter);
+	if (!status) {
+		struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);
 		memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
+	}
 
 	spin_unlock(&adapter->mbox_lock);
 	return status;
 }
 
+/* Uses synchronous MCCQ */
 int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
 		u32 if_id, u32 *pmac_id)
 {
-	struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem);
-	struct be_cmd_req_pmac_add *req = embedded_payload(wrb);
+	struct be_mcc_wrb *wrb;
+	struct be_cmd_req_pmac_add *req;
 	int status;
 
-	spin_lock(&adapter->mbox_lock);
-	memset(wrb, 0, sizeof(*wrb));
+	spin_lock_bh(&adapter->mcc_lock);
+
+	wrb = wrb_from_mccq(adapter);
+	req = embedded_payload(wrb);
 
 	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
 
@@ -418,24 +447,27 @@ int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
 	req->if_id = cpu_to_le32(if_id);
 	memcpy(req->mac_address, mac_addr, ETH_ALEN);
 
-	status = be_mbox_notify(adapter);
+	status = be_mcc_notify_wait(adapter);
 	if (!status) {
 		struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb);
 		*pmac_id = le32_to_cpu(resp->pmac_id);
 	}
 
-	spin_unlock(&adapter->mbox_lock);
+	spin_unlock_bh(&adapter->mcc_lock);
 	return status;
 }
 
+/* Uses synchronous MCCQ */
 int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id)
 {
-	struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem);
-	struct be_cmd_req_pmac_del *req = embedded_payload(wrb);
+	struct be_mcc_wrb *wrb;
+	struct be_cmd_req_pmac_del *req;
 	int status;
 
-	spin_lock(&adapter->mbox_lock);
-	memset(wrb, 0, sizeof(*wrb));
+	spin_lock_bh(&adapter->mcc_lock);
+
+	wrb = wrb_from_mccq(adapter);
+	req = embedded_payload(wrb);
 
 	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
 
@@ -445,25 +477,29 @@ int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id)
 	req->if_id = cpu_to_le32(if_id);
 	req->pmac_id = cpu_to_le32(pmac_id);
 
-	status = be_mbox_notify(adapter);
-	spin_unlock(&adapter->mbox_lock);
+	status = be_mcc_notify_wait(adapter);
+
+	spin_unlock_bh(&adapter->mcc_lock);
 
 	return status;
 }
 
+/* Uses Mbox */
 int be_cmd_cq_create(struct be_adapter *adapter,
 		struct be_queue_info *cq, struct be_queue_info *eq,
 		bool sol_evts, bool no_delay, int coalesce_wm)
 {
-	struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem);
-	struct be_cmd_req_cq_create *req = embedded_payload(wrb);
-	struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
+	struct be_mcc_wrb *wrb;
+	struct be_cmd_req_cq_create *req;
 	struct be_dma_mem *q_mem = &cq->dma_mem;
-	void *ctxt = &req->context;
+	void *ctxt;
 	int status;
 
 	spin_lock(&adapter->mbox_lock);
-	memset(wrb, 0, sizeof(*wrb));
+
+	wrb = wrb_from_mbox(adapter);
+	req = embedded_payload(wrb);
+	ctxt = &req->context;
 
 	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
 
@@ -486,11 +522,13 @@ int be_cmd_cq_create(struct be_adapter *adapter,
 
 	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
 
-	status = be_mbox_notify(adapter);
+	status = be_mbox_notify_wait(adapter);
 	if (!status) {
+		struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
 		cq->id = le16_to_cpu(resp->cq_id);
 		cq->created = true;
 	}
+
 	spin_unlock(&adapter->mbox_lock);
 
 	return status;
@@ -508,14 +546,17 @@ int be_cmd_mccq_create(struct be_adapter *adapter,
 			struct be_queue_info *mccq,
 			struct be_queue_info *cq)
 {
-	struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem);
-	struct be_cmd_req_mcc_create *req = embedded_payload(wrb);
+	struct be_mcc_wrb *wrb;
+	struct be_cmd_req_mcc_create *req;
 	struct be_dma_mem *q_mem = &mccq->dma_mem;
-	void *ctxt = &req->context;
+	void *ctxt;
 	int status;
 
 	spin_lock(&adapter->mbox_lock);
-	memset(wrb, 0, sizeof(*wrb));
+
+	wrb = wrb_from_mbox(adapter);
+	req = embedded_payload(wrb);
+	ctxt = &req->context;
 
 	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
 
@@ -534,7 +575,7 @@ int be_cmd_mccq_create(struct be_adapter *adapter,
 
 	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
 
-	status = be_mbox_notify(adapter);
+	status = be_mbox_notify_wait(adapter);
 	if (!status) {
 		struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
 		mccq->id = le16_to_cpu(resp->id);
@@ -549,15 +590,17 @@ int be_cmd_txq_create(struct be_adapter *adapter,
 			struct be_queue_info *txq,
 			struct be_queue_info *cq)
 {
-	struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem);
-	struct be_cmd_req_eth_tx_create *req = embedded_payload(wrb);
+	struct be_mcc_wrb *wrb;
+	struct be_cmd_req_eth_tx_create *req;
 	struct be_dma_mem *q_mem = &txq->dma_mem;
-	void *ctxt = &req->context;
+	void *ctxt;
 	int status;
-	u32 len_encoded;
 
 	spin_lock(&adapter->mbox_lock);
-	memset(wrb, 0, sizeof(*wrb));
+
+	wrb = wrb_from_mbox(adapter);
+	req = embedded_payload(wrb);
+	ctxt = &req->context;
 
 	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
 
@@ -568,10 +611,8 @@ int be_cmd_txq_create(struct be_adapter *adapter,
 	req->ulp_num = BE_ULP1_NUM;
 	req->type = BE_ETH_TX_RING_TYPE_STANDARD;
 
-	len_encoded = fls(txq->len); /* log2(len) + 1 */
-	if (len_encoded == 16)
-		len_encoded = 0;
-	AMAP_SET_BITS(struct amap_tx_context, tx_ring_size, ctxt, len_encoded);
+	AMAP_SET_BITS(struct amap_tx_context, tx_ring_size, ctxt,
+		be_encoded_q_len(txq->len));
 	AMAP_SET_BITS(struct amap_tx_context, pci_func_id, ctxt,
 		be_pci_func(adapter));
 	AMAP_SET_BITS(struct amap_tx_context, ctx_valid, ctxt, 1);
@@ -581,28 +622,32 @@ int be_cmd_txq_create(struct be_adapter *adapter,
 
 	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
 
-	status = be_mbox_notify(adapter);
+	status = be_mbox_notify_wait(adapter);
 	if (!status) {
 		struct be_cmd_resp_eth_tx_create *resp = embedded_payload(wrb);
 		txq->id = le16_to_cpu(resp->cid);
 		txq->created = true;
 	}
+
 	spin_unlock(&adapter->mbox_lock);
 
 	return status;
 }
 
+/* Uses mbox */
 int be_cmd_rxq_create(struct be_adapter *adapter,
 		struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
 		u16 max_frame_size, u32 if_id, u32 rss)
 {
-	struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem);
-	struct be_cmd_req_eth_rx_create *req = embedded_payload(wrb);
+	struct be_mcc_wrb *wrb;
+	struct be_cmd_req_eth_rx_create *req;
 	struct be_dma_mem *q_mem = &rxq->dma_mem;
 	int status;
 
	spin_lock(&adapter->mbox_lock);
-	memset(wrb, 0, sizeof(*wrb));
+
+	wrb = wrb_from_mbox(adapter);
+	req = embedded_payload(wrb);
 
 	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
 
@@ -617,29 +662,34 @@ int be_cmd_rxq_create(struct be_adapter *adapter,
 	req->max_frame_size = cpu_to_le16(max_frame_size);
 	req->rss_queue = cpu_to_le32(rss);
 
-	status = be_mbox_notify(adapter);
+	status = be_mbox_notify_wait(adapter);
 	if (!status) {
 		struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);
 		rxq->id = le16_to_cpu(resp->id);
 		rxq->created = true;
 	}
+
 	spin_unlock(&adapter->mbox_lock);
 
 	return status;
 }
 
-/* Generic destroyer function for all types of queues */
+/* Generic destroyer function for all types of queues
+ * Uses Mbox
+ */
 int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
 		int queue_type)
 {
-	struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem);
-	struct be_cmd_req_q_destroy *req = embedded_payload(wrb);
+	struct be_mcc_wrb *wrb;
+	struct be_cmd_req_q_destroy *req;
 	u8 subsys = 0, opcode = 0;
 	int status;
 
 	spin_lock(&adapter->mbox_lock);
-	memset(wrb, 0, sizeof(*wrb));
+	wrb = wrb_from_mbox(adapter);
+	req = embedded_payload(wrb);
+
 	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
 
 	switch (queue_type) {
@@ -669,23 +719,27 @@ int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
 	be_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req));
 	req->id = cpu_to_le16(q->id);
 
-	status = be_mbox_notify(adapter);
+	status = be_mbox_notify_wait(adapter);
 
 	spin_unlock(&adapter->mbox_lock);
 
 	return status;
 }
 
-/* Create an rx filtering policy configuration on an i/f */
+/* Create an rx filtering policy configuration on an i/f
+ * Uses mbox
+ */
 int be_cmd_if_create(struct be_adapter *adapter, u32 flags, u8 *mac,
 		bool pmac_invalid, u32 *if_handle, u32 *pmac_id)
 {
-	struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem);
-	struct be_cmd_req_if_create *req = embedded_payload(wrb);
+	struct be_mcc_wrb *wrb;
+	struct be_cmd_req_if_create *req;
 	int status;
 
 	spin_lock(&adapter->mbox_lock);
-	memset(wrb, 0, sizeof(*wrb));
+
+	wrb = wrb_from_mbox(adapter);
+	req = embedded_payload(wrb);
 
 	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
 
@@ -694,10 +748,11 @@ int be_cmd_if_create(struct be_adapter *adapter, u32 flags, u8 *mac,
 
 	req->capability_flags = cpu_to_le32(flags);
 	req->enable_flags = cpu_to_le32(flags);
+	req->pmac_invalid = pmac_invalid;
 	if (!pmac_invalid)
 		memcpy(req->mac_addr, mac, ETH_ALEN);
 
-	status = be_mbox_notify(adapter);
+	status = be_mbox_notify_wait(adapter);
 	if (!status) {
 		struct be_cmd_resp_if_create *resp = embedded_payload(wrb);
 		*if_handle = le32_to_cpu(resp->interface_id);
@@ -709,14 +764,17 @@ int be_cmd_if_create(struct be_adapter *adapter, u32 flags, u8 *mac,
 	return status;
 }
 
+/* Uses mbox */
 int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id)
 {
-	struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem);
-	struct be_cmd_req_if_destroy *req = embedded_payload(wrb);
+	struct be_mcc_wrb *wrb;
+	struct be_cmd_req_if_destroy *req;
 	int status;
 
 	spin_lock(&adapter->mbox_lock);
-	memset(wrb, 0, sizeof(*wrb));
+
+	wrb = wrb_from_mbox(adapter);
+	req = embedded_payload(wrb);
 
 	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
 
@@ -724,7 +782,8 @@ int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id)
 		OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req));
 
 	req->interface_id = cpu_to_le32(interface_id);
-	status = be_mbox_notify(adapter);
+
+	status = be_mbox_notify_wait(adapter);
 
 	spin_unlock(&adapter->mbox_lock);
 
@@ -733,20 +792,22 @@ int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id)
 
 /* Get stats is a non embedded command: the request is not embedded inside
  * WRB but is a separate dma memory block
+ * Uses asynchronous MCC
  */
 int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
 {
-	struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem);
-	struct be_cmd_req_get_stats *req = nonemb_cmd->va;
-	struct be_sge *sge = nonembedded_sgl(wrb);
-	int status;
+	struct be_mcc_wrb *wrb;
+	struct be_cmd_req_get_stats *req;
+	struct be_sge *sge;
 
-	spin_lock(&adapter->mbox_lock);
-	memset(wrb, 0, sizeof(*wrb));
+	spin_lock_bh(&adapter->mcc_lock);
 
-	memset(req, 0, sizeof(*req));
+	wrb = wrb_from_mccq(adapter);
+	req = nonemb_cmd->va;
+	sge = nonembedded_sgl(wrb);
 
 	be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);
+	wrb->tag0 = OPCODE_ETH_GET_STATISTICS;
 
 	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
 		OPCODE_ETH_GET_STATISTICS, sizeof(*req));
@@ -754,59 +815,61 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
 	sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
 	sge->len = cpu_to_le32(nonemb_cmd->size);
 
-	status = be_mbox_notify(adapter);
-	if (!status) {
-		struct be_cmd_resp_get_stats *resp = nonemb_cmd->va;
-		be_dws_le_to_cpu(&resp->hw_stats, sizeof(resp->hw_stats));
-	}
+	be_mcc_notify(adapter);
 
-	spin_unlock(&adapter->mbox_lock);
-	return status;
+	spin_unlock_bh(&adapter->mcc_lock);
+	return 0;
 }
 
+/* Uses synchronous mcc */
 int be_cmd_link_status_query(struct be_adapter *adapter, bool *link_up)
 {
-	struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem);
-	struct be_cmd_req_link_status *req = embedded_payload(wrb);
+	struct be_mcc_wrb *wrb;
+	struct be_cmd_req_link_status *req;
 	int status;
 
-	spin_lock(&adapter->mbox_lock);
+	spin_lock_bh(&adapter->mcc_lock);
+
+	wrb = wrb_from_mccq(adapter);
+	req = embedded_payload(wrb);
 
 	*link_up = false;
-	memset(wrb, 0, sizeof(*wrb));
 
 	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
 
 	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
 		OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req));
 
-	status = be_mbox_notify(adapter);
+	st