Diffstat (limited to 'drivers/net/ethernet/chelsio/cxgb4vf/sge.c')
-rw-r--r--	drivers/net/ethernet/chelsio/cxgb4vf/sge.c	45
1 file changed, 27 insertions, 18 deletions
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index 92170d50d9d..bdfa80ca5e3 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -401,7 +401,7 @@ static void free_tx_desc(struct adapter *adapter, struct sge_txq *tq,
 		if (sdesc->skb) {
 			if (need_unmap)
 				unmap_sgl(dev, sdesc->skb, sdesc->sgl, tq);
-			kfree_skb(sdesc->skb);
+			dev_consume_skb_any(sdesc->skb);
 			sdesc->skb = NULL;
 		}
 
@@ -528,17 +528,21 @@ static void unmap_rx_buf(struct adapter *adapter, struct sge_fl *fl)
  */
 static inline void ring_fl_db(struct adapter *adapter, struct sge_fl *fl)
 {
+	u32 val;
+
 	/*
 	 * The SGE keeps track of its Producer and Consumer Indices in terms
 	 * of Egress Queue Units so we can only tell it about integral numbers
 	 * of multiples of Free List Entries per Egress Queue Units ...
 	 */
 	if (fl->pend_cred >= FL_PER_EQ_UNIT) {
+		val = PIDX(fl->pend_cred / FL_PER_EQ_UNIT);
+		if (!is_t4(adapter->params.chip))
+			val |= DBTYPE(1);
 		wmb();
 		t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL,
 			     DBPRIO(1) |
-			     QID(fl->cntxt_id) |
-			     PIDX(fl->pend_cred / FL_PER_EQ_UNIT));
+			     QID(fl->cntxt_id) | val);
 		fl->pend_cred %= FL_PER_EQ_UNIT;
 	}
 }
@@ -1271,7 +1275,7 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 		 * need it any longer.
 		 */
 		inline_tx_skb(skb, &txq->q, cpl + 1);
-		dev_kfree_skb(skb);
+		dev_consume_skb_any(skb);
 	} else {
 		/*
 		 * Write the skb's Scatter/Gather list into the TX Packet CPL
@@ -1350,7 +1354,7 @@ out_free:
 	 * An error of some sort happened.  Free the TX skb and tell the
 	 * OS that we've "dealt" with the packet ...
 	 */
-	dev_kfree_skb(skb);
+	dev_kfree_skb_any(skb);
 	return NETDEV_TX_OK;
 }
 
@@ -1392,8 +1396,9 @@ static inline void copy_frags(struct sk_buff *skb,
  * Builds an sk_buff from the given packet gather list.  Returns the
  * sk_buff or %NULL if sk_buff allocation failed.
  */
-struct sk_buff *t4vf_pktgl_to_skb(const struct pkt_gl *gl,
-				  unsigned int skb_len, unsigned int pull_len)
+static struct sk_buff *t4vf_pktgl_to_skb(const struct pkt_gl *gl,
+					 unsigned int skb_len,
+					 unsigned int pull_len)
 {
 	struct sk_buff *skb;
 
@@ -1439,7 +1444,7 @@ out:
  * Releases the pages of a packet gather list.  We do not own the last
  * page on the list and do not free it.
  */
-void t4vf_pktgl_free(const struct pkt_gl *gl)
+static void t4vf_pktgl_free(const struct pkt_gl *gl)
 {
 	int frag;
 
@@ -1477,8 +1482,11 @@ static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
 	skb->ip_summed = CHECKSUM_UNNECESSARY;
 	skb_record_rx_queue(skb, rxq->rspq.idx);
 
-	if (pkt->vlan_ex)
-		__vlan_hwaccel_put_tag(skb, be16_to_cpu(pkt->vlan));
+	if (pkt->vlan_ex) {
+		__vlan_hwaccel_put_tag(skb, cpu_to_be16(ETH_P_8021Q),
+				       be16_to_cpu(pkt->vlan));
+		rxq->stats.vlan_ex++;
+	}
 
 	ret = napi_gro_frags(&rxq->rspq.napi);
 	if (ret == GRO_HELD)
@@ -1501,8 +1509,9 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
 		       const struct pkt_gl *gl)
 {
 	struct sk_buff *skb;
-	const struct cpl_rx_pkt *pkt = (void *)&rsp[1];
-	bool csum_ok = pkt->csum_calc && !pkt->err_vec;
+	const struct cpl_rx_pkt *pkt = (void *)rsp;
+	bool csum_ok = pkt->csum_calc && !pkt->err_vec &&
+		       (rspq->netdev->features & NETIF_F_RXCSUM);
 	struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
 
 	/*
@@ -1530,8 +1539,8 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
 	skb_record_rx_queue(skb, rspq->idx);
 	rxq->stats.pkts++;
 
-	if (csum_ok && (rspq->netdev->features & NETIF_F_RXCSUM) &&
-	    !pkt->err_vec && (be32_to_cpu(pkt->l2info) & (RXF_UDP|RXF_TCP))) {
+	if (csum_ok && !pkt->err_vec &&
+	    (be32_to_cpu(pkt->l2info) & (RXF_UDP|RXF_TCP))) {
 		if (!pkt->ip_frag)
 			skb->ip_summed = CHECKSUM_UNNECESSARY;
 		else {
@@ -1545,7 +1554,7 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
 
 	if (pkt->vlan_ex) {
 		rxq->stats.vlan_ex++;
-		__vlan_hwaccel_put_tag(skb, be16_to_cpu(pkt->vlan));
+		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), be16_to_cpu(pkt->vlan));
 	}
 
 	netif_receive_skb(skb);
@@ -1633,7 +1642,7 @@ static inline void rspq_next(struct sge_rspq *rspq)
  * on this queue.  If the system is under memory shortage use a fairly
  * long delay to help recovery.
  */
-int process_responses(struct sge_rspq *rspq, int budget)
+static int process_responses(struct sge_rspq *rspq, int budget)
 {
 	struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
 	int budget_left = budget;
@@ -1886,7 +1895,7 @@ static unsigned int process_intrq(struct adapter *adapter)
  * The MSI interrupt handler handles data events from SGE response queues as
  * well as error and other async events as they all use the same MSI vector.
  */
-irqreturn_t t4vf_intr_msi(int irq, void *cookie)
+static irqreturn_t t4vf_intr_msi(int irq, void *cookie)
 {
 	struct adapter *adapter = cookie;
 
@@ -1943,7 +1952,7 @@ static void sge_rx_timer_cb(unsigned long data)
 			struct sge_fl *fl = s->egr_map[id];
 
 			clear_bit(id, s->starving_fl);
-			smp_mb__after_clear_bit();
+			smp_mb__after_atomic();
 
 			/*
 			 * Since we are accessing fl without a lock there's a
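A note on the skb-free conversions in the TX hunks above: the patch distinguishes dev_consume_skb_any(), used when the packet was handed to the hardware successfully, from dev_kfree_skb_any(), used on error paths where the packet is genuinely dropped. The _consume_ variant is invisible to drop-monitoring tools (dropwatch, the kfree_skb tracepoint consumers), while both _any() variants are safe regardless of whether the caller runs in hard-IRQ or process context, unlike plain kfree_skb()/dev_kfree_skb(). The sketch below is a minimal illustration of that pattern in a generic .ndo_start_xmit handler; it is not code from this driver, and example_ring_full()/example_copy_to_ring() are hypothetical stand-ins for the driver's own ring bookkeeping.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical stand-ins for a driver's own ring bookkeeping.  The
 * "copy" helper is assumed to copy the packet into the descriptor
 * ring (as the inline path in the patch does), so the skb may be
 * freed as soon as it returns.
 */
static bool example_ring_full(struct net_device *dev)
{
	return false;
}

static void example_copy_to_ring(struct net_device *dev, struct sk_buff *skb)
{
}

static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	if (example_ring_full(dev)) {
		/* Drop path: dev_kfree_skb_any() frees the skb and is
		 * accounted as a drop by drop-monitoring tools.
		 */
		dev_kfree_skb_any(skb);
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	example_copy_to_ring(dev, skb);

	/* Success path: dev_consume_skb_any() frees the skb without
	 * flagging a drop.  Both _any() variants pick the right free
	 * mechanism for hard-IRQ vs. process context.
	 */
	dev_consume_skb_any(skb);
	return NETDEV_TX_OK;
}

The RX-side hunks reflect a similar API update: __vlan_hwaccel_put_tag() takes the tag protocol as an explicit __be16 argument, which is why the calls above pass htons(ETH_P_8021Q) (equivalently cpu_to_be16(ETH_P_8021Q)) alongside the hardware-extracted VLAN TCI.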
