author     David S. Miller <davem@davemloft.net>  2011-08-20 17:25:36 -0700
committer  David S. Miller <davem@davemloft.net>  2011-08-20 17:25:36 -0700
commit     ca1ba7caa68520864e4b9227e67f3bbc6fed373b
tree       84010e15b506f0506c15dbf03794dd8b776074ea
parent     6461be3a54f802e00d5dcba3537271f92a90eaf3
parent     66f32a8b97f11ad73d2e7b8c192c55febb20b425
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/jkirsher/net-next
Conflicts:
drivers/net/ethernet/intel/e1000e/netdev.c
 drivers/net/ethernet/intel/e1000e/e1000.h        |   3
 drivers/net/ethernet/intel/e1000e/ethtool.c      |   9
 drivers/net/ethernet/intel/e1000e/netdev.c       | 199
 drivers/net/ethernet/intel/ixgbe/ixgbe.h         |  27
 drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c  |   2
 drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c    |  10
 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c    | 554
 7 files changed, 445 insertions(+), 359 deletions(-)
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index 8533ad7f355..fa72052a003 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -461,8 +461,9 @@ struct e1000_info {
 #define E1000_RX_DESC_PS(R, i) \
     (&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
+#define E1000_RX_DESC_EXT(R, i) \
+    (&(((union e1000_rx_desc_extended *)((R).desc))[i]))
 #define E1000_GET_DESC(R, i, type) (&(((struct type *)((R).desc))[i]))
-#define E1000_RX_DESC(R, i) E1000_GET_DESC(R, i, e1000_rx_desc)
 #define E1000_TX_DESC(R, i) E1000_GET_DESC(R, i, e1000_tx_desc)
 #define E1000_CONTEXT_DESC(R, i) E1000_GET_DESC(R, i, e1000_context_desc)
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index 6a0526a59a8..e0cbd6a0bde 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -1195,7 +1195,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
         goto err_nomem;
     }
 
-    rx_ring->size = rx_ring->count * sizeof(struct e1000_rx_desc);
+    rx_ring->size = rx_ring->count * sizeof(union e1000_rx_desc_extended);
     rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
                                        &rx_ring->dma, GFP_KERNEL);
     if (!rx_ring->desc) {
@@ -1221,7 +1221,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
     ew32(RCTL, rctl);
 
     for (i = 0; i < rx_ring->count; i++) {
-        struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i);
+        union e1000_rx_desc_extended *rx_desc;
         struct sk_buff *skb;
 
         skb = alloc_skb(2048 + NET_IP_ALIGN, GFP_KERNEL);
@@ -1239,8 +1239,9 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
             ret_val = 8;
             goto err_nomem;
         }
-        rx_desc->buffer_addr =
-            cpu_to_le64(rx_ring->buffer_info[i].dma);
+        rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
+        rx_desc->read.buffer_addr =
+            cpu_to_le64(rx_ring->buffer_info[i].dma);
         memset(skb->data, 0x00, skb->len);
     }
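For reference while reading the hunks below: E1000_RX_DESC_EXT() casts the ring
memory to the extended descriptor union. A sketch of that union, reconstructed
from the read/write-back field names used throughout this patch (the
authoritative definition lives in the driver's hardware headers, not in this
diff):

    /* The driver fills in the "read" view to hand a buffer to hardware;
     * on completion, hardware overwrites the same 16 bytes with the
     * "wb" (write-back) view.
     */
    union e1000_rx_desc_extended {
        struct {
            __le64 buffer_addr;     /* DMA address of the Rx buffer */
            __le64 reserved;
        } read;
        struct {
            struct {
                __le32 mrq;         /* multiple Rx queues */
                union {
                    __le32 rss;     /* RSS hash */
                    struct {
                        __le16 ip_id;   /* IP identification */
                        __le16 csum;    /* packet checksum */
                    } csum_ip;
                } hi_dword;
            } lower;
            struct {
                __le32 status_error;    /* extended status/error */
                __le16 length;
                __le16 vlan;            /* VLAN tag */
            } upper;
        } wb;
    };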
format\n"); - for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) { - rx_desc = E1000_RX_DESC(*rx_ring, i); + printk(KERN_INFO "R [desc] [buf addr 63:0 ] " + "[reserved 63:0 ] [bi->dma ] " + "[bi->skb] <-- Ext (Read) format\n"); + /* Extended Receive Descriptor (Write-Back) Format + * + * 63 48 47 32 31 24 23 4 3 0 + * +------------------------------------------------------+ + * | RSS Hash | | | | + * 0 +-------------------+ Rsvd | Reserved | MRQ RSS | + * | Packet | IP | | | Type | + * | Checksum | Ident | | | | + * +------------------------------------------------------+ + * 8 | VLAN Tag | Length | Extended Error | Extended Status | + * +------------------------------------------------------+ + * 63 48 47 32 31 20 19 0 + */ + printk(KERN_INFO "RWB[desc] [cs ipid mrq] " + "[vt ln xe xs] " + "[bi->skb] <-- Ext (Write-Back) format\n"); + + for (i = 0; i < rx_ring->count; i++) { buffer_info = &rx_ring->buffer_info[i]; - u0 = (struct my_u0 *)rx_desc; - printk(KERN_INFO "Rl[0x%03X] %016llX %016llX " - "%016llX %p", i, - (unsigned long long)le64_to_cpu(u0->a), - (unsigned long long)le64_to_cpu(u0->b), - (unsigned long long)buffer_info->dma, - buffer_info->skb); + rx_desc = E1000_RX_DESC_EXT(*rx_ring, i); + u1 = (struct my_u1 *)rx_desc; + staterr = le32_to_cpu(rx_desc->wb.upper.status_error); + if (staterr & E1000_RXD_STAT_DD) { + /* Descriptor Done */ + printk(KERN_INFO "RWB[0x%03X] %016llX " + "%016llX ---------------- %p", i, + (unsigned long long)le64_to_cpu(u1->a), + (unsigned long long)le64_to_cpu(u1->b), + buffer_info->skb); + } else { + printk(KERN_INFO "R [0x%03X] %016llX " + "%016llX %016llX %p", i, + (unsigned long long)le64_to_cpu(u1->a), + (unsigned long long)le64_to_cpu(u1->b), + (unsigned long long)buffer_info->dma, + buffer_info->skb); + + if (netif_msg_pktdata(adapter)) + print_hex_dump(KERN_INFO, "", + DUMP_PREFIX_ADDRESS, 16, + 1, + phys_to_virt + (buffer_info->dma), + adapter->rx_buffer_len, + true); + } + if (i == rx_ring->next_to_use) printk(KERN_CONT " NTU\n"); else if (i == rx_ring->next_to_clean) printk(KERN_CONT " NTC\n"); else printk(KERN_CONT "\n"); - - if (netif_msg_pktdata(adapter)) - print_hex_dump(KERN_INFO, "", - DUMP_PREFIX_ADDRESS, - 16, 1, - phys_to_virt(buffer_info->dma), - adapter->rx_buffer_len, true); } } @@ -576,7 +605,7 @@ static void e1000e_update_tdt_wa(struct e1000_adapter *adapter, unsigned int i) } /** - * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended + * e1000_alloc_rx_buffers - Replace used receive buffers * @adapter: address of board private structure **/ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, @@ -585,7 +614,7 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, struct net_device *netdev = adapter->netdev; struct pci_dev *pdev = adapter->pdev; struct e1000_ring *rx_ring = adapter->rx_ring; - struct e1000_rx_desc *rx_desc; + union e1000_rx_desc_extended *rx_desc; struct e1000_buffer *buffer_info; struct sk_buff *skb; unsigned int i; @@ -619,8 +648,8 @@ map_skb: break; } - rx_desc = E1000_RX_DESC(*rx_ring, i); - rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); + rx_desc = E1000_RX_DESC_EXT(*rx_ring, i); + rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma); if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) { /* @@ -761,7 +790,7 @@ static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter, { struct net_device *netdev = adapter->netdev; struct pci_dev *pdev = adapter->pdev; - struct e1000_rx_desc *rx_desc; + union e1000_rx_desc_extended *rx_desc; struct 
@@ -841,28 +870,27 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
     struct pci_dev *pdev = adapter->pdev;
     struct e1000_hw *hw = &adapter->hw;
     struct e1000_ring *rx_ring = adapter->rx_ring;
-    struct e1000_rx_desc *rx_desc, *next_rxd;
+    union e1000_rx_desc_extended *rx_desc, *next_rxd;
     struct e1000_buffer *buffer_info, *next_buffer;
-    u32 length;
+    u32 length, staterr;
     unsigned int i;
     int cleaned_count = 0;
     bool cleaned = 0;
     unsigned int total_rx_bytes = 0, total_rx_packets = 0;
 
     i = rx_ring->next_to_clean;
-    rx_desc = E1000_RX_DESC(*rx_ring, i);
+    rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
+    staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
     buffer_info = &rx_ring->buffer_info[i];
 
-    while (rx_desc->status & E1000_RXD_STAT_DD) {
+    while (staterr & E1000_RXD_STAT_DD) {
         struct sk_buff *skb;
-        u8 status;
 
         if (*work_done >= work_to_do)
             break;
         (*work_done)++;
         rmb();  /* read descriptor and rx_buffer_info after status DD */
 
-        status = rx_desc->status;
         skb = buffer_info->skb;
         buffer_info->skb = NULL;
 
@@ -871,7 +899,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
         i++;
         if (i == rx_ring->count)
             i = 0;
-        next_rxd = E1000_RX_DESC(*rx_ring, i);
+        next_rxd = E1000_RX_DESC_EXT(*rx_ring, i);
         prefetch(next_rxd);
 
         next_buffer = &rx_ring->buffer_info[i];
@@ -884,7 +912,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
                  DMA_FROM_DEVICE);
         buffer_info->dma = 0;
 
-        length = le16_to_cpu(rx_desc->length);
+        length = le16_to_cpu(rx_desc->wb.upper.length);
 
         /*
          * !EOP means multiple descriptors were used to store a single
@@ -893,7 +921,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
          * packet, if that's the case we need to toss it.  In fact, we
          * need to toss every packet with the EOP bit clear and the
          * next frame that _does_ have the EOP bit set, as it is by
          * definition only a frame fragment
          */
-        if (unlikely(!(status & E1000_RXD_STAT_EOP)))
+        if (unlikely(!(staterr & E1000_RXD_STAT_EOP)))
             adapter->flags2 |= FLAG2_IS_DISCARDING;
 
         if (adapter->flags2 & FLAG2_IS_DISCARDING) {
@@ -901,12 +929,12 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
             e_dbg("Receive packet consumed multiple buffers\n");
             /* recycle */
             buffer_info->skb = skb;
-            if (status & E1000_RXD_STAT_EOP)
+            if (staterr & E1000_RXD_STAT_EOP)
                 adapter->flags2 &= ~FLAG2_IS_DISCARDING;
             goto next_desc;
         }
 
-        if (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
+        if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
             /* recycle */
             buffer_info->skb = skb;
             goto next_desc;
         }
@@ -944,15 +972,15 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
         skb_put(skb, length);
 
         /* Receive Checksum Offload */
-        e1000_rx_checksum(adapter,
-                          (u32)(status) |
-                          ((u32)(rx_desc->errors) << 24),
-                          le16_to_cpu(rx_desc->csum), skb);
+        e1000_rx_checksum(adapter, staterr,
+                          le16_to_cpu(rx_desc->wb.lower.hi_dword.
+                                      csum_ip.csum), skb);
 
-        e1000_receive_skb(adapter, netdev, skb,status,rx_desc->special);
+        e1000_receive_skb(adapter, netdev, skb, staterr,
+                          rx_desc->wb.upper.vlan);
 
next_desc:
-        rx_desc->status = 0;
+        rx_desc->wb.upper.status_error &= cpu_to_le32(~0xFF);
 
         /* return some buffers to hardware, one at a time is too slow */
         if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
@@ -964,6 +992,8 @@ next_desc:
         /* use prefetched values */
         rx_desc = next_rxd;
         buffer_info = next_buffer;
+
+        staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
     }
     rx_ring->next_to_clean = i;
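The receive cleanup loops now snapshot the 32-bit write-back status word once
per descriptor instead of re-reading the legacy one-byte status field. A
minimal sketch of the resulting pattern (constant names as used in the hunks
above; this is a skeleton, not the verbatim loop):

    /* Sketch: poll the write-back descriptor until Descriptor Done. */
    u32 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);

    while (staterr & E1000_RXD_STAT_DD) {
        rmb();  /* read the rest of the descriptor only after seeing DD */

        if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
            /* hardware flagged a bad frame: recycle the buffer */
        } else if (staterr & E1000_RXD_STAT_EOP) {
            /* last buffer of the frame: length and VLAN tag are valid */
            u16 len  = le16_to_cpu(rx_desc->wb.upper.length);
            u16 vlan = le16_to_cpu(rx_desc->wb.upper.vlan);
        }

        rx_desc = next_rxd;     /* advance as in the loops above */
        staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
    }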
@@ -1347,35 +1377,34 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
     struct net_device *netdev = adapter->netdev;
     struct pci_dev *pdev = adapter->pdev;
     struct e1000_ring *rx_ring = adapter->rx_ring;
-    struct e1000_rx_desc *rx_desc, *next_rxd;
+    union e1000_rx_desc_extended *rx_desc, *next_rxd;
     struct e1000_buffer *buffer_info, *next_buffer;
-    u32 length;
+    u32 length, staterr;
     unsigned int i;
     int cleaned_count = 0;
     bool cleaned = false;
     unsigned int total_rx_bytes=0, total_rx_packets=0;
 
     i = rx_ring->next_to_clean;
-    rx_desc = E1000_RX_DESC(*rx_ring, i);
+    rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
+    staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
     buffer_info = &rx_ring->buffer_info[i];
 
-    while (rx_desc->status & E1000_RXD_STAT_DD) {
+    while (staterr & E1000_RXD_STAT_DD) {
         struct sk_buff *skb;
-        u8 status;
 
         if (*work_done >= work_to_do)
             break;
         (*work_done)++;
         rmb();  /* read descriptor and rx_buffer_info after status DD */
 
-        status = rx_desc->status;
         skb = buffer_info->skb;
         buffer_info->skb = NULL;
 
         ++i;
         if (i == rx_ring->count)
             i = 0;
-        next_rxd = E1000_RX_DESC(*rx_ring, i);
+        next_rxd = E1000_RX_DESC_EXT(*rx_ring, i);
         prefetch(next_rxd);
 
         next_buffer = &rx_ring->buffer_info[i];
@@ -1386,23 +1415,22 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
                  DMA_FROM_DEVICE);
         buffer_info->dma = 0;
 
-        length = le16_to_cpu(rx_desc->length);
+        length = le16_to_cpu(rx_desc->wb.upper.length);
 
         /* errors is only valid for DD + EOP descriptors */
-        if (unlikely((status & E1000_RXD_STAT_EOP) &&
-            (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) {
-            /* recycle both page and skb */
-            buffer_info->skb = skb;
-            /* an error means any chain goes out the window
-             * too */
-            if (rx_ring->rx_skb_top)
-                dev_kfree_skb_irq(rx_ring->rx_skb_top);
-            rx_ring->rx_skb_top = NULL;
-            goto next_desc;
+        if (unlikely((staterr & E1000_RXD_STAT_EOP) &&
+                     (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK))) {
+            /* recycle both page and skb */
+            buffer_info->skb = skb;
+            /* an error means any chain goes out the window too */
+            if (rx_ring->rx_skb_top)
+                dev_kfree_skb_irq(rx_ring->rx_skb_top);
+            rx_ring->rx_skb_top = NULL;
+            goto next_desc;
         }
 
 #define rxtop (rx_ring->rx_skb_top)
-        if (!(status & E1000_RXD_STAT_EOP)) {
+        if (!(staterr & E1000_RXD_STAT_EOP)) {
             /* this descriptor is only the beginning (or middle) */
             if (!rxtop) {
                 /* this is the beginning of a chain */
@@ -1457,10 +1485,9 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
         }
 
         /* Receive Checksum Offload XXX recompute due to CRC strip? */
-        e1000_rx_checksum(adapter,
-                          (u32)(status) |
-                          ((u32)(rx_desc->errors) << 24),
-                          le16_to_cpu(rx_desc->csum), skb);
+        e1000_rx_checksum(adapter, staterr,
+                          le16_to_cpu(rx_desc->wb.lower.hi_dword.
+                                      csum_ip.csum), skb);
 
         /* probably a little skewed due to removing CRC */
         total_rx_bytes += skb->len;
@@ -1473,11 +1500,11 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
             goto next_desc;
         }
 
-        e1000_receive_skb(adapter, netdev, skb, status,
-                          rx_desc->special);
+        e1000_receive_skb(adapter, netdev, skb, staterr,
+                          rx_desc->wb.upper.vlan);
 
next_desc:
-        rx_desc->status = 0;
+        rx_desc->wb.upper.status_error &= cpu_to_le32(~0xFF);
 
         /* return some buffers to hardware, one at a time is too slow */
         if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
@@ -1489,6 +1516,8 @@ next_desc:
         /* use prefetched values */
         rx_desc = next_rxd;
         buffer_info = next_buffer;
+
+        staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
     }
     rx_ring->next_to_clean = i;
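Note the recycle step in both loops: rather than zeroing the legacy status
byte, the extended path clears only the low status bits so a stale descriptor
cannot test as done again; the remaining write-back words are simply
overwritten when the descriptor is reused. A sketch of the layout this relies
on (status in bits 19:0 and errors in bits 31:20, per the format comment in
the dump code above):

    /* status_error word from the write-back descriptor:
     *   31         20 19                 0
     *  +-------------+--------------------+
     *  | ext. error  |    ext. status     |
     *  +-------------+--------------------+
     * DD (bit 0) and EOP (bit 1) sit in the low byte, so clearing 0xFF
     * is enough to keep the cleanup loop from consuming this
     * descriptor a second time.
     */
    rx_desc->wb.upper.status_error &= cpu_to_le32(~0xFF);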
@@ -2887,6 +2916,10 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
         break;
     }
 
+    /* Enable Extended Status in all Receive Descriptors */
+    rfctl = er32(RFCTL);
+    rfctl |= E1000_RFCTL_EXTEN;
+
     /*
      * 82571 and greater support packet-split where the protocol
      * header is placed in skb->data and the packet data is
@@ -2912,9 +2945,6 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
     if (adapter->rx_ps_pages) {
         u32 psrctl = 0;
 
-        /* Configure extra packet-split registers */
-        rfctl = er32(RFCTL);
-        rfctl |= E1000_RFCTL_EXTEN;
         /*
          * disable packet split support for IPv6 extension headers,
          * because some malformed IPv6 headers can hang the Rx
@@ -2922,8 +2952,6 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
          */
         rfctl |= (E1000_RFCTL_IPV6_EX_DIS |
                   E1000_RFCTL_NEW_IPV6_EXT_DIS);
 
-        ew32(RFCTL, rfctl);
-
         /* Enable Packet split descriptors */
         rctl |= E1000_RCTL_DTYP_PS;
 
@@ -2946,6 +2974,7 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
         ew32(PSRCTL, psrctl);
     }
 
+    ew32(RFCTL, rfctl);
     ew32(RCTL, rctl);
     /* just started the receive unit, no need to restart */
     adapter->flags &= ~FLAG_RX_RESTART_NOW;
@@ -2971,11 +3000,11 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
         adapter->clean_rx = e1000_clean_rx_irq_ps;
         adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps;
     } else if (adapter->netdev->mtu > ETH_FRAME_LEN + ETH_FCS_LEN) {
-        rdlen = rx_ring->count * sizeof(struct e1000_rx_desc);
+        rdlen = rx_ring->count * sizeof(union e1000_rx_desc_extended);
         adapter->clean_rx = e1000_clean_jumbo_rx_irq;
         adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers;
     } else {
-        rdlen = rx_ring->count * sizeof(struct e1000_rx_desc);
+        rdlen = rx_ring->count * sizeof(union e1000_rx_desc_extended);
         adapter->clean_rx = e1000_clean_rx_irq;
         adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
     }
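The RFCTL handling in e1000_setup_rctl() is reshuffled so extended descriptors
are enabled unconditionally and the register is written back exactly once,
after any packet-split bits have been OR'd in. Condensed, the resulting flow
looks like this (er32/ew32 are the driver's register accessors; a sketch, not
the verbatim function):

    u32 rfctl = er32(RFCTL);    /* read once */
    rfctl |= E1000_RFCTL_EXTEN; /* always use extended Rx descriptors */

    if (adapter->rx_ps_pages)   /* packet split adds its own bits */
        rfctl |= E1000_RFCTL_IPV6_EX_DIS |
                 E1000_RFCTL_NEW_IPV6_EXT_DIS;

    ew32(RFCTL, rfctl);         /* single write at the end */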
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index e04a8e49e6d..378ce46a7f9 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -91,13 +91,16 @@
 #define IXGBE_RX_BUFFER_WRITE   16  /* Must be power of 2 */
 
 #define IXGBE_TX_FLAGS_CSUM             (u32)(1)
-#define IXGBE_TX_FLAGS_VLAN             (u32)(1 << 1)
-#define IXGBE_TX_FLAGS_TSO              (u32)(1 << 2)
-#define IXGBE_TX_FLAGS_IPV4             (u32)(1 << 3)
-#define IXGBE_TX_FLAGS_FCOE             (u32)(1 << 4)
-#define IXGBE_TX_FLAGS_FSO              (u32)(1 << 5)
+#define IXGBE_TX_FLAGS_HW_VLAN          (u32)(1 << 1)
+#define IXGBE_TX_FLAGS_SW_VLAN          (u32)(1 << 2)
+#define IXGBE_TX_FLAGS_TSO              (u32)(1 << 3)
+#define IXGBE_TX_FLAGS_IPV4             (u32)(1 << 4)
+#define IXGBE_TX_FLAGS_FCOE             (u32)(1 << 5)
+#define IXGBE_TX_FLAGS_FSO              (u32)(1 << 6)
+#define IXGBE_TX_FLAGS_MAPPED_AS_PAGE   (u32)(1 << 7)
 #define IXGBE_TX_FLAGS_VLAN_MASK        0xffff0000
-#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK   0x0000e000
+#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK   0xe0000000
+#define IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT  29
 #define IXGBE_TX_FLAGS_VLAN_SHIFT       16
 
 #define IXGBE_MAX_RSC_INT_RATE          162760
@@ -141,14 +144,14 @@ struct vf_macvlans {
 /* wrapper around a pointer to a socket buffer,
  * so a DMA handle can be stored along with the buffer */
 struct ixgbe_tx_buffer {
-    struct sk_buff *skb;
-    dma_addr_t dma;
+    union ixgbe_adv_tx_desc *next_to_watch;
     unsigned long time_stamp;
-    u16 length;
-    u16 next_to_watch;
-    unsigned int bytecount;
+    dma_addr_t dma;
+    u32 length;
+    u32 tx_flags;
+    struct sk_buff *skb;
+    u32 bytecount;
     u16 gso_segs;
-    u8 mapped_as_page;
 };
 
 struct ixgbe_rx_buffer {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
index 0ace6ce1d0b..da6d53e7af9 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
@@ -414,7 +414,7 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
         u8 prio_tc[MAX_TRAFFIC_CLASS] = {0, 1, 2, 3, 4, 5, 6, 7};
         int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
 
-#ifdef CONFIG_FCOE
+#ifdef IXGBE_FCOE
         if (adapter->netdev->features & NETIF_F_FCOE_MTU)
             max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
 #endif
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
index 824edae7786..e9b992fe5e4 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
@@ -241,10 +241,12 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
      */
     if (lastsize == bufflen) {
         if (j >= IXGBE_BUFFCNT_MAX) {
-            e_err(drv, "xid=%x:%d,%d,%d:addr=%llx "
-                  "not enough user buffers. We need an extra "
-                  "buffer because lastsize is bufflen.\n",
-                  xid, i, j, dmacount, (u64)addr);
+            printk_once("Will NOT use DDP since there are not "
+                        "enough user buffers. We need an extra "
+                        "buffer because lastsize is bufflen. "
+                        "xid=%x:%d,%d,%d:addr=%llx\n",
+                        xid, i, j, dmacount, (u64)addr);
+
             goto out_noddp_free;
         }
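The VLAN tag still lives in the top 16 bits of tx_flags, so the new priority
mask/shift pair addresses the 802.1p PCP field in place (bits 15:13 of the
tag, hence bits 31:29 of tx_flags) instead of the stale low-half-word mask.
For example (a sketch using the defines above; vlan_tag is a hypothetical
16-bit tag variable):

    u32 tx_flags = (u32)vlan_tag << IXGBE_TX_FLAGS_VLAN_SHIFT;

    /* extract the 3-bit 802.1p priority from the stored tag */
    u8 prio = (tx_flags & IXGBE_TX_FLAGS_VLAN_PRIO_MASK) >>
              IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT;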
" + "xid=%x:%d,%d,%d:addr=%llx\n", + xid, i, j, dmacount, (u64)addr); + goto out_noddp_free; } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index b73194c1c44..e8aad76fa53 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -385,7 +385,7 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter) tx_ring = adapter->tx_ring[n]; tx_buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean]; - pr_info(" %5d %5X %5X %016llX %04X %3X %016llX\n", + pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n", n, tx_ring->next_to_use, tx_ring->next_to_clean, (u64)tx_buffer_info->dma, tx_buffer_info->length, @@ -424,7 +424,7 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter) tx_buffer_info = &tx_ring->tx_buffer_info[i]; u0 = (struct my_u0 *)tx_desc; pr_info("T [0x%03X] %016llX %016llX %016llX" - " %04X %3X %016llX %p", i, + " %04X %p %016llX %p", i, le64_to_cpu(u0->a), le64_to_cpu(u0->b), (u64)tx_buffer_info->dma, @@ -643,27 +643,31 @@ static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter, } } -void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *tx_ring, - struct ixgbe_tx_buffer *tx_buffer_info) +static inline void ixgbe_unmap_tx_resource(struct ixgbe_ring *ring, + struct ixgbe_tx_buffer *tx_buffer) { - if (tx_buffer_info->dma) { - if (tx_buffer_info->mapped_as_page) - dma_unmap_page(tx_ring->dev, - tx_buffer_info->dma, - tx_buffer_info->length, - DMA_TO_DEVICE); + if (tx_buffer->dma) { + if (tx_buffer->tx_flags & IXGBE_TX_FLAGS_MAPPED_AS_PAGE) + dma_unmap_page(ring->dev, + tx_buffer->dma, + tx_buffer->length, + DMA_TO_DEVICE); else - dma_unmap_single(tx_ring->dev, - tx_buffer_info->dma, - tx_buffer_info->length, - DMA_TO_DEVICE); - tx_buffer_info->dma = 0; + dma_unmap_single(ring->dev, + tx_buffer->dma, + tx_buffer->length, + DMA_TO_DEVICE); } - if (tx_buffer_info->skb) { + tx_buffer->dma = 0; +} + +void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *tx_ring, + struct ixgbe_tx_buffer *tx_buffer_info) +{ + ixgbe_unmap_tx_resource(tx_ring, tx_buffer_info); + if (tx_buffer_info->skb) dev_kfree_skb_any(tx_buffer_info->skb); - tx_buffer_info->skb = NULL; - } - tx_buffer_info->time_stamp = 0; + tx_buffer_info->skb = NULL; /* tx_buffer_info must be completely set up in the transmit path */ } @@ -797,56 +801,72 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, struct ixgbe_ring *tx_ring) { struct ixgbe_adapter *adapter = q_vector->adapter; - union ixgbe_adv_tx_desc *tx_desc, *eop_desc; - struct ixgbe_tx_buffer *tx_buffer_info; + struct ixgbe_tx_buffer *tx_buffer; + union ixgbe_adv_tx_desc *tx_desc; unsigned int total_bytes = 0, total_packets = 0; - u16 i, eop, count = 0; + u16 i = tx_ring->next_to_clean; + u16 count; - i = tx_ring->next_to_clean; - eop = tx_ring->tx_buffer_info[i].next_to_watch; - eop_desc = IXGBE_TX_DESC_ADV(tx_ring, eop); + tx_buffer = &tx_ring->tx_buffer_info[i]; + tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i); - while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) && - (count < q_vector->tx.work_limit)) { - bool cleaned = false; - rmb(); /* read buffer_info after eop_desc */ - for ( ; !cleaned; count++) { - tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i); - tx_buffer_info = &tx_ring->tx_buffer_info[i]; + for (count = 0; count < q_vector->tx.work_limit; count++) { + union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch; + + /* if next_to_watch is not set then there is no work pending */ + if (!eop_desc) + break; + + /* if 
@@ -797,56 +801,72 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
                    struct ixgbe_ring *tx_ring)
 {
     struct ixgbe_adapter *adapter = q_vector->adapter;
-    union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
-    struct ixgbe_tx_buffer *tx_buffer_info;
+    struct ixgbe_tx_buffer *tx_buffer;
+    union ixgbe_adv_tx_desc *tx_desc;
     unsigned int total_bytes = 0, total_packets = 0;
-    u16 i, eop, count = 0;
+    u16 i = tx_ring->next_to_clean;
+    u16 count;
 
-    i = tx_ring->next_to_clean;
-    eop = tx_ring->tx_buffer_info[i].next_to_watch;
-    eop_desc = IXGBE_TX_DESC_ADV(tx_ring, eop);
+    tx_buffer = &tx_ring->tx_buffer_info[i];
+    tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);
 
-    while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) &&
-           (count < q_vector->tx.work_limit)) {
-        bool cleaned = false;
-        rmb(); /* read buffer_info after eop_desc */
-        for ( ; !cleaned; count++) {
-            tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);
-            tx_buffer_info = &tx_ring->tx_buffer_info[i];
+    for (count = 0; count < q_vector->tx.work_limit; count++) {
+        union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;
+
+        /* if next_to_watch is not set then there is no work pending */
+        if (!eop_desc)
+            break;
+
+        /* if DD is not set pending work has not been completed */
+        if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
+            break;
+
+        /* count the packet as being completed */
+        tx_ring->tx_stats.completed++;
+
+        /* clear next_to_watch to prevent false hangs */
+        tx_buffer->next_to_watch = NULL;
+
+        /* prevent any other reads prior to eop_desc being verified */
+        rmb();
+
+        do {
+            ixgbe_unmap_tx_resource(tx_ring, tx_buffer);
             tx_desc->wb.status = 0;
-            cleaned = (i == eop);
+            if (likely(tx_desc == eop_desc)) {
+                eop_desc = NULL;
+                dev_kfree_skb_any(tx_buffer->skb);
+                tx_buffer->skb = NULL;
+
+                total_bytes += tx_buffer->bytecount;
+                total_packets += tx_buffer->gso_segs;
+            }
 
+            tx_buffer++;
+            tx_desc++;
             i++;
-            if (i == tx_ring->count)
+            if (unlikely(i == tx_ring->count)) {
                 i = 0;
-            if (cleaned && tx_buffer_info->skb) {
-                total_bytes += tx_buffer_info->bytecount;
-                total_packets += tx_buffer_info->gso_segs;
+                tx_buffer = tx_ring->tx_buffer_info;
+                tx_desc = IXGBE_TX_DESC_ADV(tx_ring, 0);
             }
 
-            ixgbe_unmap_and_free_tx_resource(tx_ring,
-                                             tx_buffer_info);
-        }
-
-        tx_ring->tx_stats.completed++;
-        eop = tx_ring->tx_buffer_info[i].next_to_watch;
-        eop_desc = IXGBE_TX_DESC_ADV(tx_ring, eop);
+        } while (eop_desc);
     }
 
     tx_ring->next_to_clean = i;
+    u64_stats_update_begin(&tx_ring->syncp);
     tx_ring->stats.bytes += total_bytes;
     tx_ring->stats.packets += total_packets;
-    u64_stats_update_begin(&tx_ring->syncp);
+    u64_stats_update_end(&tx_ring->syncp);
     q_vector->tx.total_bytes += total_bytes;
     q_vector->tx.total_packets += total_packets;
-    u64_stats_update_end(&tx_ring->syncp);
 
     if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) {
         /* schedule immediate reset if we believe we hung */
         struct ixgbe_hw *hw = &adapter->hw;
-        tx_desc = IXGBE_TX_DESC_ADV(tx_ring, eop);
+        tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);
         e_err(drv, "Detected Tx Unit Hang\n"
             "  Tx Queue             <%d>\n"
             "  TDH, TDT             <%x>, <%x>\n"
@@ -858,8 +878,8 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
             tx_ring->queue_index,
             IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)),
             IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)),
-            tx_ring->next_to_use, eop,
-            tx_ring->tx_buffer_info[eop].time_stamp, jiffies);
+            tx_ring->next_to_use, i,
+            tx_ring->tx_buffer_info[i].time_stamp, jiffies);
 
         netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
 
@@ -3597,7 +3617,7 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
 
     /* reconfigure the hardware */
     if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE) {
-#ifdef CONFIG_FCOE
+#ifdef IXGBE_FCOE
         if (adapter->netdev->features & NETIF_F_FCOE_MTU)
             max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
 #endif
@@ -6351,7 +6371,7 @@ static bool ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
     u32 type_tucmd = 0;
 
     if (skb->ip_summed != CHECKSUM_PARTIAL) {
-        if (!(tx_flags & IXGBE_TX_FLAGS_VLAN))
+        if (!(tx_flags & IXGBE_TX_FLAGS_HW_VLAN))
             return false;
     } else {
         u8 l4_hdr = 0;
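The ixgbe_clean_tx_irq() rework above replaces the ring-index eop bookkeeping
with a next_to_watch descriptor pointer. The invariant, roughly: the transmit
path points a packet's buffer record at its end-of-packet descriptor before
ringing the doorbell, and the cleaner only walks the packet once that
descriptor reports DD. A sketch of both halves (names from the hunks above;
fragments, not complete functions):

    /* producer (transmit path): remember where this packet ends */
    first->next_to_watch = eop_desc;

    /* consumer (cleanup): a NULL pointer means nothing is pending;
     * clearing it before the walk prevents a false hang report on a
     * packet that is already being reclaimed.
     */
    eop_desc = tx_buffer->next_to_watch;
    if (!eop_desc ||
        !(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
        return;                 /* no completed work */
    tx_buffer->next_to_watch = NULL;
    rmb();                      /* order reads after the DD check */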
@@ -6408,185 +6428,179 @@ static bool ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
     return (skb->ip_summed == CHECKSUM_PARTIAL);
 }
 
-static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
-                        struct ixgbe_ring *tx_ring,
-                        struct sk_buff *skb, u32 tx_flags,
-                        unsigned int first, const u8 hdr_len)
+static __le32 ixgbe_tx_cmd_type(u32 tx_flags)
 {
-    struct device *dev = tx_ring->dev;
-    struct ixgbe_tx_buffer *tx_buffer_info;
-    unsigned int len;
-    unsigned int total = skb->len;
-    unsigned int offset = 0, size, count = 0;
-    unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
-    unsigned int f;
-    unsigned int bytecount = skb->len;
-    u16 gso_segs = 1;
-    u16 i;
+    /* set type for advanced descriptor with frame checksum insertion */
+    __le32 cmd_type = cpu_to_le32(IXGBE_ADVTXD_DTYP_DATA |
+                                  IXGBE_ADVTXD_DCMD_IFCS |
+                                  IXGBE_ADVTXD_DCMD_DEXT);
 
-    i = tx_ring->next_to_use;
+    /* set HW vlan bit if vlan is present */
+    if (tx_flags & IXGBE_TX_FLAGS_HW_VLAN)
+        cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_VLE);
 
-    if (tx_flags & IXGBE_TX_FLAGS_FCOE)
-        /* excluding fcoe_crc_eof for FCoE */
-        total -= sizeof(struct fcoe_crc_eof);
+    /* set segmentation enable bits for TSO/FSO */
+#ifdef IXGBE_FCOE
+    if ((tx_flags & IXGBE_TX_FLAGS_TSO) || (tx_flags & IXGBE_TX_FLAGS_FSO))
+#else
+    if (tx_flags & IXGBE_TX_FLAGS_TSO)
+#endif
+        cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_TSE);
 
-    len = min(skb_headlen(skb), total);
-    while (len) {
-        tx_buffer_info = &tx_ring->tx_buffer_info[i];
-        size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);
-
-        tx_buffer_info->length = size;
-        tx_buffer_info->mapped_as_page = false;
-        tx_buffer_info->dma = dma_map_single(dev,
-                                             skb->data + offset,
-                                             size, DMA_TO_DEVICE);
-        if (dma_mapping_error(dev, tx_buffer_info->dma))
-            goto dma_error;
-        tx_buffer_info->time_stamp = jiffies;
-        tx_buffer_info->next_to_watch = i;
+    return cmd_type;
+}
 
-        len -= size;
-        total -= size;
-        offset += size;
-        count++;
+static __le32 ixgbe_tx_olinfo_status(u32 tx_flags, unsigned int paylen)
+{
+    __le32 olinfo_status =
+        cpu_to_le32(paylen << IXGBE_ADVTXD_PAYLEN_SHIFT);
 
-        if (len) {
-            i++;
-            if (i == tx_ring->count)
-                i = 0;
-        }
+    if (tx_flags & IXGBE_TX_FLAGS_TSO) {
+        olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_TXSM |
+                                     (1 << IXGBE_ADVTXD_IDX_SHIFT));
+        /* enble IPv4 checksum for TSO */
+        if (tx_flags & IXGBE_TX_FLAGS_IPV4)
+            olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IXSM);
+    }
 
-    for (f = 0; f < nr_frags; f++) {
-        struct skb_frag_struct *frag;
+    /* enable L4 checksum for TSO and TX checksum offload */
+    if (tx_flags & IXGBE_TX_FLAGS_CSUM)
+        olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_TXSM);
 
-        frag = &skb_shinfo(skb)->frags[f];
-        len = min((unsigned int)frag->size, total);
-        offset = frag->page_offset;
+#ifdef IXGBE_FCOE
+    /* use index 1 context for FCOE/FSO */
+    if (tx_flags & IXGBE_TX_FLAGS_FCOE)
+        olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_CC |
+                                     (1 << IXGBE_ADVTXD_IDX_SHIFT));
 
-        while (len) {
-            i++;
-            if (i == tx_ring->count)
-                i = 0;
+#endif
+    return olinfo_status;
+}
 
-            tx_buffer_info = &tx_ring->tx_buffer_info[i];
-            size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);
-
-            tx_buffer_info->length = size;
-            tx_buffer_info->dma = dma_map_page(dev,
-                                               frag->page,
-                                               offset, size,
-                                               DMA_TO_DEVICE);
-            tx_buffer_info->mapped_as_page = true;
-            if (dma_mapping_error(dev, tx_buffer_info->dma))
-                goto dma_error;
-            tx_buffer_info->time_stamp = jiffies;
-            tx_buffer_info->next_to_watch = i;
-
-            len -= size;
-            total -= size;
-            offset += size;
-            count++;
+#define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | \
+                       IXGBE_TXD_CMD_RS)
+
+static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
+                         struct sk_buff *skb,
+                         struct ixgbe_tx_buffer *first,
+                         u32 tx_flags,
+                         const u8 hdr_len)
+{
+    struct device *dev = tx_ring->dev;
+    struct ixgbe_tx_buffer *tx_buffer_info;
+    union ixgbe_adv_tx_desc *tx_desc;
+    dma_addr_t dma;
+    __le32 cmd_type, olinfo_status;
+    struct skb_frag_struct *frag;
+    unsigned int f = 0;
+    unsigned int data_len = skb->data_len;
+    unsigned int size = skb_headlen(skb);
+    u32 offset = 0;
+    u32 paylen = skb->len - hdr_len;
+    u16 i = tx_ring->next_to_use;
+    u16 gso_segs;
 
+#ifdef IXGBE_FCOE
+    if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
+        if (data_len >= sizeof(struct fcoe_crc_eof)) {
+            data_len -= sizeof(struct fcoe_crc_eof);
+        } else {
+            size -= sizeof(struct fcoe_crc_eof) - data_len;
+            data_len = 0;
+        }
         }
-        if (total == 0)
-            break;
     }
 
-    if (tx_flags & IXGBE_TX_FLAGS_TSO)
-        gso_segs = skb_shinfo(skb)->gso_segs;
-#ifdef IXGBE_FCOE
-    /* adjust for FCoE Sequence Offload */
-    else if (tx_flags & IXGBE_TX_FLAGS_FSO)
-        gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
-                                skb_shinfo(skb)->gso_size);
-#endif /* IXGBE_FCOE */
-    bytecount += (gso_segs - 1) * hdr_len;
+#endif
+    dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
+    if (dma_mapping_error(dev, dma))
+        goto dma_error;
 
-    /* multiply data chunks
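The hunk is cut off at this point, but the shape of the rewrite is already
visible: ixgbe_tx_cmd_type() and ixgbe_tx_olinfo_status() precompute the
per-packet descriptor words, and the mapping loop combines them with each
segment's DMA address and length. A sketch of that use, under the
advanced-descriptor read-format field names (assumed from the driver, not
shown in this excerpt):

    /* per packet: constant command and offload words */
    __le32 cmd_type = ixgbe_tx_cmd_type(tx_flags);
    __le32 olinfo_status = ixgbe_tx_olinfo_status(tx_flags,
                                                  skb->len - hdr_len);

    /* per DMA segment: only the address and length vary */
    tx_desc->read.buffer_addr = cpu_to_le64(dma);
    tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size);
    tx_desc->read.olinfo_status = olinfo_status;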