author    Alexander Duyck <alexander.h.duyck@intel.com>    2012-02-08 07:51:01 +0000
committer Jeff Kirsher <jeffrey.t.kirsher@intel.com>       2012-03-17 01:08:29 -0700
commit    091a6246869cec2ac66e897b436f7fd59ec4d316 (patch)
tree      11bfc09e746e5bbe953e81410071e291c718a991 /drivers/net/ethernet/intel
parent    fd0db0ed02a6abce5427e90d1e8522322107d62b (diff)
ixgbe: Write gso_segs and bytecount to the ring sooner
This change makes it so that gso_segs and bytecount are written to the ring sooner. This helps to simplify the logic for the two since segmentation offloads can now update them within their own function.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Tested-by: Stephen Ko <stephen.s.ko@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
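To make the arithmetic behind the change easier to follow, below is a minimal standalone sketch of the accounting the offload paths now perform. The struct tx_buffer, account_fso() and the example numbers are illustrative stand-ins, not the driver's actual definitions: the real code operates on struct ixgbe_tx_buffer, and the TSO path reads the segment count from skb_shinfo(skb)->gso_segs instead of recomputing it.

	/* Standalone sketch of the gso_segs/bytecount bookkeeping. */
	#include <stdio.h>

	struct tx_buffer {
		unsigned int bytecount;
		unsigned short gso_segs;
	};

	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

	static void account_fso(struct tx_buffer *first,
				unsigned int skb_len,
				unsigned int hdr_len,
				unsigned int gso_size)
	{
		/* xmit_frame_ring now seeds defaults for every packet */
		first->bytecount = skb_len;
		first->gso_segs = 1;

		/* the offload path recomputes the segment count
		 * (shown here the way the FCoE/FSO path does it) */
		first->gso_segs = DIV_ROUND_UP(skb_len - hdr_len, gso_size);

		/* each extra segment repeats the headers on the wire,
		 * while skb_len only counts them once */
		first->bytecount += (first->gso_segs - 1) * hdr_len;
	}

	int main(void)
	{
		struct tx_buffer first;

		/* hypothetical 10000-byte skb, 66-byte headers, 1448-byte MSS */
		account_fso(&first, 10000, 66, 1448);
		printf("segs=%hu bytes=%u\n", first.gso_segs, first.bytecount);
		return 0;
	}

With those example inputs the sketch yields 7 segments and 10396 bytes queued, which is the value the cleanup path and BQL (netdev_tx_sent_queue) now pick up directly from the first descriptor's buffer info.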
Diffstat (limited to 'drivers/net/ethernet/intel')
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c  11
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c  35
2 files changed, 21 insertions(+), 25 deletions(-)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
index 910847a901e..5f943d3f85c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
@@ -532,9 +532,14 @@ int ixgbe_fso(struct ixgbe_ring *tx_ring,
*hdr_len = sizeof(struct fcoe_crc_eof);
/* hdr_len includes fc_hdr if FCoE LSO is enabled */
- if (skb_is_gso(skb))
- *hdr_len += (skb_transport_offset(skb) +
- sizeof(struct fc_frame_header));
+ if (skb_is_gso(skb)) {
+ *hdr_len += skb_transport_offset(skb) +
+ sizeof(struct fc_frame_header);
+ /* update gso_segs and bytecount */
+ first->gso_segs = DIV_ROUND_UP(skb->len - *hdr_len,
+ skb_shinfo(skb)->gso_size);
+ first->bytecount += (first->gso_segs - 1) * *hdr_len;
+ }
/* mss_l4len_id: use 1 for FSO as TSO, no need for L4LEN */
mss_l4len_idx = skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index c05d560c004..40d729eb144 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -763,6 +763,10 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
/* clear next_to_watch to prevent false hangs */
tx_buffer->next_to_watch = NULL;
+ /* update the statistics for this packet */
+ total_bytes += tx_buffer->bytecount;
+ total_packets += tx_buffer->gso_segs;
+
/* free the skb */
dev_kfree_skb_any(tx_buffer->skb);
@@ -771,13 +775,9 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
do {
ixgbe_unmap_tx_resource(tx_ring, tx_buffer);
- if (likely(tx_desc == eop_desc)) {
+ if (likely(tx_desc == eop_desc))
eop_desc = NULL;
- total_bytes += tx_buffer->bytecount;
- total_packets += tx_buffer->gso_segs;
- }
-
tx_buffer++;
tx_desc++;
i++;
@@ -6593,9 +6593,14 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring,
0, IPPROTO_TCP, 0);
}
+ /* compute header lengths */
l4len = tcp_hdrlen(skb);
*hdr_len = skb_transport_offset(skb) + l4len;
+ /* update gso size and bytecount with header size */
+ first->gso_segs = skb_shinfo(skb)->gso_segs;
+ first->bytecount += (first->gso_segs - 1) * *hdr_len;
+
/* mss_l4len_id: use 1 as index for TSO */
mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
@@ -6757,7 +6762,6 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
u32 offset = 0;
u32 paylen = skb->len - hdr_len;
u16 i = tx_ring->next_to_use;
- u16 gso_segs;
#ifdef IXGBE_FCOE
if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
@@ -6843,22 +6847,7 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
tx_ring->next_to_use = i;
- if (tx_flags & IXGBE_TX_FLAGS_TSO)
- gso_segs = skb_shinfo(skb)->gso_segs;
-#ifdef IXGBE_FCOE
- /* adjust for FCoE Sequence Offload */
- else if (tx_flags & IXGBE_TX_FLAGS_FSO)
- gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
- skb_shinfo(skb)->gso_size);
-#endif /* IXGBE_FCOE */
- else
- gso_segs = 1;
-
- /* multiply data chunks by size of headers */
- tx_buffer_info->bytecount = paylen + (gso_segs * hdr_len);
- tx_buffer_info->gso_segs = gso_segs;
-
- netdev_tx_sent_queue(txring_txq(tx_ring), tx_buffer_info->bytecount);
+ netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
/* set the timestamp */
first->time_stamp = jiffies;
@@ -7071,6 +7060,8 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
/* record the location of the first descriptor for this packet */
first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
first->skb = skb;
+ first->bytecount = skb->len;
+ first->gso_segs = 1;
/* if we have a HW VLAN tag being added default to the HW one */
if (vlan_tx_tag_present(skb)) {