author    Alexander Duyck <alexander.h.duyck@intel.com>  2009-10-27 15:53:06 +0000
committer David S. Miller <davem@davemloft.net>          2009-10-28 01:20:32 -0700
commit    e694e964fc1241b4981873bdccce70438d5f0394 (patch)
tree      5073282de0459052a9647ba247763c143d23ffc3 /drivers
parent    85ad76b2f9c4956ec90c86298b22bb35c326e772 (diff)
igb: place a pointer to the netdev struct in the ring itself
This change adds a pointer to the netdev to the ring itself. The idea is that at some point in the future it will be possible to support multiple netdevs from a single adapter struct.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
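As a rough illustration of the pattern this commit introduces (this is not the actual igb code; the types and names below are simplified, hypothetical stand-ins for struct net_device and struct igb_ring), giving each ring a back-pointer to its owning netdev lets ring-level helpers drop the separate netdev argument, much as igb_maybe_stop_tx() does in the diff below:

/*
 * Minimal user-space sketch of the "back pointer in the ring" pattern.
 * fake_netdev/fake_ring are stand-ins, not kernel structures.
 */
#include <stdio.h>

struct fake_netdev {
	const char *name;
};

struct fake_ring {
	struct fake_netdev *netdev;   /* back pointer, as added to igb_ring */
	unsigned int queue_index;
	unsigned int unused_desc;
};

/* Takes only the ring; the owning netdev is recovered from the back pointer. */
static int maybe_stop_tx(struct fake_ring *tx_ring, unsigned int size)
{
	if (tx_ring->unused_desc >= size)
		return 0;
	printf("stopping subqueue %u on %s\n",
	       tx_ring->queue_index, tx_ring->netdev->name);
	return 1;
}

int main(void)
{
	struct fake_netdev dev = { .name = "eth0" };
	struct fake_ring ring = {
		.netdev = &dev,       /* set once when queues are allocated */
		.queue_index = 0,
		.unused_desc = 2,
	};

	maybe_stop_tx(&ring, 8);      /* caller no longer passes the netdev */
	return 0;
}

With several netdevs each owning a subset of rings, the same helper works unchanged, which is the future direction the commit message points at.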
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/net/igb/igb.h       |  3
-rw-r--r--  drivers/net/igb/igb_main.c  | 29
2 files changed, 16 insertions(+), 16 deletions(-)
diff --git a/drivers/net/igb/igb.h b/drivers/net/igb/igb.h
index 0c30c5e375c..2416c12af3f 100644
--- a/drivers/net/igb/igb.h
+++ b/drivers/net/igb/igb.h
@@ -175,9 +175,10 @@ struct igb_q_vector {
struct igb_ring {
struct igb_q_vector *q_vector; /* backlink to q_vector */
- void *desc; /* descriptor ring memory */
+ struct net_device *netdev; /* back pointer to net_device */
struct pci_dev *pdev; /* pci device for dma mapping */
dma_addr_t dma; /* phys address of the ring */
+ void *desc; /* descriptor ring memory */
unsigned int size; /* length of desc. ring in bytes */
unsigned int count; /* number of desc. in the ring */
u16 next_to_use;
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 00f3f2db294..3dc8e88c518 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -101,7 +101,6 @@ static void igb_update_phy_info(unsigned long);
static void igb_watchdog(unsigned long);
static void igb_watchdog_task(struct work_struct *);
static netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *,
- struct net_device *,
struct igb_ring *);
static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb,
struct net_device *);
@@ -437,6 +436,7 @@ static int igb_alloc_queues(struct igb_adapter *adapter)
ring->count = adapter->tx_ring_count;
ring->queue_index = i;
ring->pdev = adapter->pdev;
+ ring->netdev = adapter->netdev;
/* For 82575, context index must be unique per ring. */
if (adapter->hw.mac.type == e1000_82575)
ring->flags = IGB_RING_FLAG_TX_CTX_IDX;
@@ -447,6 +447,7 @@ static int igb_alloc_queues(struct igb_adapter *adapter)
ring->count = adapter->rx_ring_count;
ring->queue_index = i;
ring->pdev = adapter->pdev;
+ ring->netdev = adapter->netdev;
ring->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
ring->flags = IGB_RING_FLAG_RX_CSUM; /* enable rx checksum */
/* set flag indicating ring supports SCTP checksum offload */
@@ -3550,9 +3551,10 @@ static inline void igb_tx_queue_adv(struct igb_ring *tx_ring,
mmiowb();
}
-static int __igb_maybe_stop_tx(struct net_device *netdev,
- struct igb_ring *tx_ring, int size)
+static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
{
+ struct net_device *netdev = tx_ring->netdev;
+
netif_stop_subqueue(netdev, tx_ring->queue_index);
/* Herbert's original patch had:
@@ -3571,19 +3573,17 @@ static int __igb_maybe_stop_tx(struct net_device *netdev,
return 0;
}
-static int igb_maybe_stop_tx(struct net_device *netdev,
- struct igb_ring *tx_ring, int size)
+static int igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
{
if (igb_desc_unused(tx_ring) >= size)
return 0;
- return __igb_maybe_stop_tx(netdev, tx_ring, size);
+ return __igb_maybe_stop_tx(tx_ring, size);
}
static netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
- struct net_device *netdev,
struct igb_ring *tx_ring)
{
- struct igb_adapter *adapter = netdev_priv(netdev);
+ struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
unsigned int first;
unsigned int tx_flags = 0;
u8 hdr_len = 0;
@@ -3606,7 +3606,7 @@ static netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
* + 1 desc for skb->data,
* + 1 desc for context descriptor,
* otherwise try next time */
- if (igb_maybe_stop_tx(netdev, tx_ring, skb_shinfo(skb)->nr_frags + 4)) {
+ if (igb_maybe_stop_tx(tx_ring, skb_shinfo(skb)->nr_frags + 4)) {
/* this is a hard error */
return NETDEV_TX_BUSY;
}
@@ -3665,7 +3665,7 @@ static netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
igb_tx_queue_adv(tx_ring, tx_flags, count, skb->len, hdr_len);
/* Make sure there is space in the ring for the next send. */
- igb_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 4);
+ igb_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 4);
return NETDEV_TX_OK;
}
@@ -3684,7 +3684,7 @@ static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb,
* to a flow. Right now, performance is impacted slightly negatively
* if using multiple tx queues. If the stack breaks away from a
* single qdisc implementation, we can look at this again. */
- return igb_xmit_frame_ring_adv(skb, netdev, tx_ring);
+ return igb_xmit_frame_ring_adv(skb, tx_ring);
}
/**
@@ -4667,7 +4667,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
{
struct igb_adapter *adapter = q_vector->adapter;
struct igb_ring *tx_ring = q_vector->tx_ring;
- struct net_device *netdev = adapter->netdev;
+ struct net_device *netdev = tx_ring->netdev;
struct e1000_hw *hw = &adapter->hw;
struct igb_buffer *buffer_info;
struct sk_buff *skb;
@@ -4841,8 +4841,8 @@ static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector,
int *work_done, int budget)
{
struct igb_adapter *adapter = q_vector->adapter;
- struct net_device *netdev = adapter->netdev;
struct igb_ring *rx_ring = q_vector->rx_ring;
+ struct net_device *netdev = rx_ring->netdev;
struct e1000_hw *hw = &adapter->hw;
struct pci_dev *pdev = rx_ring->pdev;
union e1000_adv_rx_desc *rx_desc , *next_rxd;
@@ -5018,8 +5018,7 @@ next_desc:
static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring,
int cleaned_count)
{
- struct igb_adapter *adapter = rx_ring->q_vector->adapter;
- struct net_device *netdev = adapter->netdev;
+ struct net_device *netdev = rx_ring->netdev;
union e1000_adv_rx_desc *rx_desc;
struct igb_buffer *buffer_info;
struct sk_buff *skb;