author     Mallikarjuna R Chilakala <mallikarjuna.chilakala@intel.com>   2010-02-25 23:14:37 +0000
committer  David S. Miller <davem@davemloft.net>                         2010-02-26 02:10:10 -0800
commit     43634e820e393ba48bb2efc3d330dee4203e80d8 (patch)
tree       1b47a48304ed6dd5bb4d3390d04484e870a70649 /drivers
parent     41a655ba5654e47847505c164f77f8190ca9ed27 (diff)
ixgbe: Fix DMA mapping/unmapping issues when HWRSC is enabled on IOMMU enabled kernels
Work around an 82599 hardware issue seen when HWRSC is enabled on
IOMMU-enabled kernels. The 82599 updates the header information after
setting the descriptor to done, resulting in DMA mapping/unmapping issues
on IOMMU-enabled systems. To work around the issue, delay unmapping of the
first packet, which carries the header information, until end of packet
(EOP) is reached.
Signed-off-by: Mallikarjuna R Chilakala <mallikarjuna.chilakala@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
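
The shape of the workaround, independent of the driver internals: while hardware RSC is coalescing a flow, the buffer that carries the packet headers must stay DMA-mapped until the descriptor flagged EOP arrives, because the 82599 may still write to the header area after the first descriptor has been marked done. The sketch below models that decision in plain userspace C; fake_skb, handle_rx_buffer, handle_eop and the stand-in unmap_buffer helper are illustrative names for this note, not ixgbe code.

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t dma_addr_t;            /* stand-in for the kernel type */

struct fake_skb {
	dma_addr_t stashed_dma;         /* models IXGBE_RSC_CB(skb)->dma            */
	bool       is_first_buf;        /* models !skb->prev: head of the RSC chain */
};

static void unmap_buffer(dma_addr_t dma)
{
	/* Stand-in for pci_unmap_single(): just report what would happen. */
	printf("unmap buffer at dma=0x%llx\n", (unsigned long long)dma);
}

/*
 * Called once per completed RX descriptor.  When RSC is coalescing and
 * this is the first (header-carrying) buffer of a chain that is not yet
 * complete, defer the unmap: remember the DMA address and keep the
 * mapping alive so the hardware can still touch the header.
 */
static void handle_rx_buffer(struct fake_skb *skb, dma_addr_t dma,
			     bool rsc_enabled, bool is_eop)
{
	if (rsc_enabled && !is_eop && skb->is_first_buf)
		skb->stashed_dma = dma;         /* defer until EOP   */
	else
		unmap_buffer(dma);              /* safe to unmap now */
}

/* Called when the descriptor flagged EOP finally arrives. */
static void handle_eop(struct fake_skb *skb)
{
	if (skb->stashed_dma) {
		unmap_buffer(skb->stashed_dma);
		skb->stashed_dma = 0;
	}
}

int main(void)
{
	struct fake_skb skb = { .is_first_buf = true };

	handle_rx_buffer(&skb, 0x1000, true, false);    /* header buffer, deferred  */
	skb.is_first_buf = false;                       /* later buffers are chained */
	handle_rx_buffer(&skb, 0x2000, true, false);    /* middle buffer, unmapped  */
	handle_eop(&skb);                               /* header unmapped at EOP   */
	return 0;
}
```

Run as is, the model unmaps the middle buffer immediately and the header buffer only at EOP, which is the ordering the patch below enforces through IXGBE_RSC_CB(skb)->dma.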
Diffstat (limited to 'drivers')
-rw-r--r-- | drivers/net/ixgbe/ixgbe_main.c | 32 |
1 file changed, 29 insertions(+), 3 deletions(-)
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 330879004ea..4a0102218d3 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -818,6 +818,12 @@ static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb,
 	return skb;
 }
 
+struct ixgbe_rsc_cb {
+	dma_addr_t dma;
+};
+
+#define IXGBE_RSC_CB(skb) ((struct ixgbe_rsc_cb *)(skb)->cb)
+
 static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                                struct ixgbe_ring *rx_ring,
                                int *work_done, int work_to_do)
@@ -867,9 +873,21 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 		rx_buffer_info->skb = NULL;
 
 		if (rx_buffer_info->dma) {
-			pci_unmap_single(pdev, rx_buffer_info->dma,
-			                 rx_ring->rx_buf_len,
-			                 PCI_DMA_FROMDEVICE);
+			if ((adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) &&
+			    (!(staterr & IXGBE_RXD_STAT_EOP)) &&
+			    (!(skb->prev)))
+				/*
+				 * When HWRSC is enabled, delay unmapping
+				 * of the first packet. It carries the
+				 * header information, HW may still
+				 * access the header after the writeback.
+				 * Only unmap it when EOP is reached
+				 */
+				IXGBE_RSC_CB(skb)->dma = rx_buffer_info->dma;
+			else
+				pci_unmap_single(pdev, rx_buffer_info->dma,
+				                 rx_ring->rx_buf_len,
+				                 PCI_DMA_FROMDEVICE);
 			rx_buffer_info->dma = 0;
 			skb_put(skb, len);
 		}
@@ -917,6 +935,10 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 		if (skb->prev)
 			skb = ixgbe_transform_rsc_queue(skb, &(rx_ring->rsc_count));
 		if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
+			if (IXGBE_RSC_CB(skb)->dma)
+				pci_unmap_single(pdev, IXGBE_RSC_CB(skb)->dma,
+				                 rx_ring->rx_buf_len,
+				                 PCI_DMA_FROMDEVICE);
 			if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)
 				rx_ring->rsc_count += skb_shinfo(skb)->nr_frags;
 			else
@@ -3104,6 +3126,10 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
 			rx_buffer_info->skb = NULL;
 			do {
 				struct sk_buff *this = skb;
+				if (IXGBE_RSC_CB(this)->dma)
+					pci_unmap_single(pdev, IXGBE_RSC_CB(this)->dma,
+					                 rx_ring->rx_buf_len,
+					                 PCI_DMA_FROMDEVICE);
 				skb = skb->prev;
 				dev_kfree_skb(this);
 			} while (skb);
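
A note on where the deferred address lives: the patch parks it in skb->cb through the IXGBE_RSC_CB() cast. The control block is a small per-skb scratch area (char cb[48] in struct sk_buff) owned by whichever layer currently holds the skb, and it starts out zeroed for a freshly allocated skb, which is what lets the if (IXGBE_RSC_CB(skb)->dma) checks above distinguish "nothing deferred" from a pending unmap. Below is a minimal userspace model of the same cast-and-check pattern; fake_skb is an illustrative stand-in for struct sk_buff, not kernel code.

```c
#include <assert.h>
#include <stdint.h>
#include <string.h>

typedef uint64_t dma_addr_t;            /* stand-in for the kernel type */

/* Simplified stand-in for struct sk_buff: only the scratch area matters here. */
struct fake_skb {
	union {
		char cb[48];            /* models struct sk_buff::cb             */
		unsigned long align;    /* keep cb suitably aligned for the cast */
	};
};

/* Same shape as the private state the patch parks in skb->cb. */
struct ixgbe_rsc_cb {
	dma_addr_t dma;
};

/* Same shape as the patch's IXGBE_RSC_CB() accessor. */
#define IXGBE_RSC_CB(skb) ((struct ixgbe_rsc_cb *)(skb)->cb)

int main(void)
{
	struct fake_skb skb;

	/* Compile-time check that the private struct fits in the control block. */
	static_assert(sizeof(struct ixgbe_rsc_cb) <= sizeof(skb.cb),
		      "private state must fit in the control block");

	memset(&skb, 0, sizeof(skb));           /* a fresh skb has a zeroed cb */

	IXGBE_RSC_CB(&skb)->dma = 0x1000;       /* stash a deferred mapping    */
	assert(IXGBE_RSC_CB(&skb)->dma != 0);   /* later: pending unmap found  */
	IXGBE_RSC_CB(&skb)->dma = 0;            /* cleared after unmapping     */
	return 0;
}
```

The only constraint on this idiom is that the private struct fits in the control block; the one-member struct ixgbe_rsc_cb added by the patch fits with room to spare.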