Diffstat (limited to 'drivers/net/forcedeth.c')
-rw-r--r--  drivers/net/forcedeth.c | 1342
1 file changed, 898 insertions(+), 444 deletions(-)
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 93f2b7a2216..a363148d019 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -111,6 +111,7 @@
* 0.57: 14 May 2006: Mac address set in probe/remove and order corrections.
* 0.58: 30 Oct 2006: Added support for sideband management unit.
* 0.59: 30 Oct 2006: Added support for recoverable error.
+ * 0.60: 20 Jan 2007: Code optimizations for rings, rx & tx data paths, and stats.
*
* Known bugs:
* We suspect that on some hardware no TX done interrupts are generated.
@@ -127,7 +128,7 @@
#else
#define DRIVERNAPI
#endif
-#define FORCEDETH_VERSION "0.59"
+#define FORCEDETH_VERSION "0.60"
#define DRV_NAME "forcedeth"
#include <linux/module.h>
@@ -173,9 +174,10 @@
#define DEV_HAS_MSI_X 0x0080 /* device supports MSI-X */
#define DEV_HAS_POWER_CNTRL 0x0100 /* device supports power savings */
#define DEV_HAS_PAUSEFRAME_TX 0x0200 /* device supports tx pause frames */
-#define DEV_HAS_STATISTICS 0x0400 /* device supports hw statistics */
-#define DEV_HAS_TEST_EXTENDED 0x0800 /* device supports extended diagnostic test */
-#define DEV_HAS_MGMT_UNIT 0x1000 /* device supports management unit */
+#define DEV_HAS_STATISTICS_V1 0x0400 /* device supports hw statistics version 1 */
+#define DEV_HAS_STATISTICS_V2 0x0800 /* device supports hw statistics version 2 */
+#define DEV_HAS_TEST_EXTENDED 0x1000 /* device supports extended diagnostic test */
+#define DEV_HAS_MGMT_UNIT 0x2000 /* device supports management unit */
enum {
NvRegIrqStatus = 0x000,
@@ -210,7 +212,7 @@ enum {
* NVREG_POLL_DEFAULT=97 would result in an interval length of 1 ms
*/
NvRegPollingInterval = 0x00c,
-#define NVREG_POLL_DEFAULT_THROUGHPUT 970
+#define NVREG_POLL_DEFAULT_THROUGHPUT 970 /* backup tx cleanup if loop max reached */
#define NVREG_POLL_DEFAULT_CPU 13
NvRegMSIMap0 = 0x020,
NvRegMSIMap1 = 0x024,
@@ -304,8 +306,8 @@ enum {
#define NVREG_TXRXCTL_RESET 0x0010
#define NVREG_TXRXCTL_RXCHECK 0x0400
#define NVREG_TXRXCTL_DESC_1 0
-#define NVREG_TXRXCTL_DESC_2 0x02100
-#define NVREG_TXRXCTL_DESC_3 0x02200
+#define NVREG_TXRXCTL_DESC_2 0x002100
+#define NVREG_TXRXCTL_DESC_3 0xc02200
#define NVREG_TXRXCTL_VLANSTRIP 0x00040
#define NVREG_TXRXCTL_VLANINS 0x00080
NvRegTxRingPhysAddrHigh = 0x148,
@@ -487,7 +489,8 @@ union ring_type {
/* Miscellaneous hardware related defines: */
#define NV_PCI_REGSZ_VER1 0x270
-#define NV_PCI_REGSZ_VER2 0x604
+#define NV_PCI_REGSZ_VER2 0x2d4
+#define NV_PCI_REGSZ_VER3 0x604
/* various timeout delays: all in usec */
#define NV_TXRX_RESET_DELAY 4
@@ -518,12 +521,6 @@ union ring_type {
#define TX_RING_MIN 64
#define RING_MAX_DESC_VER_1 1024
#define RING_MAX_DESC_VER_2_3 16384
-/*
- * Difference between the get and put pointers for the tx ring.
- * This is used to throttle the amount of data outstanding in the
- * tx ring.
- */
-#define TX_LIMIT_DIFFERENCE 1
/* rx/tx mac addr + type + vlan + align + slack*/
#define NV_RX_HEADERS (64)
@@ -611,9 +608,6 @@ static const struct nv_ethtool_str nv_estats_str[] = {
{ "tx_carrier_errors" },
{ "tx_excess_deferral" },
{ "tx_retry_error" },
- { "tx_deferral" },
- { "tx_packets" },
- { "tx_pause" },
{ "rx_frame_error" },
{ "rx_extra_byte" },
{ "rx_late_collision" },
@@ -626,11 +620,17 @@ static const struct nv_ethtool_str nv_estats_str[] = {
{ "rx_unicast" },
{ "rx_multicast" },
{ "rx_broadcast" },
+ { "rx_packets" },
+ { "rx_errors_total" },
+ { "tx_errors_total" },
+
+ /* version 2 stats */
+ { "tx_deferral" },
+ { "tx_packets" },
{ "rx_bytes" },
+ { "tx_pause" },
{ "rx_pause" },
- { "rx_drop_frame" },
- { "rx_packets" },
- { "rx_errors_total" }
+ { "rx_drop_frame" }
};
struct nv_ethtool_stats {
@@ -643,9 +643,6 @@ struct nv_ethtool_stats {
u64 tx_carrier_errors;
u64 tx_excess_deferral;
u64 tx_retry_error;
- u64 tx_deferral;
- u64 tx_packets;
- u64 tx_pause;
u64 rx_frame_error;
u64 rx_extra_byte;
u64 rx_late_collision;
@@ -658,13 +655,22 @@ struct nv_ethtool_stats {
u64 rx_unicast;
u64 rx_multicast;
u64 rx_broadcast;
+ u64 rx_packets;
+ u64 rx_errors_total;
+ u64 tx_errors_total;
+
+ /* version 2 stats */
+ u64 tx_deferral;
+ u64 tx_packets;
u64 rx_bytes;
+ u64 tx_pause;
u64 rx_pause;
u64 rx_drop_frame;
- u64 rx_packets;
- u64 rx_errors_total;
};
+#define NV_DEV_STATISTICS_V2_COUNT (sizeof(struct nv_ethtool_stats)/sizeof(u64))
+#define NV_DEV_STATISTICS_V1_COUNT (NV_DEV_STATISTICS_V2_COUNT - 6)
+
/* diagnostics */
#define NV_TEST_COUNT_BASE 3
#define NV_TEST_COUNT_EXTENDED 4
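
The two count macros above derive the exported-stats count from the struct layout itself: V2 is the whole struct in u64 units, and V1 subtracts the six version-2 fields grouped at the tail. A minimal sketch of how an ethtool hook might pick between them using the feature flags added earlier in this diff; the helper itself is an illustration, not part of the patch:

/* Illustrative sketch (not from the patch): choose the stats count
 * from the device feature flags defined above. */
static int nv_get_stats_count_sketch(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);

	if (np->driver_data & DEV_HAS_STATISTICS_V2)
		return NV_DEV_STATISTICS_V2_COUNT;  /* whole struct, in u64s */
	if (np->driver_data & DEV_HAS_STATISTICS_V1)
		return NV_DEV_STATISTICS_V1_COUNT;  /* minus the 6 v2 tail fields */
	return 0;                                   /* no hardware counters */
}
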
@@ -691,6 +697,12 @@ static const struct register_test nv_registers_test[] = {
{ 0,0 }
};
+struct nv_skb_map {
+ struct sk_buff *skb;
+ dma_addr_t dma;
+ unsigned int dma_len;
+};
+
/*
* SMP locking:
* All hardware access under dev->priv->lock, except the performance
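
The new nv_skb_map record replaces the parallel arrays (rx_skbuff/rx_dma and tx_skbuff/tx_dma/tx_dma_len, removed further down) with one context entry per ring slot, so all per-slot state travels behind a single pointer. A plain-C analogue of the idea, with stand-in types since sk_buff and dma_addr_t are kernel-side:

/* Plain-C analogue (illustration only): one record per slot instead of
 * three parallel arrays indexed separately. */
struct skb_map_sketch {
	void         *skb;      /* stands in for struct sk_buff * */
	unsigned long dma;      /* stands in for dma_addr_t */
	unsigned int  dma_len;
};

static struct skb_map_sketch tx_slots[64];  /* np->tx_skb, tx_ring_size deep */

static void release_slot_sketch(struct skb_map_sketch *m)
{
	/* unmap m->dma for m->dma_len bytes, then free m->skb ... */
	m->skb = NULL;
	m->dma = 0;
}
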
@@ -741,10 +753,12 @@ struct fe_priv {
/* rx specific fields.
* Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
*/
+ union ring_type get_rx, put_rx, first_rx, last_rx;
+ struct nv_skb_map *get_rx_ctx, *put_rx_ctx;
+ struct nv_skb_map *first_rx_ctx, *last_rx_ctx;
+ struct nv_skb_map *rx_skb;
+
union ring_type rx_ring;
- unsigned int cur_rx, refill_rx;
- struct sk_buff **rx_skbuff;
- dma_addr_t *rx_dma;
unsigned int rx_buf_sz;
unsigned int pkt_limit;
struct timer_list oom_kick;
@@ -761,15 +775,15 @@ struct fe_priv {
/*
* tx specific fields.
*/
+ union ring_type get_tx, put_tx, first_tx, last_tx;
+ struct nv_skb_map *get_tx_ctx, *put_tx_ctx;
+ struct nv_skb_map *first_tx_ctx, *last_tx_ctx;
+ struct nv_skb_map *tx_skb;
+
union ring_type tx_ring;
- unsigned int next_tx, nic_tx;
- struct sk_buff **tx_skbuff;
- dma_addr_t *tx_dma;
- unsigned int *tx_dma_len;
u32 tx_flags;
int tx_ring_size;
- int tx_limit_start;
- int tx_limit_stop;
+ int tx_stop;
/* vlan fields */
struct vlan_group *vlangrp;
@@ -921,16 +935,10 @@ static void free_rings(struct net_device *dev)
pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
np->rx_ring.ex, np->ring_addr);
}
- if (np->rx_skbuff)
- kfree(np->rx_skbuff);
- if (np->rx_dma)
- kfree(np->rx_dma);
- if (np->tx_skbuff)
- kfree(np->tx_skbuff);
- if (np->tx_dma)
- kfree(np->tx_dma);
- if (np->tx_dma_len)
- kfree(np->tx_dma_len);
+ if (np->rx_skb)
+ kfree(np->rx_skb);
+ if (np->tx_skb)
+ kfree(np->tx_skb);
}
static int using_multi_irqs(struct net_device *dev)
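
A side note on the cleanup above: kfree(NULL) is a defined no-op in the kernel, so the NULL guards are redundant and the two branches could equally read:

/* equivalent, since kfree() ignores NULL pointers */
kfree(np->rx_skb);
kfree(np->tx_skb);
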
@@ -1279,6 +1287,61 @@ static void nv_mac_reset(struct net_device *dev)
pci_push(base);
}
+static void nv_get_hw_stats(struct net_device *dev)
+{
+ struct fe_priv *np = netdev_priv(dev);
+ u8 __iomem *base = get_hwbase(dev);
+
+ np->estats.tx_bytes += readl(base + NvRegTxCnt);
+ np->estats.tx_zero_rexmt += readl(base + NvRegTxZeroReXmt);
+ np->estats.tx_one_rexmt += readl(base + NvRegTxOneReXmt);
+ np->estats.tx_many_rexmt += readl(base + NvRegTxManyReXmt);
+ np->estats.tx_late_collision += readl(base + NvRegTxLateCol);
+ np->estats.tx_fifo_errors += readl(base + NvRegTxUnderflow);
+ np->estats.tx_carrier_errors += readl(base + NvRegTxLossCarrier);
+ np->estats.tx_excess_deferral += readl(base + NvRegTxExcessDef);
+ np->estats.tx_retry_error += readl(base + NvRegTxRetryErr);
+ np->estats.rx_frame_error += readl(base + NvRegRxFrameErr);
+ np->estats.rx_extra_byte += readl(base + NvRegRxExtraByte);
+ np->estats.rx_late_collision += readl(base + NvRegRxLateCol);
+ np->estats.rx_runt += readl(base + NvRegRxRunt);
+ np->estats.rx_frame_too_long += readl(base + NvRegRxFrameTooLong);
+ np->estats.rx_over_errors += readl(base + NvRegRxOverflow);
+ np->estats.rx_crc_errors += readl(base + NvRegRxFCSErr);
+ np->estats.rx_frame_align_error += readl(base + NvRegRxFrameAlignErr);
+ np->estats.rx_length_error += readl(base + NvRegRxLenErr);
+ np->estats.rx_unicast += readl(base + NvRegRxUnicast);
+ np->estats.rx_multicast += readl(base + NvRegRxMulticast);
+ np->estats.rx_broadcast += readl(base + NvRegRxBroadcast);
+ np->estats.rx_packets =
+ np->estats.rx_unicast +
+ np->estats.rx_multicast +
+ np->estats.rx_broadcast;
+ np->estats.rx_errors_total =
+ np->estats.rx_crc_errors +
+ np->estats.rx_over_errors +
+ np->estats.rx_frame_error +
+ (np->estats.rx_frame_align_error - np->estats.rx_extra_byte) +
+ np->estats.rx_late_collision +
+ np->estats.rx_runt +
+ np->estats.rx_frame_too_long;
+ np->estats.tx_errors_total =
+ np->estats.tx_late_collision +
+ np->estats.tx_fifo_errors +
+ np->estats.tx_carrier_errors +
+ np->estats.tx_excess_deferral +
+ np->estats.tx_retry_error;
+
+ if (np->driver_data & DEV_HAS_STATISTICS_V2) {
+ np->estats.tx_deferral += readl(base + NvRegTxDef);
+ np->estats.tx_packets += readl(base + NvRegTxFrame);
+ np->estats.rx_bytes += readl(base + NvRegRxCnt);
+ np->estats.tx_pause += readl(base + NvRegTxPause);
+ np->estats.rx_pause += readl(base + NvRegRxPause);
+ np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame);
+ }
+}
+
/*
* nv_get_stats: dev->get_stats function
* Get latest stats value from the nic.
@@ -1289,10 +1352,19 @@ static struct net_device_stats *nv_get_stats(struct net_device *dev)
{
struct fe_priv *np = netdev_priv(dev);
- /* It seems that the nic always generates interrupts and doesn't
- * accumulate errors internally. Thus the current values in np->stats
- * are already up to date.
- */
+ /* If the nic supports hw counters then retrieve latest values */
+ if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2)) {
+ nv_get_hw_stats(dev);
+
+ /* copy to net_device stats */
+ np->stats.tx_bytes = np->estats.tx_bytes;
+ np->stats.tx_fifo_errors = np->estats.tx_fifo_errors;
+ np->stats.tx_carrier_errors = np->estats.tx_carrier_errors;
+ np->stats.rx_crc_errors = np->estats.rx_crc_errors;
+ np->stats.rx_over_errors = np->estats.rx_over_errors;
+ np->stats.rx_errors = np->estats.rx_errors_total;
+ np->stats.tx_errors = np->estats.tx_errors_total;
+ }
return &np->stats;
}
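
The += accumulation in nv_get_hw_stats() implies the hardware counters clear on read: each readl() drains the register into a 64-bit software accumulator, while the derived totals (rx_packets, rx_errors_total, tx_errors_total) are recomputed with plain = from those accumulators so they are never double counted. A standalone illustration of the two update styles, with the register read faked:

#include <stdint.h>

static uint64_t rx_crc_errors, rx_over_errors, rx_errors_total;

/* Stand-in for readl() on a clear-on-read hardware counter. */
static uint32_t read_to_clear(void) { return 0; }

static void get_hw_stats_sketch(void)
{
	/* Accumulate: the register resets to zero on every read. */
	rx_crc_errors  += read_to_clear();
	rx_over_errors += read_to_clear();

	/* Recompute: derived totals use '=' so repeated calls never
	 * double count what the accumulators already hold. */
	rx_errors_total = rx_crc_errors + rx_over_errors;
}
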
@@ -1304,43 +1376,63 @@ static struct net_device_stats *nv_get_stats(struct net_device *dev)
static int nv_alloc_rx(struct net_device *dev)
{
struct fe_priv *np = netdev_priv(dev);
- unsigned int refill_rx = np->refill_rx;
- int nr;
+ struct ring_desc* less_rx;
- while (np->cur_rx != refill_rx) {
- struct sk_buff *skb;
-
- nr = refill_rx % np->rx_ring_size;
- if (np->rx_skbuff[nr] == NULL) {
-
- skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
- if (!skb)
- break;
+ less_rx = np->get_rx.orig;
+ if (less_rx-- == np->first_rx.orig)
+ less_rx = np->last_rx.orig;
+ while (np->put_rx.orig != less_rx) {
+ struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
+ if (skb) {
skb->dev = dev;
- np->rx_skbuff[nr] = skb;
+ np->put_rx_ctx->skb = skb;
+ np->put_rx_ctx->dma = pci_map_single(np->pci_dev, skb->data,
+ skb->end-skb->data, PCI_DMA_FROMDEVICE);
+ np->put_rx_ctx->dma_len = skb->end-skb->data;
+ np->put_rx.orig->buf = cpu_to_le32(np->put_rx_ctx->dma);
+ wmb();
+ np->put_rx.orig->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
+ if (unlikely(np->put_rx.orig++ == np->last_rx.orig))
+ np->put_rx.orig = np->first_rx.orig;
+ if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
+ np->put_rx_ctx = np->first_rx_ctx;
} else {
- skb = np->rx_skbuff[nr];
+ return 1;
}
- np->rx_dma[nr] = pci_map_single(np->pci_dev, skb->data,
- skb->end-skb->data, PCI_DMA_FROMDEVICE);
- if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
- np->rx_ring.orig[nr].buf = cpu_to_le32(np->rx_dma[nr]);
+ }
+ return 0;
+}
+
+static int nv_alloc_rx_optimized(struct net_device *dev)
+{
+ struct fe_priv *np = netdev_priv(dev);
+ struct ring_desc_ex* less_rx;
+
+ less_rx = np->get_rx.ex;
+ if (less_rx-- == np->first_rx.ex)
+ less_rx = np->last_rx.ex;
+
+ while (np->put_rx.ex != less_rx) {
+ struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
+ if (skb) {
+ skb->dev = dev;
+ np->put_rx_ctx->skb = skb;
+ np->put_rx_ctx->dma = pci_map_single(np->pci_dev, skb->data,
+ skb->end-skb->data, PCI_DMA_FROMDEVICE);
+ np->put_rx_ctx->dma_len = skb->end-skb->data;
+ np->put_rx.ex->bufhigh = cpu_to_le64(np->put_rx_ctx->dma) >> 32;
+ np->put_rx.ex->buflow = cpu_to_le64(np->put_rx_ctx->dma) & 0x0FFFFFFFF;
wmb();
- np->rx_ring.orig[nr].flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
+ np->put_rx.ex->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
+ if (unlikely(np->put_rx.ex++ == np->last_rx.ex))
+ np->put_rx.ex = np->first_rx.ex;
+ if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
+ np->put_rx_ctx = np->first_rx_ctx;
} else {
- np->rx_ring.ex[nr].bufhigh = cpu_to_le64(np->rx_dma[nr]) >> 32;
- np->rx_ring.ex[nr].buflow = cpu_to_le64(np->rx_dma[nr]) & 0x0FFFFFFFF;
- wmb();
- np->rx_ring.ex[nr].flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
+ return 1;
}
- dprintk(KERN_DEBUG "%s: nv_alloc_rx: Packet %d marked as Available\n",
- dev->name, refill_rx);
- refill_rx++;
}
- np->refill_rx = refill_rx;
- if (np->cur_rx - refill_rx == np->rx_ring_size)
- return 1;
return 0;
}
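
Both allocators use the wrap idiom that recurs throughout this patch: post-increment the ring pointer and, if it just left the last descriptor, snap it back to the first. The less_rx target (one slot behind get_rx) also keeps the ring from filling completely, so put == get can only mean empty. A standalone sketch of the wrap step with a toy descriptor type:

#define RING_SIZE 16

struct desc_sketch { unsigned int flaglen; };

static struct desc_sketch ring[RING_SIZE];
static struct desc_sketch *first = &ring[0];
static struct desc_sketch *last  = &ring[RING_SIZE - 1];

/* Mirrors: if (unlikely(np->put_rx.orig++ == np->last_rx.orig))
 *              np->put_rx.orig = np->first_rx.orig; */
static struct desc_sketch *ring_next(struct desc_sketch *p)
{
	if (p++ == last)
		p = first;
	return p;
}
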
@@ -1358,6 +1450,7 @@ static void nv_do_rx_refill(unsigned long data)
{
struct net_device *dev = (struct net_device *) data;
struct fe_priv *np = netdev_priv(dev);
+ int retcode;
if (!using_multi_irqs(dev)) {
if (np->msi_flags & NV_MSI_X_ENABLED)
@@ -1367,7 +1460,11 @@ static void nv_do_rx_refill(unsigned long data)
} else {
disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
}
- if (nv_alloc_rx(dev)) {
+ if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+ retcode = nv_alloc_rx(dev);
+ else
+ retcode = nv_alloc_rx_optimized(dev);
+ if (retcode) {
spin_lock_irq(&np->lock);
if (!np->in_shutdown)
mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
@@ -1388,56 +1485,81 @@ static void nv_init_rx(struct net_device *dev)
{
struct fe_priv *np = netdev_priv(dev);
int i;
+ np->get_rx = np->put_rx = np->first_rx = np->rx_ring;
+ if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+ np->last_rx.orig = &np->rx_ring.orig[np->rx_ring_size-1];
+ else
+ np->last_rx.ex = &np->rx_ring.ex[np->rx_ring_size-1];
+ np->get_rx_ctx = np->put_rx_ctx = np->first_rx_ctx = np->rx_skb;
+ np->last_rx_ctx = &np->rx_skb[np->rx_ring_size-1];
- np->cur_rx = np->rx_ring_size;
- np->refill_rx = 0;
- for (i = 0; i < np->rx_ring_size; i++)
- if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+ for (i = 0; i < np->rx_ring_size; i++) {
+ if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
np->rx_ring.orig[i].flaglen = 0;
- else
+ np->rx_ring.orig[i].buf = 0;
+ } else {
np->rx_ring.ex[i].flaglen = 0;
+ np->rx_ring.ex[i].txvlan = 0;
+ np->rx_ring.ex[i].bufhigh = 0;
+ np->rx_ring.ex[i].buflow = 0;
+ }
+ np->rx_skb[i].skb = NULL;
+ np->rx_skb[i].dma = 0;
+ }
}
static void nv_init_tx(struct net_device *dev)
{
struct fe_priv *np = netdev_priv(dev);
int i;
+ np->get_tx = np->put_tx = np->first_tx = np->tx_ring;
+ if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+ np->last_tx.orig = &np->tx_ring.orig[np->tx_ring_size-1];
+ else
+ np->last_tx.ex = &np->tx_ring.ex[np->tx_ring_size-1];
+ np->get_tx_ctx = np->put_tx_ctx = np->first_tx_ctx = np->tx_skb;
+ np->last_tx_ctx = &np->tx_skb[np->tx_ring_size-1];
- np->next_tx = np->nic_tx = 0;
for (i = 0; i < np->tx_ring_size; i++) {
- if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+ if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
np->tx_ring.orig[i].flaglen = 0;
- else
+ np->tx_ring.orig[i].buf = 0;
+ } else {
np->tx_ring.ex[i].flaglen = 0;
- np->tx_skbuff[i] = NULL;
- np->tx_dma[i] = 0;
+ np->tx_ring.ex[i].txvlan = 0;
+ np->tx_ring.ex[i].bufhigh = 0;
+ np->tx_ring.ex[i].buflow = 0;
+ }
+ np->tx_skb[i].skb = NULL;
+ np->tx_skb[i].dma = 0;
}
}
static int nv_init_ring(struct net_device *dev)
{
+ struct fe_priv *np = netdev_priv(dev);
+
nv_init_tx(dev);
nv_init_rx(dev);
- return nv_alloc_rx(dev);
+ if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+ return nv_alloc_rx(dev);
+ else
+ return nv_alloc_rx_optimized(dev);
}
-static int nv_release_txskb(struct net_device *dev, unsigned int skbnr)
+static int nv_release_txskb(struct net_device *dev, struct nv_skb_map* tx_skb)
{
struct fe_priv *np = netdev_priv(dev);
- dprintk(KERN_INFO "%s: nv_release_txskb for skbnr %d\n",
- dev->name, skbnr);
-
- if (np->tx_dma[skbnr]) {
- pci_unmap_page(np->pci_dev, np->tx_dma[skbnr],
- np->tx_dma_len[skbnr],
+ if (tx_skb->dma) {
+ pci_unmap_page(np->pci_dev, tx_skb->dma,
+ tx_skb->dma_len,
PCI_DMA_TODEVICE);
- np->tx_dma[skbnr] = 0;
+ tx_skb->dma = 0;
}
-
- if (np->tx_skbuff[skbnr]) {
- dev_kfree_skb_any(np->tx_skbuff[skbnr]);
- np->tx_skbuff[skbnr] = NULL;
+ if (tx_skb->skb) {
+ dev_kfree_skb_any(tx_skb->skb);
+ tx_skb->skb = NULL;
return 1;
} else {
return 0;
@@ -1450,11 +1572,16 @@ static void nv_drain_tx(struct net_device *dev)
unsigned int i;
for (i = 0; i < np->tx_ring_size; i++) {
- if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+ if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
np->tx_ring.orig[i].flaglen = 0;
- else
+ np->tx_ring.orig[i].buf = 0;
+ } else {
np->tx_ring.ex[i].flaglen = 0;
- if (nv_release_txskb(dev, i))
+ np->tx_ring.ex[i].txvlan = 0;
+ np->tx_ring.ex[i].bufhigh = 0;
+ np->tx_ring.ex[i].buflow = 0;
+ }
+ if (nv_release_txskb(dev, &np->tx_skb[i]))
np->stats.tx_dropped++;
}
}
@@ -1463,18 +1590,24 @@ static void nv_drain_rx(struct net_device *dev)
{
struct fe_priv *np = netdev_priv(dev);
int i;
+
for (i = 0; i < np->rx_ring_size; i++) {
- if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+ if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
np->rx_ring.orig[i].flaglen = 0;
- else
+ np->rx_ring.orig[i].buf = 0;
+ } else {
np->rx_ring.ex[i].flaglen = 0;
+ np->rx_ring.ex[i].txvlan = 0;
+ np->rx_ring.ex[i].bufhigh = 0;
+ np->rx_ring.ex[i].buflow = 0;
+ }
wmb();
- if (np->rx_skbuff[i]) {
- pci_unmap_single(np->pci_dev, np->rx_dma[i],
- np->rx_skbuff[i]->end-np->rx_skbuff[i]->data,
+ if (np->rx_skb[i].skb) {
+ pci_unmap_single(np->pci_dev, np->rx_skb[i].dma,
+ np->rx_skb[i].skb->end-np->rx_skb[i].skb->data,
PCI_DMA_FROMDEVICE);
- dev_kfree_skb(np->rx_skbuff[i]);
- np->rx_skbuff[i] = NULL;
+ dev_kfree_skb(np->rx_skb[i].skb);
+ np->rx_skb[i].skb = NULL;
}
}
}
@@ -1485,6 +1618,11 @@ static void drain_ring(struct net_device *dev)
nv_drain_rx(dev);
}
+static inline u32 nv_get_empty_tx_slots(struct fe_priv *np)
+{
+ return (u32)(np->tx_ring_size - ((np->tx_ring_size + (np->put_tx_ctx - np->get_tx_ctx)) % np->tx_ring_size));
+}
+
/*
* nv_start_xmit: dev->hard_start_xmit function
* Called with netif_tx_lock held.
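
nv_get_empty_tx_slots() turns the distance between the two context pointers into a free-slot count; adding tx_ring_size before the modulo keeps the intermediate value non-negative after put_tx_ctx wraps behind get_tx_ctx. A worked check with assumed slot positions:

#include <assert.h>

int main(void)
{
	int tx_ring_size = 256;
	int put = 10, get = 250;   /* put has wrapped past the ring end */
	int diff = put - get;      /* -240, like put_tx_ctx - get_tx_ctx */
	int empty = tx_ring_size - ((tx_ring_size + diff) % tx_ring_size);

	/* slots 250..255 and 0..9 are in flight: 16 used, 240 empty */
	assert(empty == 240);
	return 0;
}
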
@@ -1495,14 +1633,16 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
u32 tx_flags = 0;
u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
unsigned int fragments = skb_shinfo(skb)->nr_frags;
- unsigned int nr = (np->next_tx - 1) % np->tx_ring_size;
- unsigned int start_nr = np->next_tx % np->tx_ring_size;
unsigned int i;
u32 offset = 0;
u32 bcnt;
u32 size = skb->len-skb->data_len;
u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
- u32 tx_flags_vlan = 0;
+ u32 empty_slots;
+ struct ring_desc* put_tx;
+ struct ring_desc* start_tx;
+ struct ring_desc* prev_tx;
+ struct nv_skb_map* prev_tx_ctx;
/* add fragments to entries count */
for (i = 0; i < fragments; i++) {
@@ -1510,34 +1650,35 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
}
- spin_lock_irq(&np->lock);
-
- if ((np->next_tx - np->nic_tx + entries - 1) > np->tx_limit_stop) {
- spin_unlock_irq(&np->lock);
+ empty_slots = nv_get_empty_tx_slots(np);
+ if (unlikely(empty_slots <= entries)) {
+ spin_lock_irq(&np->lock);
netif_stop_queue(dev);
+ np->tx_stop = 1;
+ spin_unlock_irq(&np->lock);
return NETDEV_TX_BUSY;
}
+ start_tx = put_tx = np->put_tx.orig;
+
/* setup the header buffer */
do {
+ prev_tx = put_tx;
+ prev_tx_ctx = np->put_tx_ctx;
bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
- nr = (nr + 1) % np->tx_ring_size;
-
- np->tx_dma[nr] = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
+ np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
PCI_DMA_TODEVICE);
- np->tx_dma_len[nr] = bcnt;
+ np->put_tx_ctx->dma_len = bcnt;
+ put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
+ put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
- if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
- np->tx_ring.orig[nr].buf = cpu_to_le32(np->tx_dma[nr]);
- np->tx_ring.orig[nr].flaglen = cpu_to_le32((bcnt-1) | tx_flags);
- } else {
- np->tx_ring.ex[nr].bufhigh = cpu_to_le64(np->tx_dma[nr]) >> 32;
- np->tx_ring.ex[nr].buflow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF;
- np->tx_ring.ex[nr].flaglen = cpu_to_le32((bcnt-1) | tx_flags);
- }
tx_flags = np->tx_flags;
offset += bcnt;
size -= bcnt;
+ if (unlikely(put_tx++ == np->last_tx.orig))
+ put_tx = np->first_tx.orig;
+ if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
+ np->put_tx_ctx = np->first_tx_ctx;
} while (size);
/* setup the fragments */
@@ -1547,58 +1688,174 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
offset = 0;
do {
+ prev_tx = put_tx;
+ prev_tx_ctx = np->put_tx_ctx;
bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
- nr = (nr + 1) % np->tx_ring_size;
-
- np->tx_dma[nr] = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt,
- PCI_DMA_TODEVICE);
- np->tx_dma_len[nr] = bcnt;
+ np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt,
+ PCI_DMA_TODEVICE);
+ np->put_tx_ctx->dma_len = bcnt;
+ put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
+ put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
- if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
- np->tx_ring.orig[nr].buf = cpu_to_le32(np->tx_dma[nr]);
- np->tx_ring.orig[nr].flaglen = cpu_to_le32((bcnt-1) | tx_flags);
- } else {
- np->tx_ring.ex[nr].bufhigh = cpu_to_le64(np->tx_dma[nr]) >> 32;
- np->tx_ring.ex[nr].buflow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF;
- np->tx_ring.ex[nr].flaglen = cpu_to_le32((bcnt-1) | tx_flags);
- }
offset += bcnt;
size -= bcnt;
+ if (unlikely(put_tx++ == np->last_tx.orig))
+ put_tx = np->first_tx.orig;
+ if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
+ np->put_tx_ctx = np->first_tx_ctx;
} while (size);
}
/* set last fragment flag */
- if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
- np->tx_ring.orig[nr].flaglen |= cpu_to_le32(tx_flags_extra);
- } else {
- np->tx_ring.ex[nr].flaglen |= cpu_to_le32(tx_flags_extra);
+ prev_tx->flaglen |= cpu_to_le32(tx_flags_extra);
+
+ /* save skb in this slot's context area */
+ prev_tx_ctx->skb = skb;
+
+ if (skb_is_gso(skb))
+ tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
+ else
+ tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ?
+ NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0;
+
+ spin_lock_irq(&np->lock);
+
+ /* set tx flags */
+ start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
+ np->put_tx.orig = put_tx;
+
+ spin_unlock_irq(&np->lock);
+
+ dprintk(KERN_DEBUG "%s: nv_start_xmit: entries %d queued for transmission. tx_flags_extra: %x\n",
+ dev->name, entries, tx_flags_extra);
+ {
+ int j;
+ for (j=0; j<64; j++) {
+ if ((j%16) == 0)
+ dprintk("\n%03x:", j);
+ dprintk(" %02x", ((unsigned char*)skb->data)[j]);
+ }
+ dprintk("\n");
+ }
+
+ dev->trans_start = jiffies;
+ writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
+ return NETDEV_TX_OK;
+}
+
+static int nv_start_xmit_optimized(struct sk_buff *skb, struct net_device *dev)
+{
+ struct fe_priv *np = netdev_priv(dev);
+ u32 tx_flags = 0;
+ u32 tx_flags_extra;
+ unsigned int fragments = skb_shinfo(skb)->nr_frags;
+ unsigned int i;
+ u32 offset = 0;
+ u32 bcnt;
+ u32 size = skb->len-skb->data_len;
+ u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
+ u32 empty_slots;
+ struct ring_desc_ex* put_tx;
+ struct ring_desc_ex* start_tx;
+ struct ring_desc_ex* prev_tx;
+ struct nv_skb_map* prev_tx_ctx;
+
+ /* add fragments to entries count */
+ for (i = 0; i < fragments; i++) {
+ entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) +
+ ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
+ }
+
+ empty_slots = nv_get_empty_tx_slots(np);
+ if (unlikely(empty_slots <= entries)) {
+ spin_lock_irq(&np->lock);
+ netif_stop_queue(dev);
+ np->tx_stop = 1;
+ spin_unlock_irq(&np->lock);
+ return NETDEV_TX_BUSY;
+ }
+
+ start_tx = put_tx = np->put_tx.ex;
+
+ /* setup the header buffer */
+ do {
+ prev_tx = put_tx;
+ prev_tx_ctx = np->put_tx_ctx;
+ bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
+ np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
+ PCI_DMA_TODEVICE);
+ np->put_tx_ctx->dma_len = bcnt;
+ put_tx->bufhigh = cpu_to_le64(np->put_tx_ctx->dma) >> 32;
+ put_tx->buflow = cpu_to_le64(np->put_tx_ctx->dma) & 0x0FFFFFFFF;
+ put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
+
+ tx_flags = NV_TX2_VALID;
+ offset += bcnt;
+ size -= bcnt;
+ if (unlikely(put_tx++ == np->last_tx.ex))
+ put_tx = np->first_tx.ex;
+ if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
+ np->put_tx_ctx = np->first_tx_ctx;
+ } while (size);
+
+ /* setup the fragments */
+ for (i = 0; i < fragments; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ u32 size = frag->size;
+ offset = 0;
+
+ do {
+ prev_tx = put_tx;
+ prev_tx_ctx = np->put_tx_ctx;
+ bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
+ np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt,
+ PCI_DMA_TODEVICE);
+ np->put_tx_ctx->dma_len = bcnt;
+ put_tx->bufhigh = cpu_to_le64(np->put_tx_ctx->dma) >> 32;
+ put_tx->buflow = cpu_to_le64(np->put_tx_ctx->dma) & 0x0FFFFFFFF;
+ put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
+
+ offset += bcnt;
+ size -= bcnt;
+ if (unlikely(put_tx++ == np->last_tx.ex))
+ put_tx = np->first_tx.ex;
+ if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
+ np->put_tx_ctx = np->first_tx_ctx;
+ } while (size);
}
- np->tx_skbuff[nr] = skb;
+ /* set last fragment flag */
+ prev_tx->flaglen |= cpu_to_le32(NV_TX2_LASTPACKET);
+
+ /* save skb in this slot's context area */
+ prev_tx_ctx->skb = skb;
-#ifdef NETIF_F_TSO
if (skb_is_gso(skb))
tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
else
-#endif
- tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ?
+ tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ?
NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0;
/* vlan tag */
- if (np->vlangrp && vlan_tx_tag_present(skb)) {
- tx_flags_vlan = NV_TX3_VLAN_TAG_PRESENT | vlan_tx_tag_get(skb);
+ if (likely(!np->vlangrp)) {
+ start_tx->txvlan = 0;
+ } else {
+ if (vlan_tx_tag_present(skb))
+ start_tx->txvlan = cpu_to_le32(NV_TX3_VLAN_TAG_PRESENT | vlan_tx_tag_get(skb));
+ else
+ start_tx->txvlan = 0;
}
+ spin_lock_irq(&np->lock);
+
/* set tx flags */
- if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
- np->tx_ring.orig[start_nr].flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
- } else {
- np->tx_ring.ex[start_nr].txvlan = cpu_to_le32(tx_flags_vlan);
- np->tx_ring.ex[start_nr].flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
- }
+ start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
+ np->put_tx.ex = put_tx;
+
+ spin_unlock_irq(&np->lock);
- dprintk(KERN_DEBUG "%s: nv_start_xmit: packet %d (entries %d) queued for transmission. tx_flags_extra: %x\n",
- dev->name, np->next_tx, entries, tx_flags_extra);
+ dprintk(KERN_DEBUG "%s: nv_start_xmit_optimized: entries %d queued for transmission. tx_flags_extra: %x\n",
+ dev->name, entries, tx_flags_extra);
{
int j;
for (j=0; j<64; j++) {
@@ -1609,12 +1866,8 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
dprintk("\n");
}
- np->next_tx += entries;
-
dev->trans_start = jiffies;
- spin_unlock_irq(&np->lock);
writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
- pci_push(get_hwbase(dev));
return NETDEV_TX_OK;
}
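
The extended descriptors carry 64-bit DMA addresses as two 32-bit halves, written above via cpu_to_le64(...) >> 32 and & 0x0FFFFFFFF. The split itself, minus the endian conversion, looks like this (a sketch, not driver code):

#include <stdint.h>

/* Split a 64-bit DMA address into the bufhigh/buflow halves that a
 * DESC_VER_3 descriptor expects (endian conversion omitted). */
static void split_dma_sketch(uint64_t dma, uint32_t *high, uint32_t *low)
{
	*high = (uint32_t)(dma >> 32);           /* upper 32 bits */
	*low  = (uint32_t)(dma & 0xffffffffULL); /* lower 32 bits */
}
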
@@ -1627,26 +1880,22 @@ static void nv_tx_done(struct net_device *dev)
{
struct fe_priv *np = netdev_priv(dev);
u32 flags;
- unsigned int i;
- struct sk_buff *skb;
+ struct ring_desc* orig_get_tx = np->get_tx.orig;
- while (np->nic_tx != np->next_tx) {
- i = np->nic_tx % np->tx_ring_size;
+ while ((np->get_tx.orig != np->put_tx.orig) &&
+ !((flags = le32_to_cpu(np->get_tx.orig->flaglen)) & NV_TX_VALID)) {
- if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
- flags = le32_to_cpu(np->tx_ring.orig[i].flaglen);
- else
- flags = le32_to_cpu(np->tx_ring.ex[i].flaglen);
+ dprintk(KERN_DEBUG "%s: nv_tx_done: flags 0x%x.\n",
+ dev->name, flags);
+
+ pci_unmap_page(np->pci_dev, np->get_tx_ctx->dma,
+ np->get_tx_ctx->dma_len,
+ PCI_DMA_TODEVICE);
+ np->get_tx_ctx->dma = 0;
- dprintk(KERN_DEBUG "%s: nv_tx_done: looking at packet %d, flags 0x%x.\n",
- dev->name, np->nic_tx, flags);
- if (flags & NV_TX_VALID)
- break;
if (np->desc_ver == DESC_VER_1) {
if (flags & NV_TX_LASTPACKET) {
- skb = np->tx_skbuff[i];
- if (flags & (NV_TX_RETRYERROR|NV_TX_CARRIERLOST|NV_TX_LATECOLLISION|
- NV_TX_UNDERFLOW|NV_TX_ERROR)) {
+ if (flags & NV_TX_ERROR) {
if (flags & NV_TX_UNDERFLOW)
np->stats.tx_fifo_errors++;
if (flags & NV_TX_CARRIERLOST)
@@ -1654,14 +1903,14 @@ static void nv_tx_done(struct net_device *dev)
np->stats.tx_errors++;
} else {
np->stats.tx_packets++;
- np->stats.tx_bytes += skb->len;
+ np->stats.tx_bytes += np->get_tx_ctx->skb->len;
}
+ dev_kfree_skb_any(np->get_tx_ctx->skb);
+ np->get_tx_ctx->skb = NULL;
}
} else {
if (flags & NV_TX2_LASTPACKET) {
- skb = np->tx_skbuff[i];
- if (flags & (NV_TX2_RETRYERROR|NV_TX2_CARRIERLOST|NV_TX2_LATECOLLISION|
- NV_TX2_UNDERFLOW|NV_TX2_ERROR)) {
+ if (flags & NV_TX2_ERROR) {
if (flags & NV_TX2_UNDERFLOW)
np->stats.tx_fifo_errors++;
if (flags & NV_TX2_CARRIERLOST)
@@ -1669,15 +1918,56 @@ static void nv_tx_done(struct net_device *dev)
np->stats.tx_errors++;
} else {
np->stats.tx_packets++;
- np->stats.tx_bytes += skb->len;
+ np->stats.tx_bytes += np->get_tx_ctx->skb->len;
}
+ dev_kfree_skb_any(np->get_tx_ctx->skb);
+ np->get_tx_ctx->skb = NULL;
}
}
- nv_release_txskb(dev, i);
- np->nic_tx++;
+ if (unlikely(np->get_tx.orig++ == np->last_tx.orig))
+ np->get_tx.orig = np->first_tx.orig;
+ if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
+ np->get_tx_ctx = np->first_tx_ctx;
}
- if (np->next_tx - np->nic_tx < np->tx_limit_start)
+ if (unlikely((np->tx_stop == 1) && (np->get_tx.orig != orig_get_tx))) {
+ np->tx_stop = 0;
netif_wake_queue(dev);
+ }
+}
+
+static void nv_tx_done_optimized(struct net_device *dev, int limit)
+{
+ struct fe_priv *np = netdev_priv(dev);
+ u32 flags;
+ struct ring_desc_ex* orig_get_tx = np->get_tx.ex;
+
+ while ((np->get_tx.ex != np->put_tx.ex) &&
+ !((flags = le32_to_cpu(np->get_tx.ex->flaglen)) & NV_TX_VALID) &&
+ (limit-- > 0)) {
+
+ dprintk(KERN_DEBUG "%s: nv_tx_done_optimized: flags 0x%x.\n",
+ dev->name, flags);
+
+ pci_unmap_page(np->pci_dev, np->get_tx_ctx->dma,
+ np->get_tx_ctx->dma_len,
+ PCI_DMA_TODEVICE);
+ np->get_tx_ctx->dma = 0;
+
+ if (flags & NV_TX2_LASTPACKET) {
+ if (!(flags & NV_TX2_ERROR))
+ np->stats.tx_packets++;
+ dev_kfree_skb_any(np->get_tx_ctx->skb);
+ np->get_tx_ctx->skb = NULL;
+ }
+ if (unlikely(np->get_tx.ex++ == np->last_tx.ex))
+ np->get_tx.ex = np->first_tx.ex;
+ if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
+ np->get_tx_ctx = np->first_tx_ctx;
+ }
+ if (unlikely((np->tx_stop == 1) && (np->get_tx.ex != orig_get_tx))) {
+ np->tx_stop = 0;
+ netif_wake_queue(dev);
+ }
}
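
nv_tx_done_optimized() bounds its work with the limit argument so interrupt-context completion cannot run unbounded; leftovers are swept up by the periodic poll (the "backup tx cleanup" noted at NVREG_POLL_DEFAULT_THROUGHPUT above), and the timeout path passes tx_ring_size to disable the cap. The generic shape of such a bounded reap loop, with hypothetical helpers:

struct slot;                                    /* opaque ring slot (sketch) */
extern int  slot_is_done(const struct slot *);  /* hypothetical helpers */
extern void slot_release(struct slot *);
extern struct slot *slot_next(struct slot *);

/* Process at most 'limit' completed slots; return how many were done
 * so the caller can reschedule if work remains. */
static int reap_sketch(struct slot *get, struct slot *put, int limit)
{
	int done = 0;

	while (get != put && slot_is_done(get) && limit-- > 0) {
		slot_release(get);
		get = slot_next(get);
		done++;
	}
	return done;
}
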
/*
@@ -1700,9 +1990,8 @@ static void nv_tx_timeout(struct net_device *dev)
{
int i;
- printk(KERN_INFO "%s: Ring at %lx: next %d nic %d\n",
- dev->name, (unsigned long)np->ring_addr,
- np->next_tx, np->nic_tx);
+ printk(KERN_INFO "%s: Ring at %lx\n",
+ dev->name, (unsigned long)np->ring_addr);
printk(KERN_INFO "%s: Dumping tx registers\n", dev->name);
for (i=0;i<=np->register_size;i+= 32) {
printk(KERN_INFO "%3x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
@@ -1750,13 +2039,16 @@ static void nv_tx_timeout(struct net_device *dev)
nv_stop_tx(dev);
/* 2) check that the packets were not sent already: */
- nv_tx_done(dev);
+ if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+ nv_tx_done(dev);
+ else
+ nv_tx_done_optimized(dev, np->tx_ring_size);
/* 3) if there are dead entries: clear everything */
- if (np->next_tx != np->nic_tx) {
+ if (np->get_tx_ctx != np->put_tx_ctx) {
printk(KERN_DEBUG "%s: tx_timeout: dead entries!\n", dev->name);
nv_drain_tx(dev);
- np->next_tx = np->nic_tx = 0;
+ nv_init_tx(dev);
setup_hw_rings(dev, NV_SETUP_TX_RING);
netif_wake_queue(dev);
}
@@ -1823,40 +2115,27 @@ static int nv_rx_process(struct net_device *dev, int limit)
{
struct fe_priv *np = netdev_priv(dev);
u32 flags;
- u32 vlanflags = 0;
- int count;
-
- for (count = 0; count < limit; ++count) {
- struct sk_buff *skb;
- int len;
- int i;
- if (np->cur_rx - np->refill_rx >= np->rx_ring_size)
- break; /* we scanned the whole ring - do not continue */
-
- i = np->cur_rx % np->rx_ring_size;
- if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
- flags = le32_to_cpu(np->rx_ring.orig[i].flaglen);
- len = nv_descr_getlength(&np->rx_ring.orig[i], np->desc_ver);
- } else {
- flags = le32_to_cpu(np->rx_ring.ex[i].flaglen);
- len = nv_descr_getlength_ex(&np->rx_ring.ex[i], np->desc_ver);
- vlanflags = le32_to_cpu(np->rx_ring.ex[i].buflow);