author     Linus Torvalds <torvalds@g5.osdl.org>    2005-08-29 10:04:37 -0700
committer  Linus Torvalds <torvalds@g5.osdl.org>    2005-08-29 10:04:37 -0700
commit     3d963f5bb1949af53a37acf36d3b12e97ca9b1e5 (patch)
tree       9449490978cdb7858a7c713ee88f15ffc26a6d71 /drivers
parent     5be1d85c208f135fc88f972f91b91a879b702b40 (diff)
parent     e13934563db047043ccead26412f552375cea90c (diff)
Merge refs/heads/upstream from master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6
Diffstat (limited to 'drivers')
55 files changed, 7426 insertions, 3718 deletions
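Among the bonding changes below, the new bond_compute_features() intersects the checksum and scatter-gather offload bits of every slave into the bond device and drops NETIF_F_SG when no checksum offload survives the intersection. As a quick illustration of that logic only — a standalone sketch with simplified flag values and a hypothetical compute_features() helper, not the kernel code itself:

```c
#include <stdio.h>

/* Simplified stand-ins for the NETIF_F_* bits the patch intersects. */
#define F_SG       0x1
#define F_IP_CSUM  0x2
#define F_NO_CSUM  0x4
#define F_HW_CSUM  0x8
#define INTERSECT_FEATURES (F_SG | F_IP_CSUM | F_NO_CSUM | F_HW_CSUM)

/* Start from the bond's own features, keep only the offload bits that
 * every slave supports, then drop SG if no checksum method survived. */
static int compute_features(int bond_features, const int *slave_features, int nslaves)
{
	int features = bond_features;
	int i;

	for (i = 0; i < nslaves; i++) {
		if (i == 0)
			features |= INTERSECT_FEATURES;
		/* clear any intersect bit this slave does not have */
		features &= ~(~slave_features[i] & INTERSECT_FEATURES);
	}

	if ((features & F_SG) &&
	    !(features & (F_IP_CSUM | F_NO_CSUM | F_HW_CSUM)))
		features &= ~F_SG;

	return features;
}

int main(void)
{
	/* One slave with SG+HW_CSUM, one with SG only: SG is dropped
	 * because the checksum intersection ends up empty. */
	int slaves[] = { F_SG | F_HW_CSUM, F_SG };
	printf("0x%x\n", compute_features(0, slaves, 2)); /* prints 0x0 */
	return 0;
}
```

The same shape appears in the patch: the bond starts from its own feature set, masks away any BOND_INTERSECT_FEATURES bit that some slave lacks, and finally clears SG if that would leave it without a checksum method.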
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 8edb6936fb9..79e8aa6f2b9 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -131,6 +131,8 @@ config NET_SB1000
 source "drivers/net/arcnet/Kconfig"
 
+source "drivers/net/phy/Kconfig"
+
 #
 # Ethernet
 #
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 63c6d1e6d4d..a369ae284a9 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -65,6 +65,7 @@ obj-$(CONFIG_ADAPTEC_STARFIRE) += starfire.o
 #
 obj-$(CONFIG_MII) += mii.o
+obj-$(CONFIG_PHYLIB) += phy/
 obj-$(CONFIG_SUNDANCE) += sundance.o
 obj-$(CONFIG_HAMACHI) += hamachi.o
diff --git a/drivers/net/Space.c b/drivers/net/Space.c
index 3707df6b0cf..60304f7e7e5 100644
--- a/drivers/net/Space.c
+++ b/drivers/net/Space.c
@@ -87,7 +87,6 @@ extern struct net_device *mvme147lance_probe(int unit);
 extern struct net_device *tc515_probe(int unit);
 extern struct net_device *lance_probe(int unit);
 extern struct net_device *mace_probe(int unit);
-extern struct net_device *macsonic_probe(int unit);
 extern struct net_device *mac8390_probe(int unit);
 extern struct net_device *mac89x0_probe(int unit);
 extern struct net_device *mc32_probe(int unit);
@@ -284,9 +283,6 @@ static struct devprobe2 m68k_probes[] __initdata = {
 #ifdef CONFIG_MACMACE    /* Mac 68k Quadra AV builtin Ethernet */
     {mace_probe, 0},
 #endif
-#ifdef CONFIG_MACSONIC    /* Mac SONIC-based Ethernet of all sorts */
-    {macsonic_probe, 0},
-#endif
 #ifdef CONFIG_MAC8390    /* NuBus NS8390-based cards */
     {mac8390_probe, 0},
 #endif
@@ -318,17 +314,9 @@ static void __init ethif_probe2(int unit)
 #ifdef CONFIG_TR
 /* Token-ring device probe */
 extern int ibmtr_probe_card(struct net_device *);
-extern struct net_device *sk_isa_probe(int unit);
-extern struct net_device *proteon_probe(int unit);
 extern struct net_device *smctr_probe(int unit);
 
 static struct devprobe2 tr_probes2[] __initdata = {
-#ifdef CONFIG_SKISA
-    {sk_isa_probe, 0},
-#endif
-#ifdef CONFIG_PROTEON
-    {proteon_probe, 0},
-#endif
 #ifdef CONFIG_SMCTR
     {smctr_probe, 0},
 #endif
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 5ce606d9dc0..19e829b567d 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -1106,18 +1106,13 @@ static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slav
         }
     }
 
-    if (found) {
-        /* a slave was found that is using the mac address
-         * of the new slave
-         */
-        printk(KERN_ERR DRV_NAME
-               ": Error: the hw address of slave %s is not "
-               "unique - cannot enslave it!",
-               slave->dev->name);
-        return -EINVAL;
-    }
+    if (!found)
+        return 0;
 
-    return 0;
+    /* Try setting slave mac to bond address and fall-through
+       to code handling that situation below... */
+    alb_set_slave_mac_addr(slave, bond->dev->dev_addr,
+                           bond->alb_info.rlb_enabled);
 }
 
 /* The slave's address is equal to the address of the bond.
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 2c930da90a8..94c9f68dd16 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1604,6 +1604,44 @@ static int bond_sethwaddr(struct net_device *bond_dev, struct net_device *slave_
     return 0;
 }
 
+#define BOND_INTERSECT_FEATURES \
+    (NETIF_F_SG|NETIF_F_IP_CSUM|NETIF_F_NO_CSUM|NETIF_F_HW_CSUM)
+
+/*
+ * Compute the features available to the bonding device by
+ * intersection of all of the slave devices' BOND_INTERSECT_FEATURES.
+ * Call this after attaching or detaching a slave to update the
+ * bond's features.
+ */
+static int bond_compute_features(struct bonding *bond)
+{
+    int i;
+    struct slave *slave;
+    struct net_device *bond_dev = bond->dev;
+    int features = bond->bond_features;
+
+    bond_for_each_slave(bond, slave, i) {
+        struct net_device * slave_dev = slave->dev;
+        if (i == 0) {
+            features |= BOND_INTERSECT_FEATURES;
+        }
+        features &=
+            ~(~slave_dev->features & BOND_INTERSECT_FEATURES);
+    }
+
+    /* turn off NETIF_F_SG if we need a csum and h/w can't do it */
+    if ((features & NETIF_F_SG) &&
+        !(features & (NETIF_F_IP_CSUM |
+                      NETIF_F_NO_CSUM |
+                      NETIF_F_HW_CSUM))) {
+        features &= ~NETIF_F_SG;
+    }
+
+    bond_dev->features = features;
+
+    return 0;
+}
+
 /* enslave device <slave> to bond device <master> */
 static int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 {
@@ -1811,6 +1849,8 @@ static int bond_enslave(struct net_device *bond_dev, struct net_device *slave_de
     new_slave->delay = 0;
     new_slave->link_failure_count = 0;
 
+    bond_compute_features(bond);
+
     if (bond->params.miimon && !bond->params.use_carrier) {
         link_reporting = bond_check_dev_link(bond, slave_dev, 1);
 
@@ -2015,7 +2055,7 @@ err_free:
 
 err_undo_flags:
     bond_dev->features = old_features;
- 
+
     return res;
 }
 
@@ -2100,6 +2140,8 @@ static int bond_release(struct net_device *bond_dev, struct net_device *slave_de
     /* release the slave from its bond */
     bond_detach_slave(bond, slave);
 
+    bond_compute_features(bond);
+
     if (bond->primary_slave == slave) {
         bond->primary_slave = NULL;
     }
@@ -2243,6 +2285,8 @@ static int bond_release_all(struct net_device *bond_dev)
             bond_alb_deinit_slave(bond, slave);
         }
 
+        bond_compute_features(bond);
+
         /* now that the slave is detached, unlock and perform
          * all the undo steps that should not be called from
          * within a lock.
@@ -3588,6 +3632,7 @@ static int bond_master_netdev_event(unsigned long event, struct net_device *bond
 static int bond_slave_netdev_event(unsigned long event, struct net_device *slave_dev)
 {
     struct net_device *bond_dev = slave_dev->master;
+    struct bonding *bond = bond_dev->priv;
 
     switch (event) {
     case NETDEV_UNREGISTER:
@@ -3626,6 +3671,9 @@ static int bond_slave_netdev_event(unsigned long event, struct net_device *slave
          * TODO: handle changing the primary's name
          */
         break;
+    case NETDEV_FEAT_CHANGE:
+        bond_compute_features(bond);
+        break;
     default:
         break;
     }
@@ -4526,6 +4574,11 @@ static inline void bond_set_mode_ops(struct bonding *bond, int mode)
     }
 }
 
+static struct ethtool_ops bond_ethtool_ops = {
+    .get_tx_csum = ethtool_op_get_tx_csum,
+    .get_sg      = ethtool_op_get_sg,
+};
+
 /*
  * Does not allocate but creates a /proc entry.
  * Allowed to fail.
@@ -4555,6 +4608,7 @@ static int __init bond_init(struct net_device *bond_dev, struct bond_params *par
     bond_dev->stop = bond_close;
     bond_dev->get_stats = bond_get_stats;
     bond_dev->do_ioctl = bond_do_ioctl;
+    bond_dev->ethtool_ops = &bond_ethtool_ops;
     bond_dev->set_multicast_list = bond_set_multicast_list;
     bond_dev->change_mtu = bond_change_mtu;
     bond_dev->set_mac_address = bond_set_mac_address;
@@ -4591,6 +4645,8 @@ static int __init bond_init(struct net_device *bond_dev, struct bond_params *par
                    NETIF_F_HW_VLAN_RX |
                    NETIF_F_HW_VLAN_FILTER);
 
+    bond->bond_features = bond_dev->features;
+
 #ifdef CONFIG_PROC_FS
     bond_create_proc_entry(bond);
 #endif
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index d27f377b3ee..38819698086 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -211,6 +211,9 @@ struct bonding {
     struct bond_params params;
     struct list_head vlan_list;
     struct vlan_group *vlgrp;
+    /* the features the bonding device supports, independently
+     * of any slaves */
+    int bond_features;
 };
 
 /**
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index b82fd15d089..9b596e0bbf9 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -2767,7 +2767,7 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter)
         " next_to_use <%x>\n"
         " next_to_clean <%x>\n"
         "buffer_info[next_to_clean]\n"
-        " dma <%zx>\n"
+        " dma <%llx>\n"
         " time_stamp <%lx>\n"
         " next_to_watch <%x>\n"
         " jiffies <%lx>\n"
@@ -2776,7 +2776,7 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter)
         E1000_READ_REG(&adapter->hw, TDT),
         tx_ring->next_to_use,
         i,
-        tx_ring->buffer_info[i].dma,
+        (unsigned long long)tx_ring->buffer_info[i].dma,
         tx_ring->buffer_info[i].time_stamp,
         eop,
         jiffies,
diff --git a/drivers/net/eepro100.c b/drivers/net/eepro100.c
index 1795425f512..8c62ced2c9b 100644
--- a/drivers/net/eepro100.c
+++ b/drivers/net/eepro100.c
@@ -1263,8 +1263,8 @@ speedo_init_rx_ring(struct net_device *dev)
     for (i = 0; i < RX_RING_SIZE; i++) {
         struct sk_buff *skb;
         skb = dev_alloc_skb(PKT_BUF_SZ + sizeof(struct RxFD));
-        /* XXX: do we really want to call this before the NULL check? --hch */
-        rx_align(skb);        /* Align IP on 16 byte boundary */
+        if (skb)
+            rx_align(skb);    /* Align IP on 16 byte boundary */
         sp->rx_skbuff[i] = skb;
         if (skb == NULL)
             break;            /* OK. Just initially short of Rx bufs. */
@@ -1654,8 +1654,8 @@ static inline struct RxFD *speedo_rx_alloc(struct net_device *dev, int entry)
     struct sk_buff *skb;
     /* Get a fresh skbuff to replace the consumed one. */
     skb = dev_alloc_skb(PKT_BUF_SZ + sizeof(struct RxFD));
-    /* XXX: do we really want to call this before the NULL check? --hch */
-    rx_align(skb);            /* Align IP on 16 byte boundary */
+    if (skb)
+        rx_align(skb);        /* Align IP on 16 byte boundary */
     sp->rx_skbuff[entry] = skb;
     if (skb == NULL) {
         sp->rx_ringp[entry] = NULL;
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 64f0f697c95..7d93948aec8 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -85,6 +85,16 @@
  * 0.33: 16 May 2005: Support for MCP51 added.
  * 0.34: 18 Jun 2005: Add DEV_NEED_LINKTIMER to all nForce nics.
  * 0.35: 26 Jun 2005: Support for MCP55 added.
+ * 0.36: 28 Jun 2005: Add jumbo frame support.
+ * 0.37: 10 Jul 2005: Additional ethtool support, cleanup of pci id list
+ * 0.38: 16 Jul 2005: tx irq rewrite: Use global flags instead of
+ *       per-packet flags.
+ * 0.39: 18 Jul 2005: Add 64bit descriptor support.
+ * 0.40: 19 Jul 2005: Add support for mac address change.
+ * 0.41: 30 Jul 2005: Write back original MAC in nv_close instead
+ *       of nv_remove
+ * 0.42: 06 Aug 2005: Fix lack of link speed initialization
+ *       in the second (and later) nv_open call
  *
  * Known bugs:
  * We suspect that on some hardware no TX done interrupts are generated.
@@ -96,7 +106,7 @@
  * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
  * superfluous timer interrupts from the nic.
  */
-#define FORCEDETH_VERSION "0.35"
+#define FORCEDETH_VERSION "0.41"
 #define DRV_NAME "forcedeth"
 
 #include <linux/module.h>
@@ -131,11 +141,10 @@
  * Hardware access:
  */
 
-#define DEV_NEED_LASTPACKET1 0x0001 /* set LASTPACKET1 in tx flags */
-#define DEV_IRQMASK_1        0x0002 /* use NVREG_IRQMASK_WANTED_1 for irq mask */
-#define DEV_IRQMASK_2        0x0004 /* use NVREG_IRQMASK_WANTED_2 for irq mask */
-#define DEV_NEED_TIMERIRQ    0x0008 /* set the timer irq flag in the irq mask */
-#define DEV_NEED_LINKTIMER   0x0010 /* poll link settings. Relies on the timer irq */
+#define DEV_NEED_TIMERIRQ    0x0001 /* set the timer irq flag in the irq mask */
+#define DEV_NEED_LINKTIMER   0x0002 /* poll link settings. Relies on the timer irq */
+#define DEV_HAS_LARGEDESC    0x0004 /* device supports jumbo frames and needs packet format 2 */
+#define DEV_HAS_HIGH_DMA     0x0008 /* device supports 64bit dma */
 
 enum {
     NvRegIrqStatus = 0x000,
@@ -146,13 +155,16 @@ enum {
 #define NVREG_IRQ_RX          0x0002
 #define NVREG_IRQ_RX_NOBUF    0x0004
 #define NVREG_IRQ_TX_ERR      0x0008
-#define NVREG_IRQ_TX2         0x0010
+#define NVREG_IRQ_TX_OK       0x0010
 #define NVREG_IRQ_TIMER       0x0020
 #define NVREG_IRQ_LINK        0x0040
+#define NVREG_IRQ_TX_ERROR    0x0080
 #define NVREG_IRQ_TX1         0x0100
-#define NVREG_IRQMASK_WANTED_1 0x005f
-#define NVREG_IRQMASK_WANTED_2 0x0147
-#define NVREG_IRQ_UNKNOWN (~(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_TX_ERR|NVREG_IRQ_TX2|NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_TX1))
+#define NVREG_IRQMASK_WANTED  0x00df
+
+#define NVREG_IRQ_UNKNOWN (~(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_TX_ERR| \
+                             NVREG_IRQ_TX_OK|NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_TX_ERROR| \
+                             NVREG_IRQ_TX1))
 
     NvRegUnknownSetupReg6 = 0x008,
 #define NVREG_UNKSETUP6_VAL 3
@@ -286,6 +298,18 @@ struct ring_desc {
     u32 FlagLen;
 };
 
+struct ring_desc_ex {
+    u32 PacketBufferHigh;
+    u32 PacketBufferLow;
+    u32 Reserved;
+    u32 FlagLen;
+};
+
+typedef union _ring_type {
+    struct ring_desc* orig;
+    struct ring_desc_ex* ex;
+} ring_type;
+
 #define FLAG_MASK_V1 0xffff0000
 #define FLAG_MASK_V2 0xffffc000
 #define LEN_MASK_V1 (0xffffffff ^ FLAG_MASK_V1)
@@ -293,7 +317,7 @@ struct ring_desc {
 
 #define NV_TX_LASTPACKET      (1<<16)
 #define NV_TX_RETRYERROR      (1<<19)
-#define NV_TX_LASTPACKET1     (1<<24)
+#define NV_TX_FORCED_INTERRUPT (1<<24)
 #define NV_TX_DEFERRED        (1<<26)
 #define NV_TX_CARRIERLOST     (1<<27)
 #define NV_TX_LATECOLLISION   (1<<28)
@@ -303,7 +327,7 @@ struct ring_desc {
 
 #define NV_TX2_LASTPACKET     (1<<29)
 #define NV_TX2_RETRYERROR     (1<<18)
-#define NV_TX2_LASTPACKET1    (1<<23)
+#define NV_TX2_FORCED_INTERRUPT (1<<30)
 #define NV_TX2_DEFERRED       (1<<25)
 #define NV_TX2_CARRIERLOST    (1<<26)
 #define NV_TX2_LATECOLLISION  (1<<27)
@@ -379,9 +403,13 @@ struct ring_desc {
 #define TX_LIMIT_START 62
 
 /* rx/tx mac addr + type + vlan + align + slack*/
-#define RX_NIC_BUFSIZE   (ETH_DATA_LEN + 64)
-/* even more slack */
-#define RX_ALLOC_BUFSIZE (ETH_DATA_LEN + 128)
+#define NV_RX_HEADERS    (64)
+/* even more slack. */
+#define NV_RX_ALLOC_PAD  (64)
+
+/* maximum mtu size */
+#define NV_PKTLIMIT_1 ETH_DATA_LEN /* hard limit not known */
+#define NV_PKTLIMIT_2 9100         /* Actual limit according to NVidia: 9202 */
 
 #define OOM_REFILL (1+HZ/20)
 #define POLL_WAIT  (1+HZ/100)
@@ -396,6 +424,7 @@ struct ring_desc {
  */
 #define DESC_VER_1 0x0
 #define DESC_VER_2 (0x02100|NVREG_TXRXCTL_RXCHECK)
+#define DESC_VER_3 (0x02200|NVREG_TXRXCTL_RXCHECK)
 
 /* PHY defines */
 #define PHY_OUI_MARVELL 0x5043
@@ -468,11 +497,12 @@ struct fe_priv {
     /* rx specific fields.
      * Locking: Within irq hander or disable_irq+spin_lock(&np->lock);
      */
-    struct ring_desc *rx_ring;
+    ring_type rx_ring;
     unsigned int cur_rx, refill_rx;
     struct sk_buff *rx_skbuff[RX_RING];
     dma_addr_t rx_dma[RX_RING];
     unsigned int rx_buf_sz;
+    unsigned int pkt_limit;
     struct timer_list oom_kick;
     struct timer_list nic_poll;
 
@@ -484,7 +514,7 @@ struct fe_priv {
     /*
      * tx specific fields.
      */
-    struct ring_desc *tx_ring;
+    ring_type tx_ring;
     unsigned int next_tx, nic_tx;
     struct sk_buff *tx_skbuff[TX_RING];
     dma_addr_t tx_dma[TX_RING];
@@ -519,6 +549,11 @@ static inline u32 nv_descr_getlength(struct ring_desc *prd, u32 v)
         & ((v == DESC_VER_1) ? LEN_MASK_V1 : LEN_MASK_V2);
 }
 
+static inline u32 nv_descr_getlength_ex(struct ring_desc_ex *prd, u32 v)
+{
+    return le32_to_cpu(prd->FlagLen) & LEN_MASK_V2;
+}
+
 static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target,
                      int delay, int delaymax, const char *msg)
 {
@@ -792,7 +827,7 @@ static int nv_alloc_rx(struct net_device *dev)
         nr = refill_rx % RX_RING;
         if (np->rx_skbuff[nr] == NULL) {
 
-            skb = dev_alloc_skb(RX_ALLOC_BUFSIZE);
+            skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
             if (!skb)
                 break;
 
@@ -803,9 +838,16 @@ static int nv_alloc_rx(struct net_device *dev)
         }
         np->rx_dma[nr] = pci_map_single(np->pci_dev, skb->data, skb->len,
                                         PCI_DMA_FROMDEVICE);
-        np->rx_ring[nr].PacketBuffer = cpu_to_le32(np->rx_dma[nr]);
-        wmb();
-        np->rx_ring[nr].FlagLen = cpu_to_le32(RX_NIC_BUFSIZE | NV_RX_AVAIL);
+        if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
+            np->rx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->rx_dma[nr]);
+            wmb();
+            np->rx_ring.orig[nr].FlagLen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
+        } else {
+            np->rx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->rx_dma[nr]) >> 32;
+            np->rx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->rx_dma[nr]) & 0x0FFFFFFFF;
+            wmb();
+            np->rx_ring.ex[nr].FlagLen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
+        }
         dprintk(KERN_DEBUG "%s: nv_alloc_rx: Packet %d marked as Available\n",
                 dev->name, refill_rx);
         refill_rx++;
@@ -831,19 +873,37 @@ static void nv_do_rx_refill(unsigned long data)
     enable_irq(dev->irq);
 }
 
-static int nv_init_ring(struct net_device *dev)
+static void nv_init_rx(struct net_device *dev)
 {
     struct fe_priv *np = get_nvpriv(dev);
     int i;
 
-    np->next_tx = np->nic_tx = 0;
-    for (i = 0; i < TX_RING; i++)
-        np->tx_ring[i].FlagLen = 0;
-
     np->cur_rx = RX_RING;
     np->refill_rx = 0;
     for (i = 0; i < RX_RING; i++)
-        np->rx_ring[i].FlagLen = 0;
+        if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+            np->rx_ring.orig[i].FlagLen = 0;
+        else
+            np->rx_ring.ex[i].FlagLen = 0;
+}
+
+static void nv_init_tx(struct net_device *dev)
+{
+    struct fe_priv *np = get_nvpriv(dev);
+    int i;
+
+    np->next_tx = np->nic_tx = 0;
+    for (i = 0; i < TX_RING; i++)
+        if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+            np->tx_ring.orig[i].FlagLen = 0;
+        else
+            np->tx_ring.ex[i].FlagLen = 0;
+}
+
+static int nv_init_ring(struct net_device *dev)
+{
+    nv_init_tx(dev);
+    nv_init_rx(dev);
     return nv_alloc_rx(dev);
 }
 
@@ -852,7 +912,10 @@ static void nv_drain_tx(struct net_device *dev)
     struct fe_priv *np = get_nvpriv(dev);
     int i;
     for (i = 0; i < TX_RING; i++) {
-        np->tx_ring[i].FlagLen = 0;
+        if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+            np->tx_ring.orig[i].FlagLen = 0;
+        else
+            np->tx_ring.ex[i].FlagLen = 0;
         if (np->tx_skbuff[i]) {
             pci_unmap_single(np->pci_dev, np->tx_dma[i],
                              np->tx_skbuff[i]->len,
@@ -869,7 +932,10 @@ static void nv_drain_rx(struct net_device *dev)
     struct fe_priv *np = get_nvpriv(dev);
     int i;
     for (i = 0; i < RX_RING; i++) {
-        np->rx_ring[i].FlagLen = 0;
+        if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+            np->rx_ring.orig[i].FlagLen = 0;
+        else
+            np->rx_ring.ex[i].FlagLen = 0;
         wmb();
         if (np->rx_skbuff[i]) {
             pci_unmap_single(np->pci_dev, np->rx_dma[i],
@@ -900,11 +966,19 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
     np->tx_dma[nr] = pci_map_single(np->pci_dev, skb->data,skb->len,
                                     PCI_DMA_TODEVICE);
-    np->tx_ring[nr].PacketBuffer = cpu_to_le32(np->tx_dma[nr]);
+    if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+        np->tx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->tx_dma[nr]);
+    else {
+        np->tx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->tx_dma[nr]) >> 32;
+        np->tx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF;
+    }
 
     spin_lock_irq(&np->lock);
     wmb();
-    np->tx_ring[nr].FlagLen = cpu_to_le32( (skb->len-1) | np->tx_flags );
+    if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+        np->tx_ring.orig[nr].FlagLen = cpu_to_le32( (skb->len-1) | np->tx_flags );
+    else
+        np->tx_ring.ex[nr].FlagLen = cpu_to_le32( (skb->len-1) | np->tx_flags );
     dprintk(KERN_DEBUG "%s: nv_start_xmit: packet packet %d queued for transmission.\n",
             dev->name, np->next_tx);
     {
@@ -942,7 +1016,10 @@ static void nv_tx_done(struct net_device *dev)
     while (np->nic_tx != np->next_tx) {
         i = np->nic_tx % TX_RING;
 
-        Flags = le32_to_cpu(np->tx_ring[i].FlagLen);
+        if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+            Flags = le32_to_cpu(np->tx_ring.orig[i].FlagLen);
+        else
+            Flags = le32_to_cpu(np->tx_ring.ex[i].FlagLen);
 
         dprintk(KERN_DEBUG "%s: nv_tx_done: looking at packet %d, Flags 0x%x.\n",
                 dev->name, np->nic_tx, Flags);
@@ -993,9 +1070,56 @@ static void nv_tx_timeout(struct net_device *dev)
     struct fe_priv *np = get_nvpriv(dev);
     u8 __iomem *base = get_hwbase(dev);
 
-    dprintk(KERN_DEBUG "%s: Got tx_timeout. irq: %08x\n", dev->name,
+    printk(KERN_INFO "%s: Got tx_timeout. irq: %08x\n", dev->name,
             readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK);
 
+    {
+        int i;
+
+        printk(KERN_INFO "%s: Ring at %lx: next %d nic %d\n",
+               dev->name, (unsigned long)np->ring_addr,
+               np->next_tx, np->nic_tx);
+        printk(KERN_INFO "%s: Dumping tx registers\n", dev->name);
+        for (i=0;i<0x400;i+= 32) {
+            printk(KERN_INFO "%3x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
+                   i,