author     David S. Miller <davem@davemloft.net>  2008-05-15 00:34:44 -0700
committer  David S. Miller <davem@davemloft.net>  2008-05-15 00:34:44 -0700
commit     63fe46da9c380b3f2bbdf3765044649517cc717c (patch)
tree       9478c1aca1d692b408955aea20c9cd9a37e589c0 /drivers/net
parent     99dd1a2b8347ac2ae802300b7862f6f7bcf17139 (diff)
parent     066b2118976e6e7cc50eed39e2747c75343a23c4 (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6
Conflicts:
	drivers/net/wireless/iwlwifi/iwl-4965-rs.c
	drivers/net/wireless/rt2x00/rt61pci.c
Diffstat (limited to 'drivers/net')
90 files changed, 4375 insertions, 1036 deletions
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c index 6f8e7d4cf74..2edda8cc7f9 100644 --- a/drivers/net/3c59x.c +++ b/drivers/net/3c59x.c @@ -319,7 +319,7 @@ static struct vortex_chip_info { {"3c920B-EMB-WNM (ATI Radeon 9100 IGP)", PCI_USES_MASTER, IS_TORNADO|HAS_MII|HAS_HWCKSM, 128, }, {"3c980 Cyclone", - PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, }, + PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM|EXTRA_PREAMBLE, 128, }, {"3c980C Python-T", PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM, 128, }, @@ -600,7 +600,6 @@ struct vortex_private { struct sk_buff* tx_skbuff[TX_RING_SIZE]; unsigned int cur_rx, cur_tx; /* The next free ring entry */ unsigned int dirty_rx, dirty_tx; /* The ring entries to be free()ed. */ - struct net_device_stats stats; /* Generic stats */ struct vortex_extra_stats xstats; /* NIC-specific extra stats */ struct sk_buff *tx_skb; /* Packet being eaten by bus master ctrl. */ dma_addr_t tx_skb_dma; /* Allocated DMA address for bus master ctrl DMA. */ @@ -1875,7 +1874,7 @@ static void vortex_tx_timeout(struct net_device *dev) issue_and_wait(dev, TxReset); - vp->stats.tx_errors++; + dev->stats.tx_errors++; if (vp->full_bus_master_tx) { printk(KERN_DEBUG "%s: Resetting the Tx ring pointer.\n", dev->name); if (vp->cur_tx - vp->dirty_tx > 0 && ioread32(ioaddr + DownListPtr) == 0) @@ -1887,7 +1886,7 @@ static void vortex_tx_timeout(struct net_device *dev) iowrite8(PKT_BUF_SZ>>8, ioaddr + TxFreeThreshold); iowrite16(DownUnstall, ioaddr + EL3_CMD); } else { - vp->stats.tx_dropped++; + dev->stats.tx_dropped++; netif_wake_queue(dev); } @@ -1928,8 +1927,8 @@ vortex_error(struct net_device *dev, int status) } dump_tx_ring(dev); } - if (tx_status & 0x14) vp->stats.tx_fifo_errors++; - if (tx_status & 0x38) vp->stats.tx_aborted_errors++; + if (tx_status & 0x14) dev->stats.tx_fifo_errors++; + if (tx_status & 0x38) dev->stats.tx_aborted_errors++; if (tx_status & 0x08) vp->xstats.tx_max_collisions++; iowrite8(0, ioaddr + TxStatus); if (tx_status & 0x30) { /* txJabber or txUnderrun */ @@ -2051,8 +2050,8 @@ vortex_start_xmit(struct sk_buff *skb, struct net_device *dev) if (vortex_debug > 2) printk(KERN_DEBUG "%s: Tx error, status %2.2x.\n", dev->name, tx_status); - if (tx_status & 0x04) vp->stats.tx_fifo_errors++; - if (tx_status & 0x38) vp->stats.tx_aborted_errors++; + if (tx_status & 0x04) dev->stats.tx_fifo_errors++; + if (tx_status & 0x38) dev->stats.tx_aborted_errors++; if (tx_status & 0x30) { issue_and_wait(dev, TxReset); } @@ -2350,7 +2349,7 @@ boomerang_interrupt(int irq, void *dev_id) } else { printk(KERN_DEBUG "boomerang_interrupt: no skb!\n"); } - /* vp->stats.tx_packets++; Counted below. */ + /* dev->stats.tx_packets++; Counted below. 
*/ dirty_tx++; } vp->dirty_tx = dirty_tx; @@ -2409,12 +2408,12 @@ static int vortex_rx(struct net_device *dev) unsigned char rx_error = ioread8(ioaddr + RxErrors); if (vortex_debug > 2) printk(KERN_DEBUG " Rx error: status %2.2x.\n", rx_error); - vp->stats.rx_errors++; - if (rx_error & 0x01) vp->stats.rx_over_errors++; - if (rx_error & 0x02) vp->stats.rx_length_errors++; - if (rx_error & 0x04) vp->stats.rx_frame_errors++; - if (rx_error & 0x08) vp->stats.rx_crc_errors++; - if (rx_error & 0x10) vp->stats.rx_length_errors++; + dev->stats.rx_errors++; + if (rx_error & 0x01) dev->stats.rx_over_errors++; + if (rx_error & 0x02) dev->stats.rx_length_errors++; + if (rx_error & 0x04) dev->stats.rx_frame_errors++; + if (rx_error & 0x08) dev->stats.rx_crc_errors++; + if (rx_error & 0x10) dev->stats.rx_length_errors++; } else { /* The packet length: up to 4.5K!. */ int pkt_len = rx_status & 0x1fff; @@ -2446,7 +2445,7 @@ static int vortex_rx(struct net_device *dev) skb->protocol = eth_type_trans(skb, dev); netif_rx(skb); dev->last_rx = jiffies; - vp->stats.rx_packets++; + dev->stats.rx_packets++; /* Wait a limited time to go to next packet. */ for (i = 200; i >= 0; i--) if ( ! (ioread16(ioaddr + EL3_STATUS) & CmdInProgress)) @@ -2455,7 +2454,7 @@ static int vortex_rx(struct net_device *dev) } else if (vortex_debug > 0) printk(KERN_NOTICE "%s: No memory to allocate a sk_buff of " "size %d.\n", dev->name, pkt_len); - vp->stats.rx_dropped++; + dev->stats.rx_dropped++; } issue_and_wait(dev, RxDiscard); } @@ -2482,12 +2481,12 @@ boomerang_rx(struct net_device *dev) unsigned char rx_error = rx_status >> 16; if (vortex_debug > 2) printk(KERN_DEBUG " Rx error: status %2.2x.\n", rx_error); - vp->stats.rx_errors++; - if (rx_error & 0x01) vp->stats.rx_over_errors++; - if (rx_error & 0x02) vp->stats.rx_length_errors++; - if (rx_error & 0x04) vp->stats.rx_frame_errors++; - if (rx_error & 0x08) vp->stats.rx_crc_errors++; - if (rx_error & 0x10) vp->stats.rx_length_errors++; + dev->stats.rx_errors++; + if (rx_error & 0x01) dev->stats.rx_over_errors++; + if (rx_error & 0x02) dev->stats.rx_length_errors++; + if (rx_error & 0x04) dev->stats.rx_frame_errors++; + if (rx_error & 0x08) dev->stats.rx_crc_errors++; + if (rx_error & 0x10) dev->stats.rx_length_errors++; } else { /* The packet length: up to 4.5K!. */ int pkt_len = rx_status & 0x1fff; @@ -2529,7 +2528,7 @@ boomerang_rx(struct net_device *dev) } netif_rx(skb); dev->last_rx = jiffies; - vp->stats.rx_packets++; + dev->stats.rx_packets++; } entry = (++vp->cur_rx) % RX_RING_SIZE; } @@ -2591,7 +2590,7 @@ vortex_down(struct net_device *dev, int final_down) del_timer_sync(&vp->rx_oom_timer); del_timer_sync(&vp->timer); - /* Turn off statistics ASAP. We update vp->stats below. */ + /* Turn off statistics ASAP. We update dev->stats below. */ iowrite16(StatsDisable, ioaddr + EL3_CMD); /* Disable the receiver and transmitter. */ @@ -2728,7 +2727,7 @@ static struct net_device_stats *vortex_get_stats(struct net_device *dev) update_stats(ioaddr, dev); spin_unlock_irqrestore (&vp->lock, flags); } - return &vp->stats; + return &dev->stats; } /* Update statistics. @@ -2748,18 +2747,18 @@ static void update_stats(void __iomem *ioaddr, struct net_device *dev) /* Unlike the 3c5x9 we need not turn off stats updates while reading. */ /* Switch to the stats window, and read everything. 
*/ EL3WINDOW(6); - vp->stats.tx_carrier_errors += ioread8(ioaddr + 0); - vp->stats.tx_heartbeat_errors += ioread8(ioaddr + 1); - vp->stats.tx_window_errors += ioread8(ioaddr + 4); - vp->stats.rx_fifo_errors += ioread8(ioaddr + 5); - vp->stats.tx_packets += ioread8(ioaddr + 6); - vp->stats.tx_packets += (ioread8(ioaddr + 9)&0x30) << 4; + dev->stats.tx_carrier_errors += ioread8(ioaddr + 0); + dev->stats.tx_heartbeat_errors += ioread8(ioaddr + 1); + dev->stats.tx_window_errors += ioread8(ioaddr + 4); + dev->stats.rx_fifo_errors += ioread8(ioaddr + 5); + dev->stats.tx_packets += ioread8(ioaddr + 6); + dev->stats.tx_packets += (ioread8(ioaddr + 9)&0x30) << 4; /* Rx packets */ ioread8(ioaddr + 7); /* Must read to clear */ /* Don't bother with register 9, an extension of registers 6&7. If we do use the 6&7 values the atomic update assumption above is invalid. */ - vp->stats.rx_bytes += ioread16(ioaddr + 10); - vp->stats.tx_bytes += ioread16(ioaddr + 12); + dev->stats.rx_bytes += ioread16(ioaddr + 10); + dev->stats.tx_bytes += ioread16(ioaddr + 12); /* Extra stats for get_ethtool_stats() */ vp->xstats.tx_multiple_collisions += ioread8(ioaddr + 2); vp->xstats.tx_single_collisions += ioread8(ioaddr + 3); @@ -2767,14 +2766,14 @@ static void update_stats(void __iomem *ioaddr, struct net_device *dev) EL3WINDOW(4); vp->xstats.rx_bad_ssd += ioread8(ioaddr + 12); - vp->stats.collisions = vp->xstats.tx_multiple_collisions + dev->stats.collisions = vp->xstats.tx_multiple_collisions + vp->xstats.tx_single_collisions + vp->xstats.tx_max_collisions; { u8 up = ioread8(ioaddr + 13); - vp->stats.rx_bytes += (up & 0x0f) << 16; - vp->stats.tx_bytes += (up & 0xf0) << 12; + dev->stats.rx_bytes += (up & 0x0f) << 16; + dev->stats.tx_bytes += (up & 0xf0) << 12; } EL3WINDOW(old_window >> 13); diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index af46341827f..9f6cc8a5607 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -1273,20 +1273,6 @@ config PCNET32 To compile this driver as a module, choose M here. The module will be called pcnet32. -config PCNET32_NAPI - bool "Use RX polling (NAPI)" - depends on PCNET32 - help - NAPI is a new driver API designed to reduce CPU and interrupt load - when the driver is receiving lots of packets from the card. It is - still somewhat experimental and thus not yet enabled by default. - - If your estimated Rx load is 10kpps or more, or if the card will be - deployed on potentially unfriendly networks (e.g. in a firewall), - then say Y here. - - If in doubt, say N. - config AMD8111_ETH tristate "AMD 8111 (new PCI lance) support" depends on NET_PCI && PCI @@ -2440,7 +2426,7 @@ config CHELSIO_T3 config EHEA tristate "eHEA Ethernet support" - depends on IBMEBUS && INET && SPARSEMEM + depends on IBMEBUS && INET && SPARSEMEM && MEMORY_HOTPLUG select INET_LRO ---help--- This driver supports the IBM pSeries eHEA ethernet adapter. 
diff --git a/drivers/net/appletalk/cops.c b/drivers/net/appletalk/cops.c index 82e9a5bd0dd..a0b4c851607 100644 --- a/drivers/net/appletalk/cops.c +++ b/drivers/net/appletalk/cops.c @@ -499,19 +499,13 @@ static void cops_reset(struct net_device *dev, int sleep) { outb(0, ioaddr+DAYNA_RESET); /* Assert the reset port */ inb(ioaddr+DAYNA_RESET); /* Clear the reset */ - if(sleep) - { - long snap=jiffies; - - /* Let card finish initializing, about 1/3 second */ - while (time_before(jiffies, snap + HZ/3)) - schedule(); - } - else - mdelay(333); + if (sleep) + msleep(333); + else + mdelay(333); } + netif_wake_queue(dev); - return; } static void cops_load (struct net_device *dev) diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c index 0afe522b8f7..9c2394d4942 100644 --- a/drivers/net/atlx/atl1.c +++ b/drivers/net/atlx/atl1.c @@ -1,7 +1,7 @@ /* * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved. * Copyright(c) 2006 - 2007 Chris Snook <csnook@redhat.com> - * Copyright(c) 2006 Jay Cliburn <jcliburn@gmail.com> + * Copyright(c) 2006 - 2008 Jay Cliburn <jcliburn@gmail.com> * * Derived from Intel e1000 driver * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. @@ -36,7 +36,6 @@ * A very incomplete list of things that need to be dealt with: * * TODO: - * Wake on LAN. * Add more ethtool functions. * Fix abstruse irq enable/disable condition described here: * http://marc.theaimsgroup.com/?l=linux-netdev&m=116398508500553&w=2 @@ -638,21 +637,18 @@ static s32 atl1_phy_leave_power_saving(struct atl1_hw *hw) } /* - *TODO: do something or get rid of this + * Force the PHY into power saving mode using vendor magic. */ #ifdef CONFIG_PM -static s32 atl1_phy_enter_power_saving(struct atl1_hw *hw) +static void atl1_phy_enter_power_saving(struct atl1_hw *hw) { -/* s32 ret_val; - * u16 phy_data; - */ + atl1_write_phy_reg(hw, MII_DBG_ADDR, 0); + atl1_write_phy_reg(hw, MII_DBG_DATA, 0x124E); + atl1_write_phy_reg(hw, MII_DBG_ADDR, 2); + atl1_write_phy_reg(hw, MII_DBG_DATA, 0x3000); + atl1_write_phy_reg(hw, MII_DBG_ADDR, 3); + atl1_write_phy_reg(hw, MII_DBG_DATA, 0); -/* - ret_val = atl1_write_phy_reg(hw, ...); - ret_val = atl1_write_phy_reg(hw, ...); - .... 
-*/ - return 0; } #endif @@ -2784,64 +2780,93 @@ static int atl1_suspend(struct pci_dev *pdev, pm_message_t state) struct atl1_hw *hw = &adapter->hw; u32 ctrl = 0; u32 wufc = adapter->wol; + u32 val; + int retval; + u16 speed; + u16 duplex; netif_device_detach(netdev); if (netif_running(netdev)) atl1_down(adapter); + retval = pci_save_state(pdev); + if (retval) + return retval; + atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl); atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl); - if (ctrl & BMSR_LSTATUS) + val = ctrl & BMSR_LSTATUS; + if (val) wufc &= ~ATLX_WUFC_LNKC; - /* reduce speed to 10/100M */ - if (wufc) { - atl1_phy_enter_power_saving(hw); - /* if resume, let driver to re- setup link */ - hw->phy_configured = false; - atl1_set_mac_addr(hw); - atlx_set_multi(netdev); + if (val && wufc) { + val = atl1_get_speed_and_duplex(hw, &speed, &duplex); + if (val) { + if (netif_msg_ifdown(adapter)) + dev_printk(KERN_DEBUG, &pdev->dev, + "error getting speed/duplex\n"); + goto disable_wol; + } ctrl = 0; - /* turn on magic packet wol */ - if (wufc & ATLX_WUFC_MAG) - ctrl = WOL_MAGIC_EN | WOL_MAGIC_PME_EN; - /* turn on Link change WOL */ - if (wufc & ATLX_WUFC_LNKC) - ctrl |= (WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN); + /* enable magic packet WOL */ + if (wufc & ATLX_WUFC_MAG) + ctrl |= (WOL_MAGIC_EN | WOL_MAGIC_PME_EN); iowrite32(ctrl, hw->hw_addr + REG_WOL_CTRL); - - /* turn on all-multi mode if wake on multicast is enabled */ - ctrl = ioread32(hw->hw_addr + REG_MAC_CTRL); - ctrl &= ~MAC_CTRL_DBG; - ctrl &= ~MAC_CTRL_PROMIS_EN; - if (wufc & ATLX_WUFC_MC) - ctrl |= MAC_CTRL_MC_ALL_EN; - else - ctrl &= ~MAC_CTRL_MC_ALL_EN; - - /* turn on broadcast mode if wake on-BC is enabled */ - if (wufc & ATLX_WUFC_BC) + ioread32(hw->hw_addr + REG_WOL_CTRL); + + /* configure the mac */ + ctrl = MAC_CTRL_RX_EN; + ctrl |= ((u32)((speed == SPEED_1000) ? 
MAC_CTRL_SPEED_1000 : + MAC_CTRL_SPEED_10_100) << MAC_CTRL_SPEED_SHIFT); + if (duplex == FULL_DUPLEX) + ctrl |= MAC_CTRL_DUPLX; + ctrl |= (((u32)adapter->hw.preamble_len & + MAC_CTRL_PRMLEN_MASK) << MAC_CTRL_PRMLEN_SHIFT); + if (adapter->vlgrp) + ctrl |= MAC_CTRL_RMV_VLAN; + if (wufc & ATLX_WUFC_MAG) ctrl |= MAC_CTRL_BC_EN; - else - ctrl &= ~MAC_CTRL_BC_EN; - - /* enable RX */ - ctrl |= MAC_CTRL_RX_EN; iowrite32(ctrl, hw->hw_addr + REG_MAC_CTRL); - pci_enable_wake(pdev, PCI_D3hot, 1); - pci_enable_wake(pdev, PCI_D3cold, 1); - } else { - iowrite32(0, hw->hw_addr + REG_WOL_CTRL); - pci_enable_wake(pdev, PCI_D3hot, 0); - pci_enable_wake(pdev, PCI_D3cold, 0); + ioread32(hw->hw_addr + REG_MAC_CTRL); + + /* poke the PHY */ + ctrl = ioread32(hw->hw_addr + REG_PCIE_PHYMISC); + ctrl |= PCIE_PHYMISC_FORCE_RCV_DET; + iowrite32(ctrl, hw->hw_addr + REG_PCIE_PHYMISC); + ioread32(hw->hw_addr + REG_PCIE_PHYMISC); + + pci_enable_wake(pdev, pci_choose_state(pdev, state), 1); + goto exit; } - pci_save_state(pdev); + if (!val && wufc) { + ctrl |= (WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN); + iowrite32(ctrl, hw->hw_addr + REG_WOL_CTRL); + ioread32(hw->hw_addr + REG_WOL_CTRL); + iowrite32(0, hw->hw_addr + REG_MAC_CTRL); + ioread32(hw->hw_addr + REG_MAC_CTRL); + hw->phy_configured = false; + pci_enable_wake(pdev, pci_choose_state(pdev, state), 1); + goto exit; + } + +disable_wol: + iowrite32(0, hw->hw_addr + REG_WOL_CTRL); + ioread32(hw->hw_addr + REG_WOL_CTRL); + ctrl = ioread32(hw->hw_addr + REG_PCIE_PHYMISC); + ctrl |= PCIE_PHYMISC_FORCE_RCV_DET; + iowrite32(ctrl, hw->hw_addr + REG_PCIE_PHYMISC); + ioread32(hw->hw_addr + REG_PCIE_PHYMISC); + atl1_phy_enter_power_saving(hw); + hw->phy_configured = false; + pci_enable_wake(pdev, pci_choose_state(pdev, state), 0); +exit: + if (netif_running(netdev)) + pci_disable_msi(adapter->pdev); pci_disable_device(pdev); - - pci_set_power_state(pdev, PCI_D3hot); + pci_set_power_state(pdev, pci_choose_state(pdev, state)); return 0; } @@ -2855,20 +2880,26 @@ static int atl1_resume(struct pci_dev *pdev) pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev); - /* FIXME: check and handle */ err = pci_enable_device(pdev); + if (err) { + if (netif_msg_ifup(adapter)) + dev_printk(KERN_DEBUG, &pdev->dev, + "error enabling pci device\n"); + return err; + } + + pci_set_master(pdev); + iowrite32(0, adapter->hw.hw_addr + REG_WOL_CTRL); pci_enable_wake(pdev, PCI_D3hot, 0); pci_enable_wake(pdev, PCI_D3cold, 0); - iowrite32(0, adapter->hw.hw_addr + REG_WOL_CTRL); - atl1_reset(adapter); + atl1_reset_hw(&adapter->hw); + adapter->cmb.cmb->int_stats = 0; if (netif_running(netdev)) atl1_up(adapter); netif_device_attach(netdev); - atl1_via_workaround(adapter); - return 0; } #else @@ -2876,6 +2907,13 @@ static int atl1_resume(struct pci_dev *pdev) #define atl1_resume NULL #endif +static void atl1_shutdown(struct pci_dev *pdev) +{ +#ifdef CONFIG_PM + atl1_suspend(pdev, PMSG_SUSPEND); +#endif +} + #ifdef CONFIG_NET_POLL_CONTROLLER static void atl1_poll_controller(struct net_device *netdev) { @@ -3122,7 +3160,8 @@ static struct pci_driver atl1_driver = { .probe = atl1_probe, .remove = __devexit_p(atl1_remove), .suspend = atl1_suspend, - .resume = atl1_resume + .resume = atl1_resume, + .shutdown = atl1_shutdown }; /* diff --git a/drivers/net/atlx/atl1.h b/drivers/net/atlx/atl1.h index 51893d66eae..a5015b14a42 100644 --- a/drivers/net/atlx/atl1.h +++ b/drivers/net/atlx/atl1.h @@ -1,7 +1,7 @@ /* * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved. 
* Copyright(c) 2006 - 2007 Chris Snook <csnook@redhat.com> - * Copyright(c) 2006 Jay Cliburn <jcliburn@gmail.com> + * Copyright(c) 2006 - 2008 Jay Cliburn <jcliburn@gmail.com> * * Derived from Intel e1000 driver * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. diff --git a/drivers/net/atlx/atlx.c b/drivers/net/atlx/atlx.c index f06b854e250..b3e7fcf0f6e 100644 --- a/drivers/net/atlx/atlx.c +++ b/drivers/net/atlx/atlx.c @@ -2,7 +2,7 @@ * * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved. * Copyright(c) 2006 - 2007 Chris Snook <csnook@redhat.com> - * Copyright(c) 2006 Jay Cliburn <jcliburn@gmail.com> + * Copyright(c) 2006 - 2008 Jay Cliburn <jcliburn@gmail.com> * Copyright(c) 2007 Atheros Corporation. All rights reserved. * * Derived from Intel e1000 driver diff --git a/drivers/net/atlx/atlx.h b/drivers/net/atlx/atlx.h index 3be7c09734d..297a03da6b7 100644 --- a/drivers/net/atlx/atlx.h +++ b/drivers/net/atlx/atlx.h @@ -2,7 +2,7 @@ * * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved. * Copyright(c) 2006 - 2007 Chris Snook <csnook@redhat.com> - * Copyright(c) 2006 Jay Cliburn <jcliburn@gmail.com> + * Copyright(c) 2006 - 2008 Jay Cliburn <jcliburn@gmail.com> * Copyright(c) 2007 Atheros Corporation. All rights reserved. * * Derived from Intel e1000 driver @@ -29,7 +29,7 @@ #include <linux/module.h> #include <linux/types.h> -#define ATLX_DRIVER_VERSION "2.1.1" +#define ATLX_DRIVER_VERSION "2.1.3" MODULE_AUTHOR("Xiong Huang <xiong.huang@atheros.com>, \ Chris Snook <csnook@redhat.com>, Jay Cliburn <jcliburn@gmail.com>"); MODULE_LICENSE("GPL"); @@ -460,6 +460,9 @@ MODULE_VERSION(ATLX_DRIVER_VERSION); #define MII_ATLX_PSSR_100MBS 0x4000 /* 01=100Mbs */ #define MII_ATLX_PSSR_1000MBS 0x8000 /* 10=1000Mbs */ +#define MII_DBG_ADDR 0x1D +#define MII_DBG_DATA 0x1E + /* PCI Command Register Bit Definitions */ #define PCI_REG_COMMAND 0x04 /* PCI Command Register */ #define CMD_IO_SPACE 0x0001 diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 6425603bc37..50a40e43315 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -1425,13 +1425,13 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) res = netdev_set_master(slave_dev, bond_dev); if (res) { dprintk("Error %d calling netdev_set_master\n", res); - goto err_close; + goto err_restore_mac; } /* open the slave since the application closed it */ res = dev_open(slave_dev); if (res) { dprintk("Openning slave %s failed\n", slave_dev->name); - goto err_restore_mac; + goto err_unset_master; } new_slave->dev = slave_dev; @@ -1444,7 +1444,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) */ res = bond_alb_init_slave(bond, new_slave); if (res) { - goto err_unset_master; + goto err_close; } } @@ -1619,7 +1619,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) res = bond_create_slave_symlinks(bond_dev, slave_dev); if (res) - goto err_unset_master; + goto err_close; printk(KERN_INFO DRV_NAME ": %s: enslaving %s as a%s interface with a%s link.\n", @@ -1631,12 +1631,12 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) return 0; /* Undo stages on error */ -err_unset_master: - netdev_set_master(slave_dev, NULL); - err_close: dev_close(slave_dev); +err_unset_master: + netdev_set_master(slave_dev, NULL); + err_restore_mac: if (!bond->params.fail_over_mac) { memcpy(addr.sa_data, new_slave->perm_hwaddr, ETH_ALEN); @@ -4936,7 +4936,9 @@ int 
bond_create(char *name, struct bond_params *params, struct bonding **newbond if (res < 0) { rtnl_lock(); down_write(&bonding_rwsem); - goto out_bond; + bond_deinit(bond_dev); + unregister_netdevice(bond_dev); + goto out_rtnl; } return 0; @@ -4990,9 +4992,10 @@ err: destroy_workqueue(bond->wq); } + bond_destroy_sysfs(); + rtnl_lock(); bond_free_all(); - bond_destroy_sysfs(); rtnl_unlock(); out: return res; @@ -5004,9 +5007,10 @@ static void __exit bonding_exit(void) unregister_netdevice_notifier(&bond_netdev_notifier); unregister_inetaddr_notifier(&bond_inetaddr_notifier); + bond_destroy_sysfs(); + rtnl_lock(); bond_free_all(); - bond_destroy_sysfs(); rtnl_unlock(); } diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c index 979c2d05ff9..08f3d396bcd 100644 --- a/drivers/net/bonding/bond_sysfs.c +++ b/drivers/net/bonding/bond_sysfs.c @@ -146,29 +146,29 @@ static ssize_t bonding_store_bonds(struct class *cls, const char *buffer, size_t ": Unable remove bond %s due to open references.\n", ifname); res = -EPERM; - goto out; + goto out_unlock; } printk(KERN_INFO DRV_NAME ": %s is being deleted...\n", bond->dev->name); bond_destroy(bond); - up_write(&bonding_rwsem); - rtnl_unlock(); - goto out; + goto out_unlock; } printk(KERN_ERR DRV_NAME ": unable to delete non-existent bond %s\n", ifname); res = -ENODEV; - up_write(&bonding_rwsem); - rtnl_unlock(); - goto out; + goto out_unlock; } err_no_cmd: printk(KERN_ERR DRV_NAME ": no command found in bonding_masters. Use +ifname or -ifname.\n"); - res = -EPERM; + return -EPERM; + +out_unlock: + up_write(&bonding_rwsem); + rtnl_unlock(); /* Always return either count or an error. If you return 0, you'll * get called forever, which is bad. @@ -1437,8 +1437,16 @@ int bond_create_sysfs(void) * configure multiple bonding devices. */ if (ret == -EEXIST) { - netdev_class = NULL; - return 0; + /* Is someone being kinky and naming a device bonding_master? 
*/ + if (__dev_get_by_name(&init_net, + class_attr_bonding_masters.attr.name)) + printk(KERN_ERR + "network device named %s already exists in sysfs", + class_attr_bonding_masters.attr.name); + else { + netdev_class = NULL; + return 0; + } } return ret; diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h index 4fdb13f8447..acebe431d06 100644 --- a/drivers/net/cxgb3/adapter.h +++ b/drivers/net/cxgb3/adapter.h @@ -71,6 +71,7 @@ enum { /* adapter flags */ USING_MSIX = (1 << 2), QUEUES_BOUND = (1 << 3), TP_PARITY_INIT = (1 << 4), + NAPI_INIT = (1 << 5), }; struct fl_pg_chunk { diff --git a/drivers/net/cxgb3/common.h b/drivers/net/cxgb3/common.h index 91ee7277b81..579bee42a5c 100644 --- a/drivers/net/cxgb3/common.h +++ b/drivers/net/cxgb3/common.h @@ -698,6 +698,7 @@ void mac_prep(struct cmac *mac, struct adapter *adapter, int index); void early_hw_init(struct adapter *adapter, const struct adapter_info *ai); int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai, int reset); +int t3_replay_prep_adapter(struct adapter *adapter); void t3_led_ready(struct adapter *adapter); void t3_fatal_err(struct adapter *adapter); void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on); diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c index 05e5f59e87f..3a312721679 100644 --- a/drivers/net/cxgb3/cxgb3_main.c +++ b/drivers/net/cxgb3/cxgb3_main.c @@ -421,6 +421,13 @@ static void init_napi(struct adapter *adap) netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll, 64); } + + /* + * netif_napi_add() can be called only once per napi_struct because it + * adds each new napi_struct to a list. Be careful not to call it a + * second time, e.g., during EEH recovery, by making a note of it. + */ + adap->flags |= NAPI_INIT; } /* @@ -896,7 +903,8 @@ static int cxgb_up(struct adapter *adap) goto out; setup_rss(adap); - init_napi(adap); + if (!(adap->flags & NAPI_INIT)) + init_napi(adap); adap->flags |= FULL_INIT_DONE; } @@ -999,7 +1007,7 @@ static int offload_open(struct net_device *dev) return 0; if (!adap_up && (err = cxgb_up(adapter)) < 0) - return err; + goto out; t3_tp_set_offload_mode(adapter, 1); tdev->lldev = adapter->port[0]; @@ -1061,10 +1069,8 @@ static int cxgb_open(struct net_device *dev) int other_ports = adapter->open_device_map & PORT_MASK; int err; - if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0) { - quiesce_rx(adapter); + if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0) return err; - } set_bit(pi->port_id, &adapter->open_device_map); if (is_offload(adapter) && !ofld_disable) { @@ -1894,11 +1900,11 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr) u8 *fw_data; struct ch_mem_range t; - if (!capable(CAP_NET_ADMIN)) + if (!capable(CAP_SYS_RAWIO)) return -EPERM; if (copy_from_user(&t, useraddr, sizeof(t))) return -EFAULT; - + /* Check t.len sanity ? */ fw_data = kmalloc(t.len, GFP_KERNEL); if (!fw_data) return -ENOMEM; @@ -2424,14 +2430,11 @@ static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev, test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) offload_close(&adapter->tdev); - /* Free sge resources */ - t3_free_sge_resources(adapter); - adapter->flags &= ~FULL_INIT_DONE; pci_disable_device(pdev); - /* Request a slot slot reset. */ + /* Request a slot reset. 
*/ return PCI_ERS_RESULT_NEED_RESET; } @@ -2448,13 +2451,20 @@ static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev) if (pci_enable_device(pdev)) { dev_err(&pdev->dev, "Cannot re-enable PCI device after reset.\n"); - return PCI_ERS_RESULT_DISCONNECT; + goto err; } pci_set_master(pdev); + pci_restore_state(pdev); - t3_prep_adapter(adapter, adapter->params.info, 1); + /* Free sge resources */ + t3_free_sge_resources(adapter); + + if (t3_replay_prep_adapter(adapter)) + goto err; return PCI_ERS_RESULT_RECOVERED; +err: + return PCI_ERS_RESULT_DISCONNECT; } /** @@ -2483,13 +2493,6 @@ static void t3_io_resume(struct pci_dev *pdev) netif_device_attach(netdev); } } - - if (is_offload(adapter)) { - __set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map); - if (offload_open(adapter->port[0])) - printk(KERN_WARNING - "Could not bring back offload capabilities\n"); - } } static struct pci_error_handlers t3_err_handler = { @@ -2608,6 +2611,7 @@ static int __devinit init_one(struct pci_dev *pdev, } pci_set_master(pdev); + pci_save_state(pdev); mmio_start = pci_resource_start(pdev, 0); mmio_len = pci_resource_len(pdev, 0); diff --git a/drivers/net/cxgb3/regs.h b/drivers/net/cxgb3/regs.h index 02dbbb30092..56717887934 100644 --- a/drivers/net/cxgb3/regs.h +++ b/drivers/net/cxgb3/regs.h @@ -444,6 +444,14 @@ #define A_PCIE_CFG 0x88 +#define S_ENABLELINKDWNDRST 21 +#define V_ENABLELINKDWNDRST(x) ((x) << S_ENABLELINKDWNDRST) +#define F_ENABLELINKDWNDRST V_ENABLELINKDWNDRST(1U) + +#define S_ENABLELINKDOWNRST 20 +#define V_ENABLELINKDOWNRST(x) ((x) << S_ENABLELINKDOWNRST) +#define F_ENABLELINKDOWNRST V_ENABLELINKDOWNRST(1U) + #define S_PCIE_CLIDECEN 16 #define V_PCIE_CLIDECEN(x) ((x) << S_PCIE_CLIDECEN) #define F_PCIE_CLIDECEN V_PCIE_CLIDECEN(1U) diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c index 98a6bbd11d4..796eb305cdc 100644 --- a/drivers/net/cxgb3/sge.c +++ b/drivers/net/cxgb3/sge.c @@ -539,6 +539,31 @@ static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size, } /** + * t3_reset_qset - reset a sge qset + * @q: the queue set + * + * Reset the qset structure. + * the NAPI structure is preserved in the event of + * the qset's reincarnation, for example during EEH recovery. 
+ */ +static void t3_reset_qset(struct sge_qset *q) +{ + if (q->adap && + !(q->adap->flags & NAPI_INIT)) { + memset(q, 0, sizeof(*q)); + return; + } + + q->adap = NULL; + memset(&q->rspq, 0, sizeof(q->rspq)); + memset(q->fl, 0, sizeof(struct sge_fl) * SGE_RXQ_PER_SET); + memset(q->txq, 0, sizeof(struct sge_txq) * SGE_TXQ_PER_SET); + q->txq_stopped = 0; + memset(&q->tx_reclaim_timer, 0, sizeof(q->tx_reclaim_timer)); +} + + +/** * free_qset - free the resources of an SGE queue set * @adapter: the adapter owning the queue set * @q: the queue set @@ -594,7 +619,7 @@ static void t3_free_qset(struct adapter *adapter, struct sge_qset *q) q->rspq.desc, q->rspq.phys_addr); } - memset(q, 0, sizeof(*q)); + t3_reset_qset(q); } /** @@ -1365,7 +1390,7 @@ static void restart_ctrlq(unsigned long data) */ int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb) { - int ret; + int ret; local_bh_disable(); ret = ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb); local_bh_enable(); diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c index a99496a431c..d405a932c73 100644 --- a/drivers/net/cxgb3/t3_hw.c +++ b/drivers/net/cxgb3/t3_hw.c @@ -3264,6 +3264,7 @@ static void config_pcie(struct adapter *adap) t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff); t3_set_reg_field(adap, A_PCIE_CFG, 0, + F_ENABLELINKDWNDRST | F_ENABLELINKDOWNRST | F_PCIE_DMASTOPEN | F_PCIE_CLIDECEN); } @@ -3655,3 +3656,30 @@ void t3_led_ready(struct adapter *adapter) t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL, F_GPIO0_OUT_VAL); } + +int t3_replay_prep_adapter(struct adapter *adapter) +{ + const struct adapter_info *ai = adapter->params.info; + unsigned int i, j = 0; + int ret; + + early_hw_init(adapter, ai); + ret = init_parity(adapter); + if (ret) + return ret; + + for_each_port(adapter, i) { + struct port_info *p = adap2pinfo(adapter, i); + while (!adapter->params.vpd.port_type[j]) + ++j; + + p->port_type->phy_prep(&p->phy, adapter, ai->phy_base_addr + j, + ai->mdio_ops); + + p->phy.ops->power_down(&p->phy, 1); + ++j; + } + +return 0; +} + diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c index 12b4626102e..32a9a922f15 100644 --- a/drivers/net/dm9000.c +++ b/drivers/net/dm9000.c @@ -117,6 +117,9 @@ typedef struct board_info { struct mutex addr_lock; /* phy and eeprom access lock */ + struct delayed_work phy_poll; + struct net_device *ndev; + spinlock_t lock; struct mii_if_info mii; @@ -297,6 +300,10 @@ static void dm9000_set_io(struct board_info *db, int byte_width) } } +static void dm9000_schedule_poll(board_info_t *db) +{ + schedule_delayed_work(&db->phy_poll, HZ * 2); +} /* Our watchdog timed out. 
Called by the networking layer */ static void dm9000_timeout(struct net_device *dev) @@ -465,6 +472,17 @@ static const struct ethtool_ops dm9000_ethtool_ops = { .set_eeprom = dm9000_set_eeprom, }; +static void +dm9000_poll_work(struct work_struct *w) +{ + struct delayed_work *dw = container_of(w, struct delayed_work, work); + board_info_t *db = container_of(dw, board_info_t, phy_poll); + + mii_check_media(&db->mii, netif_msg_link(db), 0); + + if (netif_running(db->ndev)) + dm9000_schedule_poll(db); +} /* dm9000_release_board * @@ -503,7 +521,7 @@ dm9000_release_board(struct platform_device *pdev, struct board_info *db) /* * Search DM9000 board, allocate space and register it */ -static int +static int __devinit dm9000_probe(struct platform_device *pdev) { struct dm9000_plat_data *pdata = pdev->dev.platform_data; @@ -525,17 +543,21 @@ dm9000_probe(struct platform_device *pdev) SET_NETDEV_DEV(ndev, &pdev->dev); - dev_dbg(&pdev->dev, "dm9000_probe()"); + dev_dbg(&pdev->dev, "dm9000_probe()\n"); /* setup board info structure */ db = (struct board_info *) ndev->priv; memset(db, 0, sizeof (*db)); db->dev = &pdev->dev; + db->ndev = ndev; spin_lock_init(&db->lock); mutex_init(&db->addr_lock); + INIT_DELAYED_WORK(&db->phy_poll, dm9000_poll_work); + + if (pdev->num_resources < 2) { ret = -ENODEV; goto out; @@ -761,6 +783,8 @@ dm9000_open(struct net_device *dev) mii_check_media(&db->mii, netif_msg_link(db), 1); netif_start_queue(dev); + + dm9000_schedule_poll(db); return 0; } @@ -879,6 +903,8 @@ dm9000_stop(struct net_device *ndev) if (netif_msg_ifdown(db)) dev_dbg(db->dev, "shutting down %s\n", ndev->name); + cancel_delayed_work(&db->phy_poll); + netif_stop_queue(ndev); netif_carrier_off(ndev); @@ -1288,6 +1314,8 @@ dm9000_phy_read(struct net_device *dev, int phy_reg_unused, int reg) spin_unlock_irqrestore(&db->lock,flags); mutex_unlock(&db->addr_lock); + + dm9000_dbg(db, 5, "phy_read[%02x] -> %04x\n", reg, ret); return ret; } @@ -1301,6 +1329,7 @@ dm9000_phy_write(struct net_device *dev, int phyaddr_unused, int reg, int value) unsigned long flags; unsigned long reg_save; + dm9000_dbg(db, 5, "phy_write[%02x] = %04x\n", reg, value); mutex_lock(&db->addr_lock); spin_lock_irqsave(&db->lock,flags); @@ -1372,7 +1401,7 @@ dm9000_drv_resume(struct platform_device *dev) return 0; } -static int +static int __devexit dm9000_drv_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); @@ -1393,7 +1422,7 @@ static struct platform_driver dm9000_driver = { .owner = THIS_MODULE, }, .probe = dm9000_probe, - .remove = dm9000_drv_remove, + .remove = __devexit_p(dm9000_drv_remove), .suspend = dm9000_drv_suspend, .resume = dm9000_drv_resume, }; diff --git a/drivers/net/e1000e/defines.h b/drivers/net/e1000e/defines.h index 2a53875cddb..f823b8ba578 100644 --- a/drivers/net/e1000e/defines.h +++ b/drivers/net/e1000e/defines.h @@ -648,6 +648,8 @@ #define IFE_E_PHY_ID 0x02A80330 #define IFE_PLUS_E_PHY_ID 0x02A80320 #define IFE_C_E_PHY_ID 0x02A80310 +#define BME1000_E_PHY_ID 0x01410CB0 +#define BME1000_E_PHY_ID_R2 0x01410CB1 /* M88E1000 Specific Registers */ #define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */ @@ -701,6 +703,14 @@ #define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00 #define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800 +/* BME1000 PHY Specific Control Register */ +#define BME1000_PSCR_ENABLE_DOWNSHIFT 0x0800 /* 1 = enable downshift */ + + +#define PHY_PAGE_SHIFT 5 +#define PHY_REG(page, reg) (((page) << PHY_PAGE_SHIFT) | \ + ((reg) & MAX_PHY_REG_ADDRESS)) + 
/* * Bits... * 15-5: page diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h index 38bfd0d261f..d3bc6f8101f 100644 --- a/drivers/net/e1000e/e1000.h +++ b/drivers/net/e1000e/e1000.h @@ -127,7 +127,7 @@ struct e1000_buffer { /* arrays of page information for packet split */ struct e1000_ps_page *ps_pages; }; - + struct page *page; }; struct e1000_ring { @@ -304,6 +304,7 @@ struct e1000_info { #define FLAG_HAS_CTRLEXT_ON_LOAD (1 << 5) #define FLAG_HAS_SWSM_ON_LOAD (1 << 6) #define FLAG_HAS_JUMBO_FRAMES (1 << 7) +#define FLAG_IS_ICH (1 << 9) #define FLAG_HAS_SMART_POWER_DOWN (1 << 11) #define FLAG_IS_QUAD_PORT_A (1 << 12) #define FLAG_IS_QUAD_PORT (1 << 13) @@ -386,6 +387,7 @@ extern void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw, bool state); extern void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw); extern void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw); +extern void e1000e_disable_gig_wol_ich8lan(struct e1000_hw *hw); extern s32 e1000e_check_for_copper_link(struct e1000_hw *hw); extern s32 e1000e_check_for_fiber_link(struct e1000_hw *hw); @@ -443,6 +445,9 @@ extern s32 e1000e_get_phy_info_m88(struct e1000_hw *hw); extern s32 e1000e_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data); extern s32 e1000e_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data); extern enum e1000_phy_type e1000e_get_phy_type_from_id(u32 phy_id); +extern s32 e1000e_determine_phy_address(struct e1000_hw *hw); +extern s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data); +extern s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data); extern void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl); extern s32 e1000e_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data); extern s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data); diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c index ce045acce63..a14561f40db 100644 --- a/drivers/net/e1000e/ethtool.c +++ b/drivers/net/e1000e/ethtool.c @@ -494,8 +494,12 @@ static int e1000_get_eeprom(struct net_device *netdev, for (i = 0; i < last_word - first_word + 1; i++) { ret_val = e1000_read_nvm(hw, first_word + i, 1, &eeprom_buff[i]); - if (ret_val) + if (ret_val) { + /* a read error occurred, throw away the + * result */ + memset(eeprom_buff, 0xff, sizeof(eeprom_buff)); break; + } } } @@ -803,8 +807,7 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data) /* restore previous status */ ew32(STATUS, before); - if ((mac->type != e1000_ich8lan) && - (mac->type != e1000_ich9lan)) { + if (!(adapter->flags & FLAG_IS_ICH)) { REG_PATTERN_TEST(E1000_FCAL, 0xFFFFFFFF, 0xFFFFFFFF); REG_PATTERN_TEST(E1000_FCAH, 0x0000FFFF, 0xFFFFFFFF); REG_PATTERN_TEST(E1000_FCT, 0x0000FFFF, 0xFFFFFFFF); @@ -824,15 +827,13 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data) REG_SET_AND_CHECK(E1000_RCTL, 0xFFFFFFFF, 0x00000000); - before = (((mac->type == e1000_ich8lan) || - (mac->type == e1000_ich9lan)) ? 0x06C3B33E : 0x06DFB3FE); + before = ((adapter->flags & FLAG_IS_ICH) ? 
0x06C3B33E : 0x06DFB3FE); REG_SET_AND_CHECK(E1000_RCTL, before, 0x003FFFFB); REG_SET_AND_CHECK(E1000_TCTL, 0xFFFFFFFF, 0x00000000); REG_SET_AND_CHECK(E1000_RCTL, before, 0xFFFFFFFF); REG_PATTERN_TEST(E1000_RDBAL, 0xFFFFFFF0, 0xFFFFFFFF); - if ((mac->type != e1000_ich8lan) && - (mac->type != e1000_ich9lan)) + if (!(adapter->flags & FLAG_IS_ICH)) REG_PATTERN_TEST(E1000_TXCW, 0xC000FFFF, 0x0000FFFF); REG_PATTERN_TEST(E1000_TDBAL, 0xFFFFFFF0, 0xFFFFFFFF); REG_PATTERN_TEST(E1000_TIDV, 0x0000FFFF, 0x0000FFFF); @@ -911,9 +912,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data) /* Test each interrupt */ for (i = 0; i < 10; i++) { - - if (((adapter->hw.mac.type == e1000_ich8lan) || - (adapter->hw.mac.type == e1000_ich9lan)) && i == 8) + if ((adapter->flags & FLAG_IS_ICH) && (i == 8)) continue; /* Interrupt to test */ @@ -1184,6 +1183,7 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter) struct e1000_hw *hw = &adapter->hw; u32 ctrl_reg = 0; u32 stat_reg = 0; + u16 phy_reg = 0; hw->mac.autoneg = 0; @@ -1211,6 +1211,28 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter) E1000_CTRL_SPD_100 |/* Force Speed to 100 */ E1000_CTRL_FD); /* Force Duplex to FULL */ break; + case e1000_phy_bm: + /* Set Default MAC Interface speed to 1GB */ + e1e_rphy(hw, PHY_REG(2, 21), &phy_reg); + phy_reg &= ~0x0007; + phy_reg |= 0x006; + e1e_wphy(hw, PHY_REG(2, 21), phy_reg); + /* Assert SW reset for above settings to take effect */ + e1000e_commit_phy(hw); + mdelay(1); + /* Force Full Duplex */ + e1e_rphy(hw, PHY_REG(769, 16), &phy_reg); + e1e_wphy(hw, PHY_REG(769, 16), phy_reg | 0x000C); + /* Set Link Up (in force link) */ + e1e_rphy(hw, PHY_REG(776, 16), &phy_reg); + e1e_wphy(hw, PHY_REG(776, 16), phy_reg | 0x0040); + /* Force Link */ + e1e_rphy(hw, PHY_REG(769, 16), &phy_reg); + e1e_wphy(hw, PHY_REG(769, 16), phy_reg | 0x0040); + /* Set Early Link Enable */ + e1e_rphy(hw, PHY_REG(769, 20), &phy_reg); + e1e_wphy(hw, PHY_REG(769, 20), phy_reg | 0x0400); + /* fall through */ default: /* force 1000, set loopback */ e1e_wphy(hw, PHY_CONTROL, 0x4140); @@ -1224,8 +1246,7 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter) E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */ E1000_CTRL_FD); /* Force Duplex to FULL */ - if ((adapter->hw.mac.type == e1000_ich8lan) || - (adapter->hw.mac.type == e1000_ich9lan)) + if (adapter->flags & FLAG_IS_ICH) ctrl_reg |= E1000_CTRL_SLU; /* Set Link Up */ } diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h index a930e6d9cf0..74f263acb17 100644 --- a/drivers/net/e1000e/hw.h +++ b/drivers/net/e1000e/hw.h @@ -216,6 +216,21 @@ enum e1e_registers { #define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health */ #define IGP02E1000_PHY_POWER_MGMT 0x19 /* Power Management */ #define IGP01E1000_PHY_PAGE_SELECT 0x1F /* Page Select */ +#define BM_PHY_PAGE_SELECT 22 /* Page Select for BM */ +#define IGP_PAGE_SHIFT 5 +#define PHY_REG_MASK 0x1F + +#define BM_WUC_PAGE 800 +#define BM_WUC_ADDRESS_OPCODE 0x11 +#define BM_WUC_DATA_OPCODE 0x12 +#define BM_WUC_ENABLE_PAGE 769 +#define BM_WUC_ENABLE_REG 17 +#define BM_WUC_ENABLE_BIT (1 << 2) +#define BM_WUC_HOST_WU_BIT (1 << 4) + +#define BM_WUC PHY_REG(BM_WUC_PAGE, 1) +#define BM_WUFC PHY_REG(BM_WUC_PAGE, 2) +#define BM_WUS PHY_REG(BM_WUC_PAGE, 3) #define IGP01E1000_PHY_PCS_INIT_REG 0x00B4 #define IGP01E1000_PHY_POLARITY_MASK 0x0078 @@ -331,10 +346,16 @@ enum e1e_registers { #define E1000_DEV_ID_ICH8_IFE_G 0x10C5 #define E1000_DEV_ID_ICH8_IGP_M 0x104D #define 
E1000_DEV_ID_ICH9_IGP_AMT 0x10BD +#define E1000_DEV_ID_ICH9_IGP_M_AMT 0x10F5 +#define E1000_DEV_ID_ICH9_IGP_M 0x10BF +#define E1000_DEV_ID_ICH9_IGP_M_V 0x10CB #define E1000_DEV_ID_ICH9_IGP_C 0x294C #define E1000_DEV_ID_ICH9_IFE 0x10C0 #define E1000_DEV_ID_ICH9_IFE_GT 0x10C3 #define E1000_DEV_ID_ICH9_IFE_G 0x10C2 +#define E1000_DEV_ID_ICH10_R_BM_LM 0x10CC +#define E1000_DEV_ID_ICH10_R_BM_LF 0x10CD +#define E1000_DEV_ID_ICH10_R_BM_V 0x10CE #define E1000_FUNC_1 1 @@ -378,6 +399,7 @@ enum e1000_phy_type { e1000_phy_gg82563, e1000_phy_igp_3, e1000_phy_ife, + e1000_phy_bm, }; enum e1000_bus_width { diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c index 768485dbb2c..9e38452a738 100644 --- a/drivers/net/e1000e/ich8lan.c +++ b/drivers/net/e1000e/ich8lan.c @@ -38,6 +38,12 @@ * 82566DM Gigabit Network Connection * 82566MC Gigabit Network Connection * 82566MM Gigabit Network Connection + * 82567LM Gigabit Network Connection + * 82567LF Gigabit Network Connection + * 82567LM-2 Gigabit Network Connection + * 82567LF-2 Gigabit Network Connection + * 82567V-2 Gigabit Network Connection + * 82562GT-3 10/100 Network Connection */ #include <linux/netdevice.h> @@ -198,6 +204,19 @@ static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw) phy->addr = 1; phy->reset_delay_us = 100; + /* + * We may need to do this twice - once for IGP and if that fails, + * we'll set BM func pointers and try again + */ + ret_val = e1000e_determine_phy_address(hw); + if (ret_val) { + hw->phy.ops.write_phy_reg = e1000e_write_phy_reg_bm; + hw->phy.ops.read_phy_reg = e1000e_read_phy_reg_bm; + ret_val = e1000e_determine_phy_address(hw); + if (ret_val) + return ret_val; + } + phy->id = 0; while ((e1000_phy_unknown == e1000e_get_phy_type_from_id(phy->id)) && (i++ < 100)) { @@ -219,6 +238,13 @@ static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw) phy->type = e1000_phy_ife; phy->autoneg_mask = E1000_ALL_NOT_GIG; break; + case BME1000_E_PHY_ID: + phy->type = e1000_phy_bm; + phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; + hw->phy.ops.read_phy_reg = e1000e_read_phy_reg_bm; + hw->phy.ops.write_phy_reg = e1000e_write_phy_reg_bm; + hw->phy.ops.commit_phy = e1000e_phy_sw_reset; + break; default: return -E1000_ERR_PHY; break; @@ -664,6 +690,7 @@ static s32 e1000_get_phy_info_ich8lan(struct e1000_hw *hw) return e1000_get_phy_info_ife_ich8lan(hw); break; case e1000_phy_igp_3: + case e1000_phy_bm: return e1000e_get_phy_info_igp(hw); break; default: @@ -728,7 +755,7 @@ static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active) s32 ret_val = 0; u16 data; - if (phy->type != e1000_phy_igp_3) + if (phy->type == e1000_phy_ife) return ret_val; phy_ctrl = er32(PHY_CTRL); @@ -1918,8 +1945,35 @@ static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw) ret_val = e1000e_copper_link_setup_igp(hw); if (ret_val) return ret_val; + } else if (hw->phy.type == e1000_phy_bm) { + ret_val = e1000e_copper_link_setup_m88(hw); + if (ret_val) + return ret_val; } + if (hw->phy.type == e1000_phy_ife) { + ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, ®_data); + if (ret_val) + return ret_val; + + reg_data &= ~IFE_PMC_AUTO_MDIX; + + switch (hw->phy.mdix) { + case 1: + reg_data &= ~IFE_PMC_FORCE_MDIX; + break; + case 2: + reg_data |= IFE_PMC_FORCE_MDIX; + break; + case 0: + default: + reg_data |= IFE_PMC_AUTO_MDIX; + break; + } + ret_val = e1e_wphy(hw, IFE_PHY_MDIX_CONTROL, reg_data); + if (ret_val) + return ret_val; + } return e1000e_setup_copper_link(hw); } @@ -2127,6 +2181,31 @@ void 
e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw) } /** + * e1000e_disable_gig_wol_ich8lan - disable gig during WoL + * @hw: pointer to the HW structure + * + * During S0 to Sx transition, it is possible the link remains at gig + * instead of negotiating to a lower speed. Before going to Sx, set + * 'LPLU Enabled' and 'Gig Disable' to force link speed negotiation + * to a lower speed. + * + * Should only be called for ICH9 devices. + **/ +void e1000e_disable_gig_wol_ich8lan(struct e1000_hw *hw) +{ + u32 phy_ctrl; + + if (hw->mac.type == e1000_ich9lan) { + phy_ctrl = er32(PHY_CTRL); + phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU | + E1000_PHY_CTRL_GBE_DISABLE; + ew32(PHY_CTRL, phy_ctrl); + } + + return; +} + +/** * e1000_cleanup_led_ich8lan - Restore the default LED operation * @hw: pointer to the HW structure * @@ -2247,6 +2326,7 @@ static struct e1000_nvm_operations ich8_nvm_ops = { struct e1000_info e1000_ich8_info = { .mac = e1000_ich8lan, .flags = FLAG_HAS_WOL + | FLAG_IS_ICH | FLAG_RX_CSUM_ENABLED | FLAG_HAS_CTRLEXT_ON_LOAD | FLAG_HAS_AMT @@ -2262,6 +2342,7 @@ struct e1000_info e1000_ich8_info = { struct e1000_info e1000_ich9_info = { .mac = e1000_ich9lan, .flags = FLAG_HAS_JUMBO_FRAMES + | FLAG_IS_ICH | FLAG_HAS_WOL | FLAG_RX_CSUM_ENABLED | FLAG_HAS_CTRLEXT_ON_LOAD diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c index 8991ab8911e..8cbb40f3a50 100644 --- a/drivers/net/e1000e/netdev.c +++ b/drivers/net/e1000e/netdev.c @@ -43,10 +43,11 @@ #include <linux/if_vlan.h> #include <linux/cpu.h> #include <linux/smp.h> +#include <linux/pm_qos_params.h> #include "e1000.h" -#define DRV_VERSION "0.2.1" +#define DRV_VERSION "0.3.3.3-k2" char e1000e_driver_name[] = "e1000e"; const char e1000e_driver_version[] = DRV_VERSION; @@ -341,6 +342,89 @@ no_buffers: } /** + * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers + * @adapter: address of board private structure + * @rx_ring: pointer to receive ring structure + * @cleaned_count: number of buffers to allocate this pass + **/ + +static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter, + int cleaned_count) +{ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + struct e1000_rx_desc *rx_desc; + struct e1000_ring *rx_ring = adapter->rx_ring; + struct e1000_buffer *buffer_info; + struct sk_buff *skb; + unsigned int i; + unsigned int bufsz = 256 - + 16 /* for skb_reserve */ - + NET_IP_ALIGN; + + i = rx_ring->next_to_use; + buffer_info = &rx_ring->buffer_info[i]; + + while (cleaned_count--) { + skb = buffer_info->skb; + if (skb) { + skb_trim(skb, 0); + goto check_page; + } + + skb = netdev_alloc_skb(netdev, bufsz); + if (unlikely(!skb)) { + /* Better luck next round */ + adapter->alloc_rx_buff_failed++; + break; + } + + /* Make buffer alignment 2 beyond a 16 byte boundary + * this will result in a 16 byte aligned IP header after + * the 14 byte MAC header is removed + */ + skb_reserve(skb, NET_IP_ALIGN); + + buffer_info->skb = skb; +check_page: + /* allocate a new page if necessary */ + if (!buffer_info->page) { + buffer_info->page = alloc_page(GFP_ATOMIC); + if (unlikely(!buffer_info->page)) { + adapter->alloc_rx_buff_failed++; + break; + } + } + + if (!buffer_info->dma) + buffer_info->dma = pci_map_page(pdev, + buffer_info->page, 0, + PAGE_SIZE, + PCI_DMA_FROMDEVICE); + + rx_desc = E1000_RX_DESC(*rx_ring, i); + rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); + + if (unlikely(++i == rx_ring->count)) + i = 0; + buffer_info = &rx_ring->buffer_info[i]; + } + + if 
(likely(rx_ring->next_to_use != i)) { + rx_ring->next_to_use = i; + if (unlikely(i-- == 0)) + i = (rx_ring->count - 1); + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). */ + wmb(); + writel(i, adapter->hw.hw_addr + rx_ring->tail); + } +} + +/** * e1000_clean_rx_irq - Send received data up the network stack; legacy * @adapter: board private structure * @@ -783,6 +867,186 @@ next_desc: } /** + * e1000_consume_page - helper function + **/ +static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb, + u16 length) +{ + bi->page = NULL; + skb->len += length; + skb->data_len += length; + skb->truesize += length; +} + +/** + * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy + * @adapter: board private structure + * + * the return value indicates whether actual cleaning was done, there + * is no guarantee that everything was cleaned + **/ + +static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, + int *work_done, int work_to_do) +{ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + struct e1000_ring *rx_ring = adapter->rx_ring; + struct e1000_rx_desc *rx_desc, *next_rxd; + struct e1000_buffer *buffer_info, *next_buffer; + u32 length; + unsigned int i; + int cleaned_count = 0; + bool cleaned = false; + unsigned int total_rx_bytes=0, total_rx_packets=0; + + i = rx_ring->next_to_clean; + rx_desc = E1000_RX_DESC(*rx_ring, i); + buffer_info = &rx_ring->buffer_info[i]; + + while (rx_desc->status & E1000_RXD_STAT_DD) { + struct sk_buff *skb; + u8 status; + + if (*work_done >= work_to_do) + break; + (*work_done)++; + + status = rx_desc->status; + skb = buffer_info->skb; + buffer_info->skb = NULL; + + ++i; + if (i == rx_ring->count) + i = 0; + next_rxd = E1000_RX_DESC(*rx_ring, i); + prefetch(next_rxd); + + next_buffer = &rx_ring->buffer_info[i]; + + cleaned = true; + cleaned_count++; + pci_unmap_page(pdev, buffer_info->dma, PAGE_SIZE, + PCI_DMA_FROMDEVICE); + buffer_info->dma = 0; + + length = le16_to_cpu(rx_desc->length); + + /* errors is only valid for DD + EOP descriptors */ + if (unlikely((status & E1000_RXD_STAT_EOP) && + (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) { + /* recycle both page and skb */ + buffer_info->skb = skb; + /* an error means any chain goes out the window + * too */ + if (rx_ring->rx_skb_top) + dev_kfree_skb(rx_ring->rx_skb_top); + rx_ring->rx_skb_top = NULL; + goto next_desc; + } + +#define rxtop rx_ring->rx_skb_top + if (!(status & E1000_RXD_STAT_EOP)) { + /* this descriptor is only the beginning (or middle) */ + if (!rxtop) { + /* this is the beginning of a chain */ + rxtop = skb; + skb_fill_page_desc(rxtop, 0, buffer_info->page, + 0, length); + } else { + /* this is the middle of a chain */ + skb_fill_page_desc(rxtop, + skb_shinfo(rxtop)->nr_frags, + buffer_info->page, 0, length); + /* re-use the skb, only consumed the page */ + buffer_info->skb = skb; + } + e1000_consume_page(buffer_info, rxtop, length); + goto next_desc; + } else { + if (rxtop) { + /* end of the chain */ + skb_fill_page_desc(rxtop, + skb_shinfo(rxtop)->nr_frags, + buffer_info->page, 0, length); + /* re-use the current skb, we only consumed the + * page */ + buffer_info->skb = skb; + skb = rxtop; + rxtop = NULL; + e1000_consume_page(buffer_info, skb, length); + } else { + /* no chain, got EOP, this buf is the packet + * copybreak to save the put_page/alloc_page */ + if (length <= 
copybreak && + skb_tailroom(skb) >= length) { + u8 *vaddr; + vaddr = kmap_atomic(buffer_info->page, + KM_SKB_DATA_SOFTIRQ); + memcpy(skb_tail_pointer(skb), vaddr, + length); + kunmap_atomic(vaddr, + KM_SKB_DATA_SOFTIRQ); + /* re-use the page, so don't erase + * buffer_info->page */ + skb_put(skb, length); + } else { + skb_fill_page_desc(skb, 0, + buffer_info->page, 0, + length); + e1000_consume_page(buffer_info, skb, + length); + } + } + } + + /* Receive Checksum Offload XXX recompute due to CRC strip? */ + e1000_rx_checksum(adapter, + (u32)(status) | + ((u32)(rx_desc->errors) << 24), + le16_to_cpu(rx_desc->csum), skb); + + /* probably a little skewed due to removing CRC */ + total_rx_bytes += skb->len; + total_rx_packets++; + + /* eth type trans needs skb->data to point to something */ + if (!pskb_may_pull(skb, ETH_HLEN)) { + ndev_err(netdev, "pskb_may_pull failed.\n"); + dev_kfree_skb(skb); + goto next_desc; + } + + e1000_receive_skb(adapter, netdev, skb, status, + rx_desc->special); + +next_desc: + rx_desc->status = 0; + + /* return some buffers to hardware, one at a time is too slow */ + if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) { + adapter->alloc_rx_buf(adapter, cleaned_count); + cleaned_count = 0; + } + + /* use prefetched values */ + rx_desc = next_rxd; + buffer_info = next_buffer; + } + rx_ring->next_to_clean = i; + + cleaned_count = e1000_desc_unused(rx_ring); + if (cleaned_count) + adapter->alloc_rx_buf(adapter, cleaned_count); + + adapter->total_rx_bytes += total_rx_bytes; + adapter->total_rx_packets += total_rx_packets; + adapter->net_stats.rx_bytes += total_rx_bytes; + adapter->net_stats.rx_packets += total_rx_packets; + return cleaned; +} + +/** * e1000_clean_rx_ring - Free Rx Buffers per Queue * @adapter: board private structure **/ @@ -802,6 +1066,10 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter) pci_unmap_single(pdev, buffer_info->dma, adapter->rx_buffer_len, PCI_DMA_FROMDEVICE); + else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq) + pci_unmap_page(pdev, buffer_info->dma, + PAGE_SIZE, + PCI_DMA_FROMDEVICE); else if (adapter->clean_rx == e1000_clean_rx_irq_ps) pci_unmap_single(pdev, buffer_info->dma, adapter->rx_ps_bsize0, @@ -809,6 +1077,11 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter) buffer_info->dma = 0; } + if (buffer_info->page) { + put_page(buffer_info->page); + buffer_info->page = NULL; + } + if (buffer_info->skb) { dev_kfree_skb(buffer_info->skb); buffer_info->skb = NULL; @@ -1755,10 +2028,12 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter) * a lot of memory, since we allocate 3 pages at all times * per packet. 
*/ - adapter->rx_ps_pages = 0; pages = PAGE_USE_COUNT(adapter->netdev->mtu); - if ((pages <= 3) && (PAGE_SIZE <= 16384) && (rctl & E1000_RCTL_LPE)) + if (!(adapter->flags & FLAG_IS_ICH) && (pages <= 3) && + (PAGE_SIZE <= 16384) && (rctl & E1000_RCTL_LPE)) adapter->rx_ps_pages = pages; + else + adapter->rx_ps_pages = 0; if (adapter->rx_ps_pages) { /* Configure extra packet-split registers */ @@ -1819,9 +2094,12 @@ static void e1000_configure_rx(struct e1000_adapter *adapter) sizeof(union e1000_rx_desc_packet_split); adapter->clean_rx = e1000_clean_rx_irq_ps; adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps; + } else if (adapter->netdev->mtu > ETH_FRAME_LEN + ETH_FCS_LEN) { + rdlen = rx_ring->count * sizeof(struct e1000_rx_desc); + adapter->clean_rx = e1000_clean_jumbo_rx_irq; + adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers; } else { - rdlen = rx_ring->count * - sizeof(struct e1000_rx_desc); + rdlen = rx_ring->count * sizeof(struct e1000_rx_desc); adapter->clean_rx = e1000_clean_rx_irq; adapter->alloc_rx_buf = e1000_alloc_rx_buffers; } @@ -1885,8 +2163,21 @@ static void e1000_configure_rx(struct e1000_adapter *adapter) * units), e.g. using jumbo frames when setting to E1000_ERT_2048 */ if ((adapter->flags & FLAG_HAS_ERT) && - (adapter->netdev->mtu > ETH_DATA_LEN)) - ew32(ERT, E1000_ERT_2048); + (adapter->netdev->mtu > ETH_DATA_LEN)) { + u32 rxdctl = er32(RXDCTL(0)); + ew32(RXDCTL(0), rxdctl | 0x3); + ew32(ERT, E1000_ERT_2048 | (1 << 13)); + /* + * With jumbo frames and early-receive enabled, excessive + * C4->C2 latencies result in dropped transactions. + */ + pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY, + e1000e_driver_name, 55); + } else { + pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY, + e1000e_driver_name, + PM_QOS_DEFAULT_VALUE); + } /* Enable Receives */ ew32(RCTL, rctl); @@ -2155,6 +2446,14 @@ void e1000e_reset(struct e1000_adapter *adapter) /* Allow time for pending master requests to run */ mac->ops.reset_hw(hw); + + /* + * For parts with AMT enabled, let the firmware know + * that the network interface is in control + */ + if ((adapter->flags & FLAG_HAS_AMT) && e1000e_check_mng_mode(hw)) + e1000_get_hw_control(adapter); + ew32(WUC, 0); if (mac->ops.init_hw(hw)) @@ -3469,6 +3768,8 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu) * means we reserve 2 more, this pushes us to allocate from the next * larger slab size. * i.e. 
RXBUFFER_2048 --> size-4096 slab + * However with the new *_jumbo_rx* routines, jumbo receives will use + * fragmented skbs */ if (max_frame <= 256) @@ -3626,6 +3927,9 @@ static int e1000_suspend(struct pci_dev *pdev, pm_message_t state) ew32(CTRL_EXT, ctrl_ext); } + if (adapter->flags & FLAG_IS_ICH) + e1000e_disable_gig_wol_ich8lan(&adapter->hw); + /* Allow time for pending master requests to run */ e1000e_disable_pcie_master(&adapter->hw); @@ -4292,6 +4596,13 @@ static struct pci_device_id e1000_pci_tbl[] = { { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_GT), board_ich9lan }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_AMT), board_ich9lan }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_C), board_ich9lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M), board_ich9lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_AMT), board_ich9lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_V), board_ich9lan }, + + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LM), board_ich9lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LF), board_ich9lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_V), board_ich9lan }, { } /* terminate list */ }; @@ -4326,7 +4637,9 @@ static int __init e1000_init_module(void) printk(KERN_INFO "%s: Copyright (c) 1999-2008 Intel Corporation.\n", e1000e_driver_name); ret = pci_register_driver(&e1000_driver); - + pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY, e1000e_driver_name, + PM_QOS_DEFAULT_VALUE); + return ret; } module_init(e1000_init_module); @@ -4340,6 +4653,7 @@ module_init(e1000_init_module); static void __exit e1000_exit_module(void) { pci_unregister_driver(&e1000_driver); + pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY, e1000e_driver_name); } module_exit(e1000_exit_module); diff --git a/drivers/net/e1000e/phy.c b/drivers/net/e1000e/phy.c index e102332a6be..b133dcf0e95 100644 --- a/drivers/net/e1000e/phy.c +++ b/drivers/net/e1000e/phy.c @@ -34,6 +34,9 @@ static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw); static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw); static s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active); static s32 e1000_wait_autoneg(struct e1000_hw *hw); +static u32 e1000_get_phy_addr_for_bm_page(u32 page, u32 reg); +static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset, + u16 *data, bool read); /* Cable length tables */ static const u16 e1000_m88_cable_length_table[] = @@ -465,6 +468,10 @@ s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw) if (phy->disable_polarity_correction == 1) phy_data |= M88E1000_PSCR_POLARITY_REVERSAL; + /* Enable downshift on BM (disabled by default) */ + if (phy->type == e1000_phy_bm) + phy_data |= BME1000_PSCR_ENABLE_DOWNSHIFT; + ret_val = e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, phy_data); if (ret_val) return ret_val; @@ -1776,6 +1783,10 @@ enum e1000_phy_type e1000e_get_phy_type_from_id(u32 phy_id) case IFE_C_E_PHY_ID: phy_type = e1000_phy_ife; break; + case BME1000_E_PHY_ID: + case BME1000_E_PHY_ID_R2: + phy_type = e1000_phy_bm; + break; default: phy_type = e1000_phy_unknown; break; @@ -1784,6 +1795,273 @@ enum e1000_phy_type e1000e_get_phy_type_from_id(u32 phy_id) } /** + * e1000e_determine_phy_address - Determines PHY address. + * @hw: pointer to the HW structure + * + * This uses a trial and error method to loop through possible PHY + * addresses. It tests each by reading the PHY ID registers and + * checking for a match. 
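
The trial-and-error probe described in this kernel-doc reduces to a loop over the four possible MDIO addresses, reading the ID registers at each and stopping at the first address whose ID decodes to a known PHY type; the real function below additionally retries the whole scan up to 100 times. A condensed rendering using the driver's own accessors:

    /* Condensed form of the probing loop implemented below (single pass,
     * without the outer retry loop the driver adds). */
    static s32 find_phy_addr(struct e1000_hw *hw)
    {
        u32 addr;

        for (addr = 0; addr < 4; addr++) {
            hw->phy.addr = addr;
            e1000e_get_phy_id(hw);           /* fills hw->phy.id via MDIO */
            if (e1000e_get_phy_type_from_id(hw->phy.id) != e1000_phy_unknown)
                return 0;                    /* hw->phy.addr is now valid */
        }
        return -E1000_ERR_PHY_TYPE;          /* nothing recognisable found */
    }
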
+ **/ +s32 e1000e_determine_phy_address(struct e1000_hw *hw) +{ + s32 ret_val = -E1000_ERR_PHY_TYPE; + u32 phy_addr= 0; + u32 i = 0; + enum e1000_phy_type phy_type = e1000_phy_unknown; + + do { + for (phy_addr = 0; phy_addr < 4; phy_addr++) { + hw->phy.addr = phy_addr; + e1000e_get_phy_id(hw); + phy_type = e1000e_get_phy_type_from_id(hw->phy.id); + + /* + * If phy_type is valid, break - we found our + * PHY address + */ + if (phy_type != e1000_phy_unknown) { + ret_val = 0; + break; + } + } + i++; + } while ((ret_val != 0) && (i < 100)); + + return ret_val; +} + +/** + * e1000_get_phy_addr_for_bm_page - Retrieve PHY page address + * @page: page to access + * + * Returns the phy address for the page requested. + **/ +static u32 e1000_get_phy_addr_for_bm_page(u32 page, u32 reg) +{ + u32 phy_addr = 2; + + if ((page >= 768) || (page == 0 && reg == 25) || (reg == 31)) + phy_addr = 1; + + return phy_addr; +} + +/** + * e1000e_write_phy_reg_bm - Write BM PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Acquires semaphore, if necessary, then writes the data to PHY register + * at the offset. Release any acquired semaphores before exiting. + **/ +s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data) +{ + s32 ret_val; + u32 page_select = 0; + u32 page = offset >> IGP_PAGE_SHIFT; + u32 page_shift = 0; + + /* Page 800 works differently than the rest so it has its own func */ + if (page == BM_WUC_PAGE) { + ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data, + false); + goto out; + } + + ret_val = hw->phy.ops.acquire_phy(hw); + if (ret_val) + goto out; + + hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset); + + if (offset > MAX_PHY_MULTI_PAGE_REG) { + /* + * Page select is register 31 for phy address 1 and 22 for + * phy address 2 and 3. Page select is shifted only for + * phy address 1. + */ + if (hw->phy.addr == 1) { + page_shift = IGP_PAGE_SHIFT; + page_select = IGP01E1000_PHY_PAGE_SELECT; + } else { + page_shift = 0; + page_select = BM_PHY_PAGE_SELECT; + } + + /* Page is shifted left, PHY expects (page x 32) */ + ret_val = e1000e_write_phy_reg_mdic(hw, page_select, + (page << page_shift)); + if (ret_val) { + hw->phy.ops.release_phy(hw); + goto out; + } + } + + ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, + data); + + hw->phy.ops.release_phy(hw); + +out: + return ret_val; +} + +/** + * e1000e_read_phy_reg_bm - Read BM PHY register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Acquires semaphore, if necessary, then reads the PHY register at offset + * and storing the retrieved information in data. Release any acquired + * semaphores before exiting. + **/ +s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data) +{ + s32 ret_val; + u32 page_select = 0; + u32 page = offset >> IGP_PAGE_SHIFT; + u32 page_shift = 0; + + /* Page 800 works differently than the rest so it has its own func */ + if (page == BM_WUC_PAGE) { + ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data, + true); + goto out; + } + + ret_val = hw->phy.ops.acquire_phy(hw); + if (ret_val) + goto out; + + hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset); + + if (offset > MAX_PHY_MULTI_PAGE_REG) { + /* + * Page select is register 31 for phy address 1 and 22 for + * phy address 2 and 3. Page select is shifted only for + * phy address 1. 
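
Callers of these BM accessors encode the target page in the offset bits above IGP_PAGE_SHIFT (5), since the functions recover it with page = offset >> IGP_PAGE_SHIFT and mask the register with MAX_PHY_REG_ADDRESS. An illustrative usage only, reading register 17 on page 769 (the wakeup-control register used later in this patch):

    /* Illustrative: offset = (page << IGP_PAGE_SHIFT) | reg. */
    static void bm_read_example(struct e1000_hw *hw)
    {
        u16 val;

        if (!e1000e_read_phy_reg_bm(hw, (769 << 5) | 17, &val))
            printk(KERN_DEBUG "page 769 reg 17 = 0x%04x\n", val);
    }
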
+ */ + if (hw->phy.addr == 1) { + page_shift = IGP_PAGE_SHIFT; + page_select = IGP01E1000_PHY_PAGE_SELECT; + } else { + page_shift = 0; + page_select = BM_PHY_PAGE_SELECT; + } + + /* Page is shifted left, PHY expects (page x 32) */ + ret_val = e1000e_write_phy_reg_mdic(hw, page_select, + (page << page_shift)); + if (ret_val) { + hw->phy.ops.release_phy(hw); + goto out; + } + } + + ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, + data); + hw->phy.ops.release_phy(hw); + +out: + return ret_val; +} + +/** + * e1000_access_phy_wakeup_reg_bm - Read BM PHY wakeup register + * @hw: pointer to the HW structure + * @offset: register offset to be read or written + * @data: pointer to the data to read or write + * @read: determines if operation is read or write + * + * Acquires semaphore, if necessary, then reads the PHY register at offset + * and storing the retrieved information in data. Release any acquired + * semaphores before exiting. Note that procedure to read the wakeup + * registers are different. It works as such: + * 1) Set page 769, register 17, bit 2 = 1 + * 2) Set page to 800 for host (801 if we were manageability) + * 3) Write the address using the address opcode (0x11) + * 4) Read or write the data using the data opcode (0x12) + * 5) Restore 769_17.2 to its original value + **/ +static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset, + u16 *data, bool read) +{ + s32 ret_val; + u16 reg = ((u16)offset) & PHY_REG_MASK; + u16 phy_reg = 0; + u8 phy_acquired = 1; + + + ret_val = hw->phy.ops.acquire_phy(hw); + if (ret_val) { + phy_acquired = 0; + goto out; + } + + /* All operations in this function are phy address 1 */ + hw->phy.addr = 1; + + /* Set page 769 */ + e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, + (BM_WUC_ENABLE_PAGE << IGP_PAGE_SHIFT)); + + ret_val = e1000e_read_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, &phy_reg); + if (ret_val) + goto out; + + /* First clear bit 4 to avoid a power state change */ + phy_reg &= ~(BM_WUC_HOST_WU_BIT); + ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, phy_reg); + if (ret_val) + goto out; + + /* Write bit 2 = 1, and clear bit 4 to 769_17 */ + ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, + phy_reg | BM_WUC_ENABLE_BIT); + if (ret_val) + goto out; + + /* Select page 800 */ + ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, + (BM_WUC_PAGE << IGP_PAGE_SHIFT)); + + /* Write the page 800 offset value using opcode 0x11 */ + ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ADDRESS_OPCODE, reg); + if (ret_val) + goto out; + + if (read) { + /* Read the page 800 value using opcode 0x12 */ + ret_val = e1000e_read_phy_reg_mdic(hw, BM_WUC_DATA_OPCODE, + data); + } else { + /* Read the page 800 value using opcode 0x12 */ + ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_DATA_OPCODE, + *data); + } + + if (ret_val) + goto out; + + /* + * Restore 769_17.2 to its original value + * Set page 769 + */ + e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, + (BM_WUC_ENABLE_PAGE << IGP_PAGE_SHIFT)); + + /* Clear 769_17.2 */ + ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, phy_reg); + +out: + if (phy_acquired == 1) + hw->phy.ops.release_phy(hw); + return ret_val; +} + +/** * e1000e_commit_phy - Soft PHY reset * @hw: pointer to the HW structure * diff --git a/drivers/net/eexpress.c b/drivers/net/eexpress.c index 2eb82aba4a8..795c594a4b7 100644 --- a/drivers/net/eexpress.c +++ b/drivers/net/eexpress.c @@ -202,7 +202,7 @@ static unsigned short start_code[] = { 
0x0000,Cmd_MCast, 0x0076, /* link to next command */ #define CONF_NR_MULTICAST 0x44 - 0x0000, /* number of multicast addresses */ + 0x0000, /* number of bytes in multicast address(es) */ #define CONF_MULTICAST 0x46 0x0000, 0x0000, 0x0000, /* some addresses */ 0x0000, 0x0000, 0x0000, @@ -1569,7 +1569,7 @@ static void eexp_hw_init586(struct net_device *dev) static void eexp_setup_filter(struct net_device *dev) { - struct dev_mc_list *dmi = dev->mc_list; + struct dev_mc_list *dmi; unsigned short ioaddr = dev->base_addr; int count = dev->mc_count; int i; @@ -1580,9 +1580,9 @@ static void eexp_setup_filter(struct net_device *dev) } outw(CONF_NR_MULTICAST & ~31, ioaddr+SM_PTR); - outw(count, ioaddr+SHADOW(CONF_NR_MULTICAST)); - for (i = 0; i < count; i++) { - unsigned short *data = (unsigned short *)dmi->dmi_addr; + outw(6*count, ioaddr+SHADOW(CONF_NR_MULTICAST)); + for (i = 0, dmi = dev->mc_list; i < count; i++, dmi = dmi->next) { + unsigned short *data; if (!dmi) { printk(KERN_INFO "%s: too few multicast addresses\n", dev->name); break; @@ -1591,6 +1591,7 @@ static void eexp_setup_filter(struct net_device *dev) printk(KERN_INFO "%s: invalid multicast address length given.\n", dev->name); continue; } + data = (unsigned short *)dmi->dmi_addr; outw((CONF_MULTICAST+(6*i)) & ~31, ioaddr+SM_PTR); outw(data[0], ioaddr+SHADOW(CONF_MULTICAST+(6*i))); outw((CONF_MULTICAST+(6*i)+2) & ~31, ioaddr+SM_PTR); diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h index f5dacceab95..fe872fbd671 100644 --- a/drivers/net/ehea/ehea.h +++ b/drivers/net/ehea/ehea.h @@ -40,7 +40,7 @@ #include <asm/io.h> #define DRV_NAME "ehea" -#define DRV_VERSION "EHEA_0090" +#define DRV_VERSION "EHEA_0091" /* eHEA capability flags */ #define DLPAR_PORT_ADD_REM 1 @@ -118,6 +118,13 @@ #define EHEA_MR_ACC_CTRL 0x00800000 #define EHEA_BUSMAP_START 0x8000000000000000ULL +#define EHEA_INVAL_ADDR 0xFFFFFFFFFFFFFFFFULL +#define EHEA_DIR_INDEX_SHIFT 13 /* 8k Entries in 64k block */ +#define EHEA_TOP_INDEX_SHIFT (EHEA_DIR_INDEX_SHIFT * 2) +#define EHEA_MAP_ENTRIES (1 << EHEA_DIR_INDEX_SHIFT) +#define EHEA_MAP_SIZE (0x10000) /* currently fixed map size */ +#define EHEA_INDEX_MASK (EHEA_MAP_ENTRIES - 1) + #define EHEA_WATCH_DOG_TIMEOUT 10*HZ @@ -192,10 +199,20 @@ struct h_epas { set to 0 if unused */ }; -struct ehea_busmap { - unsigned int entries; /* total number of entries */ - unsigned int valid_sections; /* number of valid sections */ - u64 *vaddr; +/* + * Memory map data structures + */ +struct ehea_dir_bmap +{ + u64 ent[EHEA_MAP_ENTRIES]; +}; +struct ehea_top_bmap +{ + struct ehea_dir_bmap *dir[EHEA_MAP_ENTRIES]; +}; +struct ehea_bmap +{ + struct ehea_top_bmap *top[EHEA_MAP_ENTRIES]; }; struct ehea_qp; diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c index f9bc21c74b5..d1b6d4e7495 100644 --- a/drivers/net/ehea/ehea_main.c +++ b/drivers/net/ehea/ehea_main.c @@ -35,6 +35,7 @@ #include <linux/if_ether.h> #include <linux/notifier.h> #include <linux/reboot.h> +#include <linux/memory.h> #include <asm/kexec.h> #include <linux/mutex.h> @@ -3503,6 +3504,24 @@ void ehea_crash_handler(void) 0, H_DEREG_BCMC); } +static int ehea_mem_notifier(struct notifier_block *nb, + unsigned long action, void *data) +{ + switch (action) { + case MEM_OFFLINE: + ehea_info("memory has been removed"); + ehea_rereg_mrs(NULL); + break; + default: + break; + } + return NOTIFY_OK; +} + +static struct notifier_block ehea_mem_nb = { + .notifier_call = ehea_mem_notifier, +}; + static int ehea_reboot_notifier(struct notifier_block *nb, 
unsigned long action, void *unused) { @@ -3581,6 +3600,10 @@ int __init ehea_module_init(void) if (ret) ehea_info("failed registering reboot notifier"); + ret = register_memory_notifier(&ehea_mem_nb); + if (ret) + ehea_info("failed registering memory remove notifier"); + ret = crash_shutdown_register(&ehea_crash_handler); if (ret) ehea_info("failed registering crash handler"); @@ -3604,6 +3627,7 @@ int __init ehea_module_init(void) out3: ibmebus_unregister_driver(&ehea_driver); out2: + unregister_memory_notifier(&ehea_mem_nb); unregister_reboot_notifier(&ehea_reboot_nb); crash_shutdown_unregister(&ehea_crash_handler); out: @@ -3621,6 +3645,7 @@ static void __exit ehea_module_exit(void) ret = crash_shutdown_unregister(&ehea_crash_handler); if (ret) ehea_info("failed unregistering crash handler"); + unregister_memory_notifier(&ehea_mem_nb); kfree(ehea_fw_handles.arr); kfree(ehea_bcmc_regs.arr); ehea_destroy_busmap(); diff --git a/drivers/net/ehea/ehea_qmr.c b/drivers/net/ehea/ehea_qmr.c index d522e905f46..140f05baafd 100644 --- a/drivers/net/ehea/ehea_qmr.c +++ b/drivers/net/ehea/ehea_qmr.c @@ -31,8 +31,8 @@ #include "ehea_phyp.h" #include "ehea_qmr.h" +struct ehea_bmap *ehea_bmap = NULL; -struct ehea_busmap ehea_bmap = { 0, 0, NULL }; static void *hw_qpageit_get_inc(struct hw_queue *queue) @@ -559,125 +559,253 @@ int ehea_destroy_qp(struct ehea_qp *qp) return 0; } -int ehea_create_busmap(void) +static inline int ehea_calc_index(unsigned long i, unsigned long s) { - u64 vaddr = EHEA_BUSMAP_START; - unsigned long high_section_index = 0; - int i; + return (i >> s) & EHEA_INDEX_MASK; +} - /* - * Sections are not in ascending order -> Loop over all sections and - * find the highest PFN to compute the required map size. - */ - ehea_bmap.valid_sections = 0; +static inline int ehea_init_top_bmap(struct ehea_top_bmap *ehea_top_bmap, + int dir) +{ + if(!ehea_top_bmap->dir[dir]) { + ehea_top_bmap->dir[dir] = + kzalloc(sizeof(struct ehea_dir_bmap), GFP_KERNEL); + if (!ehea_top_bmap->dir[dir]) + return -ENOMEM; + } + return 0; +} - for (i = 0; i < NR_MEM_SECTIONS; i++) - if (valid_section_nr(i)) - high_section_index = i; +static inline int ehea_init_bmap(struct ehea_bmap *ehea_bmap, int top, int dir) +{ + if(!ehea_bmap->top[top]) { + ehea_bmap->top[top] = + kzalloc(sizeof(struct ehea_top_bmap), GFP_KERNEL); + if (!ehea_bmap->top[top]) + return -ENOMEM; + } + return ehea_init_top_bmap(ehea_bmap->top[top], dir); +} - ehea_bmap.entries = high_section_index + 1; - ehea_bmap.vaddr = vmalloc(ehea_bmap.entries * sizeof(*ehea_bmap.vaddr)); +static int ehea_create_busmap_callback(unsigned long pfn, + unsigned long nr_pages, void *arg) +{ + unsigned long i, mr_len, start_section, end_section; + start_section = (pfn * PAGE_SIZE) / EHEA_SECTSIZE; + end_section = start_section + ((nr_pages * PAGE_SIZE) / EHEA_SECTSIZE); + mr_len = *(unsigned long *)arg; - if (!ehea_bmap.vaddr) + ehea_bmap = kzalloc(sizeof(struct ehea_bmap), GFP_KERNEL); + if (!ehea_bmap) return -ENOMEM; - for (i = 0 ; i < ehea_bmap.entries; i++) { - unsigned long pfn = section_nr_to_pfn(i); + for (i = start_section; i < end_section; i++) { + int ret; + int top, dir, idx; + u64 vaddr; + + top = ehea_calc_index(i, EHEA_TOP_INDEX_SHIFT); + dir = ehea_calc_index(i, EHEA_DIR_INDEX_SHIFT); + + ret = ehea_init_bmap(ehea_bmap, top, dir); + if(ret) + return ret; - if (pfn_valid(pfn)) { - ehea_bmap.vaddr[i] = vaddr; - vaddr += EHEA_SECTSIZE; - ehea_bmap.valid_sections++; - } else - ehea_bmap.vaddr[i] = 0; + idx = i & EHEA_INDEX_MASK; + vaddr = 
EHEA_BUSMAP_START + mr_len + i * EHEA_SECTSIZE; + + ehea_bmap->top[top]->dir[dir]->ent[idx] = vaddr; } + mr_len += nr_pages * PAGE_SIZE; + *(unsigned long *)arg = mr_len; + return 0; } +static unsigned long ehea_mr_len; + +static DEFINE_MUTEX(ehea_busmap_mutex); + +int ehea_create_busmap(void) +{ + int ret; + mutex_lock(&ehea_busmap_mutex); + ehea_mr_len = 0; + ret = walk_memory_resource(0, 1ULL << MAX_PHYSMEM_BITS, &ehea_mr_len, + ehea_create_busmap_callback); + mutex_unlock(&ehea_busmap_mutex); + return ret; +} + void ehea_destroy_busmap(void) { - vfree(ehea_bmap.vaddr); + int top, dir; + mutex_lock(&ehea_busmap_mutex); + if (!ehea_bmap) + goto out_destroy; + + for (top = 0; top < EHEA_MAP_ENTRIES; top++) { + if (!ehea_bmap->top[top]) + continue; + + for (dir = 0; dir < EHEA_MAP_ENTRIES; dir++) { + if (!ehea_bmap->top[top]->dir[dir]) + continue; + + kfree(ehea_bmap->top[top]->dir[dir]); + } + + kfree(ehea_bmap->top[top]); + } + + kfree(ehea_bmap); + ehea_bmap = NULL; +out_destroy: + mutex_unlock(&ehea_busmap_mutex); } u64 ehea_map_vaddr(void *caddr) { - u64 mapped_addr; - unsigned long index = __pa(caddr) >> SECTION_SIZE_BITS; - - if (likely(index < ehea_bmap.entries)) { - mapped_addr = ehea_bmap.vaddr[index]; - if (likely(mapped_addr)) - mapped_addr |= (((unsigned long)caddr) - & (EHEA_SECTSIZE - 1)); - else - mapped_addr = -1; - } else - mapped_addr = -1; - - if (unlikely(mapped_addr == -1)) - if (!test_and_set_bit(__EHEA_STOP_XFER, &ehea_driver_flags)) - schedule_work(&ehea_rereg_mr_task); - - return mapped_addr; + int top, dir, idx; + unsigned long index, offset; + + if (!ehea_bmap) + return EHEA_INVAL_ADDR; + + index = virt_to_abs(caddr) >> SECTION_SIZE_BITS; + top = (index >> EHEA_TOP_INDEX_SHIFT) & EHEA_INDEX_MASK; + if (!ehea_bmap->top[top]) + return EHEA_INVAL_ADDR; + + dir = (index >> EHEA_DIR_INDEX_SHIFT) & EHEA_INDEX_MASK; + if (!ehea_bmap->top[top]->dir[dir]) + return EHEA_INVAL_ADDR; + + idx = index & EHEA_INDEX_MASK; + if (!ehea_bmap->top[top]->dir[dir]->ent[idx]) + return EHEA_INVAL_ADDR; + + offset = (unsigned long)caddr & (EHEA_SECTSIZE - 1); + return ehea_bmap->top[top]->dir[dir]->ent[idx] | offset; +} + +static inline void *ehea_calc_sectbase(int top, int dir, int idx) +{ + unsigned long ret = idx; + ret |= dir << EHEA_DIR_INDEX_SHIFT; + ret |= top << EHEA_TOP_INDEX_SHIFT; + return abs_to_virt(ret << SECTION_SIZE_BITS); +} + +static u64 ehea_reg_mr_section(int top, int dir, int idx, u64 *pt, + struct ehea_adapter *adapter, + struct ehea_mr *mr) +{ + void *pg; + u64 j, m, hret; + unsigned long k = 0; + u64 pt_abs = virt_to_abs(pt); + + void *sectbase = ehea_calc_sectbase(top, dir, idx); + + for (j = 0; j < (EHEA_PAGES_PER_SECTION / EHEA_MAX_RPAGE); j++) { + + for (m = 0; m < EHEA_MAX_RPAGE; m++) { + pg = sectbase + ((k++) * EHEA_PAGESIZE); + pt[m] = virt_to_abs(pg); + } + hret = ehea_h_register_rpage_mr(adapter->handle, mr->handle, 0, + 0, pt_abs, EHEA_MAX_RPAGE); + + if ((hret != H_SUCCESS) + && (hret != H_PAGE_REGISTERED)) { + ehea_h_free_resource(adapter->handle, mr->handle, + FORCE_FREE); + ehea_error("register_rpage_mr failed"); + return hret; + } + } + return hret; +} + +static u64 ehea_reg_mr_sections(int top, int dir, u64 *pt, + struct ehea_adapter *adapter, + struct ehea_mr *mr) +{ + u64 hret = H_SUCCESS; + int idx; + + for (idx = 0; idx < EHEA_MAP_ENTRIES; idx++) { + if (!ehea_bmap->top[top]->dir[dir]->ent[idx]) + continue; + + hret = ehea_reg_mr_section(top, dir, idx, pt, adapter, mr); + if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED)) + return hret; 
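
The new ehea busmap is a sparse two-level radix tree indexed by memory-section number, replacing the flat vmalloc'd array; ehea_map_vaddr() above splits the section number using the shifts from ehea.h (EHEA_DIR_INDEX_SHIFT = 13, EHEA_TOP_INDEX_SHIFT = 26, EHEA_INDEX_MASK = 8191). The lookup in condensed form:

    /* Condensed form of the three-way index split used above. */
    static u64 busmap_lookup(struct ehea_bmap *bmap, unsigned long sec)
    {
        int top = (sec >> EHEA_TOP_INDEX_SHIFT) & EHEA_INDEX_MASK;
        int dir = (sec >> EHEA_DIR_INDEX_SHIFT) & EHEA_INDEX_MASK;
        int idx = sec & EHEA_INDEX_MASK;

        if (!bmap || !bmap->top[top] || !bmap->top[top]->dir[dir])
            return EHEA_INVAL_ADDR;          /* hole in the sparse map */
        return bmap->top[top]->dir[dir]->ent[idx];
    }
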
+ } + return hret; +} + +static u64 ehea_reg_mr_dir_sections(int top, u64 *pt, + struct ehea_adapter *adapter, + struct ehea_mr *mr) +{ + u64 hret = H_SUCCESS; + int dir; + + for (dir = 0; dir < EHEA_MAP_ENTRIES; dir++) { + if (!ehea_bmap->top[top]->dir[dir]) + continue; + + hret = ehea_reg_mr_sections(top, dir, pt, adapter, mr); + if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED)) + return hret; + } + return hret; } int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr) { int ret; u64 *pt; - void *pg; - u64 hret, pt_abs, i, j, m, mr_len; + u64 hret; u32 acc_ctrl = EHEA_MR_ACC_CTRL; - mr_len = ehea_bmap.valid_sections * EHEA_SECTSIZE; + unsigned long top; - pt = kzalloc(PAGE_SIZE, GFP_KERNEL); + pt = kzalloc(PAGE_SIZE, GFP_KERNEL); if (!pt) { ehea_error("no mem"); ret = -ENOMEM; goto out; } - pt_abs = virt_to_abs(pt); - hret = ehea_h_alloc_resource_mr(adapter->handle, - EHEA_BUSMAP_START, mr_len, - acc_ctrl, adapter->pd, + hret = ehea_h_alloc_resource_mr(adapter->handle, EHEA_BUSMAP_START, + ehea_mr_len, acc_ctrl, adapter->pd, &mr->handle, &mr->lkey); + if (hret != H_SUCCESS) { ehea_error("alloc_resource_mr failed"); ret = -EIO; goto out; } - for (i = 0 ; i < ehea_bmap.entries; i++) - if (ehea_bmap.vaddr[i]) { - void *sectbase = __va(i << SECTION_SIZE_BITS); - unsigned long k = 0; - - for (j = 0; j < (EHEA_PAGES_PER_SECTION / - EHEA_MAX_RPAGE); j++) { - - for (m = 0; m < EHEA_MAX_RPAGE; m++) { - pg = sectbase + ((k++) * EHEA_PAGESIZE); - pt[m] = virt_to_abs(pg); - } - - hret = ehea_h_register_rpage_mr(adapter->handle, - mr->handle, - 0, 0, pt_abs, - EHEA_MAX_RPAGE); - if ((hret != H_SUCCESS) - && (hret != H_PAGE_REGISTERED)) { - ehea_h_free_resource(adapter->handle, - mr->handle, - FORCE_FREE); - ehea_error("register_rpage_mr failed"); - ret = -EIO; - goto out; - } - } - } + if (!ehea_bmap) { + ehea_h_free_resource(adapter->handle, mr->handle, FORCE_FREE); + ehea_error("no busmap available"); + ret = -EIO; + goto out; + } + + for (top = 0; top < EHEA_MAP_ENTRIES; top++) { + if (!ehea_bmap->top[top]) + continue; + + hret = ehea_reg_mr_dir_sections(top, pt, adapter, mr); + if((hret != H_PAGE_REGISTERED) && (hret != H_SUCCESS)) + break; + } if (hret != H_SUCCESS) { ehea_h_free_resource(adapter->handle, mr->handle, FORCE_FREE); diff --git a/drivers/net/fs_enet/mii-fec.c b/drivers/net/fs_enet/mii-fec.c index ba75efc9f5b..f0014cfbb27 100644 --- a/drivers/net/fs_enet/mii-fec.c +++ b/drivers/net/fs_enet/mii-fec.c @@ -194,7 +194,7 @@ static int __devinit fs_enet_mdio_probe(struct of_device *ofdev, ret = of_address_to_resource(ofdev->node, 0, &res); if (ret) - return ret; + goto out_res; snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", res.start); @@ -236,6 +236,7 @@ out_free_irqs: kfree(new_bus->irq); out_unmap_regs: iounmap(fec->fecp); +out_res: out_fec: kfree(fec); out_mii: diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c index 642dc633b44..393a0f17530 100644 --- a/drivers/net/gianfar.c +++ b/drivers/net/gianfar.c @@ -138,6 +138,7 @@ static int gfar_poll(struct napi_struct *napi, int budget); static void gfar_netpoll(struct net_device *dev); #endif int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit); +static int gfar_clean_tx_ring(struct net_device *dev); static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, int length); static void gfar_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp); @@ -634,6 +635,8 @@ static void free_skb_resources(struct gfar_private *priv) dev_kfree_skb_any(priv->tx_skbuff[i]); 
priv->tx_skbuff[i] = NULL; } + + txbdp++; } kfree(priv->tx_skbuff); @@ -1141,7 +1144,7 @@ static int gfar_close(struct net_device *dev) } /* Changes the mac address if the controller is not running. */ -int gfar_set_mac_address(struct net_device *dev) +static int gfar_set_mac_address(struct net_device *dev) { gfar_set_mac_for_addr(dev, 0, dev->dev_addr); @@ -1260,7 +1263,7 @@ static void gfar_timeout(struct net_device *dev) } /* Interrupt Handler for Transmit complete */ -int gfar_clean_tx_ring(struct net_device *dev) +static int gfar_clean_tx_ring(struct net_device *dev) { struct txbd8 *bdp; struct gfar_private *priv = netdev_priv(dev); diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h index fd487be3993..27f37c81e52 100644 --- a/drivers/net/gianfar.h +++ b/drivers/net/gianfar.h @@ -782,5 +782,8 @@ extern void gfar_halt(struct net_device *dev); extern void gfar_phy_test(struct mii_bus *bus, struct phy_device *phydev, int enable, u32 regnum, u32 read); void gfar_init_sysfs(struct net_device *dev); +int gfar_local_mdio_write(struct gfar_mii __iomem *regs, int mii_id, + int regnum, u16 value); +int gfar_local_mdio_read(struct gfar_mii __iomem *regs, int mii_id, int regnum); #endif /* __GIANFAR_H */ diff --git a/drivers/net/gianfar_sysfs.c b/drivers/net/gianfar_sysfs.c index 230878b9419..5116f68e01b 100644 --- a/drivers/net/gianfar_sysfs.c +++ b/drivers/net/gianfar_sysfs.c @@ -103,10 +103,10 @@ static ssize_t gfar_set_rx_stash_size(struct device *dev, spin_lock_irqsave(&priv->rxlock, flags); if (length > priv->rx_buffer_size) - return count; + goto out; if (length == priv->rx_stash_size) - return count; + goto out; priv->rx_stash_size = length; @@ -125,6 +125,7 @@ static ssize_t gfar_set_rx_stash_size(struct device *dev, gfar_write(&priv->regs->attr, temp); +out: spin_unlock_irqrestore(&priv->rxlock, flags); return count; @@ -154,10 +155,10 @@ static ssize_t gfar_set_rx_stash_index(struct device *dev, spin_lock_irqsave(&priv->rxlock, flags); if (index > priv->rx_stash_size) - return count; + goto out; if (index == priv->rx_stash_index) - return count; + goto out; priv->rx_stash_index = index; @@ -166,6 +167,7 @@ static ssize_t gfar_set_rx_stash_index(struct device *dev, temp |= ATTRELI_EI(index); gfar_write(&priv->regs->attreli, flags); +out: spin_unlock_irqrestore(&priv->rxlock, flags); return count; diff --git a/drivers/net/irda/nsc-ircc.c b/drivers/net/irda/nsc-ircc.c index a873d2b315c..a7714da7c28 100644 --- a/drivers/net/irda/nsc-ircc.c +++ b/drivers/net/irda/nsc-ircc.c @@ -100,7 +100,9 @@ static int nsc_ircc_probe_39x(nsc_chip_t *chip, chipio_t *info); static int nsc_ircc_init_108(nsc_chip_t *chip, chipio_t *info); static int nsc_ircc_init_338(nsc_chip_t *chip, chipio_t *info); static int nsc_ircc_init_39x(nsc_chip_t *chip, chipio_t *info); +#ifdef CONFIG_PNP static int nsc_ircc_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *id); +#endif /* These are the known NSC chips */ static nsc_chip_t chips[] = { @@ -156,9 +158,11 @@ static const struct pnp_device_id nsc_ircc_pnp_table[] = { MODULE_DEVICE_TABLE(pnp, nsc_ircc_pnp_table); static struct pnp_driver nsc_ircc_pnp_driver = { +#ifdef CONFIG_PNP .name = "nsc-ircc", .id_table = nsc_ircc_pnp_table, .probe = nsc_ircc_pnp_probe, +#endif }; /* Some prototypes */ @@ -916,6 +920,7 @@ static int nsc_ircc_probe_39x(nsc_chip_t *chip, chipio_t *info) return 0; } +#ifdef CONFIG_PNP /* PNP probing */ static int nsc_ircc_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *id) { @@ -952,6 +957,7 @@ static int 
nsc_ircc_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *i return 0; } +#endif /* * Function nsc_ircc_setup (info) diff --git a/drivers/net/irda/smsc-ircc2.c b/drivers/net/irda/smsc-ircc2.c index 1f26da761e9..cfe0194fef7 100644 --- a/drivers/net/irda/smsc-ircc2.c +++ b/drivers/net/irda/smsc-ircc2.c @@ -376,6 +376,7 @@ MODULE_DEVICE_TABLE(pnp, smsc_ircc_pnp_table); static int pnp_driver_registered; +#ifdef CONFIG_PNP static int __init smsc_ircc_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id) { @@ -402,7 +403,9 @@ static struct pnp_driver smsc_ircc_pnp_driver = { .id_table = smsc_ircc_pnp_table, .probe = smsc_ircc_pnp_probe, }; - +#else /* CONFIG_PNP */ +static struct pnp_driver smsc_ircc_pnp_driver; +#endif /******************************************************************************* * diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 2056cfc624d..c36a03ae9bf 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -450,7 +450,7 @@ static void macvlan_dellink(struct net_device *dev) unregister_netdevice(dev); if (list_empty(&port->vlans)) - macvlan_port_destroy(dev); + macvlan_port_destroy(port->dev); } static struct rtnl_link_ops macvlan_link_ops __read_mostly = { diff --git a/drivers/net/mlx4/mr.c b/drivers/net/mlx4/mr.c index cb46446b269..03a9abcce52 100644 --- a/drivers/net/mlx4/mr.c +++ b/drivers/net/mlx4/mr.c @@ -551,7 +551,7 @@ int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages, u64 mtt_seg; int err = -ENOMEM; - if (page_shift < 12 || page_shift >= 32) + if (page_shift < (ffs(dev->caps.page_size_cap) - 1) || page_shift >= 32) return -EINVAL; /* All MTTs must fit in the same page */ diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c index 381b36e5f64..b7915cdcc6a 100644 --- a/drivers/net/mv643xx_eth.c +++ b/drivers/net/mv643xx_eth.c @@ -91,6 +91,11 @@ */ #define PHY_ADDR_REG 0x0000 #define SMI_REG 0x0004 +#define WINDOW_BASE(i) (0x0200 + ((i) << 3)) +#define WINDOW_SIZE(i) (0x0204 + ((i) << 3)) +#define WINDOW_REMAP_HIGH(i) (0x0280 + ((i) << 2)) +#define WINDOW_BAR_ENABLE 0x0290 +#define WINDOW_PROTECT(i) (0x0294 + ((i) << 4)) /* * Per-port registers. 
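
The mlx4 FMR change above derives the minimum page shift from the device's capability mask instead of hard-coding 12: ffs() returns the 1-based index of the lowest set bit, so ffs(page_size_cap) - 1 is the log2 of the smallest page size the HCA supports. A worked example with a hypothetical capability mask:

    /* Worked example of the ffs()-derived bound in mlx4_fmr_alloc().
     * A device supporting 4K, 8K and 2M pages would report
     * page_size_cap = (1 << 12) | (1 << 13) | (1 << 21). */
    static int check_page_shift(u32 page_size_cap, int page_shift)
    {
        int min_shift = ffs(page_size_cap) - 1;   /* 12 in the example */

        if (page_shift < min_shift || page_shift >= 32)
            return -EINVAL;          /* shift the HCA cannot honour */
        return 0;
    }
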
@@ -507,9 +512,23 @@ struct mv643xx_mib_counters { u32 late_collision; }; +struct mv643xx_shared_private { + void __iomem *eth_base; + + /* used to protect SMI_REG, which is shared across ports */ + spinlock_t phy_lock; + + u32 win_protect; + + unsigned int t_clk; +}; + struct mv643xx_private { + struct mv643xx_shared_private *shared; int port_num; /* User Ethernet port number */ + struct mv643xx_shared_private *shared_smi; + u32 rx_sram_addr; /* Base address of rx sram area */ u32 rx_sram_size; /* Size of rx sram area */ u32 tx_sram_addr; /* Base address of tx sram area */ @@ -614,19 +633,14 @@ static const struct ethtool_ops mv643xx_ethtool_ops; static char mv643xx_driver_name[] = "mv643xx_eth"; static char mv643xx_driver_version[] = "1.0"; -static void __iomem *mv643xx_eth_base; - -/* used to protect SMI_REG, which is shared across ports */ -static DEFINE_SPINLOCK(mv643xx_eth_phy_lock); - static inline u32 rdl(struct mv643xx_private *mp, int offset) { - return readl(mv643xx_eth_base + offset); + return readl(mp->shared->eth_base + offset); } static inline void wrl(struct mv643xx_private *mp, int offset, u32 data) { - writel(data, mv643xx_eth_base + offset); + writel(data, mp->shared->eth_base + offset); } /* @@ -1119,7 +1133,6 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id) * * INPUT: * struct mv643xx_private *mp Ethernet port - * unsigned int t_clk t_clk of the MV-643xx chip in HZ units * unsigned int delay Delay in usec * * OUTPUT: @@ -1130,10 +1143,10 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id) * */ static unsigned int eth_port_set_rx_coal(struct mv643xx_private *mp, - unsigned int t_clk, unsigned int delay) + unsigned int delay) { unsigned int port_num = mp->port_num; - unsigned int coal = ((t_clk / 1000000) * delay) / 64; + unsigned int coal = ((mp->shared->t_clk / 1000000) * delay) / 64; /* Set RX Coalescing mechanism */ wrl(mp, SDMA_CONFIG_REG(port_num), @@ -1158,7 +1171,6 @@ static unsigned int eth_port_set_rx_coal(struct mv643xx_private *mp, * * INPUT: * struct mv643xx_private *mp Ethernet port - * unsigned int t_clk t_clk of the MV-643xx chip in HZ units * unsigned int delay Delay in uSeconds * * OUTPUT: @@ -1169,9 +1181,9 @@ static unsigned int eth_port_set_rx_coal(struct mv643xx_private *mp, * */ static unsigned int eth_port_set_tx_coal(struct mv643xx_private *mp, - unsigned int t_clk, unsigned int delay) + unsigned int delay) { - unsigned int coal = ((t_clk / 1000000) * delay) / 64; + unsigned int coal = ((mp->shared->t_clk / 1000000) * delay) / 64; /* Set TX Coalescing mechanism */ wrl(mp, TX_FIFO_URGENT_THRESHOLD_REG(mp->port_num), coal << 4); @@ -1413,11 +1425,11 @@ static int mv643xx_eth_open(struct net_device *dev) #ifdef MV643XX_COAL mp->rx_int_coal = - eth_port_set_rx_coal(mp, 133000000, MV643XX_RX_COAL); + eth_port_set_rx_coal(mp, MV643XX_RX_COAL); #endif mp->tx_int_coal = - eth_port_set_tx_coal(mp, 133000000, MV643XX_TX_COAL); + eth_port_set_tx_coal(mp, MV643XX_TX_COAL); /* Unmask phy and link status changes interrupts */ wrl(mp, INTERRUPT_EXTEND_MASK_REG(port_num), ETH_INT_UNMASK_ALL_EXT); @@ -1827,6 +1839,11 @@ static int mv643xx_eth_probe(struct platform_device *pdev) return -ENODEV; } + if (pd->shared == NULL) { + printk(KERN_ERR "No mv643xx_eth_platform_data->shared\n"); + return -ENODEV; + } + dev = alloc_etherdev(sizeof(struct mv643xx_private)); if (!dev) return -ENOMEM; @@ -1877,8 +1894,16 @@ static int mv643xx_eth_probe(struct platform_device *pdev) spin_lock_init(&mp->lock); + mp->shared = 
platform_get_drvdata(pd->shared); port_num = mp->port_num = pd->port_number; + if (mp->shared->win_protect) + wrl(mp, WINDOW_PROTECT(port_num), mp->shared->win_protect); + + mp->shared_smi = mp->shared; + if (pd->shared_smi != NULL) + mp->shared_smi = platform_get_drvdata(pd->shared_smi); + /* set default config values */ eth_port_uc_addr_get(mp, dev->dev_addr); mp->rx_ring_size = PORT_DEFAULT_RECEIVE_QUEUE_SIZE; @@ -1983,30 +2008,91 @@ static int mv643xx_eth_remove(struct platform_device *pdev) return 0; } +static void mv643xx_eth_conf_mbus_windows(struct mv643xx_shared_private *msp, + struct mbus_dram_target_info *dram) +{ + void __iomem *base = msp->eth_base; + u32 win_enable; + u32 win_protect; + int i; + + for (i = 0; i < 6; i++) { + writel(0, base + WINDOW_BASE(i)); + writel(0, base + WINDOW_SIZE(i)); + if (i < 4) + writel(0, base + WINDOW_REMAP_HIGH(i)); + } + + win_enable = 0x3f; + win_protect = 0; + + for (i = 0; i < dram->num_cs; i++) { + struct mbus_dram_window *cs = dram->cs + i; + + writel((cs->base & 0xffff0000) | + (cs->mbus_attr << 8) | + dram->mbus_dram_target_id, base + WINDOW_BASE(i)); + writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i)); + + win_enable &= ~(1 << i); + win_protect |= 3 << (2 * i); + } + + writel(win_enable, base + WINDOW_BAR_ENABLE); + msp->win_protect = win_protect; +} + static int mv643xx_eth_shared_probe(struct platform_device *pdev) { static int mv643xx_version_printed = 0; + struct mv643xx_eth_shared_platform_data *pd = pdev->dev.platform_data; + struct mv643xx_shared_private *msp; struct resource *res; + int ret; if (!mv643xx_version_printed++) printk(KERN_NOTICE "MV-643xx 10/100/1000 Ethernet Driver\n"); + ret = -EINVAL; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (res == NULL) - return -ENODEV; + goto out; - mv643xx_eth_base = ioremap(res->start, res->end - res->start + 1); - if (mv643xx_eth_base == NULL) - return -ENOMEM; + ret = -ENOMEM; + msp = kmalloc(sizeof(*msp), GFP_KERNEL); + if (msp == NULL) + goto out; + memset(msp, 0, sizeof(*msp)); + + msp->eth_base = ioremap(res->start, res->end - res->start + 1); + if (msp->eth_base == NULL) + goto out_free; + + spin_lock_init(&msp->phy_lock); + msp->t_clk = (pd != NULL && pd->t_clk != 0) ? pd->t_clk : 133000000; + + platform_set_drvdata(pdev, msp); + + /* + * (Re-)program MBUS remapping windows if we are asked to. 
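
The shared-probe rework above is the standard cure for driver-wide globals: allocate the shared state in the shared device's probe, publish it with platform_set_drvdata(), and let each port's probe fetch it back through platform_get_drvdata(pd->shared). A minimal sketch under those assumptions, with abbreviated field names (kzalloc() standing in for the patch's kmalloc() plus memset()):

    struct shared_state {
        void __iomem *base;
        spinlock_t    lock;
        unsigned int  t_clk;
    };

    static int shared_probe(struct platform_device *pdev)
    {
        struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        struct shared_state *s;

        if (res == NULL)
            return -EINVAL;
        s = kzalloc(sizeof(*s), GFP_KERNEL);
        if (s == NULL)
            return -ENOMEM;
        s->base = ioremap(res->start, res->end - res->start + 1);
        if (s->base == NULL) {
            kfree(s);
            return -ENOMEM;
        }
        spin_lock_init(&s->lock);
        s->t_clk = 133000000;             /* fallback default, as above */
        platform_set_drvdata(pdev, s);    /* ports retrieve this later */
        return 0;
    }
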
+ */ + if (pd != NULL && pd->dram != NULL) + mv643xx_eth_conf_mbus_windows(msp, pd->dram); return 0; +out_free: + kfree(msp); +out: + return ret; } static int mv643xx_eth_shared_remove(struct platform_device *pdev) { - iounmap(mv643xx_eth_base); - mv643xx_eth_base = NULL; + struct mv643xx_shared_private *msp = platform_get_drvdata(pdev); + + iounmap(msp->eth_base); + kfree(msp); return 0; } @@ -2906,15 +2992,16 @@ static void eth_port_reset(struct mv643xx_private *mp) static void eth_port_read_smi_reg(struct mv643xx_private *mp, unsigned int phy_reg, unsigned int *value) { + void __iomem *smi_reg = mp->shared_smi->eth_base + SMI_REG; int phy_addr = ethernet_phy_get(mp); unsigned long flags; int i; /* the SMI register is a shared resource */ - spin_lock_irqsave(&mv643xx_eth_phy_lock, flags); + spin_lock_irqsave(&mp->shared_smi->phy_lock, flags); /* wait for the SMI register to become available */ - for (i = 0; rdl(mp, SMI_REG) & ETH_SMI_BUSY; i++) { + for (i = 0; readl(smi_reg) & ETH_SMI_BUSY; i++) { if (i == PHY_WAIT_ITERATIONS) { printk("%s: PHY busy timeout\n", mp->dev->name); goto out; @@ -2922,11 +3009,11 @@ static void eth_port_read_smi_reg(struct mv643xx_private *mp, udelay(PHY_WAIT_MICRO_SECONDS); } - wrl(mp, SMI_REG, - (phy_addr << 16) | (phy_reg << 21) | ETH_SMI_OPCODE_READ); + writel((phy_addr << 16) | (phy_reg << 21) | ETH_SMI_OPCODE_READ, + smi_reg); /* now wait for the data to be valid */ - for (i = 0; !(rdl(mp, SMI_REG) & ETH_SMI_READ_VALID); i++) { + for (i = 0; !(readl(smi_reg) & ETH_SMI_READ_VALID); i++) { if (i == PHY_WAIT_ITERATIONS) { printk("%s: PHY read timeout\n", mp->dev->name); goto out; @@ -2934,9 +3021,9 @@ static void eth_port_read_smi_reg(struct mv643xx_private *mp, udelay(PHY_WAIT_MICRO_SECONDS); } - *value = rdl(mp, SMI_REG) & 0xffff; + *value = readl(smi_reg) & 0xffff; out: - spin_unlock_irqrestore(&mv643xx_eth_phy_lock, flags); + spin_unlock_irqrestore(&mp->shared_smi->phy_lock, flags); } /* @@ -2962,17 +3049,16 @@ out: static void eth_port_write_smi_reg(struct mv643xx_private *mp, unsigned int phy_reg, unsigned int value) { - int phy_addr; - int i; + void __iomem *smi_reg = mp->shared_smi->eth_base + SMI_REG; + int phy_addr = ethernet_phy_get(mp); unsigned long flags; - - phy_addr = ethernet_phy_get(mp); + int i; /* the SMI register is a shared resource */ - spin_lock_irqsave(&mv643xx_eth_phy_lock, flags); + spin_lock_irqsave(&mp->shared_smi->phy_lock, flags); /* wait for the SMI register to become available */ - for (i = 0; rdl(mp, SMI_REG) & ETH_SMI_BUSY; i++) { + for (i = 0; readl(smi_reg) & ETH_SMI_BUSY; i++) { if (i == PHY_WAIT_ITERATIONS) { printk("%s: PHY busy timeout\n", mp->dev->name); goto out; @@ -2980,10 +3066,10 @@ static void eth_port_write_smi_reg(struct mv643xx_private *mp, udelay(PHY_WAIT_MICRO_SECONDS); } - wrl(mp, SMI_REG, (phy_addr << 16) | (phy_reg << 21) | - ETH_SMI_OPCODE_WRITE | (value & 0xffff)); + writel((phy_addr << 16) | (phy_reg << 21) | + ETH_SMI_OPCODE_WRITE | (value & 0xffff), smi_reg); out: - spin_unlock_irqrestore(&mv643xx_eth_phy_lock, flags); + spin_unlock_irqrestore(&mp->shared_smi->phy_lock, flags); } /* diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c index ef63c8d2bd7..c91b12ea26a 100644 --- a/drivers/net/myri10ge/myri10ge.c +++ b/drivers/net/myri10ge/myri10ge.c @@ -144,11 +144,13 @@ struct myri10ge_tx_buf { char *req_bytes; struct myri10ge_tx_buffer_state *info; int mask; /* number of transmit slots -1 */ - int boundary; /* boundary transmits cannot cross */ int req 
____cacheline_aligned; /* transmit slots submitted */ int pkt_start; /* packets started */ + int stop_queue; + int linearized; int done ____cacheline_aligned; /* transmit slots completed */ int pkt_done; /* packets completed */ + int wake_queue; }; struct myri10ge_rx_done { @@ -160,29 +162,50 @@ struct myri10ge_rx_done { struct net_lro_desc lro_desc[MYRI10GE_MAX_LRO_DESCRIPTORS]; }; -struct myri10ge_priv { - int running; /* running? */ - int csum_flag; /* rx_csums? */ +struct myri10ge_slice_netstats { + unsigned long rx_packets; + unsigned long tx_packets; + unsigned long rx_bytes; + unsigned long tx_bytes; + unsigned long rx_dropped; + unsigned long tx_dropped; +}; + +struct myri10ge_slice_state { struct myri10ge_tx_buf tx; /* transmit ring */ struct myri10ge_rx_buf rx_small; struct myri10ge_rx_buf rx_big; struct myri10ge_rx_done rx_done; + struct net_device *dev; + struct napi_struct napi; + struct myri10ge_priv *mgp; + struct myri10ge_slice_netstats stats; + __be32 __iomem *irq_claim; + struct mcp_irq_data *fw_stats; + dma_addr_t fw_stats_bus; + int watchdog_tx_done; + int watchdog_tx_req; +}; + +struct myri10ge_priv { + struct myri10ge_slice_state ss; + int tx_boundary; /* boundary transmits cannot cross */ + int running; /* running? */ + int csum_flag; /* rx_csums? */ int small_bytes; int big_bytes; + int max_intr_slots; struct net_device *dev; - struct napi_struct napi; struct net_device_stats stats; + spinlock_t stats_lock; u8 __iomem *sram; int sram_size; unsigned long board_span; unsigned long iomem_base; - __be32 __iomem *irq_claim; __be32 __iomem *irq_deassert; char *mac_addr_string; struct mcp_cmd_response *cmd; dma_addr_t cmd_bus; - struct mcp_irq_data *fw_stats; - dma_addr_t fw_stats_bus; struct pci_dev *pdev; int msi_enabled; u32 link_state; @@ -191,20 +214,16 @@ struct myri10ge_priv { __be32 __iomem *intr_coal_delay_ptr; int mtrr; int wc_enabled; - int wake_queue; - int stop_queue; int down_cnt; wait_queue_head_t down_wq; struct work_struct watchdog_work; struct timer_list watchdog_timer; - int watchdog_tx_done; - int watchdog_tx_req; - int watchdog_pause; int watchdog_resets; - int tx_linearized; + int watchdog_pause; int pause; char *fw_name; char eeprom_strings[MYRI10GE_EEPROM_STRINGS_SIZE]; + char *product_code_string; char fw_version[128]; int fw_ver_major; int fw_ver_minor; @@ -228,58 +247,54 @@ static char *myri10ge_fw_aligned = "myri10ge_eth_z8e.dat"; static char *myri10ge_fw_name = NULL; module_param(myri10ge_fw_name, charp, S_IRUGO | S_IWUSR); -MODULE_PARM_DESC(myri10ge_fw_name, "Firmware image name\n"); +MODULE_PARM_DESC(myri10ge_fw_name, "Firmware image name"); static int myri10ge_ecrc_enable = 1; module_param(myri10ge_ecrc_enable, int, S_IRUGO); -MODULE_PARM_DESC(myri10ge_ecrc_enable, "Enable Extended CRC on PCI-E\n"); - -static int myri10ge_max_intr_slots = 1024; -module_param(myri10ge_max_intr_slots, int, S_IRUGO); -MODULE_PARM_DESC(myri10ge_max_intr_slots, "Interrupt queue slots\n"); +MODULE_PARM_DESC(myri10ge_ecrc_enable, "Enable Extended CRC on PCI-E"); static int myri10ge_small_bytes = -1; /* -1 == auto */ module_param(myri10ge_small_bytes, int, S_IRUGO | S_IWUSR); -MODULE_PARM_DESC(myri10ge_small_bytes, "Threshold of small packets\n"); +MODULE_PARM_DESC(myri10ge_small_bytes, "Threshold of small packets"); static int myri10ge_msi = 1; /* enable msi by default */ module_param(myri10ge_msi, int, S_IRUGO | S_IWUSR); -MODULE_PARM_DESC(myri10ge_msi, "Enable Message Signalled Interrupts\n"); +MODULE_PARM_DESC(myri10ge_msi, "Enable Message Signalled 
Interrupts"); static int myri10ge_intr_coal_delay = 75; module_param(myri10ge_intr_coal_delay, int, S_IRUGO); -MODULE_PARM_DESC(myri10ge_intr_coal_delay, "Interrupt coalescing delay\n"); +MODULE_PARM_DESC(myri10ge_intr_coal_delay, "Interrupt coalescing delay"); static int myri10ge_flow_control = 1; module_param(myri10ge_flow_control, int, S_IRUGO); -MODULE_PARM_DESC(myri10ge_flow_control, "Pause parameter\n"); +MODULE_PARM_DESC(myri10ge_flow_control, "Pause parameter"); static int myri10ge_deassert_wait = 1; module_param(myri10ge_deassert_wait, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(myri10ge_deassert_wait, - "Wait when deasserting legacy interrupts\n"); + "Wait when deasserting legacy interrupts"); static int myri10ge_force_firmware = 0; module_param(myri10ge_force_firmware, int, S_IRUGO); MODULE_PARM_DESC(myri10ge_force_firmware, - "Force firmware to assume aligned completions\n"); + "Force firmware to assume aligned completions"); static int myri10ge_initial_mtu = MYRI10GE_MAX_ETHER_MTU - ETH_HLEN; module_param(myri10ge_initial_mtu, int, S_IRUGO); -MODULE_PARM_DESC(myri10ge_initial_mtu, "Initial MTU\n"); +MODULE_PARM_DESC(myri10ge_initial_mtu, "Initial MTU"); static int myri10ge_napi_weight = 64; module_param(myri10ge_napi_weight, int, S_IRUGO); -MODULE_PARM_DESC(myri10ge_napi_weight, "Set NAPI weight\n"); +MODULE_PARM_DESC(myri10ge_napi_weight, "Set NAPI weight"); static int myri10ge_watchdog_timeout = 1; module_param(myri10ge_watchdog_timeout, int, S_IRUGO); -MODULE_PARM_DESC(myri10ge_watchdog_timeout, "Set watchdog timeout\n"); +MODULE_PARM_DESC(myri10ge_watchdog_timeout, "Set watchdog timeout"); static int myri10ge_max_irq_loops = 1048576; module_param(myri10ge_max_irq_loops, int, S_IRUGO); MODULE_PARM_DESC(myri10ge_max_irq_loops, - "Set stuck legacy IRQ detection threshold\n"); + "Set stuck legacy IRQ detection threshold"); #define MYRI10GE_MSG_DEFAULT NETIF_MSG_LINK @@ -289,21 +304,22 @@ MODULE_PARM_DESC(myri10ge_debug, "Debug level (0=none,...,16=all)"); static int myri10ge_lro = 1; module_param(myri10ge_lro, int, S_IRUGO); -MODULE_PARM_DESC(myri10ge_lro, "Enable large receive offload\n"); +MODULE_PARM_DESC(myri10ge_lro, "Enable large receive offload"); static int myri10ge_lro_max_pkts = MYRI10GE_LRO_MAX_PKTS; module_param(myri10ge_lro_max_pkts, int, S_IRUGO); -MODULE_PARM_DESC(myri10ge_lro, "Number of LRO packets to be aggregated\n"); +MODULE_PARM_DESC(myri10ge_lro_max_pkts, + "Number of LRO packets to be aggregated"); static int myri10ge_fill_thresh = 256; module_param(myri10ge_fill_thresh, int, S_IRUGO | S_IWUSR); -MODULE_PARM_DESC(myri10ge_fill_thresh, "Number of empty rx slots allowed\n"); +MODULE_PARM_DESC(myri10ge_fill_thresh, "Number of empty rx slots allowed"); static int myri10ge_reset_recover = 1; static int myri10ge_wcfifo = 0; module_param(myri10ge_wcfifo, int, S_IRUGO); -MODULE_PARM_DESC(myri10ge_wcfifo, "Enable WC Fifo when WC is enabled\n"); +MODULE_PARM_DESC(myri10ge_wcfifo, "Enable WC Fifo when WC is enabled"); #define MYRI10GE_FW_OFFSET 1024*1024 #define MYRI10GE_HIGHPART_TO_U32(X) \ @@ -359,8 +375,10 @@ myri10ge_send_cmd(struct myri10ge_priv *mgp, u32 cmd, for (sleep_total = 0; sleep_total < 1000 && response->result == htonl(MYRI10GE_NO_RESPONSE_RESULT); - sleep_total += 10) + sleep_total += 10) { udelay(10); + mb(); + } } else { /* use msleep for most command */ for (sleep_total = 0; @@ -420,6 +438,10 @@ static int myri10ge_read_mac_addr(struct myri10ge_priv *mgp) ptr += 1; } } + if (memcmp(ptr, "PC=", 3) == 0) { + ptr += 3; + mgp->product_code_string = 
ptr; + } if (memcmp((const void *)ptr, "SN=", 3) == 0) { ptr += 3; mgp->serial_number = simple_strtoul(ptr, &ptr, 10); @@ -442,7 +464,7 @@ abort: static void myri10ge_dummy_rdma(struct myri10ge_priv *mgp, int enable) { char __iomem *submit; - __be32 buf[16]; + __be32 buf[16] __attribute__ ((__aligned__(8))); u32 dma_low, dma_high; int i; @@ -609,13 +631,38 @@ static int myri10ge_adopt_running_firmware(struct myri10ge_priv *mgp) return status; } +int myri10ge_get_firmware_capabilities(struct myri10ge_priv *mgp) +{ + struct myri10ge_cmd cmd; + int status; + + /* probe for IPv6 TSO support */ + mgp->features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_TSO; + status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_MAX_TSO6_HDR_SIZE, + &cmd, 0); + if (status == 0) { + mgp->max_tso6 = cmd.data0; + mgp->features |= NETIF_F_TSO6; + } + + status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_RX_RING_SIZE, &cmd, 0); + if (status != 0) { + dev_err(&mgp->pdev->dev, + "failed MXGEFW_CMD_GET_RX_RING_SIZE\n"); + return -ENXIO; + } + + mgp->max_intr_slots = 2 * (cmd.data0 / sizeof(struct mcp_dma_addr)); + + return 0; +} + static int myri10ge_load_firmware(struct myri10ge_priv *mgp) { char __iomem *submit; - __be32 buf[16]; + __be32 buf[16] __attribute__ ((__aligned__(8))); u32 dma_low, dma_high, size; int status, i; - struct myri10ge_cmd cmd; size = 0; status = myri10ge_load_hotplug_firmware(mgp, &size); @@ -635,7 +682,7 @@ static int myri10ge_load_firmware(struct myri10ge_priv *mgp) } dev_info(&mgp->pdev->dev, "Successfully adopted running firmware\n"); - if (mgp->tx.boundary == 4096) { + if (mgp->tx_boundary == 4096) { dev_warn(&mgp->pdev->dev, "Using firmware currently running on NIC" ". For optimal\n"); @@ -646,7 +693,9 @@ static int myri10ge_load_firmware(struct myri10ge_priv *mgp) } mgp->fw_name = "adopted"; - mgp->tx.boundary = 2048; + mgp->tx_boundary = 2048; + myri10ge_dummy_rdma(mgp, 1); + status = myri10ge_get_firmware_capabilities(mgp); return status; } @@ -681,26 +730,18 @@ static int myri10ge_load_firmware(struct myri10ge_priv *mgp) msleep(1); mb(); i = 0; - while (mgp->cmd->data != MYRI10GE_NO_CONFIRM_DATA && i < 20) { - msleep(1); + while (mgp->cmd->data != MYRI10GE_NO_CONFIRM_DATA && i < 9) { + msleep(1 << i); i++; } if (mgp->cmd->data != MYRI10GE_NO_CONFIRM_DATA) { dev_err(&mgp->pdev->dev, "handoff failed\n"); return -ENXIO; } - dev_info(&mgp->pdev->dev, "handoff confirmed\n"); myri10ge_dummy_rdma(mgp, 1); + status = myri10ge_get_firmware_capabilities(mgp); - /* probe for IPv6 TSO support */ - mgp->features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_TSO; - status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_MAX_TSO6_HDR_SIZE, - &cmd, 0); - if (status == 0) { - mgp->max_tso6 = cmd.data0; - mgp->features |= NETIF_F_TSO6; - } - return 0; + return status; } static int myri10ge_update_mac_address(struct myri10ge_priv *mgp, u8 * addr) @@ -772,7 +813,7 @@ static int myri10ge_dma_test(struct myri10ge_priv *mgp, int test_type) * transfers took to complete. 
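
The firmware-handoff wait above swaps twenty fixed 1 ms polls for exponential backoff: msleep(1 << i) for i = 0..8 sleeps at most 1 + 2 + ... + 256 = 511 ms in total, yet still notices a quick handoff within a few milliseconds. The idiom in isolation, with done() standing in for the MYRI10GE_NO_CONFIRM_DATA test:

    /* Poll-with-exponential-backoff idiom from the handoff path above. */
    static int wait_for_handoff(bool (*done)(void))
    {
        int i = 0;

        while (!done() && i < 9) {
            msleep(1 << i);      /* 1, 2, 4, ... 256 ms; 511 ms worst case */
            i++;
        }
        return done() ? 0 : -ENXIO;
    }
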
*/ - len = mgp->tx.boundary; + len = mgp->tx_boundary; cmd.data0 = MYRI10GE_LOWPART_TO_U32(dmatest_bus); cmd.data1 = MYRI10GE_HIGHPART_TO_U32(dmatest_bus); @@ -834,17 +875,17 @@ static int myri10ge_reset(struct myri10ge_priv *mgp) /* Now exchange information about interrupts */ - bytes = myri10ge_max_intr_slots * sizeof(*mgp->rx_done.entry); - memset(mgp->rx_done.entry, 0, bytes); + bytes = mgp->max_intr_slots * sizeof(*mgp->ss.rx_done.entry); + memset(mgp->ss.rx_done.entry, 0, bytes); cmd.data0 = (u32) bytes; status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_INTRQ_SIZE, &cmd, 0); - cmd.data0 = MYRI10GE_LOWPART_TO_U32(mgp->rx_done.bus); - cmd.data1 = MYRI10GE_HIGHPART_TO_U32(mgp->rx_done.bus); + cmd.data0 = MYRI10GE_LOWPART_TO_U32(mgp->ss.rx_done.bus); + cmd.data1 = MYRI10GE_HIGHPART_TO_U32(mgp->ss.rx_done.bus); status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_INTRQ_DMA, &cmd, 0); status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_IRQ_ACK_OFFSET, &cmd, 0); - mgp->irq_claim = (__iomem __be32 *) (mgp->sram + cmd.data0); + mgp->ss.irq_claim = (__iomem __be32 *) (mgp->sram + cmd.data0); status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_IRQ_DEASSERT_OFFSET, &cmd, 0); mgp->irq_deassert = (__iomem __be32 *) (mgp->sram + cmd.data0); @@ -858,17 +899,17 @@ static int myri10ge_reset(struct myri10ge_priv *mgp) } put_be32(htonl(mgp->intr_coal_delay), mgp->intr_coal_delay_ptr); - memset(mgp->rx_done.entry, 0, bytes); + memset(mgp->ss.rx_done.entry, 0, bytes); /* reset mcp/driver shared state back to 0 */ - mgp->tx.req = 0; - mgp->tx.done = 0; - mgp->tx.pkt_start = 0; - mgp->tx.pkt_done = 0; - mgp->rx_big.cnt = 0; - mgp->rx_small.cnt = 0; - mgp->rx_done.idx = 0; - mgp->rx_done.cnt = 0; + mgp->ss.tx.req = 0; + mgp->ss.tx.done = 0; + mgp->ss.tx.pkt_start = 0; + mgp->ss.tx.pkt_done = 0; + mgp->ss.rx_big.cnt = 0; + mgp->ss.rx_small.cnt = 0; + mgp->ss.rx_done.idx = 0; + mgp->ss.rx_done.cnt = 0; mgp->link_changes = 0; status = myri10ge_update_mac_address(mgp, mgp->dev->dev_addr); myri10ge_change_pause(mgp, mgp->pause); @@ -1020,9 +1061,10 @@ myri10ge_unmap_rx_page(struct pci_dev *pdev, * page into an skb */ static inline int -myri10ge_rx_done(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx, +myri10ge_rx_done(struct myri10ge_slice_state *ss, struct myri10ge_rx_buf *rx, int bytes, int len, __wsum csum) { + struct myri10ge_priv *mgp = ss->mgp; struct sk_buff *skb; struct skb_frag_struct rx_frags[MYRI10GE_MAX_FRAGS_PER_FRAME]; int i, idx, hlen, remainder; @@ -1052,11 +1094,10 @@ myri10ge_rx_done(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx, rx_frags[0].page_offset += MXGEFW_PAD; rx_frags[0].size -= MXGEFW_PAD; len -= MXGEFW_PAD; - lro_receive_frags(&mgp->rx_done.lro_mgr, rx_frags, + lro_receive_frags(&ss->rx_done.lro_mgr, rx_frags, len, len, - /* opaque, will come back in get_frag_header */ - (void *)(__force unsigned long)csum, - csum); + /* opaque, will come back in get_frag_header */ + (void *)(__force unsigned long)csum, csum); return 1; } @@ -1096,10 +1137,11 @@ myri10ge_rx_done(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx, return 1; } -static inline void myri10ge_tx_done(struct myri10ge_priv *mgp, int mcp_index) +static inline void +myri10ge_tx_done(struct myri10ge_slice_state *ss, int mcp_index) { - struct pci_dev *pdev = mgp->pdev; - struct myri10ge_tx_buf *tx = &mgp->tx; + struct pci_dev *pdev = ss->mgp->pdev; + struct myri10ge_tx_buf *tx = &ss->tx; struct sk_buff *skb; int idx, len; @@ -1117,8 +1159,8 @@ static inline void myri10ge_tx_done(struct myri10ge_priv *mgp, int mcp_index) len = 
pci_unmap_len(&tx->info[idx], len); pci_unmap_len_set(&tx->info[idx], len, 0); if (skb) { - mgp->stats.tx_bytes += skb->len; - mgp->stats.tx_packets++; + ss->stats.tx_bytes += skb->len; + ss->stats.tx_packets++; dev_kfree_skb_irq(skb); if (len) pci_unmap_single(pdev, @@ -1134,16 +1176,18 @@ static inline void myri10ge_tx_done(struct myri10ge_priv *mgp, int mcp_index) } } /* start the queue if we've stopped it */ - if (netif_queue_stopped(mgp->dev) + if (netif_queue_stopped(ss->dev) && tx->req - tx->done < (tx->mask >> 1)) { - mgp->wake_queue++; - netif_wake_queue(mgp->dev); + tx->wake_queue++; + netif_wake_queue(ss->dev); } } -static inline int myri10ge_clean_rx_done(struct myri10ge_priv *mgp, int budget) +static inline int +myri10ge_clean_rx_done(struct myri10ge_slice_state *ss, int budget) { - struct myri10ge_rx_done *rx_done = &mgp->rx_done; + struct myri10ge_rx_done *rx_done = &ss->rx_done; + struct myri10ge_priv *mgp = ss->mgp; unsigned long rx_bytes = 0; unsigned long rx_packets = 0; unsigned long rx_ok; @@ -1159,40 +1203,40 @@ static inline int myri10ge_clean_rx_done(struct myri10ge_priv *mgp, int budget) rx_done->entry[idx].length = 0; checksum = csum_unfold(rx_done->entry[idx].checksum); if (length <= mgp->small_bytes) - rx_ok = myri10ge_rx_done(mgp, &mgp->rx_small, + rx_ok = myri10ge_rx_done(ss, &ss->rx_small, mgp->small_bytes, length, checksum); else - rx_ok = myri10ge_rx_done(mgp, &mgp->rx_big, + rx_ok = myri10ge_rx_done(ss, &ss->rx_big, mgp->big_bytes, length, checksum); rx_packets += rx_ok; rx_bytes += rx_ok * (unsigned long)length; cnt++; - idx = cnt & (myri10ge_max_intr_slots - 1); + idx = cnt & (mgp->max_intr_slots - 1); work_done++; } rx_done->idx = idx; rx_done->cnt = cnt; - mgp->stats.rx_packets += rx_packets; - mgp->stats.rx_bytes += rx_bytes; + ss->stats.rx_packets += rx_packets; + ss->stats.rx_bytes += rx_bytes; if (myri10ge_lro) lro_flush_all(&rx_done->lro_mgr); /* restock receive rings if needed */ - if (mgp->rx_small.fill_cnt - mgp->rx_small.cnt < myri10ge_fill_thresh) - myri10ge_alloc_rx_pages(mgp, &mgp->rx_small, + if (ss->rx_small.fill_cnt - ss->rx_small.cnt < myri10ge_fill_thresh) + myri10ge_alloc_rx_pages(mgp, &ss->rx_small, mgp->small_bytes + MXGEFW_PAD, 0); - if (mgp->rx_big.fill_cnt - mgp->rx_big.cnt < myri10ge_fill_thresh) - myri10ge_alloc_rx_pages(mgp, &mgp->rx_big, mgp->big_bytes, 0); + if (ss->rx_big.fill_cnt - ss->rx_big.cnt < myri10ge_fill_thresh) + myri10ge_alloc_rx_pages(mgp, &ss->rx_big, mgp->big_bytes, 0); return work_done; } static inline void myri10ge_check_statblock(struct myri10ge_priv *mgp) { - struct mcp_irq_data *stats = mgp->fw_stats; + struct mcp_irq_data *stats = mgp->ss.fw_stats; if (unlikely(stats->stats_updated)) { unsigned link_up = ntohl(stats->link_up); @@ -1219,9 +1263,9 @@ static inline void myri10ge_check_statblock(struct myri10ge_priv *mgp) } } if (mgp->rdma_tags_available != - ntohl(mgp->fw_stats->rdma_tags_available)) { + ntohl(stats->rdma_tags_available)) { mgp->rdma_tags_available = - ntohl(mgp->fw_stats->rdma_tags_available); + ntohl(stats->rdma_tags_available); printk(KERN_WARNING "myri10ge: %s: RDMA timed out! 
" "%d tags left\n", mgp->dev->name, mgp->rdma_tags_available); @@ -1234,26 +1278,27 @@ static inline void myri10ge_check_statblock(struct myri10ge_priv *mgp) static int myri10ge_poll(struct napi_struct *napi, int budget) { - struct myri10ge_priv *mgp = - container_of(napi, struct myri10ge_priv, napi); - struct net_device *netdev = mgp->dev; + struct myri10ge_slice_state *ss = + container_of(napi, struct myri10ge_slice_state, napi); + struct net_device *netdev = ss->mgp->dev; int work_done; /* process as many rx events as NAPI will allow */ - work_done = myri10ge_clean_rx_done(mgp, budget); + work_done = myri10ge_clean_rx_done(ss, budget); if (work_done < budget) { netif_rx_complete(netdev, napi); - put_be32(htonl(3), mgp->irq_claim); + put_be32(htonl(3), ss->irq_claim); } return work_done; } static irqreturn_t myri10ge_intr(int irq, void *arg) { - struct myri10ge_priv *mgp = arg; - struct mcp_irq_data *stats = mgp->fw_stats; - struct myri10ge_tx_buf *tx = &mgp->tx; + struct myri10ge_slice_state *ss = arg; + struct myri10ge_priv *mgp = ss->mgp; + struct mcp_irq_data *stats = ss->fw_stats; + struct myri10ge_tx_buf *tx = &ss->tx; u32 send_done_count; int i; @@ -1264,7 +1309,7 @@ static irqreturn_t myri10ge_intr(int irq, void *arg) /* low bit indicates receives are present, so schedule * napi poll handler */ if (stats->valid & 1) - netif_rx_schedule(mgp->dev, &mgp->napi); + netif_rx_schedule(ss->dev, &ss->napi); if (!mgp->msi_enabled) { put_be32(0, mgp->irq_deassert); @@ -1281,7 +1326,7 @@ static irqreturn_t myri10ge_intr(int irq, void *arg) /* check for transmit completes and receives */ send_done_count = ntohl(stats->send_done_count); if (send_done_count != tx->pkt_done) - myri10ge_tx_done(mgp, (int)send_done_count); + myri10ge_tx_done(ss, (int)send_done_count); if (unlikely(i > myri10ge_max_irq_loops)) { printk(KERN_WARNING "myri10ge: %s: irq stuck?\n", mgp->dev->name); @@ -1296,16 +1341,46 @@ static irqreturn_t myri10ge_intr(int irq, void *arg) myri10ge_check_statblock(mgp); - put_be32(htonl(3), mgp->irq_claim + 1); + put_be32(htonl(3), ss->irq_claim + 1); return (IRQ_HANDLED); } static int myri10ge_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd) { + struct myri10ge_priv *mgp = netdev_priv(netdev); + char *ptr; + int i; + cmd->autoneg = AUTONEG_DISABLE; cmd->speed = SPEED_10000; cmd->duplex = DUPLEX_FULL; + + /* + * parse the product code to deterimine the interface type + * (CX4, XFP, Quad Ribbon Fiber) by looking at the character + * after the 3rd dash in the driver's cached copy of the + * EEPROM's product code string. 
+ */ + ptr = mgp->product_code_string; + if (ptr == NULL) { + printk(KERN_ERR "myri10ge: %s: Missing product code\n", + netdev->name); + return 0; + } + for (i = 0; i < 3; i++, ptr++) { + ptr = strchr(ptr, '-'); + if (ptr == NULL) { + printk(KERN_ERR "myri10ge: %s: Invalid product " + "code %s\n", netdev->name, + mgp->product_code_string); + return 0; + } + } + if (*ptr == 'R' || *ptr == 'Q') { + /* We've found either an XFP or quad ribbon fiber */ + cmd->port = PORT_FIBRE; + } return 0; } @@ -1324,6 +1399,7 @@ static int myri10ge_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coal) { struct myri10ge_priv *mgp = netdev_priv(netdev); + coal->rx_coalesce_usecs = mgp->intr_coal_delay; return 0; } @@ -1370,10 +1446,10 @@ myri10ge_get_ringparam(struct net_device *netdev, { struct myri10ge_priv *mgp = netdev_priv(netdev); - ring->rx_mini_max_pending = mgp->rx_small.mask + 1; - ring->rx_max_pending = mgp->rx_big.mask + 1; + ring->rx_mini_max_pending = mgp->ss.rx_small.mask + 1; + ring->rx_max_pending = mgp->ss.rx_big.mask + 1; ring->rx_jumbo_max_pending = 0; - ring->tx_max_pending = mgp->rx_small.mask + 1; + ring->tx_max_pending = mgp->ss.rx_small.mask + 1; ring->rx_mini_pending = ring->rx_mini_max_pending; ring->rx_pending = ring->rx_max_pending; ring->rx_jumbo_pending = ring->rx_jumbo_max_pending; @@ -1383,6 +1459,7 @@ myri10ge_get_ringparam(struct net_device *netdev, static u32 myri10ge_get_rx_csum(struct net_device *netdev) { struct myri10ge_priv *mgp = netdev_priv(netdev); + if (mgp->csum_flag) return 1; else @@ -1392,6 +1469,7 @@ static u32 myri10ge_get_rx_csum(struct net_device *netdev) static int myri10ge_set_rx_csum(struct net_device *netdev, u32 csum_enabled) { struct myri10ge_priv *mgp = netdev_priv(netdev); + if (csum_enabled) mgp->csum_flag = MXGEFW_FLAGS_CKSUM; else @@ -1411,7 +1489,7 @@ static int myri10ge_set_tso(struct net_device *netdev, u32 tso_enabled) return 0; } -static const char myri10ge_gstrings_stats[][ETH_GSTRING_LEN] = { +static const char myri10ge_gstrings_main_stats[][ETH_GSTRING_LEN] = { "rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors", "tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions", "rx_length_errors", "rx_over_errors", "rx_crc_errors", @@ -1421,28 +1499,39 @@ static const char myri10ge_gstrings_stats[][ETH_GSTRING_LEN] = { /* device-specific stats */ "tx_boundary", "WC", "irq", "MSI", "read_dma_bw_MBs", "write_dma_bw_MBs", "read_write_dma_bw_MBs", - "serial_number", "tx_pkt_start", "tx_pkt_done", - "tx_req", "tx_done", "rx_small_cnt", "rx_big_cnt", - "wake_queue", "stop_queue", "watchdog_resets", "tx_linearized", + "serial_number", "watchdog_resets", "link_changes", "link_up", "dropped_link_overflow", "dropped_link_error_or_filtered", "dropped_pause", "dropped_bad_phy", "dropped_bad_crc32", "dropped_unicast_filtered", "dropped_multicast_filtered", "dropped_runt", "dropped_overrun", "dropped_no_small_buffer", - "dropped_no_big_buffer", "LRO aggregated", "LRO flushed", + "dropped_no_big_buffer" +}; + +static const char myri10ge_gstrings_slice_stats[][ETH_GSTRING_LEN] = { + "----------- slice ---------", + "tx_pkt_start", "tx_pkt_done", "tx_req", "tx_done", + "rx_small_cnt", "rx_big_cnt", + "wake_queue", "stop_queue", "tx_linearized", "LRO aggregated", + "LRO flushed", "LRO avg aggr", "LRO no_desc" }; #define MYRI10GE_NET_STATS_LEN 21 -#define MYRI10GE_STATS_LEN ARRAY_SIZE(myri10ge_gstrings_stats) +#define MYRI10GE_MAIN_STATS_LEN ARRAY_SIZE(myri10ge_gstrings_main_stats) +#define MYRI10GE_SLICE_STATS_LEN 
ARRAY_SIZE(myri10ge_gstrings_slice_stats) static void myri10ge_get_strings(struct net_device *netdev, u32 stringset, u8 * data) { switch (stringset) { case ETH_SS_STATS: - memcpy(data, *myri10ge_gstrings_stats, - sizeof(myri10ge_gstrings_stats)); + memcpy(data, *myri10ge_gstrings_main_stats, + sizeof(myri10ge_gstrings_main_stats)); + data += sizeof(myri10ge_gstrings_main_stats); + memcpy(data, *myri10ge_gstrings_slice_stats, + sizeof(myri10ge_gstrings_slice_stats)); + data += sizeof(myri10ge_gstrings_slice_stats); break; } } @@ -1451,7 +1540,7 @@ static int myri10ge_get_sset_count(struct net_device *netdev, int sset) { switch (sset) { case ETH_SS_STATS: - return MYRI10GE_STATS_LEN; + return MYRI10GE_MAIN_STATS_LEN + MYRI10GE_SLICE_STATS_LEN; default: return -EOPNOTSUPP; } @@ -1462,12 +1551,13 @@ myri10ge_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats, u64 * data) { struct myri10ge_priv *mgp = netdev_priv(netdev); + struct myri10ge_slice_state *ss; int i; for (i = 0; i < MYRI10GE_NET_STATS_LEN; i++) data[i] = ((unsigned long *)&mgp->stats)[i]; - data[i++] = (unsigned int)mgp->tx.boundary; + data[i++] = (unsigned int)mgp->tx_boundary; data[i++] = (unsigned int)mgp->wc_enabled; data[i++] = (unsigned int)mgp->pdev->irq; data[i++] = (unsigned int)mgp->msi_enabled; @@ -1475,40 +1565,44 @@ myri10ge_get_ethtool_stats(struct net_device *netdev, data[i++] = (unsigned int)mgp->write_dma; data[i++] = (unsigned int)mgp->read_write_dma; data[i++] = (unsigned int)mgp->serial_number; - data[i++] = (unsigned int)mgp->tx.pkt_start; - data[i++] = (unsigned int)mgp->tx.pkt_done; - data[i++] = (unsigned int)mgp->tx.req; - data[i++] = (unsigned int)mgp->tx.done; - data[i++] = (unsigned int)mgp->rx_small.cnt; - data[i++] = (unsigned int)mgp->rx_big.cnt; - data[i++] = (unsigned int)mgp->wake_queue; - data[i++] = (unsigned int)mgp->stop_queue; data[i++] = (unsigned int)mgp->watchdog_resets; - data[i++] = (unsigned int)mgp->tx_linearized; data[i++] = (unsigned int)mgp->link_changes; - data[i++] = (unsigned int)ntohl(mgp->fw_stats->link_up); - data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_link_overflow); - data[i++] = - (unsigned int)ntohl(mgp->fw_stats->dropped_link_error_or_filtered); - data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_pause); - data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_bad_phy); - data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_bad_crc32); + + /* firmware stats are useful only in the first slice */ + ss = &mgp->ss; + data[i++] = (unsigned int)ntohl(ss->fw_stats->link_up); + data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_link_overflow); data[i++] = - (unsigned int)ntohl(mgp->fw_stats->dropped_unicast_filtered); + (unsigned int)ntohl(ss->fw_stats->dropped_link_error_or_filtered); + data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_pause); + data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_bad_phy); + data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_bad_crc32); + data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_unicast_filtered); data[i++] = - (unsigned int)ntohl(mgp->fw_stats->dropped_multicast_filtered); - data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_runt); - data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_overrun); - data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_no_small_buffer); - data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_no_big_buffer); - data[i++] = mgp->rx_done.lro_mgr.stats.aggregated; - data[i++] = mgp->rx_done.lro_mgr.stats.flushed; - if 
(mgp->rx_done.lro_mgr.stats.flushed) - data[i++] = mgp->rx_done.lro_mgr.stats.aggregated / - mgp->rx_done.lro_mgr.stats.flushed; + (unsigned int)ntohl(ss->fw_stats->dropped_multicast_filtered); + data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_runt); + data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_overrun); + data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_no_small_buffer); + data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_no_big_buffer); + + data[i++] = 0; + data[i++] = (unsigned int)ss->tx.pkt_start; + data[i++] = (unsigned int)ss->tx.pkt_done; + data[i++] = (unsigned int)ss->tx.req; + data[i++] = (unsigned int)ss->tx.done; + data[i++] = (unsigned int)ss->rx_small.cnt; + data[i++] = (unsigned int)ss->rx_big.cnt; + data[i++] = (unsigned int)ss->tx.wake_queue; + data[i++] = (unsigned int)ss->tx.stop_queue; + data[i++] = (unsigned int)ss->tx.linearized; + data[i++] = ss->rx_done.lro_mgr.stats.aggregated; + data[i++] = ss->rx_done.lro_mgr.stats.flushed; + if (ss->rx_done.lro_mgr.stats.flushed) + data[i++] = ss->rx_done.lro_mgr.stats.aggregated / + ss->rx_done.lro_mgr.stats.flushed; else data[i++] = 0; - data[i++] = mgp->rx_done.lro_mgr.stats.no_desc; + data[i++] = ss->rx_done.lro_mgr.stats.no_desc; } static void myri10ge_set_msglevel(struct net_device *netdev, u32 value) @@ -1544,19 +1638,17 @@ static const struct ethtool_ops myri10ge_ethtool_ops = { .get_msglevel = myri10ge_get_msglevel }; -static int myri10ge_allocate_rings(struct net_device *dev) +static int myri10ge_allocate_rings(struct myri10ge_slice_state *ss) { - struct myri10ge_priv *mgp; + struct myri10ge_priv *mgp = ss->mgp; struct myri10ge_cmd cmd; + struct net_device *dev = mgp->dev; int tx_ring_size, rx_ring_size; int tx_ring_entries, rx_ring_entries; int i, status; size_t bytes; - mgp = netdev_priv(dev); - /* get ring sizes */ - status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SEND_RING_SIZE, &cmd, 0); tx_ring_size = cmd.data0; status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_RX_RING_SIZE, &cmd, 0); @@ -1566,144 +1658,142 @@ static int myri10ge_allocate_rings(struct net_device *dev) tx_ring_entries = tx_ring_size / sizeof(struct mcp_kreq_ether_send); rx_ring_entries = rx_ring_size / sizeof(struct mcp_dma_addr); - mgp->tx.mask = tx_ring_entries - 1; - mgp->rx_small.mask = mgp->rx_big.mask = rx_ring_entries - 1; + ss->tx.mask = tx_ring_entries - 1; + ss->rx_small.mask = ss->rx_big.mask = rx_ring_entries - 1; status = -ENOMEM; /* allocate the host shadow rings */ bytes = 8 + (MYRI10GE_MAX_SEND_DESC_TSO + 4) - * sizeof(*mgp->tx.req_list); - mgp->tx.req_bytes = kzalloc(bytes, GFP_KERNEL); - if (mgp->tx.req_bytes == NULL) + * sizeof(*ss->tx.req_list); + ss->tx.req_bytes = kzalloc(bytes, GFP_KERNEL); + if (ss->tx.req_bytes == NULL) goto abort_with_nothing; /* ensure req_list entries are aligned to 8 bytes */ - mgp->tx.req_list = (struct mcp_kreq_ether_send *) - ALIGN((unsigned long)mgp->tx.req_bytes, 8); + ss->tx.req_list = (struct mcp_kreq_ether_send *) + ALIGN((unsigned long)ss->tx.req_bytes, 8); - bytes = rx_ring_entries * sizeof(*mgp->rx_small.shadow); - mgp->rx_small.shadow = kzalloc(bytes, GFP_KERNEL); - if (mgp->rx_small.shadow == NULL) + bytes = rx_ring_entries * sizeof(*ss->rx_small.shadow); + ss->rx_small.shadow = kzalloc(bytes, GFP_KERNEL); + if (ss->rx_small.shadow == NULL) goto abort_with_tx_req_bytes; - bytes = rx_ring_entries * sizeof(*mgp->rx_big.shadow); - mgp->rx_big.shadow = kzalloc(bytes, GFP_KERNEL); - if (mgp->rx_big.shadow == NULL) + bytes = rx_ring_entries * sizeof(*ss->rx_big.shadow); + 
ss->rx_big.shadow = kzalloc(bytes, GFP_KERNEL); + if (ss->rx_big.shadow == NULL) goto abort_with_rx_small_shadow; /* allocate the host info rings */ - bytes = tx_ring_entries * sizeof(*mgp->tx.info); - mgp->tx.info = kzalloc(bytes, GFP_KERNEL); - if (mgp->tx.info == NULL) + bytes = tx_ring_entries * sizeof(*ss->tx.info); + ss->tx.info = kzalloc(bytes, GFP_KERNEL); + if (ss->tx.info == NULL) goto abort_with_rx_big_shadow; - bytes = rx_ring_entries * sizeof(*mgp->rx_small.info); - mgp->rx_small.info = kzalloc(bytes, GFP_KERNEL); - if (mgp->rx_small.info == NULL) + bytes = rx_ring_entries * sizeof(*ss->rx_small.info); + ss->rx_small.info = kzalloc(bytes, GFP_KERNEL); + if (ss->rx_small.info == NULL) goto abort_with_tx_info; - bytes = rx_ring_entries * sizeof(*mgp->rx_big.info); - mgp->rx_big.info = kzalloc(bytes, GFP_KERNEL); - if (mgp->rx_big.info == NULL) + bytes = rx_ring_entries * sizeof(*ss->rx_big.info); + ss->rx_big.info = kzalloc(bytes, GFP_KERNEL); + if (ss->rx_big.info == NULL) goto abort_with_rx_small_info; /* Fill the receive rings */ - mgp->rx_big.cnt = 0; - mgp->rx_small.cnt = 0; - mgp->rx_big.fill_cnt = 0; - mgp->rx_small.fill_cnt = 0; - mgp->rx_small.page_offset = MYRI10GE_ALLOC_SIZE; - mgp->rx_big.page_offset = MYRI10GE_ALLOC_SIZE; - mgp->rx_small.watchdog_needed = 0; - mgp->rx_big.watchdog_needed = 0; - myri10ge_alloc_rx_pages(mgp, &mgp->rx_small, + ss->rx_big.cnt = 0; + ss->rx_small.cnt = 0; + ss->rx_big.fill_cnt = 0; + ss->rx_small.fill_cnt = 0; + ss->rx_small.page_offset = MYRI10GE_ALLOC_SIZE; + ss->rx_big.page_offset = MYRI10GE_ALLOC_SIZE; + ss->rx_small.watchdog_needed = 0; + ss->rx_big.watchdog_needed = 0; + myri10ge_alloc_rx_pages(mgp, &ss->rx_small, mgp->small_bytes + MXGEFW_PAD, 0); - if (mgp->rx_small.fill_cnt < mgp->rx_small.mask + 1) { + if (ss->rx_small.fill_cnt < ss->rx_small.mask + 1) { printk(KERN_ERR "myri10ge: %s: alloced only %d small bufs\n", - dev->name, mgp->rx_small.fill_cnt); + dev->name, ss->rx_small.fill_cnt); goto abort_with_rx_small_ring; } - myri10ge_alloc_rx_pages(mgp, &mgp->rx_big, mgp->big_bytes, 0); - if (mgp->rx_big.fill_cnt < mgp->rx_big.mask + 1) { + myri10ge_alloc_rx_pages(mgp, &ss->rx_big, mgp->big_bytes, 0); + if (ss->rx_big.fill_cnt < ss->rx_big.mask + 1) { printk(KERN_ERR "myri10ge: %s: alloced only %d big bufs\n", - dev->name, mgp->rx_big.fill_cnt); + dev->name, ss->rx_big.fill_cnt); goto abort_with_rx_big_ring; } return 0; abort_with_rx_big_ring: - for (i = mgp->rx_big.cnt; i < mgp->rx_big.fill_cnt; i++) { - int idx = i & mgp->rx_big.mask; - myri10ge_unmap_rx_page(mgp->pdev, &mgp->rx_big.info[idx], + for (i = ss->rx_big.cnt; i < ss->rx_big.fill_cnt; i++) { + int idx = i & ss->rx_big.mask; + myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_big.info[idx], mgp->big_bytes); - put_page(mgp->rx_big.info[idx].page); + put_page(ss->rx_big.info[idx].page); } abort_with_rx_small_ring: - for (i = mgp->rx_small.cnt; i < mgp->rx_small.fill_cnt; i++) { - int idx = i & mgp->rx_small.mask; - myri10ge_unmap_rx_page(mgp->pdev, &mgp->rx_small.info[idx], + for (i = ss->rx_small.cnt; i < ss->rx_small.fill_cnt; i++) { + int idx = i & ss->rx_small.mask; + myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_small.info[idx], mgp->small_bytes + MXGEFW_PAD); - put_page(mgp->rx_small.info[idx].page); + put_page(ss->rx_small.info[idx].page); } - kfree(mgp->rx_big.info); + kfree(ss->rx_big.info); abort_with_rx_small_info: - kfree(mgp->rx_small.info); + kfree(ss->rx_small.info); abort_with_tx_info: - kfree(mgp->tx.info); + kfree(ss->tx.info); abort_with_rx_big_shadow: - 
kfree(mgp->rx_big.shadow); + kfree(ss->rx_big.shadow); abort_with_rx_small_shadow: - kfree(mgp->rx_small.shadow); + kfree(ss->rx_small.shadow); abort_with_tx_req_bytes: - kfree(mgp->tx.req_bytes); - mgp->tx.req_bytes = NULL; - mgp->tx.req_list = NULL; + kfree(ss->tx.req_bytes); + ss->tx.req_bytes = NULL; + ss->tx.req_list = NULL; abort_with_nothing: return status; } -static void myri10ge_free_rings(struct net_device *dev) +static void myri10ge_free_rings(struct myri10ge_slice_state *ss) { - struct myri10ge_priv *mgp; + struct myri10ge_priv *mgp = ss->mgp; struct sk_buff *skb; struct myri10ge_tx_buf *tx; int i, len, idx; - mgp = netdev_priv(dev); - - for (i = mgp->rx_big.cnt; i < mgp->rx_big.fill_cnt; i++) { - idx = i & mgp->rx_big.mask; - if (i == mgp->rx_big.fill_cnt - 1) - mgp->rx_big.info[idx].page_offset = MYRI10GE_ALLOC_SIZE; - myri10ge_unmap_rx_page(mgp->pdev, &mgp->rx_big.info[idx], + for (i = ss->rx_big.cnt; i < ss->rx_big.fill_cnt; i++) { + idx = i & ss->rx_big.mask; + if (i == ss->rx_big.fill_cnt - 1) + ss->rx_big.info[idx].page_offset = MYRI10GE_ALLOC_SIZE; + myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_big.info[idx], mgp->big_bytes); - put_page(mgp->rx_big.info[idx].page); + put_page(ss->rx_big.info[idx].page); } - for (i = mgp->rx_small.cnt; i < mgp->rx_small.fill_cnt; i++) { - idx = i & mgp->rx_small.mask; - if (i == mgp->rx_small.fill_cnt - 1) - mgp->rx_small.info[idx].page_offset = + for (i = ss->rx_small.cnt; i < ss->rx_small.fill_cnt; i++) { + idx = i & ss->rx_small.mask; + if (i == ss->rx_small.fill_cnt - 1) + ss->rx_small.info[idx].page_offset = MYRI10GE_ALLOC_SIZE; - myri10ge_unmap_rx_page(mgp->pdev, &mgp->rx_small.info[idx], + myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_small.info[idx], mgp->small_bytes + MXGEFW_PAD); - put_page(mgp->rx_small.info[idx].page); + put_page(ss->rx_small.info[idx].page); } - tx = &mgp->tx; + tx = &ss->tx; while (tx->done != tx->req) { idx = tx->done & tx->mask; skb = tx->info[idx].skb; @@ -1714,7 +1804,7 @@ static void myri10ge_free_rings(struct net_device *dev) len = pci_unmap_len(&tx->info[idx], len); pci_unmap_len_set(&tx->info[idx], len, 0); if (skb) { - mgp->stats.tx_dropped++; + ss->stats.tx_dropped++; dev_kfree_skb_any(skb); if (len) pci_unmap_single(mgp->pdev, @@ -1729,19 +1819,19 @@ static void myri10ge_free_rings(struct net_device *dev) PCI_DMA_TODEVICE); } } - kfree(mgp->rx_big.info); + kfree(ss->rx_big.info); - kfree(mgp->rx_small.info); + kfree(ss->rx_small.info); - kfree(mgp->tx.info); + kfree(ss->tx.info); - kfree(mgp->rx_big.shadow); + kfree(ss->rx_big.shadow); - kfree(mgp->rx_small.shadow); + kfree(ss->rx_small.shadow); - kfree(mgp->tx.req_bytes); - mgp->tx.req_bytes = NULL; - mgp->tx.req_list = NULL; + kfree(ss->tx.req_bytes); + ss->tx.req_bytes = NULL; + ss->tx.req_list = NULL; } static int myri10ge_request_irq(struct myri10ge_priv *mgp) @@ -1840,13 +1930,11 @@ myri10ge_get_frag_header(struct skb_frag_struct *frag, void **mac_hdr, static int myri10ge_open(struct net_device *dev) { - struct myri10ge_priv *mgp; + struct myri10ge_priv *mgp = netdev_priv(dev); struct myri10ge_cmd cmd; struct net_lro_mgr *lro_mgr; int status, big_pow2; - mgp = netdev_priv(dev); - if (mgp->running != MYRI10GE_ETH_STOPPED) return -EBUSY; @@ -1883,16 +1971,16 @@ static int myri10ge_open(struct net_device *dev) /* get the lanai pointers to the send and receive rings */ status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SEND_OFFSET, &cmd, 0); - mgp->tx.lanai = + mgp->ss.tx.lanai = (struct mcp_kreq_ether_send __iomem *)(mgp->sram + cmd.data0); status |= 
myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SMALL_RX_OFFSET, &cmd, 0); - mgp->rx_small.lanai = + mgp->ss.rx_small.lanai = (struct mcp_kreq_ether_recv __iomem *)(mgp->sram + cmd.data0); status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_BIG_RX_OFFSET, &cmd, 0); - mgp->rx_big.lanai = + mgp->ss.rx_big.lanai = (struct mcp_kreq_ether_recv __iomem *)(mgp->sram + cmd.data0); if (status != 0) { @@ -1904,15 +1992,15 @@ static int myri10ge_open(struct net_device *dev) } if (myri10ge_wcfifo && mgp->wc_enabled) { - mgp->tx.wc_fifo = (u8 __iomem *) mgp->sram + MXGEFW_ETH_SEND_4; - mgp->rx_small.wc_fifo = + mgp->ss.tx.wc_fifo = (u8 __iomem *) mgp->sram + MXGEFW_ETH_SEND_4; + mgp->ss.rx_small.wc_fifo = (u8 __iomem *) mgp->sram + MXGEFW_ETH_RECV_SMALL; - mgp->rx_big.wc_fifo = + mgp->ss.rx_big.wc_fifo = (u8 __iomem *) mgp->sram + MXGEFW_ETH_RECV_BIG; } else { - mgp->tx.wc_fifo = NULL; - mgp->rx_small.wc_fifo = NULL; - mgp->rx_big.wc_fifo = NULL; + mgp->ss.tx.wc_fifo = NULL; + mgp->ss.rx_small.wc_fifo = NULL; + mgp->ss.rx_big.wc_fifo = NULL; } /* Firmware needs the big buff size as a power of 2. Lie and @@ -1929,7 +2017,7 @@ static int myri10ge_open(struct net_device *dev) mgp->big_bytes = big_pow2; } - status = myri10ge_allocate_rings(dev); + status = myri10ge_allocate_rings(&mgp->ss); if (status != 0) goto abort_with_irq; @@ -1948,12 +2036,12 @@ static int myri10ge_open(struct net_device *dev) goto abort_with_rings; } - cmd.data0 = MYRI10GE_LOWPART_TO_U32(mgp->fw_stats_bus); - cmd.data1 = MYRI10GE_HIGHPART_TO_U32(mgp->fw_stats_bus); + cmd.data0 = MYRI10GE_LOWPART_TO_U32(mgp->ss.fw_stats_bus); + cmd.data1 = MYRI10GE_HIGHPART_TO_U32(mgp->ss.fw_stats_bus); cmd.data2 = sizeof(struct mcp_irq_data); status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_STATS_DMA_V2, &cmd, 0); if (status == -ENOSYS) { - dma_addr_t bus = mgp->fw_stats_bus; + dma_addr_t bus = mgp->ss.fw_stats_bus; bus += offsetof(struct mcp_irq_data, send_done_count); cmd.data0 = MYRI10GE_LOWPART_TO_U32(bus); cmd.data1 = MYRI10GE_HIGHPART_TO_U32(bus); @@ -1974,20 +2062,20 @@ static int myri10ge_open(struct net_device *dev) mgp->link_state = ~0U; mgp->rdma_tags_available = 15; - lro_mgr = &mgp->rx_done.lro_mgr; + lro_mgr = &mgp->ss.rx_done.lro_mgr; lro_mgr->dev = dev; lro_mgr->features = LRO_F_NAPI; lro_mgr->ip_summed = CHECKSUM_COMPLETE; lro_mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY; lro_mgr->max_desc = MYRI10GE_MAX_LRO_DESCRIPTORS; - lro_mgr->lro_arr = mgp->rx_done.lro_desc; + lro_mgr->lro_arr = mgp->ss.rx_done.lro_desc; lro_mgr->get_frag_header = myri10ge_get_frag_header; lro_mgr->max_aggr = myri10ge_lro_max_pkts; lro_mgr->frag_align_pad = 2; if (lro_mgr->max_aggr > MAX_SKB_FRAGS) lro_mgr->max_aggr = MAX_SKB_FRAGS; - napi_enable(&mgp->napi); /* must happen prior to any irq */ + napi_enable(&mgp->ss.napi); /* must happen prior to any irq */ status = myri10ge_send_cmd(mgp, MXGEFW_CMD_ETHERNET_UP, &cmd, 0); if (status) { @@ -1996,8 +2084,8 @@ static int myri10ge_open(struct net_device *dev) goto abort_with_rings; } - mgp->wake_queue = 0; - mgp->stop_queue = 0; + mgp->ss.tx.wake_queue = 0; + mgp->ss.tx.stop_queue = 0; mgp->running = MYRI10GE_ETH_RUNNING; mgp->watchdog_timer.expires = jiffies + myri10ge_watchdog_timeout * HZ; add_timer(&mgp->watchdog_timer); @@ -2005,7 +2093,7 @@ static int myri10ge_open(struct net_device *dev) return 0; abort_with_rings: - myri10ge_free_rings(dev); + myri10ge_free_rings(&mgp->ss); abort_with_irq: myri10ge_free_irq(mgp); @@ -2017,21 +2105,19 @@ abort_with_nothing: static int myri10ge_close(struct net_device *dev) { - struct 
myri10ge_priv *mgp; + struct myri10ge_priv *mgp = netdev_priv(dev); struct myri10ge_cmd cmd; int status, old_down_cnt; - mgp = netdev_priv(dev); - if (mgp->running != MYRI10GE_ETH_RUNNING) return 0; - if (mgp->tx.req_bytes == NULL) + if (mgp->ss.tx.req_bytes == NULL) return 0; del_timer_sync(&mgp->watchdog_timer); mgp->running = MYRI10GE_ETH_STOPPING; - napi_disable(&mgp->napi); + napi_disable(&mgp->ss.napi); netif_carrier_off(dev); netif_stop_queue(dev); old_down_cnt = mgp->down_cnt; @@ -2047,7 +2133,7 @@ static int myri10ge_close(struct net_device *dev) netif_tx_disable(dev); myri10ge_free_irq(mgp); - myri10ge_free_rings(dev); + myri10ge_free_rings(&mgp->ss); mgp->running = MYRI10GE_ETH_STOPPED; return 0; @@ -2143,7 +2229,7 @@ myri10ge_submit_req_wc(struct myri10ge_tx_buf *tx, /* * Transmit a packet. We need to split the packet so that a single - * segment does not cross myri10ge->tx.boundary, so this makes segment + * segment does not cross myri10ge->tx_boundary, so this makes segment * counting tricky. So rather than try to count segments up front, we * just give up if there are too few segments to hold a reasonably * fragmented packet currently available. If we run @@ -2154,8 +2240,9 @@ myri10ge_submit_req_wc(struct myri10ge_tx_buf *tx, static int myri10ge_xmit(struct sk_buff *skb, struct net_device *dev) { struct myri10ge_priv *mgp = netdev_priv(dev); + struct myri10ge_slice_state *ss; struct mcp_kreq_ether_send *req; - struct myri10ge_tx_buf *tx = &mgp->tx; + struct myri10ge_tx_buf *tx; struct skb_frag_struct *frag; dma_addr_t bus; u32 low; @@ -2166,6 +2253,9 @@ static int myri10ge_xmit(struct sk_buff *skb, struct net_device *dev) int cum_len, seglen, boundary, rdma_count; u8 flags, odd_flag; + /* always transmit through slot 0 */ + ss = &mgp->ss; + tx = &ss->tx; again: req = tx->req_list; avail = tx->mask - 1 - (tx->req - tx->done); @@ -2180,7 +2270,7 @@ again: if ((unlikely(avail < max_segments))) { /* we are out of transmit resources */ - mgp->stop_queue++; + tx->stop_queue++; netif_stop_queue(dev); return 1; } @@ -2242,7 +2332,7 @@ again: if (skb_padto(skb, ETH_ZLEN)) { /* The packet is gone, so we must * return 0 */ - mgp->stats.tx_dropped += 1; + ss->stats.tx_dropped += 1; return 0; } /* adjust the len to account for the zero pad @@ -2284,7 +2374,7 @@ again: while (1) { /* Break the SKB or Fragment up into pieces which - * do not cross mgp->tx.boundary */ + * do not cross mgp->tx_boundary */ low = MYRI10GE_LOWPART_TO_U32(bus); high_swapped = htonl(MYRI10GE_HIGHPART_TO_U32(bus)); while (len) { @@ -2294,7 +2384,8 @@ again: if (unlikely(count == max_segments)) goto abort_linearize; - boundary = (low + tx->boundary) & ~(tx->boundary - 1); + boundary = + (low + mgp->tx_boundary) & ~(mgp->tx_boundary - 1); seglen = boundary - low; if (seglen > len) seglen = len; @@ -2378,7 +2469,7 @@ again: myri10ge_submit_req_wc(tx, tx->req_list, count); tx->pkt_start++; if ((avail - count) < MXGEFW_MAX_SEND_DESC) { - mgp->stop_queue++; + tx->stop_queue++; netif_stop_queue(dev); } dev->trans_start = jiffies; @@ -2420,12 +2511,12 @@ abort_linearize: if (skb_linearize(skb)) goto drop; - mgp->tx_linearized++; + tx->linearized++; goto again; drop: dev_kfree_skb_any(skb); - mgp->stats.tx_dropped += 1; + ss->stats.tx_dropped += 1; return 0; } @@ -2433,7 +2524,7 @@ drop: static int myri10ge_sw_tso(struct sk_buff *skb, struct net_device *dev) { struct sk_buff *segs, *curr; - struct myri10ge_priv *mgp = dev->priv; + struct myri10ge_priv *mgp = netdev_priv(dev); int status; segs = skb_gso_segment(skb, 
dev->features & ~NETIF_F_TSO6); @@ -2473,14 +2564,13 @@ static struct net_device_stats *myri10ge_get_stats(struct net_device *dev) static void myri10ge_set_multicast_list(struct net_device *dev) { + struct myri10ge_priv *mgp = netdev_priv(dev); struct myri10ge_cmd cmd; - struct myri10ge_priv *mgp; struct dev_mc_list *mc_list; __be32 data[2] = { 0, 0 }; int err; DECLARE_MAC_BUF(mac); - mgp = netdev_priv(dev); /* can be called from atomic contexts, * pass 1 to force atomicity in myri10ge_send_cmd() */ myri10ge_change_promisc(mgp, dev->flags & IFF_PROMISC, 1); @@ -2616,13 +2706,14 @@ static void myri10ge_enable_ecrc(struct myri10ge_priv *mgp) ext_type = (val & PCI_EXP_FLAGS_TYPE) >> 4; if (ext_type != PCI_EXP_TYPE_ROOT_PORT) { if (myri10ge_ecrc_enable > 1) { - struct pci_dev *old_bridge = bridge; + struct pci_dev *prev_bridge, *old_bridge = bridge; /* Walk the hierarchy up to the root port * where ECRC has to be enabled */ do { + prev_bridge = bridge; bridge = bridge->bus->self; - if (!bridge) { + if (!bridge || prev_bridge == bridge) { dev_err(dev, "Failed to find root port" " to force ECRC\n"); @@ -2681,9 +2772,9 @@ static void myri10ge_enable_ecrc(struct myri10ge_priv *mgp) * already been enabled, then it must use a firmware image which works * around unaligned completion packets (myri10ge_ethp_z8e.dat), and it * should also ensure that it never gives the device a Read-DMA which is - * larger than 2KB by setting the tx.boundary to 2KB. If ECRC is + * larger than 2KB by setting the tx_boundary to 2KB. If ECRC is * enabled, then the driver should use the aligned (myri10ge_eth_z8e.dat) - * firmware image, and set tx.boundary to 4KB. + * firmware image, and set tx_boundary to 4KB. */ static void myri10ge_firmware_probe(struct myri10ge_priv *mgp) @@ -2692,7 +2783,7 @@ static void myri10ge_firmware_probe(struct myri10ge_priv *mgp) struct device *dev = &pdev->dev; int status; - mgp->tx.boundary = 4096; + mgp->tx_boundary = 4096; /* * Verify the max read request size was set to 4KB * before trying the test with 4KB. 
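Aside: the tx_boundary hunks in this region condense to the sketch below. It is illustrative only, not part of the patch; it assumes the stock pcie_get_readrq() helper, the sketch_select_boundary name is hypothetical, and the real myri10ge_firmware_probe() additionally runs a DMA test before trusting the aligned firmware.

static void sketch_select_boundary(struct myri10ge_priv *mgp)
{
	/* PCIe Max Read Request Size, in bytes */
	int readrq = pcie_get_readrq(mgp->pdev);

	if (readrq == 4096) {
		/* a 4KB read never crosses a 4KB address boundary,
		 * so the aligned firmware can be tried */
		mgp->tx_boundary = 4096;
		mgp->fw_name = myri10ge_fw_aligned;
	} else {
		/* smaller MRRS: use the unaligned firmware and keep
		 * every read DMA within 2KB */
		mgp->tx_boundary = 2048;
		mgp->fw_name = myri10ge_fw_unaligned;
	}
}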
@@ -2704,7 +2795,7 @@ static void myri10ge_firmware_probe(struct myri10ge_priv *mgp) } if (status != 4096) { dev_warn(dev, "Max Read Request size != 4096 (%d)\n", status); - mgp->tx.boundary = 2048; + mgp->tx_boundary = 2048; } /* * load the optimized firmware (which assumes aligned PCIe @@ -2737,7 +2828,7 @@ static void myri10ge_firmware_probe(struct myri10ge_priv *mgp) "Please install up to date fw\n"); abort: /* fall back to using the unaligned firmware */ - mgp->tx.boundary = 2048; + mgp->tx_boundary = 2048; mgp->fw_name = myri10ge_fw_unaligned; } @@ -2758,7 +2849,7 @@ static void myri10ge_select_firmware(struct myri10ge_priv *mgp) if (link_width < 8) { dev_info(&mgp->pdev->dev, "PCIE x%d Link\n", link_width); - mgp->tx.boundary = 4096; + mgp->tx_boundary = 4096; mgp->fw_name = myri10ge_fw_aligned; } else { myri10ge_firmware_probe(mgp); @@ -2767,12 +2858,12 @@ static void myri10ge_select_firmware(struct myri10ge_priv *mgp) if (myri10ge_force_firmware == 1) { dev_info(&mgp->pdev->dev, "Assuming aligned completions (forced)\n"); - mgp->tx.boundary = 4096; + mgp->tx_boundary = 4096; mgp->fw_name = myri10ge_fw_aligned; } else { dev_info(&mgp->pdev->dev, "Assuming unaligned completions (forced)\n"); - mgp->tx.boundary = 2048; + mgp->tx_boundary = 2048; mgp->fw_name = myri10ge_fw_unaligned; } } @@ -2889,6 +2980,7 @@ static void myri10ge_watchdog(struct work_struct *work) { struct myri10ge_priv *mgp = container_of(work, struct myri10ge_priv, watchdog_work); + struct myri10ge_tx_buf *tx; u32 reboot; int status; u16 cmd, vendor; @@ -2938,15 +3030,16 @@ static void myri10ge_watchdog(struct work_struct *work) printk(KERN_ERR "myri10ge: %s: device timeout, resetting\n", mgp->dev->name); + tx = &mgp->ss.tx; printk(KERN_INFO "myri10ge: %s: %d %d %d %d %d\n", - mgp->dev->name, mgp->tx.req, mgp->tx.done, - mgp->tx.pkt_start, mgp->tx.pkt_done, - (int)ntohl(mgp->fw_stats->send_done_count)); + mgp->dev->name, tx->req, tx->done, + tx->pkt_start, tx->pkt_done, + (int)ntohl(mgp->ss.fw_stats->send_done_count)); msleep(2000); printk(KERN_INFO "myri10ge: %s: %d %d %d %d %d\n", - mgp->dev->name, mgp->tx.req, mgp->tx.done, - mgp->tx.pkt_start, mgp->tx.pkt_done, - (int)ntohl(mgp->fw_stats->send_done_count)); + mgp->dev->name, tx->req, tx->done, + tx->pkt_start, tx->pkt_done, + (int)ntohl(mgp->ss.fw_stats->send_done_count)); } rtnl_lock(); myri10ge_close(mgp->dev); @@ -2969,28 +3062,31 @@ static void myri10ge_watchdog(struct work_struct *work) static void myri10ge_watchdog_timer(unsigned long arg) { struct myri10ge_priv *mgp; + struct myri10ge_slice_state *ss; u32 rx_pause_cnt; mgp = (struct myri10ge_priv *)arg; - if (mgp->rx_small.watchdog_needed) { - myri10ge_alloc_rx_pages(mgp, &mgp->rx_small, + rx_pause_cnt = ntohl(mgp->ss.fw_stats->dropped_pause); + + ss = &mgp->ss; + if (ss->rx_small.watchdog_needed) { + myri10ge_alloc_rx_pages(mgp, &ss->rx_small, mgp->small_bytes + MXGEFW_PAD, 1); - if (mgp->rx_small.fill_cnt - mgp->rx_small.cnt >= + if (ss->rx_small.fill_cnt - ss->rx_small.cnt >= myri10ge_fill_thresh) - mgp->rx_small.watchdog_needed = 0; + ss->rx_small.watchdog_needed = 0; } - if (mgp->rx_big.watchdog_needed) { - myri10ge_alloc_rx_pages(mgp, &mgp->rx_big, mgp->big_bytes, 1); - if (mgp->rx_big.fill_cnt - mgp->rx_big.cnt >= + if (ss->rx_big.watchdog_needed) { + myri10ge_alloc_rx_pages(mgp, &ss->rx_big, mgp->big_bytes, 1); + if (ss->rx_big.fill_cnt - ss->rx_big.cnt >= myri10ge_fill_thresh) - mgp->rx_big.watchdog_needed = 0; + ss->rx_big.watchdog_needed = 0; } - rx_pause_cnt = 
ntohl(mgp->fw_stats->dropped_pause); - if (mgp->tx.req != mgp->tx.done && - mgp->tx.done == mgp->watchdog_tx_done && - mgp->watchdog_tx_req != mgp->watchdog_tx_done) { + if (ss->tx.req != ss->tx.done && + ss->tx.done == ss->watchdog_tx_done && + ss->watchdog_tx_req != ss->watchdog_tx_done) { /* nic seems like it might be stuck.. */ if (rx_pause_cnt != mgp->watchdog_pause) { if (net_ratelimit()) @@ -3005,8 +3101,8 @@ static void myri10ge_watchdog_timer(unsigned long arg) /* rearm timer */ mod_timer(&mgp->watchdog_timer, jiffies + myri10ge_watchdog_timeout * HZ); - mgp->watchdog_tx_done = mgp->tx.done; - mgp->watchdog_tx_req = mgp->tx.req; + ss->watchdog_tx_done = ss->tx.done; + ss->watchdog_tx_req = ss->tx.req; mgp->watchdog_pause = rx_pause_cnt; } @@ -3030,7 +3126,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent) mgp = netdev_priv(netdev); mgp->dev = netdev; - netif_napi_add(netdev, &mgp->napi, myri10ge_poll, myri10ge_napi_weight); + netif_napi_add(netdev, &mgp->ss.napi, myri10ge_poll, myri10ge_napi_weight); mgp->pdev = pdev; mgp->csum_flag = MXGEFW_FLAGS_CKSUM; mgp->pause = myri10ge_flow_control; @@ -3076,9 +3172,9 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (mgp->cmd == NULL) goto abort_with_netdev; - mgp->fw_stats = dma_alloc_coherent(&pdev->dev, sizeof(*mgp->fw_stats), - &mgp->fw_stats_bus, GFP_KERNEL); - if (mgp->fw_stats == NULL) + mgp->ss.fw_stats = dma_alloc_coherent(&pdev->dev, sizeof(*mgp->ss.fw_stats), + &mgp->ss.fw_stats_bus, GFP_KERNEL); + if (mgp->ss.fw_stats == NULL) goto abort_with_cmd; mgp->board_span = pci_resource_len(pdev, 0); @@ -3118,12 +3214,12 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent) netdev->dev_addr[i] = mgp->mac_addr[i]; /* allocate rx done ring */ - bytes = myri10ge_max_intr_slots * sizeof(*mgp->rx_done.entry); - mgp->rx_done.entry = dma_alloc_coherent(&pdev->dev, bytes, - &mgp->rx_done.bus, GFP_KERNEL); - if (mgp->rx_done.entry == NULL) + bytes = mgp->max_intr_slots * sizeof(*mgp->ss.rx_done.entry); + mgp->ss.rx_done.entry = dma_alloc_coherent(&pdev->dev, bytes, + &mgp->ss.rx_done.bus, GFP_KERNEL); + if (mgp->ss.rx_done.entry == NULL) goto abort_with_ioremap; - memset(mgp->rx_done.entry, 0, bytes); + memset(mgp->ss.rx_done.entry, 0, bytes); myri10ge_select_firmware(mgp); @@ -3183,7 +3279,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } dev_info(dev, "%s IRQ %d, tx bndry %d, fw %s, WC %s\n", (mgp->msi_enabled ? "MSI" : "xPIC"), - netdev->irq, mgp->tx.boundary, mgp->fw_name, + netdev->irq, mgp->tx_boundary, mgp->fw_name, (mgp->wc_enabled ? 
"Enabled" : "Disabled")); return 0; @@ -3195,9 +3291,9 @@ abort_with_firmware: myri10ge_dummy_rdma(mgp, 0); abort_with_rx_done: - bytes = myri10ge_max_intr_slots * sizeof(*mgp->rx_done.entry); + bytes = mgp->max_intr_slots * sizeof(*mgp->ss.rx_done.entry); dma_free_coherent(&pdev->dev, bytes, - mgp->rx_done.entry, mgp->rx_done.bus); + mgp->ss.rx_done.entry, mgp->ss.rx_done.bus); abort_with_ioremap: iounmap(mgp->sram); @@ -3207,8 +3303,8 @@ abort_with_wc: if (mgp->mtrr >= 0) mtrr_del(mgp->mtrr, mgp->iomem_base, mgp->board_span); #endif - dma_free_coherent(&pdev->dev, sizeof(*mgp->fw_stats), - mgp->fw_stats, mgp->fw_stats_bus); + dma_free_coherent(&pdev->dev, sizeof(*mgp->ss.fw_stats), + mgp->ss.fw_stats, mgp->ss.fw_stats_bus); abort_with_cmd: dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd), @@ -3246,9 +3342,9 @@ static void myri10ge_remove(struct pci_dev *pdev) /* avoid a memory leak */ pci_restore_state(pdev); - bytes = myri10ge_max_intr_slots * sizeof(*mgp->rx_done.entry); + bytes = mgp->max_intr_slots * sizeof(*mgp->ss.rx_done.entry); dma_free_coherent(&pdev->dev, bytes, - mgp->rx_done.entry, mgp->rx_done.bus); + mgp->ss.rx_done.entry, mgp->ss.rx_done.bus); iounmap(mgp->sram); @@ -3256,8 +3352,8 @@ static void myri10ge_remove(struct pci_dev *pdev) if (mgp->mtrr >= 0) mtrr_del(mgp->mtrr, mgp->iomem_base, mgp->board_span); #endif - dma_free_coherent(&pdev->dev, sizeof(*mgp->fw_stats), - mgp->fw_stats, mgp->fw_stats_bus); + dma_free_coherent(&pdev->dev, sizeof(*mgp->ss.fw_stats), + mgp->ss.fw_stats, mgp->ss.fw_stats_bus); dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd), mgp->cmd, mgp->cmd_bus); diff --git a/drivers/net/myri10ge/myri10ge_mcp.h b/drivers/net/myri10ge/myri10ge_mcp.h index 58e57178c56..fdbeeee0737 100644 --- a/drivers/net/myri10ge/myri10ge_mcp.h +++ b/drivers/net/myri10ge/myri10ge_mcp.h @@ -10,7 +10,7 @@ struct mcp_dma_addr { __be32 low; }; -/* 4 Bytes. 8 Bytes for NDIS drivers. */ +/* 4 Bytes */ struct mcp_slot { __sum16 checksum; __be16 length; @@ -144,6 +144,7 @@ enum myri10ge_mcp_cmd_type { * a power of 2 number of entries. */ MXGEFW_CMD_SET_INTRQ_SIZE, /* in bytes */ +#define MXGEFW_CMD_SET_INTRQ_SIZE_FLAG_NO_STRICT_SIZE_CHECK (1 << 31) /* command to bring ethernet interface up. Above parameters * (plus mtu & mac address) must have been exchanged prior @@ -221,10 +222,14 @@ enum myri10ge_mcp_cmd_type { MXGEFW_CMD_GET_MAX_RSS_QUEUES, MXGEFW_CMD_ENABLE_RSS_QUEUES, /* data0 = number of slices n (0, 1, ..., n-1) to enable - * data1 = interrupt mode. 0=share one INTx/MSI, 1=use one MSI-X per queue. + * data1 = interrupt mode. + * 0=share one INTx/MSI, 1=use one MSI-X per queue. * If all queues share one interrupt, the driver must have set * RSS_SHARED_INTERRUPT_DMA before enabling queues. */ +#define MXGEFW_SLICE_INTR_MODE_SHARED 0 +#define MXGEFW_SLICE_INTR_MODE_ONE_PER_SLICE 1 + MXGEFW_CMD_GET_RSS_SHARED_INTERRUPT_MASK_OFFSET, MXGEFW_CMD_SET_RSS_SHARED_INTERRUPT_DMA, /* data0, data1 = bus address lsw, msw */ @@ -241,10 +246,14 @@ enum myri10ge_mcp_cmd_type { * 0: disable rss. nic does not distribute receive packets. * 1: enable rss. nic distributes receive packets among queues. * data1 = hash type - * 1: IPV4 - * 2: TCP_IPV4 - * 3: IPV4 | TCP_IPV4 + * 1: IPV4 (required by RSS) + * 2: TCP_IPV4 (required by RSS) + * 3: IPV4 | TCP_IPV4 (required by RSS) + * 4: source port */ +#define MXGEFW_RSS_HASH_TYPE_IPV4 0x1 +#define MXGEFW_RSS_HASH_TYPE_TCP_IPV4 0x2 +#define MXGEFW_RSS_HASH_TYPE_SRC_PORT 0x4 MXGEFW_CMD_GET_MAX_TSO6_HDR_SIZE, /* Return data = the max. 
size of the entire headers of a IPv6 TSO packet. @@ -260,6 +269,8 @@ enum myri10ge_mcp_cmd_type { * 0: Linux/FreeBSD style (NIC default) * 1: NDIS/NetBSD style */ +#define MXGEFW_TSO_MODE_LINUX 0 +#define MXGEFW_TSO_MODE_NDIS 1 MXGEFW_CMD_MDIO_READ, /* data0 = dev_addr (PMA/PMD or PCS ...), data1 = register/addr */ @@ -286,6 +297,38 @@ enum myri10ge_mcp_cmd_type { /* Return data = NIC memory offset of mcp_vpump_public_global */ MXGEFW_CMD_RESET_VPUMP, /* Resets the VPUMP state */ + + MXGEFW_CMD_SET_RSS_MCP_SLOT_TYPE, + /* data0 = mcp_slot type to use. + * 0 = the default 4B mcp_slot + * 1 = 8B mcp_slot_8 + */ +#define MXGEFW_RSS_MCP_SLOT_TYPE_MIN 0 +#define MXGEFW_RSS_MCP_SLOT_TYPE_WITH_HASH 1 + + MXGEFW_CMD_SET_THROTTLE_FACTOR, + /* set the throttle factor for ethp_z8e + * data0 = throttle_factor + * throttle_factor = 256 * pcie-raw-speed / tx_speed + * tx_speed = 256 * pcie-raw-speed / throttle_factor + * + * For PCI-E x8: pcie-raw-speed == 16Gb/s + * For PCI-E x4: pcie-raw-speed == 8Gb/s + * + * ex1: throttle_factor == 0x1a0 (416), tx_speed == 1.23GB/s == 9.846 Gb/s + * ex2: throttle_factor == 0x200 (512), tx_speed == 1.0GB/s == 8 Gb/s + * + * with tx_boundary == 2048, max-throttle-factor == 8191 => min-speed == 500Mb/s + * with tx_boundary == 4096, max-throttle-factor == 4095 => min-speed == 1Gb/s + */ + + MXGEFW_CMD_VPUMP_UP, + /* Allocates VPump Connection, Send Request and Zero copy buffer address tables */ + MXGEFW_CMD_GET_VPUMP_CLK, + /* Get the lanai clock */ + + MXGEFW_CMD_GET_DCA_OFFSET, + /* offset of dca control for WDMAs */ }; enum myri10ge_mcp_cmd_status { @@ -302,7 +345,8 @@ enum myri10ge_mcp_cmd_status { MXGEFW_CMD_ERROR_UNALIGNED, MXGEFW_CMD_ERROR_NO_MDIO, MXGEFW_CMD_ERROR_XFP_FAILURE, - MXGEFW_CMD_ERROR_XFP_ABSENT + MXGEFW_CMD_ERROR_XFP_ABSENT, + MXGEFW_CMD_ERROR_BAD_PCIE_LINK }; #define MXGEFW_OLD_IRQ_DATA_LEN 40 diff --git a/drivers/net/myri10ge/myri10ge_mcp_gen_header.h b/drivers/net/myri10ge/myri10ge_mcp_gen_header.h index 16a810dd6d5..07d65c2cbb2 100644 --- a/drivers/net/myri10ge/myri10ge_mcp_gen_header.h +++ b/drivers/net/myri10ge/myri10ge_mcp_gen_header.h @@ -1,30 +1,6 @@ #ifndef __MYRI10GE_MCP_GEN_HEADER_H__ #define __MYRI10GE_MCP_GEN_HEADER_H__ -/* this file define a standard header used as a first entry point to - * exchange information between firmware/driver and driver. The - * header structure can be anywhere in the mcp. It will usually be in - * the .data section, because some fields needs to be initialized at - * compile time. - * The 32bit word at offset MX_HEADER_PTR_OFFSET in the mcp must - * contains the location of the header. - * - * Typically a MCP will start with the following: - * .text - * .space 52 ! to help catch MEMORY_INT errors - * bt start ! 
jump to real code - * nop - * .long _gen_mcp_header - * - * The source will have a definition like: - * - * mcp_gen_header_t gen_mcp_header = { - * .header_length = sizeof(mcp_gen_header_t), - * .mcp_type = MCP_TYPE_XXX, - * .version = "something $Id: mcp_gen_header.h,v 1.2 2006/05/13 10:04:35 bgoglin Exp $", - * .mcp_globals = (unsigned)&Globals - * }; - */ #define MCP_HEADER_PTR_OFFSET 0x3c @@ -32,13 +8,14 @@ #define MCP_TYPE_PCIE 0x70636965 /* "PCIE" pcie-only MCP */ #define MCP_TYPE_ETH 0x45544820 /* "ETH " */ #define MCP_TYPE_MCP0 0x4d435030 /* "MCP0" */ +#define MCP_TYPE_DFLT 0x20202020 /* " " */ struct mcp_gen_header { /* the first 4 fields are filled at compile time */ unsigned header_length; __be32 mcp_type; char version[128]; - unsigned mcp_globals; /* pointer to mcp-type specific structure */ + unsigned mcp_private; /* pointer to mcp-type specific structure */ /* filled by the MCP at run-time */ unsigned sram_size; @@ -53,6 +30,18 @@ struct mcp_gen_header { * * Never remove any field. Keep everything naturally align. */ + + /* Specifies if the running mcp is mcp0, 1, or 2. */ + unsigned char mcp_index; + unsigned char disable_rabbit; + unsigned char unaligned_tlp; + unsigned char pad1; + unsigned counters_addr; + unsigned copy_block_info; /* for small mcps loaded with "lload -d" */ + unsigned short handoff_id_major; /* must be equal */ + unsigned short handoff_id_caps; /* bitfield: new mcp must have superset */ + unsigned msix_table_addr; /* start address of msix table in firmware */ + /* 8 */ }; #endif /* __MYRI10GE_MCP_GEN_HEADER_H__ */ diff --git a/drivers/net/niu.c b/drivers/net/niu.c index 4009c4ce96b..918f802fe08 100644 --- a/drivers/net/niu.c +++ b/drivers/net/niu.c @@ -1,6 +1,6 @@ /* niu.c: Neptune ethernet driver. * - * Copyright (C) 2007 David S. Miller (davem@davemloft.net) + * Copyright (C) 2007, 2008 David S. Miller (davem@davemloft.net) */ #include <linux/module.h> @@ -33,8 +33,8 @@ #define DRV_MODULE_NAME "niu" #define PFX DRV_MODULE_NAME ": " -#define DRV_MODULE_VERSION "0.8" -#define DRV_MODULE_RELDATE "April 24, 2008" +#define DRV_MODULE_VERSION "0.9" +#define DRV_MODULE_RELDATE "May 4, 2008" static char version[] __devinitdata = DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; @@ -865,7 +865,6 @@ static int link_status_1g_serdes(struct niu *np, int *link_up_p) return 0; } - static int link_status_10g_serdes(struct niu *np, int *link_up_p) { unsigned long flags; @@ -900,7 +899,6 @@ static int link_status_10g_serdes(struct niu *np, int *link_up_p) return 0; } - static int link_status_1g_rgmii(struct niu *np, int *link_up_p) { struct niu_link_config *lp = &np->link_config; @@ -957,7 +955,6 @@ out: return err; } - static int bcm8704_reset(struct niu *np) { int err, limit; @@ -1357,8 +1354,6 @@ static int mii_reset(struct niu *np) return 0; } - - static int xcvr_init_1g_rgmii(struct niu *np) { int err; @@ -1419,7 +1414,6 @@ static int xcvr_init_1g_rgmii(struct niu *np) return 0; } - static int mii_init_common(struct niu *np) { struct niu_link_config *lp = &np->link_config; @@ -7008,31 +7002,20 @@ static int __devinit niu_phy_type_prop_decode(struct niu *np, return 0; } -/* niu board models have a trailing dash version incremented - * with HW rev change. 
Need to ignore the dash version while - * checking for match - * - * for example, for the 10G card the current vpd.board_model - * is 501-5283-04, of which -04 is the dash version and has - * to be ignored - */ -static int niu_board_model_match(struct niu *np, const char *model) -{ - return !strncmp(np->vpd.board_model, model, strlen(model)); -} - static int niu_pci_vpd_get_nports(struct niu *np) { int ports = 0; - if ((niu_board_model_match(np, NIU_QGC_LP_BM_STR)) || - (niu_board_model_match(np, NIU_QGC_PEM_BM_STR)) || - (niu_board_model_match(np, NIU_ALONSO_BM_STR))) { + if ((!strcmp(np->vpd.model, NIU_QGC_LP_MDL_STR)) || + (!strcmp(np->vpd.model, NIU_QGC_PEM_MDL_STR)) || + (!strcmp(np->vpd.model, NIU_MARAMBA_MDL_STR)) || + (!strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) || + (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR))) { ports = 4; - } else if ((niu_board_model_match(np, NIU_2XGF_LP_BM_STR)) || - (niu_board_model_match(np, NIU_2XGF_PEM_BM_STR)) || - (niu_board_model_match(np, NIU_FOXXY_BM_STR)) || - (niu_board_model_match(np, NIU_2XGF_MRVL_BM_STR))) { + } else if ((!strcmp(np->vpd.model, NIU_2XGF_LP_MDL_STR)) || + (!strcmp(np->vpd.model, NIU_2XGF_PEM_MDL_STR)) || + (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) || + (!strcmp(np->vpd.model, NIU_2XGF_MRVL_MDL_STR))) { ports = 2; } @@ -7053,8 +7036,8 @@ static void __devinit niu_pci_vpd_validate(struct niu *np) return; } - if (!strcmp(np->vpd.model, "SUNW,CP3220") || - !strcmp(np->vpd.model, "SUNW,CP3260")) { + if (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR) || + !strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) { np->flags |= NIU_FLAGS_10G; np->flags &= ~NIU_FLAGS_FIBER; np->flags |= NIU_FLAGS_XCVR_SERDES; @@ -7065,7 +7048,7 @@ static void __devinit niu_pci_vpd_validate(struct niu *np) } if (np->flags & NIU_FLAGS_10G) np->mac_xcvr = MAC_XCVR_XPCS; - } else if (niu_board_model_match(np, NIU_FOXXY_BM_STR)) { + } else if (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) { np->flags |= (NIU_FLAGS_10G | NIU_FLAGS_FIBER | NIU_FLAGS_HOTPLUG_PHY); } else if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) { @@ -7264,8 +7247,11 @@ static int __devinit niu_get_and_validate_port(struct niu *np) parent->num_ports = nr64(ESPC_NUM_PORTS_MACS) & ESPC_NUM_PORTS_MACS_VAL; + /* All of the current probing methods fail on + * Maramba on-board parts. 
+ */ if (!parent->num_ports) - return -ENODEV; + parent->num_ports = 4; } } } @@ -7538,8 +7524,8 @@ static int __devinit walk_phys(struct niu *np, struct niu_parent *parent) u32 val; int err; - if (!strcmp(np->vpd.model, "SUNW,CP3220") || - !strcmp(np->vpd.model, "SUNW,CP3260")) { + if (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR) || + !strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) { num_10g = 0; num_1g = 2; parent->plat_type = PLAT_TYPE_ATCA_CP3220; @@ -7548,7 +7534,7 @@ static int __devinit walk_phys(struct niu *np, struct niu_parent *parent) phy_encode(PORT_TYPE_1G, 1) | phy_encode(PORT_TYPE_1G, 2) | phy_encode(PORT_TYPE_1G, 3)); - } else if (niu_board_model_match(np, NIU_FOXXY_BM_STR)) { + } else if (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) { num_10g = 2; num_1g = 0; parent->num_ports = 2; @@ -7943,6 +7929,7 @@ static int __devinit niu_get_of_props(struct niu *np) struct device_node *dp; const char *phy_type; const u8 *mac_addr; + const char *model; int prop_len; if (np->parent->plat_type == PLAT_TYPE_NIU) @@ -7997,6 +7984,11 @@ static int __devinit niu_get_of_props(struct niu *np) memcpy(dev->dev_addr, dev->perm_addr, dev->addr_len); + model = of_get_property(dp, "model", &prop_len); + + if (model) + strcpy(np->vpd.model, model); + return 0; #else return -EINVAL; diff --git a/drivers/net/niu.h b/drivers/net/niu.h index 101a3f1a8ec..c6fa883daa2 100644 --- a/drivers/net/niu.h +++ b/drivers/net/niu.h @@ -2946,6 +2946,15 @@ struct rx_ring_info { #define NIU_ALONSO_BM_STR "373-0202" #define NIU_FOXXY_BM_STR "501-7961" #define NIU_2XGF_MRVL_BM_STR "SK-6E82" +#define NIU_QGC_LP_MDL_STR "SUNW,pcie-qgc" +#define NIU_2XGF_LP_MDL_STR "SUNW,pcie-2xgf" +#define NIU_QGC_PEM_MDL_STR "SUNW,pcie-qgc-pem" +#define NIU_2XGF_PEM_MDL_STR "SUNW,pcie-2xgf-pem" +#define NIU_ALONSO_MDL_STR "SUNW,CP3220" +#define NIU_KIMI_MDL_STR "SUNW,CP3260" +#define NIU_MARAMBA_MDL_STR "SUNW,pcie-neptune" +#define NIU_FOXXY_MDL_STR "SUNW,pcie-rfem" +#define NIU_2XGF_MRVL_MDL_STR "SysKonnect,pcie-2xgf" #define NIU_VPD_MIN_MAJOR 3 #define NIU_VPD_MIN_MINOR 4 diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c index d0d5585114b..81fd85214b9 100644 --- a/drivers/net/pcnet32.c +++ b/drivers/net/pcnet32.c @@ -22,12 +22,8 @@ *************************************************************************/ #define DRV_NAME "pcnet32" -#ifdef CONFIG_PCNET32_NAPI -#define DRV_VERSION "1.34-NAPI" -#else -#define DRV_VERSION "1.34" -#endif -#define DRV_RELDATE "14.Aug.2007" +#define DRV_VERSION "1.35" +#define DRV_RELDATE "21.Apr.2008" #define PFX DRV_NAME ": " static const char *const version = @@ -445,30 +441,24 @@ static struct pcnet32_access pcnet32_dwio = { static void pcnet32_netif_stop(struct net_device *dev) { -#ifdef CONFIG_PCNET32_NAPI struct pcnet32_private *lp = netdev_priv(dev); -#endif + dev->trans_start = jiffies; -#ifdef CONFIG_PCNET32_NAPI napi_disable(&lp->napi); -#endif netif_tx_disable(dev); } static void pcnet32_netif_start(struct net_device *dev) { -#ifdef CONFIG_PCNET32_NAPI struct pcnet32_private *lp = netdev_priv(dev); ulong ioaddr = dev->base_addr; u16 val; -#endif + netif_wake_queue(dev); -#ifdef CONFIG_PCNET32_NAPI val = lp->a.read_csr(ioaddr, CSR3); val &= 0x00ff; lp->a.write_csr(ioaddr, CSR3, val); napi_enable(&lp->napi); -#endif } /* @@ -911,11 +901,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1) rc = 1; /* default to fail */ if (netif_running(dev)) -#ifdef CONFIG_PCNET32_NAPI pcnet32_netif_stop(dev); -#else - pcnet32_close(dev); -#endif spin_lock_irqsave(&lp->lock, flags); 
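	/* Aside, not part of the patch: with the CONFIG_PCNET32_NAPI
	 * #ifdefs removed, this stop path no longer depends on a config
	 * option -- when the interface is running, pcnet32_netif_stop()
	 * above disables NAPI polling (napi_disable(&lp->napi)) and TX
	 * before lp->lock is taken, so the CSR0_STOP write below cannot
	 * race a poll in flight. */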
lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */ @@ -1046,7 +1032,6 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1) x = a->read_bcr(ioaddr, 32); /* reset internal loopback */ a->write_bcr(ioaddr, 32, (x & ~0x0002)); -#ifdef CONFIG_PCNET32_NAPI if (netif_running(dev)) { pcnet32_netif_start(dev); pcnet32_restart(dev, CSR0_NORMAL); @@ -1055,16 +1040,6 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1) lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */ } spin_unlock_irqrestore(&lp->lock, flags); -#else - if (netif_running(dev)) { - spin_unlock_irqrestore(&lp->lock, flags); - pcnet32_open(dev); - } else { - pcnet32_purge_rx_ring(dev); - lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */ - spin_unlock_irqrestore(&lp->lock, flags); - } -#endif return (rc); } /* end pcnet32_loopback_test */ @@ -1270,11 +1245,7 @@ static void pcnet32_rx_entry(struct net_device *dev, } dev->stats.rx_bytes += skb->len; skb->protocol = eth_type_trans(skb, dev); -#ifdef CONFIG_PCNET32_NAPI netif_receive_skb(skb); -#else - netif_rx(skb); -#endif dev->last_rx = jiffies; dev->stats.rx_packets++; return; @@ -1403,7 +1374,6 @@ static int pcnet32_tx(struct net_device *dev) return must_restart; } -#ifdef CONFIG_PCNET32_NAPI static int pcnet32_poll(struct napi_struct *napi, int budget) { struct pcnet32_private *lp = container_of(napi, struct pcnet32_private, napi); @@ -1442,7 +1412,6 @@ static int pcnet32_poll(struct napi_struct *napi, int budget) } return work_done; } -#endif #define PCNET32_REGS_PER_PHY 32 #define PCNET32_MAX_PHYS 32 @@ -1864,9 +1833,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) /* napi.weight is used in both the napi and non-napi cases */ lp->napi.weight = lp->rx_ring_size / 2; -#ifdef CONFIG_PCNET32_NAPI netif_napi_add(dev, &lp->napi, pcnet32_poll, lp->rx_ring_size / 2); -#endif if (fdx && !(lp->options & PCNET32_PORT_ASEL) && ((cards_found >= MAX_UNITS) || full_duplex[cards_found])) @@ -2297,9 +2264,7 @@ static int pcnet32_open(struct net_device *dev) goto err_free_ring; } -#ifdef CONFIG_PCNET32_NAPI napi_enable(&lp->napi); -#endif /* Re-initialize the PCNET32, and start it when done. */ lp->a.write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff)); @@ -2623,7 +2588,6 @@ pcnet32_interrupt(int irq, void *dev_id) dev->name, csr0); /* unlike for the lance, there is no restart needed */ } -#ifdef CONFIG_PCNET32_NAPI if (netif_rx_schedule_prep(dev, &lp->napi)) { u16 val; /* set interrupt masks */ @@ -2634,24 +2598,9 @@ pcnet32_interrupt(int irq, void *dev_id) __netif_rx_schedule(dev, &lp->napi); break; } -#else - pcnet32_rx(dev, lp->napi.weight); - if (pcnet32_tx(dev)) { - /* reset the chip to clear the error condition, then restart */ - lp->a.reset(ioaddr); - lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */ - pcnet32_restart(dev, CSR0_START); - netif_wake_queue(dev); - } -#endif csr0 = lp->a.read_csr(ioaddr, CSR0); } -#ifndef CONFIG_PCNET32_NAPI - /* Set interrupt enable. 
*/ - lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN); -#endif - if (netif_msg_intr(lp)) printk(KERN_DEBUG "%s: exiting interrupt, csr0=%#4.4x.\n", dev->name, lp->a.read_csr(ioaddr, CSR0)); @@ -2670,9 +2619,7 @@ static int pcnet32_close(struct net_device *dev) del_timer_sync(&lp->watchdog_timer); netif_stop_queue(dev); -#ifdef CONFIG_PCNET32_NAPI napi_disable(&lp->napi); -#endif spin_lock_irqsave(&lp->lock, flags); diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 3c18bb59495..45cc2914d34 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -547,7 +547,7 @@ static void phy_force_reduction(struct phy_device *phydev) * Must not be called from interrupt context, or while the * phydev->lock is held. */ -void phy_error(struct phy_device *phydev) +static void phy_error(struct phy_device *phydev) { mutex_lock(&phydev->lock); phydev->state = PHY_HALTED; diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c index d3207c0da89..1f4ca2b54a7 100644 --- a/drivers/net/ppp_generic.c +++ b/drivers/net/ppp_generic.c @@ -2458,6 +2458,7 @@ ppp_create_interface(int unit, int *retp) out3: atomic_dec(&ppp_unit_count); + unregister_netdev(dev); out2: mutex_unlock(&all_ppp_mutex); free_netdev(dev); diff --git a/drivers/net/pppol2tp.c b/drivers/net/pppol2tp.c index 244d7830c92..79359919335 100644 --- a/drivers/net/pppol2tp.c +++ b/drivers/net/pppol2tp.c @@ -1621,9 +1621,16 @@ out_no_ppp: end: release_sock(sk); - if (error != 0) - PRINTK(session ? session->debug : -1, PPPOL2TP_MSG_CONTROL, KERN_WARNING, - "%s: connect failed: %d\n", session->name, error); + if (error != 0) { + if (session) + PRINTK(session->debug, + PPPOL2TP_MSG_CONTROL, KERN_WARNING, + "%s: connect failed: %d\n", + session->name, error); + else + PRINTK(-1, PPPOL2TP_MSG_CONTROL, KERN_WARNING, + "connect failed: %d\n", error); + } return error; } diff --git a/drivers/net/ps3_gelic_wireless.c b/drivers/net/ps3_gelic_wireless.c index 0d32123085e..1dae1f2ed81 100644 --- a/drivers/net/ps3_gelic_wireless.c +++ b/drivers/net/ps3_gelic_wireless.c @@ -2474,6 +2474,8 @@ static void gelic_wl_free(struct gelic_wl_info *wl) pr_debug("%s: <-\n", __func__); + free_page((unsigned long)wl->buf); + pr_debug("%s: destroy queues\n", __func__); destroy_workqueue(wl->eurus_cmd_queue); destroy_workqueue(wl->event_queue); diff --git a/drivers/net/sfc/Makefile b/drivers/net/sfc/Makefile index 0f023447eaf..1d2daeec7ac 100644 --- a/drivers/net/sfc/Makefile +++ b/drivers/net/sfc/Makefile @@ -1,5 +1,5 @@ sfc-y += efx.o falcon.o tx.o rx.o falcon_xmac.o \ - i2c-direct.o ethtool.o xfp_phy.o mdio_10g.o \ - tenxpress.o boards.o sfe4001.o + i2c-direct.o selftest.o ethtool.o xfp_phy.o \ + mdio_10g.o tenxpress.o boards.o sfe4001.o obj-$(CONFIG_SFC) += sfc.o diff --git a/drivers/net/sfc/boards.h b/drivers/net/sfc/boards.h index f56341d428e..695764dc2e6 100644 --- a/drivers/net/sfc/boards.h +++ b/drivers/net/sfc/boards.h @@ -22,5 +22,7 @@ enum efx_board_type { extern int efx_set_board_info(struct efx_nic *efx, u16 revision_info); extern int sfe4001_poweron(struct efx_nic *efx); extern void sfe4001_poweroff(struct efx_nic *efx); +/* Are we putting the PHY into flash config mode */ +extern unsigned int sfe4001_phy_flash_cfg; #endif diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c index 59edcf793c1..418f2e53a95 100644 --- a/drivers/net/sfc/efx.c +++ b/drivers/net/sfc/efx.c @@ -1873,6 +1873,7 @@ static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type, tx_queue->queue = i; tx_queue->buffer = NULL; tx_queue->channel = 
&efx->channel[0]; /* for safety */ + tx_queue->tso_headers_free = NULL; } for (i = 0; i < EFX_MAX_RX_QUEUES; i++) { rx_queue = &efx->rx_queue[i]; @@ -2071,7 +2072,8 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev, net_dev = alloc_etherdev(sizeof(*efx)); if (!net_dev) return -ENOMEM; - net_dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_HIGHDMA; + net_dev->features |= (NETIF_F_IP_CSUM | NETIF_F_SG | + NETIF_F_HIGHDMA | NETIF_F_TSO); if (lro) net_dev->features |= NETIF_F_LRO; efx = net_dev->priv; diff --git a/drivers/net/sfc/enum.h b/drivers/net/sfc/enum.h index 43663a4619d..c53290d08e2 100644 --- a/drivers/net/sfc/enum.h +++ b/drivers/net/sfc/enum.h @@ -10,6 +10,55 @@ #ifndef EFX_ENUM_H #define EFX_ENUM_H +/** + * enum efx_loopback_mode - loopback modes + * @LOOPBACK_NONE: no loopback + * @LOOPBACK_XGMII: loopback within MAC at XGMII level + * @LOOPBACK_XGXS: loopback within MAC at XGXS level + * @LOOPBACK_XAUI: loopback within MAC at XAUI level + * @LOOPBACK_PHYXS: loopback within PHY at PHYXS level + * @LOOPBACK_PCS: loopback within PHY at PCS level + * @LOOPBACK_PMAPMD: loopback within PHY at PMAPMD level + * @LOOPBACK_NETWORK: reflecting loopback (even further than furthest!) + */ +/* Please keep in order and up-to-date w.r.t the following two #defines */ +enum efx_loopback_mode { + LOOPBACK_NONE = 0, + LOOPBACK_MAC = 1, + LOOPBACK_XGMII = 2, + LOOPBACK_XGXS = 3, + LOOPBACK_XAUI = 4, + LOOPBACK_PHY = 5, + LOOPBACK_PHYXS = 6, + LOOPBACK_PCS = 7, + LOOPBACK_PMAPMD = 8, + LOOPBACK_NETWORK = 9, + LOOPBACK_MAX +}; + +#define LOOPBACK_TEST_MAX LOOPBACK_PMAPMD + +extern const char *efx_loopback_mode_names[]; +#define LOOPBACK_MODE_NAME(mode) \ + STRING_TABLE_LOOKUP(mode, efx_loopback_mode) +#define LOOPBACK_MODE(efx) \ + LOOPBACK_MODE_NAME(efx->loopback_mode) + +/* These loopbacks occur within the controller */ +#define LOOPBACKS_10G_INTERNAL ((1 << LOOPBACK_XGMII)| \ + (1 << LOOPBACK_XGXS) | \ + (1 << LOOPBACK_XAUI)) + +#define LOOPBACK_MASK(_efx) \ + (1 << (_efx)->loopback_mode) + +#define LOOPBACK_INTERNAL(_efx) \ + ((LOOPBACKS_10G_INTERNAL & LOOPBACK_MASK(_efx)) ? 1 : 0) + +#define LOOPBACK_OUT_OF(_from, _to, _mask) \ + (((LOOPBACK_MASK(_from) & (_mask)) && \ + ((LOOPBACK_MASK(_to) & (_mask)) == 0)) ? 
1 : 0) + /*****************************************************************************/ /** diff --git a/drivers/net/sfc/ethtool.c b/drivers/net/sfc/ethtool.c index ad541badbd9..e2c75d10161 100644 --- a/drivers/net/sfc/ethtool.c +++ b/drivers/net/sfc/ethtool.c @@ -12,12 +12,26 @@ #include <linux/ethtool.h> #include <linux/rtnetlink.h> #include "net_driver.h" +#include "selftest.h" #include "efx.h" #include "ethtool.h" #include "falcon.h" #include "gmii.h" #include "mac.h" +const char *efx_loopback_mode_names[] = { + [LOOPBACK_NONE] = "NONE", + [LOOPBACK_MAC] = "MAC", + [LOOPBACK_XGMII] = "XGMII", + [LOOPBACK_XGXS] = "XGXS", + [LOOPBACK_XAUI] = "XAUI", + [LOOPBACK_PHY] = "PHY", + [LOOPBACK_PHYXS] = "PHY(XS)", + [LOOPBACK_PCS] = "PHY(PCS)", + [LOOPBACK_PMAPMD] = "PHY(PMAPMD)", + [LOOPBACK_NETWORK] = "NETWORK", +}; + static int efx_ethtool_set_tx_csum(struct net_device *net_dev, u32 enable); struct ethtool_string { @@ -217,23 +231,179 @@ static void efx_ethtool_get_drvinfo(struct net_device *net_dev, strlcpy(info->bus_info, pci_name(efx->pci_dev), sizeof(info->bus_info)); } +/** + * efx_fill_test - fill in an individual self-test entry + * @test_index: Index of the test + * @strings: Ethtool strings, or %NULL + * @data: Ethtool test results, or %NULL + * @test: Pointer to test result (used only if data != %NULL) + * @unit_format: Unit name format (e.g. "channel\%d") + * @unit_id: Unit id (e.g. 0 for "channel0") + * @test_format: Test name format (e.g. "loopback.\%s.tx.sent") + * @test_id: Test id (e.g. "PHY" for "loopback.PHY.tx_sent") + * + * Fill in an individual self-test entry. + */ +static void efx_fill_test(unsigned int test_index, + struct ethtool_string *strings, u64 *data, + int *test, const char *unit_format, int unit_id, + const char *test_format, const char *test_id) +{ + struct ethtool_string unit_str, test_str; + + /* Fill data value, if applicable */ + if (data) + data[test_index] = *test; + + /* Fill string, if applicable */ + if (strings) { + snprintf(unit_str.name, sizeof(unit_str.name), + unit_format, unit_id); + snprintf(test_str.name, sizeof(test_str.name), + test_format, test_id); + snprintf(strings[test_index].name, + sizeof(strings[test_index].name), + "%-9s%-17s", unit_str.name, test_str.name); + } +} + +#define EFX_PORT_NAME "port%d", 0 +#define EFX_CHANNEL_NAME(_channel) "channel%d", _channel->channel +#define EFX_TX_QUEUE_NAME(_tx_queue) "txq%d", _tx_queue->queue +#define EFX_RX_QUEUE_NAME(_rx_queue) "rxq%d", _rx_queue->queue +#define EFX_LOOPBACK_NAME(_mode, _counter) \ + "loopback.%s." 
_counter, LOOPBACK_MODE_NAME(mode) + +/** + * efx_fill_loopback_test - fill in a block of loopback self-test entries + * @efx: Efx NIC + * @lb_tests: Efx loopback self-test results structure + * @mode: Loopback test mode + * @test_index: Starting index of the test + * @strings: Ethtool strings, or %NULL + * @data: Ethtool test results, or %NULL + */ +static int efx_fill_loopback_test(struct efx_nic *efx, + struct efx_loopback_self_tests *lb_tests, + enum efx_loopback_mode mode, + unsigned int test_index, + struct ethtool_string *strings, u64 *data) +{ + struct efx_tx_queue *tx_queue; + + efx_for_each_tx_queue(tx_queue, efx) { + efx_fill_test(test_index++, strings, data, + &lb_tests->tx_sent[tx_queue->queue], + EFX_TX_QUEUE_NAME(tx_queue), + EFX_LOOPBACK_NAME(mode, "tx_sent")); + efx_fill_test(test_index++, strings, data, + &lb_tests->tx_done[tx_queue->queue], + EFX_TX_QUEUE_NAME(tx_queue), + EFX_LOOPBACK_NAME(mode, "tx_done")); + } + efx_fill_test(test_index++, strings, data, + &lb_tests->rx_good, + EFX_PORT_NAME, + EFX_LOOPBACK_NAME(mode, "rx_good")); + efx_fill_test(test_index++, strings, data, + &lb_tests->rx_bad, + EFX_PORT_NAME, + EFX_LOOPBACK_NAME(mode, "rx_bad")); + + return test_index; +} + +/** + * efx_ethtool_fill_self_tests - get self-test details + * @efx: Efx NIC + * @tests: Efx self-test results structure, or %NULL + * @strings: Ethtool strings, or %NULL + * @data: Ethtool test results, or %NULL + */ +static int efx_ethtool_fill_self_tests(struct efx_nic *efx, + struct efx_self_tests *tests, + struct ethtool_string *strings, + u64 *data) +{ + struct efx_channel *channel; + unsigned int n = 0; + enum efx_loopback_mode mode; + + /* Interrupt */ + efx_fill_test(n++, strings, data, &tests->interrupt, + "core", 0, "interrupt", NULL); + + /* Event queues */ + efx_for_each_channel(channel, efx) { + efx_fill_test(n++, strings, data, + &tests->eventq_dma[channel->channel], + EFX_CHANNEL_NAME(channel), + "eventq.dma", NULL); + efx_fill_test(n++, strings, data, + &tests->eventq_int[channel->channel], + EFX_CHANNEL_NAME(channel), + "eventq.int", NULL); + efx_fill_test(n++, strings, data, + &tests->eventq_poll[channel->channel], + EFX_CHANNEL_NAME(channel), + "eventq.poll", NULL); + } + + /* PHY presence */ + efx_fill_test(n++, strings, data, &tests->phy_ok, + EFX_PORT_NAME, "phy_ok", NULL); + + /* Loopback tests */ + efx_fill_test(n++, strings, data, &tests->loopback_speed, + EFX_PORT_NAME, "loopback.speed", NULL); + efx_fill_test(n++, strings, data, &tests->loopback_full_duplex, + EFX_PORT_NAME, "loopback.full_duplex", NULL); + for (mode = LOOPBACK_NONE; mode < LOOPBACK_TEST_MAX; mode++) { + if (!(efx->loopback_modes & (1 << mode))) + continue; + n = efx_fill_loopback_test(efx, + &tests->loopback[mode], mode, n, + strings, data); + } + + return n; +} + static int efx_ethtool_get_stats_count(struct net_device *net_dev) { return EFX_ETHTOOL_NUM_STATS; } +static int efx_ethtool_self_test_count(struct net_device *net_dev) +{ + struct efx_nic *efx = net_dev->priv; + + return efx_ethtool_fill_self_tests(efx, NULL, NULL, NULL); +} + static void efx_ethtool_get_strings(struct net_device *net_dev, u32 string_set, u8 *strings) { + struct efx_nic *efx = net_dev->priv; struct ethtool_string *ethtool_strings = (struct ethtool_string *)strings; int i; - if (string_set == ETH_SS_STATS) + switch (string_set) { + case ETH_SS_STATS: for (i = 0; i < EFX_ETHTOOL_NUM_STATS; i++) strncpy(ethtool_strings[i].name, efx_ethtool_stats[i].name, sizeof(ethtool_strings[i].name)); + break; + case ETH_SS_TEST: + 
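/* tests == NULL and data == NULL: this pass only fills in the name strings */ +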
efx_ethtool_fill_self_tests(efx, NULL, + ethtool_strings, NULL); + break; + default: + /* No other string sets */ + break; + } } static void efx_ethtool_get_stats(struct net_device *net_dev, @@ -272,6 +442,22 @@ static void efx_ethtool_get_stats(struct net_device *net_dev, } } +static int efx_ethtool_set_tso(struct net_device *net_dev, u32 enable) +{ + int rc; + + /* Our TSO requires TX checksumming, so force TX checksumming + * on when TSO is enabled. + */ + if (enable) { + rc = efx_ethtool_set_tx_csum(net_dev, 1); + if (rc) + return rc; + } + + return ethtool_op_set_tso(net_dev, enable); +} + static int efx_ethtool_set_tx_csum(struct net_device *net_dev, u32 enable) { struct efx_nic *efx = net_dev->priv; @@ -283,6 +469,15 @@ static int efx_ethtool_set_tx_csum(struct net_device *net_dev, u32 enable) efx_flush_queues(efx); + /* Our TSO requires TX checksumming, so disable TSO when + * checksumming is disabled + */ + if (!enable) { + rc = efx_ethtool_set_tso(net_dev, 0); + if (rc) + return rc; + } + return 0; } @@ -305,6 +500,64 @@ static u32 efx_ethtool_get_rx_csum(struct net_device *net_dev) return efx->rx_checksum_enabled; } +static void efx_ethtool_self_test(struct net_device *net_dev, + struct ethtool_test *test, u64 *data) +{ + struct efx_nic *efx = net_dev->priv; + struct efx_self_tests efx_tests; + int offline, already_up; + int rc; + + ASSERT_RTNL(); + if (efx->state != STATE_RUNNING) { + rc = -EIO; + goto fail1; + } + + /* We need rx buffers and interrupts. */ + already_up = (efx->net_dev->flags & IFF_UP); + if (!already_up) { + rc = dev_open(efx->net_dev); + if (rc) { + EFX_ERR(efx, "failed opening device.\n"); + goto fail2; + } + } + + memset(&efx_tests, 0, sizeof(efx_tests)); + offline = (test->flags & ETH_TEST_FL_OFFLINE); + + /* Perform online self tests first */ + rc = efx_online_test(efx, &efx_tests); + if (rc) + goto out; + + /* Perform offline tests only if online tests passed */ + if (offline) { + /* Stop the kernel from sending packets during the test. */ + efx_stop_queue(efx); + rc = efx_flush_queues(efx); + if (!rc) + rc = efx_offline_test(efx, &efx_tests, + efx->loopback_modes); + efx_wake_queue(efx); + } + + out: + if (!already_up) + dev_close(efx->net_dev); + + EFX_LOG(efx, "%s all %sline self-tests\n", + rc == 0 ? "passed" : "failed", offline ? "off" : "on"); + + fail2: + fail1: + /* Fill ethtool results structures */ + efx_ethtool_fill_self_tests(efx, &efx_tests, NULL, data); + if (rc) + test->flags |= ETH_TEST_FL_FAILED; +} + /* Restart autonegotiation */ static int efx_ethtool_nway_reset(struct net_device *net_dev) { @@ -451,8 +704,12 @@ struct ethtool_ops efx_ethtool_ops = { .set_tx_csum = efx_ethtool_set_tx_csum, .get_sg = ethtool_op_get_sg, .set_sg = ethtool_op_set_sg, + .get_tso = ethtool_op_get_tso, + .set_tso = efx_ethtool_set_tso, .get_flags = ethtool_op_get_flags, .set_flags = ethtool_op_set_flags, + .self_test_count = efx_ethtool_self_test_count, + .self_test = efx_ethtool_self_test, .get_strings = efx_ethtool_get_strings, .phys_id = efx_ethtool_phys_id, .get_stats_count = efx_ethtool_get_stats_count, diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c index 46db549ce58..b57cc68058c 100644 --- a/drivers/net/sfc/falcon.c +++ b/drivers/net/sfc/falcon.c @@ -1129,6 +1129,7 @@ static void falcon_handle_driver_event(struct efx_channel *channel, case RX_RECOVERY_EV_DECODE: EFX_ERR(efx, "channel %d seen DRIVER RX_RESET event. " "Resetting.\n", channel->channel); + atomic_inc(&efx->rx_reset); efx_schedule_reset(efx, EFX_WORKAROUND_6555(efx) ? 
RESET_TYPE_RX_RECOVERY : @@ -1731,7 +1732,8 @@ void falcon_drain_tx_fifo(struct efx_nic *efx) efx_oword_t temp; int count; - if (FALCON_REV(efx) < FALCON_REV_B0) + if ((FALCON_REV(efx) < FALCON_REV_B0) || + (efx->loopback_mode != LOOPBACK_NONE)) return; falcon_read(efx, &temp, MAC0_CTRL_REG_KER); @@ -2091,6 +2093,8 @@ static int falcon_probe_phy(struct efx_nic *efx) efx->phy_type); return -1; } + + efx->loopback_modes = LOOPBACKS_10G_INTERNAL | efx->phy_op->loopbacks; return 0; } @@ -2468,14 +2472,12 @@ int falcon_probe_nic(struct efx_nic *efx) fail5: falcon_free_buffer(efx, &efx->irq_status); fail4: - /* fall-thru */ fail3: if (nic_data->pci_dev2) { pci_dev_put(nic_data->pci_dev2); nic_data->pci_dev2 = NULL; } fail2: - /* fall-thru */ fail1: kfree(efx->nic_data); return rc; diff --git a/drivers/net/sfc/falcon_hwdefs.h b/drivers/net/sfc/falcon_hwdefs.h index 0485a63eaff..06e2d68fc3d 100644 --- a/drivers/net/sfc/falcon_hwdefs.h +++ b/drivers/net/sfc/falcon_hwdefs.h @@ -636,6 +636,14 @@ #define XX_HIDRVA_WIDTH 1 #define XX_LODRVA_LBN 8 #define XX_LODRVA_WIDTH 1 +#define XX_LPBKD_LBN 3 +#define XX_LPBKD_WIDTH 1 +#define XX_LPBKC_LBN 2 +#define XX_LPBKC_WIDTH 1 +#define XX_LPBKB_LBN 1 +#define XX_LPBKB_WIDTH 1 +#define XX_LPBKA_LBN 0 +#define XX_LPBKA_WIDTH 1 #define XX_TXDRV_CTL_REG_MAC 0x12 #define XX_DEQD_LBN 28 @@ -656,8 +664,14 @@ #define XX_DTXA_WIDTH 4 /* XAUI XGXS core status register */ -#define XX_FORCE_SIG_DECODE_FORCED 0xff #define XX_CORE_STAT_REG_MAC 0x16 +#define XX_FORCE_SIG_LBN 24 +#define XX_FORCE_SIG_WIDTH 8 +#define XX_FORCE_SIG_DECODE_FORCED 0xff +#define XX_XGXS_LB_EN_LBN 23 +#define XX_XGXS_LB_EN_WIDTH 1 +#define XX_XGMII_LB_EN_LBN 22 +#define XX_XGMII_LB_EN_WIDTH 1 #define XX_ALIGN_DONE_LBN 20 #define XX_ALIGN_DONE_WIDTH 1 #define XX_SYNC_STAT_LBN 16 diff --git a/drivers/net/sfc/falcon_xmac.c b/drivers/net/sfc/falcon_xmac.c index aa7521b24a5..a74b7931a3c 100644 --- a/drivers/net/sfc/falcon_xmac.c +++ b/drivers/net/sfc/falcon_xmac.c @@ -32,7 +32,7 @@ (FALCON_XMAC_REGBANK + ((mac_reg) * FALCON_XMAC_REG_SIZE)) void falcon_xmac_writel(struct efx_nic *efx, - efx_dword_t *value, unsigned int mac_reg) + efx_dword_t *value, unsigned int mac_reg) { efx_oword_t temp; @@ -69,6 +69,10 @@ static int falcon_reset_xmac(struct efx_nic *efx) udelay(10); } + /* This often fails when DSP is disabled, ignore it */ + if (sfe4001_phy_flash_cfg != 0) + return 0; + EFX_ERR(efx, "timed out waiting for XMAC core reset\n"); return -ETIMEDOUT; } @@ -223,7 +227,7 @@ static int falcon_xgmii_status(struct efx_nic *efx) /* The ISR latches, so clear it and re-read */ falcon_xmac_readl(efx, ®, XM_MGT_INT_REG_MAC_B0); falcon_xmac_readl(efx, ®, XM_MGT_INT_REG_MAC_B0); - + if (EFX_DWORD_FIELD(reg, XM_LCLFLT) || EFX_DWORD_FIELD(reg, XM_RMTFLT)) { EFX_INFO(efx, "MGT_INT: "EFX_DWORD_FMT"\n", EFX_DWORD_VAL(reg)); @@ -237,7 +241,7 @@ static void falcon_mask_status_intr(struct efx_nic *efx, int enable) { efx_dword_t reg; - if (FALCON_REV(efx) < FALCON_REV_B0) + if ((FALCON_REV(efx) < FALCON_REV_B0) || LOOPBACK_INTERNAL(efx)) return; /* Flush the ISR */ @@ -284,6 +288,9 @@ int falcon_xaui_link_ok(struct efx_nic *efx) efx_dword_t reg; int align_done, sync_status, link_ok = 0; + if (LOOPBACK_INTERNAL(efx)) + return 1; + /* Read link status */ falcon_xmac_readl(efx, ®, XX_CORE_STAT_REG_MAC); @@ -374,6 +381,61 @@ static void falcon_reconfigure_xmac_core(struct efx_nic *efx) falcon_xmac_writel(efx, ®, XM_ADR_HI_REG_MAC); } +static void falcon_reconfigure_xgxs_core(struct efx_nic *efx) +{ + efx_dword_t reg; + int 
xgxs_loopback = (efx->loopback_mode == LOOPBACK_XGXS) ? 1 : 0; + int xaui_loopback = (efx->loopback_mode == LOOPBACK_XAUI) ? 1 : 0; + int xgmii_loopback = + (efx->loopback_mode == LOOPBACK_XGMII) ? 1 : 0; + + /* XGXS block is flaky and will need to be reset if moving + * into our out of XGMII, XGXS or XAUI loopbacks. */ + if (EFX_WORKAROUND_5147(efx)) { + int old_xgmii_loopback, old_xgxs_loopback, old_xaui_loopback; + int reset_xgxs; + + falcon_xmac_readl(efx, ®, XX_CORE_STAT_REG_MAC); + old_xgxs_loopback = EFX_DWORD_FIELD(reg, XX_XGXS_LB_EN); + old_xgmii_loopback = EFX_DWORD_FIELD(reg, XX_XGMII_LB_EN); + + falcon_xmac_readl(efx, ®, XX_SD_CTL_REG_MAC); + old_xaui_loopback = EFX_DWORD_FIELD(reg, XX_LPBKA); + + /* The PHY driver may have turned XAUI off */ + reset_xgxs = ((xgxs_loopback != old_xgxs_loopback) || + (xaui_loopback != old_xaui_loopback) || + (xgmii_loopback != old_xgmii_loopback)); + if (reset_xgxs) { + falcon_xmac_readl(efx, ®, XX_PWR_RST_REG_MAC); + EFX_SET_DWORD_FIELD(reg, XX_RSTXGXSTX_EN, 1); + EFX_SET_DWORD_FIELD(reg, XX_RSTXGXSRX_EN, 1); + falcon_xmac_writel(efx, ®, XX_PWR_RST_REG_MAC); + udelay(1); + EFX_SET_DWORD_FIELD(reg, XX_RSTXGXSTX_EN, 0); + EFX_SET_DWORD_FIELD(reg, XX_RSTXGXSRX_EN, 0); + falcon_xmac_writel(efx, ®, XX_PWR_RST_REG_MAC); + udelay(1); + } + } + + falcon_xmac_readl(efx, ®, XX_CORE_STAT_REG_MAC); + EFX_SET_DWORD_FIELD(reg, XX_FORCE_SIG, + (xgxs_loopback || xaui_loopback) ? + XX_FORCE_SIG_DECODE_FORCED : 0); + EFX_SET_DWORD_FIELD(reg, XX_XGXS_LB_EN, xgxs_loopback); + EFX_SET_DWORD_FIELD(reg, XX_XGMII_LB_EN, xgmii_loopback); + falcon_xmac_writel(efx, ®, XX_CORE_STAT_REG_MAC); + + falcon_xmac_readl(efx, ®, XX_SD_CTL_REG_MAC); + EFX_SET_DWORD_FIELD(reg, XX_LPBKD, xaui_loopback); + EFX_SET_DWORD_FIELD(reg, XX_LPBKC, xaui_loopback); + EFX_SET_DWORD_FIELD(reg, XX_LPBKB, xaui_loopback); + EFX_SET_DWORD_FIELD(reg, XX_LPBKA, xaui_loopback); + falcon_xmac_writel(efx, ®, XX_SD_CTL_REG_MAC); +} + + /* Try and bring the Falcon side of the Falcon-Phy XAUI link fails * to come back up. Bash it until it comes back up */ static int falcon_check_xaui_link_up(struct efx_nic *efx) @@ -382,7 +444,8 @@ static int falcon_check_xaui_link_up(struct efx_nic *efx) tries = EFX_WORKAROUND_5147(efx) ? 
5 : 1; max_tries = tries; - if (efx->phy_type == PHY_TYPE_NONE) + if ((efx->loopback_mode == LOOPBACK_NETWORK) || + (efx->phy_type == PHY_TYPE_NONE)) return 0; while (tries) { @@ -408,8 +471,13 @@ void falcon_reconfigure_xmac(struct efx_nic *efx) falcon_mask_status_intr(efx, 0); falcon_deconfigure_mac_wrapper(efx); + + efx->tx_disabled = LOOPBACK_INTERNAL(efx); efx->phy_op->reconfigure(efx); + + falcon_reconfigure_xgxs_core(efx); falcon_reconfigure_xmac_core(efx); + falcon_reconfigure_mac_wrapper(efx); /* Ensure XAUI link is up */ @@ -491,13 +559,15 @@ void falcon_update_stats_xmac(struct efx_nic *efx) (mac_stats->rx_bytes - mac_stats->rx_good_bytes); } -#define EFX_XAUI_RETRAIN_MAX 8 - int falcon_check_xmac(struct efx_nic *efx) { unsigned xaui_link_ok; int rc; + if ((efx->loopback_mode == LOOPBACK_NETWORK) || + (efx->phy_type == PHY_TYPE_NONE)) + return 0; + falcon_mask_status_intr(efx, 0); xaui_link_ok = falcon_xaui_link_ok(efx); diff --git a/drivers/net/sfc/mdio_10g.c b/drivers/net/sfc/mdio_10g.c index dc06bb0aa57..c4f540e93b7 100644 --- a/drivers/net/sfc/mdio_10g.c +++ b/drivers/net/sfc/mdio_10g.c @@ -44,6 +44,9 @@ static int mdio_clause45_check_mmd(struct efx_nic *efx, int mmd, int status; int phy_id = efx->mii.phy_id; + if (LOOPBACK_INTERNAL(efx)) + return 0; + /* Read MMD STATUS2 to check it is responding. */ status = mdio_clause45_read(efx, phy_id, mmd, MDIO_MMDREG_STAT2); if (((status >> MDIO_MMDREG_STAT2_PRESENT_LBN) & @@ -164,6 +167,22 @@ int mdio_clause45_links_ok(struct efx_nic *efx, unsigned int mmd_mask) int mmd = 0; int good; + /* If the port is in loopback, then we should only consider a subset + * of mmd's */ + if (LOOPBACK_INTERNAL(efx)) + return 1; + else if (efx->loopback_mode == LOOPBACK_NETWORK) + return 0; + else if (efx->loopback_mode == LOOPBACK_PHYXS) + mmd_mask &= ~(MDIO_MMDREG_DEVS0_PHYXS | + MDIO_MMDREG_DEVS0_PCS | + MDIO_MMDREG_DEVS0_PMAPMD); + else if (efx->loopback_mode == LOOPBACK_PCS) + mmd_mask &= ~(MDIO_MMDREG_DEVS0_PCS | + MDIO_MMDREG_DEVS0_PMAPMD); + else if (efx->loopback_mode == LOOPBACK_PMAPMD) + mmd_mask &= ~MDIO_MMDREG_DEVS0_PMAPMD; + while (mmd_mask) { if (mmd_mask & 1) { /* Double reads because link state is latched, and a @@ -182,6 +201,65 @@ int mdio_clause45_links_ok(struct efx_nic *efx, unsigned int mmd_mask) return ok; } +void mdio_clause45_transmit_disable(struct efx_nic *efx) +{ + int phy_id = efx->mii.phy_id; + int ctrl1, ctrl2; + + ctrl1 = ctrl2 = mdio_clause45_read(efx, phy_id, MDIO_MMD_PMAPMD, + MDIO_MMDREG_TXDIS); + if (efx->tx_disabled) + ctrl2 |= (1 << MDIO_MMDREG_TXDIS_GLOBAL_LBN); + else + ctrl1 &= ~(1 << MDIO_MMDREG_TXDIS_GLOBAL_LBN); + if (ctrl1 != ctrl2) + mdio_clause45_write(efx, phy_id, MDIO_MMD_PMAPMD, + MDIO_MMDREG_TXDIS, ctrl2); +} + +void mdio_clause45_phy_reconfigure(struct efx_nic *efx) +{ + int phy_id = efx->mii.phy_id; + int ctrl1, ctrl2; + + /* Handle (with debouncing) PMA/PMD loopback */ + ctrl1 = ctrl2 = mdio_clause45_read(efx, phy_id, MDIO_MMD_PMAPMD, + MDIO_MMDREG_CTRL1); + + if (efx->loopback_mode == LOOPBACK_PMAPMD) + ctrl2 |= (1 << MDIO_PMAPMD_CTRL1_LBACK_LBN); + else + ctrl2 &= ~(1 << MDIO_PMAPMD_CTRL1_LBACK_LBN); + + if (ctrl1 != ctrl2) + mdio_clause45_write(efx, phy_id, MDIO_MMD_PMAPMD, + MDIO_MMDREG_CTRL1, ctrl2); + + /* Handle (with debouncing) PCS loopback */ + ctrl1 = ctrl2 = mdio_clause45_read(efx, phy_id, MDIO_MMD_PCS, + MDIO_MMDREG_CTRL1); + if (efx->loopback_mode == LOOPBACK_PCS) + ctrl2 |= (1 << MDIO_MMDREG_CTRL1_LBACK_LBN); + else + ctrl2 &= ~(1 << MDIO_MMDREG_CTRL1_LBACK_LBN); + + if (ctrl1 != 
ctrl2) + mdio_clause45_write(efx, phy_id, MDIO_MMD_PCS, + MDIO_MMDREG_CTRL1, ctrl2); + + /* Handle (with debouncing) PHYXS network loopback */ + ctrl1 = ctrl2 = mdio_clause45_read(efx, phy_id, MDIO_MMD_PHYXS, + MDIO_MMDREG_CTRL1); + if (efx->loopback_mode == LOOPBACK_NETWORK) + ctrl2 |= (1 << MDIO_MMDREG_CTRL1_LBACK_LBN); + else + ctrl2 &= ~(1 << MDIO_MMDREG_CTRL1_LBACK_LBN); + + if (ctrl1 != ctrl2) + mdio_clause45_write(efx, phy_id, MDIO_MMD_PHYXS, + MDIO_MMDREG_CTRL1, ctrl2); +} + /** * mdio_clause45_get_settings - Read (some of) the PHY settings over MDIO. * @efx: Efx NIC diff --git a/drivers/net/sfc/mdio_10g.h b/drivers/net/sfc/mdio_10g.h index 2214b6d820a..cb99f3f4491 100644 --- a/drivers/net/sfc/mdio_10g.h +++ b/drivers/net/sfc/mdio_10g.h @@ -44,11 +44,16 @@ #define MDIO_MMDREG_DEVS1 (6) #define MDIO_MMDREG_CTRL2 (7) #define MDIO_MMDREG_STAT2 (8) +#define MDIO_MMDREG_TXDIS (9) /* Bits in MMDREG_CTRL1 */ /* Reset */ #define MDIO_MMDREG_CTRL1_RESET_LBN (15) #define MDIO_MMDREG_CTRL1_RESET_WIDTH (1) +/* Loopback */ +/* Loopback bit for WIS, PCS, PHYSX and DTEXS */ +#define MDIO_MMDREG_CTRL1_LBACK_LBN (14) +#define MDIO_MMDREG_CTRL1_LBACK_WIDTH (1) /* Bits in MMDREG_STAT1 */ #define MDIO_MMDREG_STAT1_FAULT_LBN (7) @@ -56,6 +61,9 @@ /* Link state */ #define MDIO_MMDREG_STAT1_LINK_LBN (2) #define MDIO_MMDREG_STAT1_LINK_WIDTH (1) +/* Low power ability */ +#define MDIO_MMDREG_STAT1_LPABLE_LBN (1) +#define MDIO_MMDREG_STAT1_LPABLE_WIDTH (1) /* Bits in ID reg */ #define MDIO_ID_REV(_id32) (_id32 & 0xf) @@ -76,6 +84,14 @@ #define MDIO_MMDREG_STAT2_PRESENT_LBN (14) #define MDIO_MMDREG_STAT2_PRESENT_WIDTH (2) +/* Bits in MMDREG_TXDIS */ +#define MDIO_MMDREG_TXDIS_GLOBAL_LBN (0) +#define MDIO_MMDREG_TXDIS_GLOBAL_WIDTH (1) + +/* MMD-specific bits, ordered by MMD, then register */ +#define MDIO_PMAPMD_CTRL1_LBACK_LBN (0) +#define MDIO_PMAPMD_CTRL1_LBACK_WIDTH (1) + /* PMA type (4 bits) */ #define MDIO_PMAPMD_CTRL2_10G_CX4 (0x0) #define MDIO_PMAPMD_CTRL2_10G_EW (0x1) @@ -95,7 +111,7 @@ #define MDIO_PMAPMD_CTRL2_10_BT (0xf) #define MDIO_PMAPMD_CTRL2_TYPE_MASK (0xf) -/* /\* PHY XGXS lane state *\/ */ +/* PHY XGXS lane state */ #define MDIO_PHYXS_LANE_STATE (0x18) #define MDIO_PHYXS_LANE_ALIGNED_LBN (12) @@ -217,6 +233,12 @@ int mdio_clause45_check_mmds(struct efx_nic *efx, extern int mdio_clause45_links_ok(struct efx_nic *efx, unsigned int mmd_mask); +/* Generic transmit disable support though PMAPMD */ +extern void mdio_clause45_transmit_disable(struct efx_nic *efx); + +/* Generic part of reconfigure: set/clear loopback bits */ +extern void mdio_clause45_phy_reconfigure(struct efx_nic *efx); + /* Read (some of) the PHY settings over MDIO */ extern void mdio_clause45_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd); diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h index c505482c252..59f261b4171 100644 --- a/drivers/net/sfc/net_driver.h +++ b/drivers/net/sfc/net_driver.h @@ -134,6 +134,8 @@ struct efx_special_buffer { * Set only on the final fragment of a packet; %NULL for all other * fragments. When this fragment completes, then we can free this * skb. + * @tsoh: The associated TSO header structure, or %NULL if this + * buffer is not a TSO header. * @dma_addr: DMA address of the fragment. * @len: Length of this fragment. * This field is zero when the queue slot is empty. 
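A minimal sketch of the free-list discipline behind the new @tsoh field and the tso_headers_free list added by this series. The types here are simplified stand-ins and efx_tsoh_pop() is a hypothetical name; the real push side is efx_tsoh_free() in tx.c further down.

#include <linux/types.h>

/* Simplified stand-ins for the structures added in tx.c. */
struct efx_tso_header_sketch {
	struct efx_tso_header_sketch *next;	/* free-list link; union'd with unmap_len in the real struct */
	dma_addr_t dma_addr;			/* DMA address of the mapped header buffer */
};

struct efx_tx_queue_sketch {
	struct efx_tso_header_sketch *tso_headers_free;	/* protected by the TX queue lock */
};

/* Pop one preallocated TSOH_STD_SIZE header from the cache. In the
 * real driver an empty cache is refilled a page at a time by
 * efx_tsoh_block_alloc(), and oversize headers bypass the cache via
 * efx_tsoh_heap_alloc(). */
static struct efx_tso_header_sketch *
efx_tsoh_pop(struct efx_tx_queue_sketch *txq)
{
	struct efx_tso_header_sketch *tsoh = txq->tso_headers_free;

	if (tsoh)
		txq->tso_headers_free = tsoh->next;
	return tsoh;
}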
@@ -144,6 +146,7 @@ struct efx_special_buffer { */ struct efx_tx_buffer { const struct sk_buff *skb; + struct efx_tso_header *tsoh; dma_addr_t dma_addr; unsigned short len; unsigned char continuation; @@ -187,6 +190,13 @@ struct efx_tx_buffer { * variable indicates that the queue is full. This is to * avoid cache-line ping-pong between the xmit path and the * completion path. + * @tso_headers_free: A list of TSO headers allocated for this TX queue + * that are not in use, and so available for new TSO sends. The list + * is protected by the TX queue lock. + * @tso_bursts: Number of times TSO xmit invoked by kernel + * @tso_long_headers: Number of packets with headers too long for standard + * blocks + * @tso_packets: Number of packets via the TSO xmit path */ struct efx_tx_queue { /* Members which don't change on the fast path */ @@ -206,6 +216,10 @@ struct efx_tx_queue { unsigned int insert_count ____cacheline_aligned_in_smp; unsigned int write_count; unsigned int old_read_count; + struct efx_tso_header *tso_headers_free; + unsigned int tso_bursts; + unsigned int tso_long_headers; + unsigned int tso_packets; }; /** @@ -434,6 +448,9 @@ struct efx_board { struct efx_blinker blinker; }; +#define STRING_TABLE_LOOKUP(val, member) \ + member ## _names[val] + enum efx_int_mode { /* Be careful if altering to correct macro below */ EFX_INT_MODE_MSIX = 0, @@ -506,6 +523,7 @@ enum efx_fc_type { * @check_hw: Check hardware * @reset_xaui: Reset XAUI side of PHY for (software sequenced reset) * @mmds: MMD presence mask + * @loopbacks: Supported loopback modes mask */ struct efx_phy_operations { int (*init) (struct efx_nic *efx); @@ -515,6 +533,7 @@ struct efx_phy_operations { int (*check_hw) (struct efx_nic *efx); void (*reset_xaui) (struct efx_nic *efx); int mmds; + unsigned loopbacks; }; /* @@ -653,7 +672,6 @@ union efx_multicast_hash { * @phy_op: PHY interface * @phy_data: PHY private data (including PHY-specific stats) * @mii: PHY interface - * @phy_powered: PHY power state * @tx_disabled: PHY transmitter turned off * @link_up: Link status * @link_options: Link options (MII/GMII format) @@ -662,6 +680,9 @@ union efx_multicast_hash { * @multicast_hash: Multicast hash table * @flow_control: Flow control flags - separate RX/TX so can't use link_options * @reconfigure_work: work item for dealing with PHY events + * @loopback_mode: Loopback status + * @loopback_modes: Supported loopback mode bitmask + * @loopback_selftest: Offline self-test private state * * The @priv field of the corresponding &struct net_device points to * this. @@ -721,6 +742,7 @@ struct efx_nic { struct efx_phy_operations *phy_op; void *phy_data; struct mii_if_info mii; + unsigned tx_disabled; int link_up; unsigned int link_options; @@ -732,6 +754,10 @@ struct efx_nic { struct work_struct reconfigure_work; atomic_t rx_reset; + enum efx_loopback_mode loopback_mode; + unsigned int loopback_modes; + + void *loopback_selftest; }; /** diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c index 551299b462a..670622373dd 100644 --- a/drivers/net/sfc/rx.c +++ b/drivers/net/sfc/rx.c @@ -19,6 +19,7 @@ #include "rx.h" #include "efx.h" #include "falcon.h" +#include "selftest.h" #include "workarounds.h" /* Number of RX descriptors pushed at once. 
*/ @@ -683,6 +684,15 @@ void __efx_rx_packet(struct efx_channel *channel, struct sk_buff *skb; int lro = efx->net_dev->features & NETIF_F_LRO; + /* If we're in loopback test, then pass the packet directly to the + * loopback layer, and free the rx_buf here + */ + if (unlikely(efx->loopback_selftest)) { + efx_loopback_rx_packet(efx, rx_buf->data, rx_buf->len); + efx_free_rx_buffer(efx, rx_buf); + goto done; + } + if (rx_buf->skb) { prefetch(skb_shinfo(rx_buf->skb)); @@ -736,7 +746,6 @@ void __efx_rx_packet(struct efx_channel *channel, /* Update allocation strategy method */ channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB; - /* fall-thru */ done: efx->net_dev->last_rx = jiffies; } diff --git a/drivers/net/sfc/selftest.c b/drivers/net/sfc/selftest.c new file mode 100644 index 00000000000..cbda15946e8 --- /dev/null +++ b/drivers/net/sfc/selftest.c @@ -0,0 +1,717 @@ +/**************************************************************************** + * Driver for Solarflare Solarstorm network controllers and boards + * Copyright 2005-2006 Fen Systems Ltd. + * Copyright 2006-2008 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +#include <linux/netdevice.h> +#include <linux/module.h> +#include <linux/delay.h> +#include <linux/kernel_stat.h> +#include <linux/pci.h> +#include <linux/ethtool.h> +#include <linux/ip.h> +#include <linux/in.h> +#include <linux/udp.h> +#include <linux/rtnetlink.h> +#include <asm/io.h> +#include "net_driver.h" +#include "ethtool.h" +#include "efx.h" +#include "falcon.h" +#include "selftest.h" +#include "boards.h" +#include "workarounds.h" +#include "mac.h" + +/* + * Loopback test packet structure + * + * The self-test should stress every RSS vector, and unfortunately + * Falcon only performs RSS on TCP/UDP packets. + */ +struct efx_loopback_payload { + struct ethhdr header; + struct iphdr ip; + struct udphdr udp; + __be16 iteration; + const char msg[64]; +} __attribute__ ((packed)); + +/* Loopback test source MAC address */ +static const unsigned char payload_source[ETH_ALEN] = { + 0x00, 0x0f, 0x53, 0x1b, 0x1b, 0x1b, +}; + +static const char *payload_msg = + "Hello world! This is an Efx loopback test in progress!"; + +/** + * efx_selftest_state - persistent state during a selftest + * @flush: Drop all packets in efx_loopback_rx_packet + * @packet_count: Number of packets being used in this test + * @skbs: An array of skbs transmitted + * @rx_good: RX good packet count + * @rx_bad: RX bad packet count + * @payload: Payload used in tests + */ +struct efx_selftest_state { + int flush; + int packet_count; + struct sk_buff **skbs; + atomic_t rx_good; + atomic_t rx_bad; + struct efx_loopback_payload payload; +}; + +/************************************************************************** + * + * Configurable values + * + **************************************************************************/ + +/* Level of loopback testing + * + * The maximum packet burst length is 16**(n-1), i.e. 
+ * + * - Level 0 : no packets + * - Level 1 : 1 packet + * - Level 2 : 17 packets (1 * 1 packet, 1 * 16 packets) + * - Level 3 : 273 packets (1 * 1 packet, 1 * 16 packet, 1 * 256 packets) + * + */ +static unsigned int loopback_test_level = 3; + +/************************************************************************** + * + * Interrupt and event queue testing + * + **************************************************************************/ + +/* Test generation and receipt of interrupts */ +static int efx_test_interrupts(struct efx_nic *efx, + struct efx_self_tests *tests) +{ + struct efx_channel *channel; + + EFX_LOG(efx, "testing interrupts\n"); + tests->interrupt = -1; + + /* Reset interrupt flag */ + efx->last_irq_cpu = -1; + smp_wmb(); + + /* ACK each interrupting event queue. Receiving an interrupt due to + * traffic before a test event is raised is considered a pass */ + efx_for_each_channel_with_interrupt(channel, efx) { + if (channel->work_pending) + efx_process_channel_now(channel); + if (efx->last_irq_cpu >= 0) + goto success; + } + + falcon_generate_interrupt(efx); + + /* Wait for arrival of test interrupt. */ + EFX_LOG(efx, "waiting for test interrupt\n"); + schedule_timeout_uninterruptible(HZ / 10); + if (efx->last_irq_cpu >= 0) + goto success; + + EFX_ERR(efx, "timed out waiting for interrupt\n"); + return -ETIMEDOUT; + + success: + EFX_LOG(efx, "test interrupt (mode %d) seen on CPU%d\n", + efx->interrupt_mode, efx->last_irq_cpu); + tests->interrupt = 1; + return 0; +} + +/* Test generation and receipt of non-interrupting events */ +static int efx_test_eventq(struct efx_channel *channel, + struct efx_self_tests *tests) +{ + unsigned int magic; + + /* Channel specific code, limited to 20 bits */ + magic = (0x00010150 + channel->channel); + EFX_LOG(channel->efx, "channel %d testing event queue with code %x\n", + channel->channel, magic); + + tests->eventq_dma[channel->channel] = -1; + tests->eventq_int[channel->channel] = 1; /* fake pass */ + tests->eventq_poll[channel->channel] = 1; /* fake pass */ + + /* Reset flag and zero magic word */ + channel->efx->last_irq_cpu = -1; + channel->eventq_magic = 0; + smp_wmb(); + + falcon_generate_test_event(channel, magic); + udelay(1); + + efx_process_channel_now(channel); + if (channel->eventq_magic != magic) { + EFX_ERR(channel->efx, "channel %d failed to see test event\n", + channel->channel); + return -ETIMEDOUT; + } else { + tests->eventq_dma[channel->channel] = 1; + } + + return 0; +} + +/* Test generation and receipt of interrupting events */ +static int efx_test_eventq_irq(struct efx_channel *channel, + struct efx_self_tests *tests) +{ + unsigned int magic, count; + + /* Channel specific code, limited to 20 bits */ + magic = (0x00010150 + channel->channel); + EFX_LOG(channel->efx, "channel %d testing event queue with code %x\n", + channel->channel, magic); + + tests->eventq_dma[channel->channel] = -1; + tests->eventq_int[channel->channel] = -1; + tests->eventq_poll[channel->channel] = -1; + + /* Reset flag and zero magic word */ + channel->efx->last_irq_cpu = -1; + channel->eventq_magic = 0; + smp_wmb(); + + falcon_generate_test_event(channel, magic); + + /* Wait for arrival of interrupt */ + count = 0; + do { + schedule_timeout_uninterruptible(HZ / 100); + + if (channel->work_pending) + efx_process_channel_now(channel); + + if (channel->eventq_magic == magic) + goto eventq_ok; + } while (++count < 2); + + EFX_ERR(channel->efx, "channel %d timed out waiting for event queue\n", + channel->channel); + + /* See if interrupt 
arrived */ + if (channel->efx->last_irq_cpu >= 0) { + EFX_ERR(channel->efx, "channel %d saw interrupt on CPU%d " + "during event queue test\n", channel->channel, + raw_smp_processor_id()); + tests->eventq_int[channel->channel] = 1; + } + + /* Check to see if event was received even if interrupt wasn't */ + efx_process_channel_now(channel); + if (channel->eventq_magic == magic) { + EFX_ERR(channel->efx, "channel %d event was generated, but " + "failed to trigger an interrupt\n", channel->channel); + tests->eventq_dma[channel->channel] = 1; + } + + return -ETIMEDOUT; + eventq_ok: + EFX_LOG(channel->efx, "channel %d event queue passed\n", + channel->channel); + tests->eventq_dma[channel->channel] = 1; + tests->eventq_int[channel->channel] = 1; + tests->eventq_poll[channel->channel] = 1; + return 0; +} + +/************************************************************************** + * + * PHY testing + * + **************************************************************************/ + +/* Check PHY presence by reading the PHY ID registers */ +static int efx_test_phy(struct efx_nic *efx, + struct efx_self_tests *tests) +{ + u16 physid1, physid2; + struct mii_if_info *mii = &efx->mii; + struct net_device *net_dev = efx->net_dev; + + if (efx->phy_type == PHY_TYPE_NONE) + return 0; + + EFX_LOG(efx, "testing PHY presence\n"); + tests->phy_ok = -1; + + physid1 = mii->mdio_read(net_dev, mii->phy_id, MII_PHYSID1); + physid2 = mii->mdio_read(net_dev, mii->phy_id, MII_PHYSID2); + + if ((physid1 != 0x0000) && (physid1 != 0xffff) && + (physid2 != 0x0000) && (physid2 != 0xffff)) { + EFX_LOG(efx, "found MII PHY %d ID 0x%x:%x\n", + mii->phy_id, physid1, physid2); + tests->phy_ok = 1; + return 0; + } + + EFX_ERR(efx, "no MII PHY present with ID %d\n", mii->phy_id); + return -ENODEV; +} + +/************************************************************************** + * + * Loopback testing + * NB Only one loopback test can be executing concurrently. + * + **************************************************************************/ + +/* Loopback test RX callback + * This is called for each received packet during loopback testing. 
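+ * The received copy's ip.saddr and ip.check are patched to the expected
+ * values before comparison, because efx_tx_loopback() varies the source
+ * address per packet to exercise the RSS vectors.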
+ */ +void efx_loopback_rx_packet(struct efx_nic *efx, + const char *buf_ptr, int pkt_len) +{ + struct efx_selftest_state *state = efx->loopback_selftest; + struct efx_loopback_payload *received; + struct efx_loopback_payload *payload; + + BUG_ON(!buf_ptr); + + /* If we are just flushing, then drop the packet */ + if ((state == NULL) || state->flush) + return; + + payload = &state->payload; + + received = (struct efx_loopback_payload *)(char *) buf_ptr; + received->ip.saddr = payload->ip.saddr; + received->ip.check = payload->ip.check; + + /* Check that header exists */ + if (pkt_len < sizeof(received->header)) { + EFX_ERR(efx, "saw runt RX packet (length %d) in %s loopback " + "test\n", pkt_len, LOOPBACK_MODE(efx)); + goto err; + } + + /* Check that the ethernet header exists */ + if (memcmp(&received->header, &payload->header, ETH_HLEN) != 0) { + EFX_ERR(efx, "saw non-loopback RX packet in %s loopback test\n", + LOOPBACK_MODE(efx)); + goto err; + } + + /* Check packet length */ + if (pkt_len != sizeof(*payload)) { + EFX_ERR(efx, "saw incorrect RX packet length %d (wanted %d) in " + "%s loopback test\n", pkt_len, (int)sizeof(*payload), + LOOPBACK_MODE(efx)); + goto err; + } + + /* Check that IP header matches */ + if (memcmp(&received->ip, &payload->ip, sizeof(payload->ip)) != 0) { + EFX_ERR(efx, "saw corrupted IP header in %s loopback test\n", + LOOPBACK_MODE(efx)); + goto err; + } + + /* Check that msg and padding matches */ + if (memcmp(&received->msg, &payload->msg, sizeof(received->msg)) != 0) { + EFX_ERR(efx, "saw corrupted RX packet in %s loopback test\n", + LOOPBACK_MODE(efx)); + goto err; + } + + /* Check that iteration matches */ + if (received->iteration != payload->iteration) { + EFX_ERR(efx, "saw RX packet from iteration %d (wanted %d) in " + "%s loopback test\n", ntohs(received->iteration), + ntohs(payload->iteration), LOOPBACK_MODE(efx)); + goto err; + } + + /* Increase correct RX count */ + EFX_TRACE(efx, "got loopback RX in %s loopback test\n", + LOOPBACK_MODE(efx)); + + atomic_inc(&state->rx_good); + return; + + err: +#ifdef EFX_ENABLE_DEBUG + if (atomic_read(&state->rx_bad) == 0) { + EFX_ERR(efx, "received packet:\n"); + print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1, + buf_ptr, pkt_len, 0); + EFX_ERR(efx, "expected packet:\n"); + print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1, + &state->payload, sizeof(state->payload), 0); + } +#endif + atomic_inc(&state->rx_bad); +} + +/* Initialise an efx_selftest_state for a new iteration */ +static void efx_iterate_state(struct efx_nic *efx) +{ + struct efx_selftest_state *state = efx->loopback_selftest; + struct net_device *net_dev = efx->net_dev; + struct efx_loopback_payload *payload = &state->payload; + + /* Initialise the layerII header */ + memcpy(&payload->header.h_dest, net_dev->dev_addr, ETH_ALEN); + memcpy(&payload->header.h_source, &payload_source, ETH_ALEN); + payload->header.h_proto = htons(ETH_P_IP); + + /* saddr set later and used as incrementing count */ + payload->ip.daddr = htonl(INADDR_LOOPBACK); + payload->ip.ihl = 5; + payload->ip.check = htons(0xdead); + payload->ip.tot_len = htons(sizeof(*payload) - sizeof(struct ethhdr)); + payload->ip.version = IPVERSION; + payload->ip.protocol = IPPROTO_UDP; + + /* Initialise udp header */ + payload->udp.source = 0; + payload->udp.len = htons(sizeof(*payload) - sizeof(struct ethhdr) - + sizeof(struct iphdr)); + payload->udp.check = 0; /* checksum ignored */ + + /* Fill out payload */ + payload->iteration = htons(ntohs(payload->iteration) + 1); + 
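/* kept big-endian: the RX path compares the raw field and only
	 * byte-swaps it for the error message */ +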
memcpy(&payload->msg, payload_msg, sizeof(payload_msg)); + + /* Fill out remaining state members */ + atomic_set(&state->rx_good, 0); + atomic_set(&state->rx_bad, 0); + smp_wmb(); +} + +static int efx_tx_loopback(struct efx_tx_queue *tx_queue) +{ + struct efx_nic *efx = tx_queue->efx; + struct efx_selftest_state *state = efx->loopback_selftest; + struct efx_loopback_payload *payload; + struct sk_buff *skb; + int i, rc; + + /* Transmit N copies of buffer */ + for (i = 0; i < state->packet_count; i++) { + /* Allocate an skb, holding an extra reference for + * transmit completion counting */ + skb = alloc_skb(sizeof(state->payload), GFP_KERNEL); + if (!skb) + return -ENOMEM; + state->skbs[i] = skb; + skb_get(skb); + + /* Copy the payload in, incrementing the source address to + * exercise the rss vectors */ + payload = ((struct efx_loopback_payload *) + skb_put(skb, sizeof(state->payload))); + memcpy(payload, &state->payload, sizeof(state->payload)); + payload->ip.saddr = htonl(INADDR_LOOPBACK | (i << 2)); + + /* Ensure everything we've written is visible to the + * interrupt handler. */ + smp_wmb(); + + if (NET_DEV_REGISTERED(efx)) + netif_tx_lock_bh(efx->net_dev); + rc = efx_xmit(efx, tx_queue, skb); + if (NET_DEV_REGISTERED(efx)) + netif_tx_unlock_bh(efx->net_dev); + + if (rc != NETDEV_TX_OK) { + EFX_ERR(efx, "TX queue %d could not transmit packet %d " + "of %d in %s loopback test\n", tx_queue->queue, + i + 1, state->packet_count, LOOPBACK_MODE(efx)); + + /* Defer cleaning up the other skbs for the caller */ + kfree_skb(skb); + return -EPIPE; + } + } + + return 0; +} + +static int efx_rx_loopback(struct efx_tx_queue *tx_queue, + struct efx_loopback_self_tests *lb_tests) +{ + struct efx_nic *efx = tx_queue->efx; + struct efx_selftest_state *state = efx->loopback_selftest; + struct sk_buff *skb; + int tx_done = 0, rx_good, rx_bad; + int i, rc = 0; + + if (NET_DEV_REGISTERED(efx)) + netif_tx_lock_bh(efx->net_dev); + + /* Count the number of tx completions, and decrement the refcnt. Any + * skbs not already completed will be free'd when the queue is flushed */ + for (i=0; i < state->packet_count; i++) { + skb = state->skbs[i]; + if (skb && !skb_shared(skb)) + ++tx_done; + dev_kfree_skb_any(skb); + } + + if (NET_DEV_REGISTERED(efx)) + netif_tx_unlock_bh(efx->net_dev); + + /* Check TX completion and received packet counts */ + rx_good = atomic_read(&state->rx_good); + rx_bad = atomic_read(&state->rx_bad); + if (tx_done != state->packet_count) { + /* Don't free the skbs; they will be picked up on TX + * overflow or channel teardown. 
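+ * Uncompleted skbs are still referenced from the TX ring and are
+ * released by efx_release_tx_buffers() when the queue is flushed.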
+ */ + EFX_ERR(efx, "TX queue %d saw only %d out of an expected %d " + "TX completion events in %s loopback test\n", + tx_queue->queue, tx_done, state->packet_count, + LOOPBACK_MODE(efx)); + rc = -ETIMEDOUT; + /* Allow to fall through so we see the RX errors as well */ + } + + /* We may always be up to a flush away from our desired packet total */ + if (rx_good != state->packet_count) { + EFX_LOG(efx, "TX queue %d saw only %d out of an expected %d " + "received packets in %s loopback test\n", + tx_queue->queue, rx_good, state->packet_count, + LOOPBACK_MODE(efx)); + rc = -ETIMEDOUT; + /* Fall through */ + } + + /* Update loopback test structure */ + lb_tests->tx_sent[tx_queue->queue] += state->packet_count; + lb_tests->tx_done[tx_queue->queue] += tx_done; + lb_tests->rx_good += rx_good; + lb_tests->rx_bad += rx_bad; + + return rc; +} + +static int +efx_test_loopback(struct efx_tx_queue *tx_queue, + struct efx_loopback_self_tests *lb_tests) +{ + struct efx_nic *efx = tx_queue->efx; + struct efx_selftest_state *state = efx->loopback_selftest; + struct efx_channel *channel; + int i, rc = 0; + + for (i = 0; i < loopback_test_level; i++) { + /* Determine how many packets to send */ + state->packet_count = (efx->type->txd_ring_mask + 1) / 3; + state->packet_count = min(1 << (i << 2), state->packet_count); + state->skbs = kzalloc(sizeof(state->skbs[0]) * + state->packet_count, GFP_KERNEL); + state->flush = 0; + + EFX_LOG(efx, "TX queue %d testing %s loopback with %d " + "packets\n", tx_queue->queue, LOOPBACK_MODE(efx), + state->packet_count); + + efx_iterate_state(efx); + rc = efx_tx_loopback(tx_queue); + + /* NAPI polling is not enabled, so process channels synchronously */ + schedule_timeout_uninterruptible(HZ / 50); + efx_for_each_channel_with_interrupt(channel, efx) { + if (channel->work_pending) + efx_process_channel_now(channel); + } + + rc |= efx_rx_loopback(tx_queue, lb_tests); + kfree(state->skbs); + + if (rc) { + /* Wait a while to ensure there are no packets + * floating around after a failure. */ + schedule_timeout_uninterruptible(HZ / 10); + return rc; + } + } + + EFX_LOG(efx, "TX queue %d passed %s loopback test with a burst length " + "of %d packets\n", tx_queue->queue, LOOPBACK_MODE(efx), + state->packet_count); + + return rc; +} + +static int efx_test_loopbacks(struct efx_nic *efx, + struct efx_self_tests *tests, + unsigned int loopback_modes) +{ + struct efx_selftest_state *state = efx->loopback_selftest; + struct ethtool_cmd ecmd, ecmd_loopback; + struct efx_tx_queue *tx_queue; + enum efx_loopback_mode old_mode, mode; + int count, rc = 0, link_up; + + rc = efx_ethtool_get_settings(efx->net_dev, &ecmd); + if (rc) { + EFX_ERR(efx, "could not get GMII settings\n"); + return rc; + } + old_mode = efx->loopback_mode; + + /* Disable autonegotiation for the purposes of loopback */ + memcpy(&ecmd_loopback, &ecmd, sizeof(ecmd_loopback)); + if (ecmd_loopback.autoneg == AUTONEG_ENABLE) { + ecmd_loopback.autoneg = AUTONEG_DISABLE; + ecmd_loopback.duplex = DUPLEX_FULL; + ecmd_loopback.speed = SPEED_10000; + } + + rc = efx_ethtool_set_settings(efx->net_dev, &ecmd_loopback); + if (rc) { + EFX_ERR(efx, "could not disable autonegotiation\n"); + goto out; + } + tests->loopback_speed = ecmd_loopback.speed; + tests->loopback_full_duplex = ecmd_loopback.duplex; + + /* Test all supported loopback modes */ + for (mode = LOOPBACK_NONE; mode < LOOPBACK_TEST_MAX; mode++) { + if (!(loopback_modes & (1 << mode))) + continue; + + /* Move the port into the specified loopback mode. 
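+ * state->flush is raised first so that packets still in flight from
+ * the previous mode are dropped by efx_loopback_rx_packet().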
*/ + state->flush = 1; + efx->loopback_mode = mode; + efx_reconfigure_port(efx); + + /* Wait for the PHY to signal the link is up */ + count = 0; + do { + struct efx_channel *channel = &efx->channel[0]; + + falcon_check_xmac(efx); + schedule_timeout_uninterruptible(HZ / 10); + if (channel->work_pending) + efx_process_channel_now(channel); + /* Wait for PHY events to be processed */ + flush_workqueue(efx->workqueue); + rmb(); + + /* efx->link_up can be 1 even if the XAUI link is down, + * (bug5762). Usually, it's not worth bothering with the + * difference, but for selftests, we need that extra + * guarantee that the link is really, really, up. + */ + link_up = efx->link_up; + if (!falcon_xaui_link_ok(efx)) + link_up = 0; + + } while ((++count < 20) && !link_up); + + /* The link should now be up. If it isn't, there is no point + * in attempting a loopback test */ + if (!link_up) { + EFX_ERR(efx, "loopback %s never came up\n", + LOOPBACK_MODE(efx)); + rc = -EIO; + goto out; + } + + EFX_LOG(efx, "link came up in %s loopback in %d iterations\n", + LOOPBACK_MODE(efx), count); + + /* Test every TX queue */ + efx_for_each_tx_queue(tx_queue, efx) { + rc |= efx_test_loopback(tx_queue, + &tests->loopback[mode]); + if (rc) + goto out; + } + } + + out: + /* Take out of loopback and restore PHY settings */ + state->flush = 1; + efx->loopback_mode = old_mode; + efx_ethtool_set_settings(efx->net_dev, &ecmd); + + return rc; +} + +/************************************************************************** + * + * Entry points + * + *************************************************************************/ + +/* Online (i.e. non-disruptive) testing + * This checks interrupt generation, event delivery and PHY presence. */ +int efx_online_test(struct efx_nic *efx, struct efx_self_tests *tests) +{ + struct efx_channel *channel; + int rc = 0; + + EFX_LOG(efx, "performing online self-tests\n"); + + rc |= efx_test_interrupts(efx, tests); + efx_for_each_channel(channel, efx) { + if (channel->has_interrupt) + rc |= efx_test_eventq_irq(channel, tests); + else + rc |= efx_test_eventq(channel, tests); + } + rc |= efx_test_phy(efx, tests); + + if (rc) + EFX_ERR(efx, "failed online self-tests\n"); + + return rc; +} + +/* Offline (i.e. disruptive) testing + * This checks MAC and PHY loopback on the specified port. */ +int efx_offline_test(struct efx_nic *efx, + struct efx_self_tests *tests, unsigned int loopback_modes) +{ + struct efx_selftest_state *state; + int rc = 0; + + EFX_LOG(efx, "performing offline self-tests\n"); + + /* Create a selftest_state structure to hold state for the test */ + state = kzalloc(sizeof(*state), GFP_KERNEL); + if (state == NULL) { + rc = -ENOMEM; + goto out; + } + + /* Set the port loopback_selftest member. From this point on + * all received packets will be dropped. 
Mark the state as + * "flushing" so all inflight packets are dropped */ + BUG_ON(efx->loopback_selftest); + state->flush = 1; + efx->loopback_selftest = (void *)state; + + rc = efx_test_loopbacks(efx, tests, loopback_modes); + + efx->loopback_selftest = NULL; + wmb(); + kfree(state); + + out: + if (rc) + EFX_ERR(efx, "failed offline self-tests\n"); + + return rc; +} + diff --git a/drivers/net/sfc/selftest.h b/drivers/net/sfc/selftest.h new file mode 100644 index 00000000000..f6999c2b622 --- /dev/null +++ b/drivers/net/sfc/selftest.h @@ -0,0 +1,50 @@ +/**************************************************************************** + * Driver for Solarflare Solarstorm network controllers and boards + * Copyright 2005-2006 Fen Systems Ltd. + * Copyright 2006-2008 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +#ifndef EFX_SELFTEST_H +#define EFX_SELFTEST_H + +#include "net_driver.h" + +/* + * Self tests + */ + +struct efx_loopback_self_tests { + int tx_sent[EFX_MAX_TX_QUEUES]; + int tx_done[EFX_MAX_TX_QUEUES]; + int rx_good; + int rx_bad; +}; + +/* Efx self test results + * For fields which are not counters, 1 indicates success and -1 + * indicates failure. + */ +struct efx_self_tests { + int interrupt; + int eventq_dma[EFX_MAX_CHANNELS]; + int eventq_int[EFX_MAX_CHANNELS]; + int eventq_poll[EFX_MAX_CHANNELS]; + int phy_ok; + int loopback_speed; + int loopback_full_duplex; + struct efx_loopback_self_tests loopback[LOOPBACK_TEST_MAX]; +}; + +extern void efx_loopback_rx_packet(struct efx_nic *efx, + const char *buf_ptr, int pkt_len); +extern int efx_online_test(struct efx_nic *efx, + struct efx_self_tests *tests); +extern int efx_offline_test(struct efx_nic *efx, + struct efx_self_tests *tests, + unsigned int loopback_modes); + +#endif /* EFX_SELFTEST_H */ diff --git a/drivers/net/sfc/sfe4001.c b/drivers/net/sfc/sfe4001.c index 11fa9fb8f48..725d1a539c4 100644 --- a/drivers/net/sfc/sfe4001.c +++ b/drivers/net/sfc/sfe4001.c @@ -130,6 +130,15 @@ void sfe4001_poweroff(struct efx_nic *efx) (void) efx_i2c_read(i2c, MAX6647, RSL, &in, 1); } +/* The P0_EN_3V3X line on SFE4001 boards (from A2 onward) is connected + * to the FLASH_CFG_1 input on the DSP. We must keep it high at power- + * up to allow writing the flash (done through MDIO from userland). + */ +unsigned int sfe4001_phy_flash_cfg; +module_param_named(phy_flash_cfg, sfe4001_phy_flash_cfg, uint, 0444); +MODULE_PARM_DESC(phy_flash_cfg, + "Force PHY to enter flash configuration mode"); + /* This board uses an I2C expander to provider power to the PHY, which needs to * be turned on before the PHY can be used. 
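 * (The expander is the PCA9539 written below; its P0 outputs gate the
 * PHY power rails, including the 3V3X rail held up for flash config.)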
* Context: Process context, rtnl lock held @@ -203,6 +212,8 @@ int sfe4001_poweron(struct efx_nic *efx) out = 0xff & ~((1 << P0_EN_1V2_LBN) | (1 << P0_EN_2V5_LBN) | (1 << P0_EN_3V3X_LBN) | (1 << P0_EN_5V_LBN) | (1 << P0_X_TRST_LBN)); + if (sfe4001_phy_flash_cfg) + out |= 1 << P0_EN_3V3X_LBN; rc = efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1); if (rc) @@ -226,6 +237,9 @@ int sfe4001_poweron(struct efx_nic *efx) if (in & (1 << P1_AFE_PWD_LBN)) goto done; + /* DSP doesn't look powered in flash config mode */ + if (sfe4001_phy_flash_cfg) + goto done; } while (++count < 20); EFX_INFO(efx, "timed out waiting for power\n"); diff --git a/drivers/net/sfc/tenxpress.c b/drivers/net/sfc/tenxpress.c index a2e9f79e47b..b1cd6deec01 100644 --- a/drivers/net/sfc/tenxpress.c +++ b/drivers/net/sfc/tenxpress.c @@ -24,6 +24,11 @@ MDIO_MMDREG_DEVS0_PCS | \ MDIO_MMDREG_DEVS0_PHYXS) +#define TENXPRESS_LOOPBACKS ((1 << LOOPBACK_PHYXS) | \ + (1 << LOOPBACK_PCS) | \ + (1 << LOOPBACK_PMAPMD) | \ + (1 << LOOPBACK_NETWORK)) + /* We complain if we fail to see the link partner as 10G capable this many * times in a row (must be > 1 as sampling the autoneg. registers is racy) */ @@ -72,6 +77,10 @@ #define PMA_PMD_BIST_RXD_LBN (1) #define PMA_PMD_BIST_AFE_LBN (0) +/* Special Software reset register */ +#define PMA_PMD_EXT_CTRL_REG 49152 +#define PMA_PMD_EXT_SSR_LBN 15 + #define BIST_MAX_DELAY (1000) #define BIST_POLL_DELAY (10) @@ -86,6 +95,11 @@ #define PCS_TEST_SELECT_REG 0xd807 /* PRM 10.5.8 */ #define CLK312_EN_LBN 3 +/* PHYXS registers */ +#define PHYXS_TEST1 (49162) +#define LOOPBACK_NEAR_LBN (8) +#define LOOPBACK_NEAR_WIDTH (1) + /* Boot status register */ #define PCS_BOOT_STATUS_REG (0xd000) #define PCS_BOOT_FATAL_ERR_LBN (0) @@ -106,7 +120,9 @@ MODULE_PARM_DESC(crc_error_reset_threshold, struct tenxpress_phy_data { enum tenxpress_state state; + enum efx_loopback_mode loopback_mode; atomic_t bad_crc_count; + int tx_disabled; int bad_lp_tries; }; @@ -199,10 +215,12 @@ static int tenxpress_phy_init(struct efx_nic *efx) tenxpress_set_state(efx, TENXPRESS_STATUS_NORMAL); - rc = mdio_clause45_wait_reset_mmds(efx, - TENXPRESS_REQUIRED_DEVS); - if (rc < 0) - goto fail; + if (!sfe4001_phy_flash_cfg) { + rc = mdio_clause45_wait_reset_mmds(efx, + TENXPRESS_REQUIRED_DEVS); + if (rc < 0) + goto fail; + } rc = mdio_clause45_check_mmds(efx, TENXPRESS_REQUIRED_DEVS, 0); if (rc < 0) @@ -225,6 +243,35 @@ static int tenxpress_phy_init(struct efx_nic *efx) return rc; } +static int tenxpress_special_reset(struct efx_nic *efx) +{ + int rc, reg; + + EFX_TRACE(efx, "%s\n", __func__); + + /* Initiate reset */ + reg = mdio_clause45_read(efx, efx->mii.phy_id, + MDIO_MMD_PMAPMD, PMA_PMD_EXT_CTRL_REG); + reg |= (1 << PMA_PMD_EXT_SSR_LBN); + mdio_clause45_write(efx, efx->mii.phy_id, MDIO_MMD_PMAPMD, + PMA_PMD_EXT_CTRL_REG, reg); + + msleep(200); + + /* Wait for the blocks to come out of reset */ + rc = mdio_clause45_wait_reset_mmds(efx, + TENXPRESS_REQUIRED_DEVS); + if (rc < 0) + return rc; + + /* Try and reconfigure the device */ + rc = tenxpress_init(efx); + if (rc < 0) + return rc; + + return 0; +} + static void tenxpress_set_bad_lp(struct efx_nic *efx, int bad_lp) { struct tenxpress_phy_data *pd = efx->phy_data; @@ -299,11 +346,46 @@ static int tenxpress_link_ok(struct efx_nic *efx, int check_lp) return ok; } +static void tenxpress_phyxs_loopback(struct efx_nic *efx) +{ + int phy_id = efx->mii.phy_id; + int ctrl1, ctrl2; + + ctrl1 = ctrl2 = mdio_clause45_read(efx, phy_id, MDIO_MMD_PHYXS, + PHYXS_TEST1); + if (efx->loopback_mode == 
LOOPBACK_PHYXS) + ctrl2 |= (1 << LOOPBACK_NEAR_LBN); + else + ctrl2 &= ~(1 << LOOPBACK_NEAR_LBN); + if (ctrl1 != ctrl2) + mdio_clause45_write(efx, phy_id, MDIO_MMD_PHYXS, + PHYXS_TEST1, ctrl2); +} + static void tenxpress_phy_reconfigure(struct efx_nic *efx) { + struct tenxpress_phy_data *phy_data = efx->phy_data; + int loop_change = LOOPBACK_OUT_OF(phy_data, efx, + TENXPRESS_LOOPBACKS); + if (!tenxpress_state_is(efx, TENXPRESS_STATUS_NORMAL)) return; + /* When coming out of transmit disable, coming out of low power + * mode, or moving out of any PHY internal loopback mode, + * perform a special software reset */ + if ((phy_data->tx_disabled && !efx->tx_disabled) || + loop_change) { + (void) tenxpress_special_reset(efx); + falcon_reset_xaui(efx); + } + + mdio_clause45_transmit_disable(efx); + mdio_clause45_phy_reconfigure(efx); + tenxpress_phyxs_loopback(efx); + + phy_data->tx_disabled = efx->tx_disabled; + phy_data->loopback_mode = efx->loopback_mode; efx->link_up = tenxpress_link_ok(efx, 0); efx->link_options = GM_LPA_10000FULL; } @@ -431,4 +513,5 @@ struct efx_phy_operations falcon_tenxpress_phy_ops = { .clear_interrupt = tenxpress_phy_clear_interrupt, .reset_xaui = tenxpress_reset_xaui, .mmds = TENXPRESS_REQUIRED_DEVS, + .loopbacks = TENXPRESS_LOOPBACKS, }; diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c index fbb866b2185..9b436f5b488 100644 --- a/drivers/net/sfc/tx.c +++ b/drivers/net/sfc/tx.c @@ -82,6 +82,46 @@ static inline void efx_dequeue_buffer(struct efx_tx_queue *tx_queue, } } +/** + * struct efx_tso_header - a DMA mapped buffer for packet headers + * @next: Linked list of free ones. + * The list is protected by the TX queue lock. + * @dma_unmap_len: Length to unmap for an oversize buffer, or 0. + * @dma_addr: The DMA address of the header below. + * + * This controls the memory used for a TSO header. Use TSOH_DATA() + * to find the packet header data. Use TSOH_SIZE() to calculate the + * total size required for a given packet header length. TSO headers + * in the free list are exactly %TSOH_STD_SIZE bytes in size. 
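+ * Oversize headers are instead allocated from the heap with
+ * @dma_unmap_len set, and are unmapped and freed individually by
+ * efx_tsoh_heap_free().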
+ */ +struct efx_tso_header { + union { + struct efx_tso_header *next; + size_t unmap_len; + }; + dma_addr_t dma_addr; +}; + +static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, + const struct sk_buff *skb); +static void efx_fini_tso(struct efx_tx_queue *tx_queue); +static void efx_tsoh_heap_free(struct efx_tx_queue *tx_queue, + struct efx_tso_header *tsoh); + +static inline void efx_tsoh_free(struct efx_tx_queue *tx_queue, + struct efx_tx_buffer *buffer) +{ + if (buffer->tsoh) { + if (likely(!buffer->tsoh->unmap_len)) { + buffer->tsoh->next = tx_queue->tso_headers_free; + tx_queue->tso_headers_free = buffer->tsoh; + } else { + efx_tsoh_heap_free(tx_queue, buffer->tsoh); + } + buffer->tsoh = NULL; + } +} + /* * Add a socket buffer to a TX queue @@ -114,6 +154,9 @@ static inline int efx_enqueue_skb(struct efx_tx_queue *tx_queue, EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count); + if (skb_shinfo((struct sk_buff *)skb)->gso_size) + return efx_enqueue_skb_tso(tx_queue, skb); + /* Get size of the initial fragment */ len = skb_headlen(skb); @@ -166,6 +209,8 @@ static inline int efx_enqueue_skb(struct efx_tx_queue *tx_queue, insert_ptr = (tx_queue->insert_count & efx->type->txd_ring_mask); buffer = &tx_queue->buffer[insert_ptr]; + efx_tsoh_free(tx_queue, buffer); + EFX_BUG_ON_PARANOID(buffer->tsoh); EFX_BUG_ON_PARANOID(buffer->skb); EFX_BUG_ON_PARANOID(buffer->len); EFX_BUG_ON_PARANOID(buffer->continuation != 1); @@ -432,6 +477,9 @@ void efx_fini_tx_queue(struct efx_tx_queue *tx_queue) efx_release_tx_buffers(tx_queue); + /* Free up TSO header cache */ + efx_fini_tso(tx_queue); + /* Release queue's stop on port, if any */ if (tx_queue->stopped) { tx_queue->stopped = 0; @@ -450,3 +498,619 @@ void efx_remove_tx_queue(struct efx_tx_queue *tx_queue) } +/* Efx TCP segmentation acceleration. + * + * Why? Because by doing it here in the driver we can go significantly + * faster than the GSO. + * + * Requires TX checksum offload support. + */ + +/* Number of bytes inserted at the start of a TSO header buffer, + * similar to NET_IP_ALIGN. + */ +#if defined(__i386__) || defined(__x86_64__) +#define TSOH_OFFSET 0 +#else +#define TSOH_OFFSET NET_IP_ALIGN +#endif + +#define TSOH_BUFFER(tsoh) ((u8 *)(tsoh + 1) + TSOH_OFFSET) + +/* Total size of struct efx_tso_header, buffer and padding */ +#define TSOH_SIZE(hdr_len) \ + (sizeof(struct efx_tso_header) + TSOH_OFFSET + hdr_len) + +/* Size of blocks on free list. Larger blocks must be allocated from + * the heap. + */ +#define TSOH_STD_SIZE 128 + +#define PTR_DIFF(p1, p2) ((u8 *)(p1) - (u8 *)(p2)) +#define ETH_HDR_LEN(skb) (skb_network_header(skb) - (skb)->data) +#define SKB_TCP_OFF(skb) PTR_DIFF(tcp_hdr(skb), (skb)->data) +#define SKB_IPV4_OFF(skb) PTR_DIFF(ip_hdr(skb), (skb)->data) + +/** + * struct tso_state - TSO state for an SKB + * @remaining_len: Bytes of data we've yet to segment + * @seqnum: Current sequence number + * @packet_space: Remaining space in current packet + * @ifc: Input fragment cursor. + * Where we are in the current fragment of the incoming SKB. These + * values get updated in place when we split a fragment over + * multiple packets. + * @p: Parameters. + * These values are set once at the start of the TSO send and do + * not get changed as the routine progresses. + * + * The state used during segmentation. It is put into this data structure + * just to make it easy to pass into inline functions. 
+ */ +struct tso_state { + unsigned remaining_len; + unsigned seqnum; + unsigned packet_space; + + struct { + /* DMA address of current position */ + dma_addr_t dma_addr; + /* Remaining length */ + unsigned int len; + /* DMA address and length of the whole fragment */ + unsigned int unmap_len; + dma_addr_t unmap_addr; + struct page *page; + unsigned page_off; + } ifc; + + struct { + /* The number of bytes of header */ + unsigned int header_length; + + /* The number of bytes to put in each outgoing segment. */ + int full_packet_size; + + /* Current IPv4 ID, host endian. */ + unsigned ipv4_id; + } p; +}; + + +/* + * Verify that our various assumptions about sk_buffs and the conditions + * under which TSO will be attempted hold true. + */ +static inline void efx_tso_check_safe(const struct sk_buff *skb) +{ + EFX_BUG_ON_PARANOID(skb->protocol != htons(ETH_P_IP)); + EFX_BUG_ON_PARANOID(((struct ethhdr *)skb->data)->h_proto != + skb->protocol); + EFX_BUG_ON_PARANOID(ip_hdr(skb)->protocol != IPPROTO_TCP); + EFX_BUG_ON_PARANOID((PTR_DIFF(tcp_hdr(skb), skb->data) + + (tcp_hdr(skb)->doff << 2u)) > + skb_headlen(skb)); +} + + +/* + * Allocate a page worth of efx_tso_header structures, and string them + * into the tx_queue->tso_headers_free linked list. Return 0 or -ENOMEM. + */ +static int efx_tsoh_block_alloc(struct efx_tx_queue *tx_queue) +{ + + struct pci_dev *pci_dev = tx_queue->efx->pci_dev; + struct efx_tso_header *tsoh; + dma_addr_t dma_addr; + u8 *base_kva, *kva; + + base_kva = pci_alloc_consistent(pci_dev, PAGE_SIZE, &dma_addr); + if (base_kva == NULL) { + EFX_ERR(tx_queue->efx, "Unable to allocate page for TSO" + " headers\n"); + return -ENOMEM; + } + + /* pci_alloc_consistent() allocates pages. */ + EFX_BUG_ON_PARANOID(dma_addr & (PAGE_SIZE - 1u)); + + for (kva = base_kva; kva < base_kva + PAGE_SIZE; kva += TSOH_STD_SIZE) { + tsoh = (struct efx_tso_header *)kva; + tsoh->dma_addr = dma_addr + (TSOH_BUFFER(tsoh) - base_kva); + tsoh->next = tx_queue->tso_headers_free; + tx_queue->tso_headers_free = tsoh; + } + + return 0; +} + + +/* Free up a TSO header, and all others in the same page. 
*/ +static void efx_tsoh_block_free(struct efx_tx_queue *tx_queue, + struct efx_tso_header *tsoh, + struct pci_dev *pci_dev) +{ + struct efx_tso_header **p; + unsigned long base_kva; + dma_addr_t base_dma; + + base_kva = (unsigned long)tsoh & PAGE_MASK; + base_dma = tsoh->dma_addr & PAGE_MASK; + + p = &tx_queue->tso_headers_free; + while (*p != NULL) + if (((unsigned long)*p & PAGE_MASK) == base_kva) + *p = (*p)->next; + else + p = &(*p)->next; + + pci_free_consistent(pci_dev, PAGE_SIZE, (void *)base_kva, base_dma); +} + +static struct efx_tso_header * +efx_tsoh_heap_alloc(struct efx_tx_queue *tx_queue, size_t header_len) +{ + struct efx_tso_header *tsoh; + + tsoh = kmalloc(TSOH_SIZE(header_len), GFP_ATOMIC | GFP_DMA); + if (unlikely(!tsoh)) + return NULL; + + tsoh->dma_addr = pci_map_single(tx_queue->efx->pci_dev, + TSOH_BUFFER(tsoh), header_len, + PCI_DMA_TODEVICE); + if (unlikely(pci_dma_mapping_error(tsoh->dma_addr))) { + kfree(tsoh); + return NULL; + } + + tsoh->unmap_len = header_len; + return tsoh; +} + +static void +efx_tsoh_heap_free(struct efx_tx_queue *tx_queue, struct efx_tso_header *tsoh) +{ + pci_unmap_single(tx_queue->efx->pci_dev, + tsoh->dma_addr, tsoh->unmap_len, + PCI_DMA_TODEVICE); + kfree(tsoh); +} + +/** + * efx_tx_queue_insert - push descriptors onto the TX queue + * @tx_queue: Efx TX queue + * @dma_addr: DMA address of fragment + * @len: Length of fragment + * @skb: Only non-null for end of last segment + * @end_of_packet: True if last fragment in a packet + * @unmap_addr: DMA address of fragment for unmapping + * @unmap_len: Only set this in last segment of a fragment + * + * Push descriptors onto the TX queue. Return 0 on success or 1 if + * @tx_queue full. + */ +static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue, + dma_addr_t dma_addr, unsigned len, + const struct sk_buff *skb, int end_of_packet, + dma_addr_t unmap_addr, unsigned unmap_len) +{ + struct efx_tx_buffer *buffer; + struct efx_nic *efx = tx_queue->efx; + unsigned dma_len, fill_level, insert_ptr, misalign; + int q_space; + + EFX_BUG_ON_PARANOID(len <= 0); + + fill_level = tx_queue->insert_count - tx_queue->old_read_count; + /* -1 as there is no way to represent all descriptors used */ + q_space = efx->type->txd_ring_mask - 1 - fill_level; + + while (1) { + if (unlikely(q_space-- <= 0)) { + /* It might be that completions have happened + * since the xmit path last checked. Update + * the xmit path's copy of read_count. + */ + ++tx_queue->stopped; + /* This memory barrier protects the change of + * stopped from the access of read_count. 
*/ + smp_mb(); + tx_queue->old_read_count = + *(volatile unsigned *)&tx_queue->read_count; + fill_level = (tx_queue->insert_count + - tx_queue->old_read_count); + q_space = efx->type->txd_ring_mask - 1 - fill_level; + if (unlikely(q_space-- <= 0)) + return 1; + smp_mb(); + --tx_queue->stopped; + } + + insert_ptr = tx_queue->insert_count & efx->type->txd_ring_mask; + buffer = &tx_queue->buffer[insert_ptr]; + ++tx_queue->insert_count; + + EFX_BUG_ON_PARANOID(tx_queue->insert_count - + tx_queue->read_count > + efx->type->txd_ring_mask); + + efx_tsoh_free(tx_queue, buffer); + EFX_BUG_ON_PARANOID(buffer->len); + EFX_BUG_ON_PARANOID(buffer->unmap_len); + EFX_BUG_ON_PARANOID(buffer->skb); + EFX_BUG_ON_PARANOID(buffer->continuation != 1); + EFX_BUG_ON_PARANOID(buffer->tsoh); + + buffer->dma_addr = dma_addr; + + /* Ensure we do not cross a boundary unsupported by H/W */ + dma_len = (~dma_addr & efx->type->tx_dma_mask) + 1; + + misalign = (unsigned)dma_addr & efx->type->bug5391_mask; + if (misalign && dma_len + misalign > 512) + dma_len = 512 - misalign; + + /* If there is enough space to send then do so */ + if (dma_len >= len) + break; + + buffer->len = dma_len; /* Don't set the other members */ + dma_addr += dma_len; + len -= dma_len; + } + + EFX_BUG_ON_PARANOID(!len); + buffer->len = len; + buffer->skb = skb; + buffer->continuation = !end_of_packet; + buffer->unmap_addr = unmap_addr; + buffer->unmap_len = unmap_len; + return 0; +} + + +/* + * Put a TSO header into the TX queue. + * + * This is special-cased because we know that it is small enough to fit in + * a single fragment, and we know it doesn't cross a page boundary. It + * also allows us to not worry about end-of-packet etc. + */ +static inline void efx_tso_put_header(struct efx_tx_queue *tx_queue, + struct efx_tso_header *tsoh, unsigned len) +{ + struct efx_tx_buffer *buffer; + + buffer = &tx_queue->buffer[tx_queue->insert_count & + tx_queue->efx->type->txd_ring_mask]; + efx_tsoh_free(tx_queue, buffer); + EFX_BUG_ON_PARANOID(buffer->len); + EFX_BUG_ON_PARANOID(buffer->unmap_len); + EFX_BUG_ON_PARANOID(buffer->skb); + EFX_BUG_ON_PARANOID(buffer->continuation != 1); + EFX_BUG_ON_PARANOID(buffer->tsoh); + buffer->len = len; + buffer->dma_addr = tsoh->dma_addr; + buffer->tsoh = tsoh; + + ++tx_queue->insert_count; +} + + +/* Remove descriptors put into a tx_queue. */ +static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue) +{ + struct efx_tx_buffer *buffer; + + /* Work backwards until we hit the original insert pointer value */ + while (tx_queue->insert_count != tx_queue->write_count) { + --tx_queue->insert_count; + buffer = &tx_queue->buffer[tx_queue->insert_count & + tx_queue->efx->type->txd_ring_mask]; + efx_tsoh_free(tx_queue, buffer); + EFX_BUG_ON_PARANOID(buffer->skb); + buffer->len = 0; + buffer->continuation = 1; + if (buffer->unmap_len) { + pci_unmap_page(tx_queue->efx->pci_dev, + buffer->unmap_addr, + buffer->unmap_len, PCI_DMA_TODEVICE); + buffer->unmap_len = 0; + } + } +} + + +/* Parse the SKB header and initialise state. */ +static inline void tso_start(struct tso_state *st, const struct sk_buff *skb) +{ + /* All ethernet/IP/TCP headers combined size is TCP header size + * plus offset of TCP header relative to start of packet. 
+ */ + st->p.header_length = ((tcp_hdr(skb)->doff << 2u) + + PTR_DIFF(tcp_hdr(skb), skb->data)); + st->p.full_packet_size = (st->p.header_length + + skb_shinfo(skb)->gso_size); + + st->p.ipv4_id = ntohs(ip_hdr(skb)->id); + st->seqnum = ntohl(tcp_hdr(skb)->seq); + + EFX_BUG_ON_PARANOID(tcp_hdr(skb)->urg); + EFX_BUG_ON_PARANOID(tcp_hdr(skb)->syn); + EFX_BUG_ON_PARANOID(tcp_hdr(skb)->rst); + + st->packet_space = st->p.full_packet_size; + st->remaining_len = skb->len - st->p.header_length; +} + + +/** + * tso_get_fragment - record fragment details and map for DMA + * @st: TSO state + * @efx: Efx NIC + * @data: Pointer to fragment data + * @len: Length of fragment + * + * Record fragment details and map for DMA. Return 0 on success, or + * -%ENOMEM if DMA mapping fails. + */ +static inline int tso_get_fragment(struct tso_state *st, struct efx_nic *efx, + int len, struct page *page, int page_off) +{ + + st->ifc.unmap_addr = pci_map_page(efx->pci_dev, page, page_off, + len, PCI_DMA_TODEVICE); + if (likely(!pci_dma_mapping_error(st->ifc.unmap_addr))) { + st->ifc.unmap_len = len; + st->ifc.len = len; + st->ifc.dma_addr = st->ifc.unmap_addr; + st->ifc.page = page; + st->ifc.page_off = page_off; + return 0; + } + return -ENOMEM; +} + + +/** + * tso_fill_packet_with_fragment - form descriptors for the current fragment + * @tx_queue: Efx TX queue + * @skb: Socket buffer + * @st: TSO state + * + * Form descriptors for the current fragment, until we reach the end + * of fragment or end-of-packet. Return 0 on success, 1 if not enough + * space in @tx_queue. + */ +static inline int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue, + const struct sk_buff *skb, + struct tso_state *st) +{ + + int n, end_of_packet, rc; + + if (st->ifc.len == 0) + return 0; + if (st->packet_space == 0) + return 0; + + EFX_BUG_ON_PARANOID(st->ifc.len <= 0); + EFX_BUG_ON_PARANOID(st->packet_space <= 0); + + n = min(st->ifc.len, st->packet_space); + + st->packet_space -= n; + st->remaining_len -= n; + st->ifc.len -= n; + st->ifc.page_off += n; + end_of_packet = st->remaining_len == 0 || st->packet_space == 0; + + rc = efx_tx_queue_insert(tx_queue, st->ifc.dma_addr, n, + st->remaining_len ? NULL : skb, + end_of_packet, st->ifc.unmap_addr, + st->ifc.len ? 0 : st->ifc.unmap_len); + + st->ifc.dma_addr += n; + + return rc; +} + + +/** + * tso_start_new_packet - generate a new header and prepare for the new packet + * @tx_queue: Efx TX queue + * @skb: Socket buffer + * @st: TSO state + * + * Generate a new header and prepare for the new packet. Return 0 on + * success, or -1 if failed to alloc header. + */ +static inline int tso_start_new_packet(struct efx_tx_queue *tx_queue, + const struct sk_buff *skb, + struct tso_state *st) +{ + struct efx_tso_header *tsoh; + struct iphdr *tsoh_iph; + struct tcphdr *tsoh_th; + unsigned ip_length; + u8 *header; + + /* Allocate a DMA-mapped header buffer. */ + if (likely(TSOH_SIZE(st->p.header_length) <= TSOH_STD_SIZE)) { + if (tx_queue->tso_headers_free == NULL) + if (efx_tsoh_block_alloc(tx_queue)) + return -1; + EFX_BUG_ON_PARANOID(!tx_queue->tso_headers_free); + tsoh = tx_queue->tso_headers_free; + tx_queue->tso_headers_free = tsoh->next; + tsoh->unmap_len = 0; + } else { + tx_queue->tso_long_headers++; + tsoh = efx_tsoh_heap_alloc(tx_queue, st->p.header_length); + if (unlikely(!tsoh)) + return -1; + } + + header = TSOH_BUFFER(tsoh); + tsoh_th = (struct tcphdr *)(header + SKB_TCP_OFF(skb)); + tsoh_iph = (struct iphdr *)(header + SKB_IPV4_OFF(skb)); + + /* Copy and update the headers. 
*/ + memcpy(header, skb->data, st->p.header_length); + + tsoh_th->seq = htonl(st->seqnum); + st->seqnum += skb_shinfo(skb)->gso_size; + if (st->remaining_len > skb_shinfo(skb)->gso_size) { + /* This packet will not finish the TSO burst. */ + ip_length = st->p.full_packet_size - ETH_HDR_LEN(skb); + tsoh_th->fin = 0; + tsoh_th->psh = 0; + } else { + /* This packet will be the last in the TSO burst. */ + ip_length = (st->p.header_length - ETH_HDR_LEN(skb) + + st->remaining_len); + tsoh_th->fin = tcp_hdr(skb)->fin; + tsoh_th->psh = tcp_hdr(skb)->psh; + } + tsoh_iph->tot_len = htons(ip_length); + + /* Linux leaves suitable gaps in the IP ID space for us to fill. */ + tsoh_iph->id = htons(st->p.ipv4_id); + st->p.ipv4_id++; + + st->packet_space = skb_shinfo(skb)->gso_size; + ++tx_queue->tso_packets; + + /* Form a descriptor for this header. */ + efx_tso_put_header(tx_queue, tsoh, st->p.header_length); + + return 0; +} + + +/** + * efx_enqueue_skb_tso - segment and transmit a TSO socket buffer + * @tx_queue: Efx TX queue + * @skb: Socket buffer + * + * Context: You must hold netif_tx_lock() to call this function. + * + * Add socket buffer @skb to @tx_queue, doing TSO or return != 0 if + * @skb was not enqueued. In all cases @skb is consumed. Return + * %NETDEV_TX_OK or %NETDEV_TX_BUSY. + */ +static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, + const struct sk_buff *skb) +{ + int frag_i, rc, rc2 = NETDEV_TX_OK; + struct tso_state state; + skb_frag_t *f; + + /* Verify TSO is safe - these checks should never fail. */ + efx_tso_check_safe(skb); + + EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count); + + tso_start(&state, skb); + + /* Assume that skb header area contains exactly the headers, and + * all payload is in the frag list. + */ + if (skb_headlen(skb) == state.p.header_length) { + /* Grab the first payload fragment. */ + EFX_BUG_ON_PARANOID(skb_shinfo(skb)->nr_frags < 1); + frag_i = 0; + f = &skb_shinfo(skb)->frags[frag_i]; + rc = tso_get_fragment(&state, tx_queue->efx, + f->size, f->page, f->page_offset); + if (rc) + goto mem_err; + } else { + /* It may look like this code fragment assumes that the + * skb->data portion does not cross a page boundary, but + * that is not the case. It is guaranteed to be direct + * mapped memory, and therefore is physically contiguous, + * and so DMA will work fine. kmap_atomic() on this region + * will just return the direct mapping, so that will work + * too. + */ + int page_off = (unsigned long)skb->data & (PAGE_SIZE - 1); + int hl = state.p.header_length; + rc = tso_get_fragment(&state, tx_queue->efx, + skb_headlen(skb) - hl, + virt_to_page(skb->data), page_off + hl); + if (rc) + goto mem_err; + frag_i = -1; + } + + if (tso_start_new_packet(tx_queue, skb, &state) < 0) + goto mem_err; + + while (1) { + rc = tso_fill_packet_with_fragment(tx_queue, skb, &state); + if (unlikely(rc)) + goto stop; + + /* Move onto the next fragment? */ + if (state.ifc.len == 0) { + if (++frag_i >= skb_shinfo(skb)->nr_frags) + /* End of payload reached. */ + break; + f = &skb_shinfo(skb)->frags[frag_i]; + rc = tso_get_fragment(&state, tx_queue->efx, + f->size, f->page, f->page_offset); + if (rc) + goto mem_err; + } + + /* Start at new packet? 
*/ + if (state.packet_space == 0 && + tso_start_new_packet(tx_queue, skb, &state) < 0) + goto mem_err; + } + + /* Pass off to hardware */ + falcon_push_buffers(tx_queue); + + tx_queue->tso_bursts++; + return NETDEV_TX_OK; + + mem_err: + EFX_ERR(tx_queue->efx, "Out of memory for TSO headers, or PCI mapping" + " error\n"); + dev_kfree_skb_any((struct sk_buff *)skb); + goto unwind; + + stop: + rc2 = NETDEV_TX_BUSY; + + /* Stop the queue if it wasn't stopped before. */ + if (tx_queue->stopped == 1) + efx_stop_queue(tx_queue->efx); + + unwind: + efx_enqueue_unwind(tx_queue); + return rc2; +} + + +/* + * Free up all TSO datastructures associated with tx_queue. This + * routine should be called only once the tx_queue is both empty and + * will no longer be used. + */ +static void efx_fini_tso(struct efx_tx_queue *tx_queue) +{ + unsigned i; + + if (tx_queue->buffer) + for (i = 0; i <= tx_queue->efx->type->txd_ring_mask; ++i) + efx_tsoh_free(tx_queue, &tx_queue->buffer[i]); + + while (tx_queue->tso_headers_free != NULL) + efx_tsoh_block_free(tx_queue, tx_queue->tso_headers_free, + tx_queue->efx->pci_dev); +} diff --git a/drivers/net/sfc/xfp_phy.c b/drivers/net/sfc/xfp_phy.c index 66dd5bf1eaa..3b9f9ddbc37 100644 --- a/drivers/net/sfc/xfp_phy.c +++ b/drivers/net/sfc/xfp_phy.c @@ -24,6 +24,10 @@ MDIO_MMDREG_DEVS0_PMAPMD | \ MDIO_MMDREG_DEVS0_PHYXS) +#define XFP_LOOPBACKS ((1 << LOOPBACK_PCS) | \ + (1 << LOOPBACK_PMAPMD) | \ + (1 << LOOPBACK_NETWORK)) + /****************************************************************************/ /* Quake-specific MDIO registers */ #define MDIO_QUAKE_LED0_REG (0xD006) @@ -35,6 +39,10 @@ void xfp_set_led(struct efx_nic *p, int led, int mode) mode); } +struct xfp_phy_data { + int tx_disabled; +}; + #define XFP_MAX_RESET_TIME 500 #define XFP_RESET_WAIT 10 @@ -72,18 +80,31 @@ static int xfp_reset_phy(struct efx_nic *efx) static int xfp_phy_init(struct efx_nic *efx) { + struct xfp_phy_data *phy_data; u32 devid = mdio_clause45_read_id(efx, MDIO_MMD_PHYXS); int rc; + phy_data = kzalloc(sizeof(struct xfp_phy_data), GFP_KERNEL); + efx->phy_data = (void *) phy_data; + EFX_INFO(efx, "XFP: PHY ID reg %x (OUI %x model %x revision" " %x)\n", devid, MDIO_ID_OUI(devid), MDIO_ID_MODEL(devid), MDIO_ID_REV(devid)); + phy_data->tx_disabled = efx->tx_disabled; + rc = xfp_reset_phy(efx); EFX_INFO(efx, "XFP: PHY init %s.\n", rc ? 
"failed" : "successful"); + if (rc < 0) + goto fail; + return 0; + + fail: + kfree(efx->phy_data); + efx->phy_data = NULL; return rc; } @@ -110,6 +131,16 @@ static int xfp_phy_check_hw(struct efx_nic *efx) static void xfp_phy_reconfigure(struct efx_nic *efx) { + struct xfp_phy_data *phy_data = efx->phy_data; + + /* Reset the PHY when moving from tx off to tx on */ + if (phy_data->tx_disabled && !efx->tx_disabled) + xfp_reset_phy(efx); + + mdio_clause45_transmit_disable(efx); + mdio_clause45_phy_reconfigure(efx); + + phy_data->tx_disabled = efx->tx_disabled; efx->link_up = xfp_link_ok(efx); efx->link_options = GM_LPA_10000FULL; } @@ -119,6 +150,10 @@ static void xfp_phy_fini(struct efx_nic *efx) { /* Clobber the LED if it was blinking */ efx->board_info.blink(efx, 0); + + /* Free the context block */ + kfree(efx->phy_data); + efx->phy_data = NULL; } struct efx_phy_operations falcon_xfp_phy_ops = { @@ -129,4 +164,5 @@ struct efx_phy_operations falcon_xfp_phy_ops = { .clear_interrupt = xfp_phy_clear_interrupt, .reset_xaui = efx_port_dummy_op_void, .mmds = XFP_REQUIRED_DEVS, + .loopbacks = XFP_LOOPBACKS, }; diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h index 7bb3ba9bcbd..c0a5eea2000 100644 --- a/drivers/net/sky2.h +++ b/drivers/net/sky2.h @@ -1966,13 +1966,13 @@ struct sky2_status_le { struct tx_ring_info { struct sk_buff *skb; DECLARE_PCI_UNMAP_ADDR(mapaddr); - DECLARE_PCI_UNMAP_ADDR(maplen); + DECLARE_PCI_UNMAP_LEN(maplen); }; struct rx_ring_info { struct sk_buff *skb; dma_addr_t data_addr; - DECLARE_PCI_UNMAP_ADDR(data_size); + DECLARE_PCI_UNMAP_LEN(data_size); dma_addr_t frag_addr[ETH_JUMBO_MTU >> PAGE_SHIFT]; }; diff --git a/drivers/net/tulip/uli526x.c b/drivers/net/tulip/uli526x.c index a59c1f224aa..2511ca7a12a 100644 --- a/drivers/net/tulip/uli526x.c +++ b/drivers/net/tulip/uli526x.c @@ -434,10 +434,6 @@ static int uli526x_open(struct net_device *dev) ULI526X_DBUG(0, "uli526x_open", 0); - ret = request_irq(dev->irq, &uli526x_interrupt, IRQF_SHARED, dev->name, dev); - if (ret) - return ret; - /* system variable init */ db->cr6_data = CR6_DEFAULT | uli526x_cr6_user_set; db->tx_packet_cnt = 0; @@ -456,6 +452,10 @@ static int uli526x_open(struct net_device *dev) /* Initialize ULI526X board */ uli526x_init(dev); + ret = request_irq(dev->irq, &uli526x_interrupt, IRQF_SHARED, dev->name, dev); + if (ret) + return ret; + /* Active System Interface */ netif_wake_queue(dev); @@ -1368,6 +1368,12 @@ static void update_cr6(u32 cr6_data, unsigned long ioaddr) * This setup frame initialize ULI526X address filter mode */ +#ifdef __BIG_ENDIAN +#define FLT_SHIFT 16 +#else +#define FLT_SHIFT 0 +#endif + static void send_filter_frame(struct net_device *dev, int mc_cnt) { struct uli526x_board_info *db = netdev_priv(dev); @@ -1384,27 +1390,27 @@ static void send_filter_frame(struct net_device *dev, int mc_cnt) /* Node address */ addrptr = (u16 *) dev->dev_addr; - *suptr++ = addrptr[0]; - *suptr++ = addrptr[1]; - *suptr++ = addrptr[2]; + *suptr++ = addrptr[0] << FLT_SHIFT; + *suptr++ = addrptr[1] << FLT_SHIFT; + *suptr++ = addrptr[2] << FLT_SHIFT; /* broadcast address */ - *suptr++ = 0xffff; - *suptr++ = 0xffff; - *suptr++ = 0xffff; + *suptr++ = 0xffff << FLT_SHIFT; + *suptr++ = 0xffff << FLT_SHIFT; + *suptr++ = 0xffff << FLT_SHIFT; /* fit the multicast address */ for (mcptr = dev->mc_list, i = 0; i < mc_cnt; i++, mcptr = mcptr->next) { addrptr = (u16 *) mcptr->dmi_addr; - *suptr++ = addrptr[0]; - *suptr++ = addrptr[1]; - *suptr++ = addrptr[2]; + *suptr++ = addrptr[0] << FLT_SHIFT; + *suptr++ = 
addrptr[1] << FLT_SHIFT; + *suptr++ = addrptr[2] << FLT_SHIFT; } for (; i<14; i++) { - *suptr++ = 0xffff; - *suptr++ = 0xffff; - *suptr++ = 0xffff; + *suptr++ = 0xffff << FLT_SHIFT; + *suptr++ = 0xffff << FLT_SHIFT; + *suptr++ = 0xffff << FLT_SHIFT; } /* prepare the setup frame */ diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c index 281ce3d3953..ca0bdac07a7 100644 --- a/drivers/net/ucc_geth.c +++ b/drivers/net/ucc_geth.c @@ -62,7 +62,6 @@ #endif /* UGETH_VERBOSE_DEBUG */ #define UGETH_MSG_DEFAULT (NETIF_MSG_IFUP << 1 ) - 1 -void uec_set_ethtool_ops(struct net_device *netdev); static DEFINE_SPINLOCK(ugeth_lock); @@ -216,7 +215,8 @@ static struct list_head *dequeue(struct list_head *lh) } } -static struct sk_buff *get_new_skb(struct ucc_geth_private *ugeth, u8 *bd) +static struct sk_buff *get_new_skb(struct ucc_geth_private *ugeth, + u8 __iomem *bd) { struct sk_buff *skb = NULL; @@ -236,21 +236,22 @@ static struct sk_buff *get_new_skb(struct ucc_geth_private *ugeth, u8 *bd) skb->dev = ugeth->dev; - out_be32(&((struct qe_bd *)bd)->buf, + out_be32(&((struct qe_bd __iomem *)bd)->buf, dma_map_single(NULL, skb->data, ugeth->ug_info->uf_info.max_rx_buf_length + UCC_GETH_RX_DATA_BUF_ALIGNMENT, DMA_FROM_DEVICE)); - out_be32((u32 *)bd, (R_E | R_I | (in_be32((u32 *)bd) & R_W))); + out_be32((u32 __iomem *)bd, + (R_E | R_I | (in_be32((u32 __iomem*)bd) & R_W))); return skb; } static int rx_bd_buffer_set(struct ucc_geth_private *ugeth, u8 rxQ) { - u8 *bd; + u8 __iomem *bd; u32 bd_status; struct sk_buff *skb; int i; @@ -259,7 +260,7 @@ static int rx_bd_buffer_set(struct ucc_geth_private *ugeth, u8 rxQ) i = 0; do { - bd_status = in_be32((u32*)bd); + bd_status = in_be32((u32 __iomem *)bd); skb = get_new_skb(ugeth, bd); if (!skb) /* If can not allocate data buffer, @@ -277,7 +278,7 @@ static int rx_bd_buffer_set(struct ucc_geth_private *ugeth, u8 rxQ) } static int fill_init_enet_entries(struct ucc_geth_private *ugeth, - volatile u32 *p_start, + u32 *p_start, u8 num_entries, u32 thread_size, u32 thread_alignment, @@ -316,7 +317,7 @@ static int fill_init_enet_entries(struct ucc_geth_private *ugeth, } static int return_init_enet_entries(struct ucc_geth_private *ugeth, - volatile u32 *p_start, + u32 *p_start, u8 num_entries, enum qe_risc_allocation risc, int skip_page_for_first_entry) @@ -326,21 +327,22 @@ static int return_init_enet_entries(struct ucc_geth_private *ugeth, int snum; for (i = 0; i < num_entries; i++) { + u32 val = *p_start; + /* Check that this entry was actually valid -- needed in case failed in allocations */ - if ((*p_start & ENET_INIT_PARAM_RISC_MASK) == risc) { + if ((val & ENET_INIT_PARAM_RISC_MASK) == risc) { snum = - (u32) (*p_start & ENET_INIT_PARAM_SNUM_MASK) >> + (u32) (val & ENET_INIT_PARAM_SNUM_MASK) >> ENET_INIT_PARAM_SNUM_SHIFT; qe_put_snum((u8) snum); if (!((i == 0) && skip_page_for_first_entry)) { /* First entry of Rx does not have page */ init_enet_offset = - (in_be32(p_start) & - ENET_INIT_PARAM_PTR_MASK); + (val & ENET_INIT_PARAM_PTR_MASK); qe_muram_free(init_enet_offset); } - *(p_start++) = 0; /* Just for cosmetics */ + *p_start++ = 0; } } @@ -349,7 +351,7 @@ static int return_init_enet_entries(struct ucc_geth_private *ugeth, #ifdef DEBUG static int dump_init_enet_entries(struct ucc_geth_private *ugeth, - volatile u32 *p_start, + u32 __iomem *p_start, u8 num_entries, u32 thread_size, enum qe_risc_allocation risc, @@ -360,11 +362,13 @@ static int dump_init_enet_entries(struct ucc_geth_private *ugeth, int snum; for (i = 0; i < num_entries; i++) { + u32 val = 
in_be32(p_start); + /* Check that this entry was actually valid -- needed in case failed in allocations */ - if ((*p_start & ENET_INIT_PARAM_RISC_MASK) == risc) { + if ((val & ENET_INIT_PARAM_RISC_MASK) == risc) { snum = - (u32) (*p_start & ENET_INIT_PARAM_SNUM_MASK) >> + (u32) (val & ENET_INIT_PARAM_SNUM_MASK) >> ENET_INIT_PARAM_SNUM_SHIFT; qe_put_snum((u8) snum); if (!((i == 0) && skip_page_for_first_entry)) { @@ -440,7 +444,7 @@ static int hw_add_addr_in_paddr(struct ucc_geth_private *ugeth, static int hw_clear_addr_in_paddr(struct ucc_geth_private *ugeth, u8 paddr_num) { - struct ucc_geth_82xx_address_filtering_pram *p_82xx_addr_filt; + struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt; if (!(paddr_num < NUM_OF_PADDRS)) { ugeth_warn("%s: Illagel paddr_num.", __FUNCTION__); @@ -448,7 +452,7 @@ static int hw_clear_addr_in_paddr(struct ucc_geth_private *ugeth, u8 paddr_num) } p_82xx_addr_filt = - (struct ucc_geth_82xx_address_filtering_pram *) ugeth->p_rx_glbl_pram-> + (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth->p_rx_glbl_pram-> addressfiltering; /* Writing address ff.ff.ff.ff.ff.ff disables address @@ -463,11 +467,11 @@ static int hw_clear_addr_in_paddr(struct ucc_geth_private *ugeth, u8 paddr_num) static void hw_add_addr_in_hash(struct ucc_geth_private *ugeth, u8 *p_enet_addr) { - struct ucc_geth_82xx_address_filtering_pram *p_82xx_addr_filt; + struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt; u32 cecr_subblock; p_82xx_addr_filt = - (struct ucc_geth_82xx_address_filtering_pram *) ugeth->p_rx_glbl_pram-> + (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth->p_rx_glbl_pram-> addressfiltering; cecr_subblock = @@ -487,7 +491,7 @@ static void hw_add_addr_in_hash(struct ucc_geth_private *ugeth, static void magic_packet_detection_enable(struct ucc_geth_private *ugeth) { struct ucc_fast_private *uccf; - struct ucc_geth *ug_regs; + struct ucc_geth __iomem *ug_regs; u32 maccfg2, uccm; uccf = ugeth->uccf; @@ -507,7 +511,7 @@ static void magic_packet_detection_enable(struct ucc_geth_private *ugeth) static void magic_packet_detection_disable(struct ucc_geth_private *ugeth) { struct ucc_fast_private *uccf; - struct ucc_geth *ug_regs; + struct ucc_geth __iomem *ug_regs; u32 maccfg2, uccm; uccf = ugeth->uccf; @@ -538,13 +542,13 @@ static void get_statistics(struct ucc_geth_private *ugeth, rx_firmware_statistics, struct ucc_geth_hardware_statistics *hardware_statistics) { - struct ucc_fast *uf_regs; - struct ucc_geth *ug_regs; + struct ucc_fast __iomem *uf_regs; + struct ucc_geth __iomem *ug_regs; struct ucc_geth_tx_firmware_statistics_pram *p_tx_fw_statistics_pram; struct ucc_geth_rx_firmware_statistics_pram *p_rx_fw_statistics_pram; ug_regs = ugeth->ug_regs; - uf_regs = (struct ucc_fast *) ug_regs; + uf_regs = (struct ucc_fast __iomem *) ug_regs; p_tx_fw_statistics_pram = ugeth->p_tx_fw_statistics_pram; p_rx_fw_statistics_pram = ugeth->p_rx_fw_statistics_pram; @@ -1132,9 +1136,9 @@ static void dump_regs(struct ucc_geth_private *ugeth) } #endif /* DEBUG */ -static void init_default_reg_vals(volatile u32 *upsmr_register, - volatile u32 *maccfg1_register, - volatile u32 *maccfg2_register) +static void init_default_reg_vals(u32 __iomem *upsmr_register, + u32 __iomem *maccfg1_register, + u32 __iomem *maccfg2_register) { out_be32(upsmr_register, UCC_GETH_UPSMR_INIT); out_be32(maccfg1_register, UCC_GETH_MACCFG1_INIT); @@ -1148,7 +1152,7 @@ static int init_half_duplex_params(int alt_beb, u8 alt_beb_truncation, u8 max_retransmissions, u8 
collision_window, - volatile u32 *hafdup_register) + u32 __iomem *hafdup_register) { u32 value = 0; @@ -1180,7 +1184,7 @@ static int init_inter_frame_gap_params(u8 non_btb_cs_ipg, u8 non_btb_ipg, u8 min_ifg, u8 btb_ipg, - volatile u32 *ipgifg_register) + u32 __iomem *ipgifg_register) { u32 value = 0; @@ -1215,9 +1219,9 @@ int init_flow_control_params(u32 automatic_flow_control_mode, int tx_flow_control_enable, u16 pause_period, u16 extension_field, - volatile u32 *upsmr_register, - volatile u32 *uempr_register, - volatile u32 *maccfg1_register) + u32 __iomem *upsmr_register, + u32 __iomem *uempr_register, + u32 __iomem *maccfg1_register) { u32 value = 0; @@ -1243,8 +1247,8 @@ int init_flow_control_params(u32 automatic_flow_control_mode, static int init_hw_statistics_gathering_mode(int enable_hardware_statistics, int auto_zero_hardware_statistics, - volatile u32 *upsmr_register, - volatile u16 *uescr_register) + u32 __iomem *upsmr_register, + u16 __iomem *uescr_register) { u32 upsmr_value = 0; u16 uescr_value = 0; @@ -1270,12 +1274,12 @@ static int init_hw_statistics_gathering_mode(int enable_hardware_statistics, static int init_firmware_statistics_gathering_mode(int enable_tx_firmware_statistics, int enable_rx_firmware_statistics, - volatile u32 *tx_rmon_base_ptr, + u32 __iomem *tx_rmon_base_ptr, u32 tx_firmware_statistics_structure_address, - volatile u32 *rx_rmon_base_ptr, + u32 __iomem *rx_rmon_base_ptr, u32 rx_firmware_statistics_structure_address, - volatile u16 *temoder_register, - volatile u32 *remoder_register) + u16 __iomem *temoder_register, + u32 __iomem *remoder_register) { /* Note: this function does not check if */ /* the parameters it receives are NULL */ @@ -1307,8 +1311,8 @@ static int init_mac_station_addr_regs(u8 address_byte_0, u8 address_byte_3, u8 address_byte_4, u8 address_byte_5, - volatile u32 *macstnaddr1_register, - volatile u32 *macstnaddr2_register) + u32 __iomem *macstnaddr1_register, + u32 __iomem *macstnaddr2_register) { u32 value = 0; @@ -1344,7 +1348,7 @@ static int init_mac_station_addr_regs(u8 address_byte_0, } static int init_check_frame_length_mode(int length_check, - volatile u32 *maccfg2_register) + u32 __iomem *maccfg2_register) { u32 value = 0; @@ -1360,7 +1364,7 @@ static int init_check_frame_length_mode(int length_check, } static int init_preamble_length(u8 preamble_length, - volatile u32 *maccfg2_register) + u32 __iomem *maccfg2_register) { u32 value = 0; @@ -1376,7 +1380,7 @@ static int init_preamble_length(u8 preamble_length, static int init_rx_parameters(int reject_broadcast, int receive_short_frames, - int promiscuous, volatile u32 *upsmr_register) + int promiscuous, u32 __iomem *upsmr_register) { u32 value = 0; @@ -1403,7 +1407,7 @@ static int init_rx_parameters(int reject_broadcast, } static int init_max_rx_buff_len(u16 max_rx_buf_len, - volatile u16 *mrblr_register) + u16 __iomem *mrblr_register) { /* max_rx_buf_len value must be a multiple of 128 */ if ((max_rx_buf_len == 0) @@ -1415,8 +1419,8 @@ static int init_max_rx_buff_len(u16 max_rx_buf_len, } static int init_min_frame_len(u16 min_frame_length, - volatile u16 *minflr_register, - volatile u16 *mrblr_register) + u16 __iomem *minflr_register, + u16 __iomem *mrblr_register) { u16 mrblr_value = 0; @@ -1431,8 +1435,8 @@ static int init_min_frame_len(u16 min_frame_length, static int adjust_enet_interface(struct ucc_geth_private *ugeth) { struct ucc_geth_info *ug_info; - struct ucc_geth *ug_regs; - struct ucc_fast *uf_regs; + struct ucc_geth __iomem *ug_regs; + struct ucc_fast __iomem 
*uf_regs; int ret_val; u32 upsmr, maccfg2, tbiBaseAddress; u16 value; @@ -1517,8 +1521,8 @@ static int adjust_enet_interface(struct ucc_geth_private *ugeth) static void adjust_link(struct net_device *dev) { struct ucc_geth_private *ugeth = netdev_priv(dev); - struct ucc_geth *ug_regs; - struct ucc_fast *uf_regs; + struct ucc_geth __iomem *ug_regs; + struct ucc_fast __iomem *uf_regs; struct phy_device *phydev = ugeth->phydev; unsigned long flags; int new_state = 0; @@ -1678,9 +1682,9 @@ static int ugeth_graceful_stop_rx(struct ucc_geth_private * ugeth) uccf = ugeth->uccf; /* Clear acknowledge bit */ - temp = ugeth->p_rx_glbl_pram->rxgstpack; + temp = in_8(&ugeth->p_rx_glbl_pram->rxgstpack); temp &= ~GRACEFUL_STOP_ACKNOWLEDGE_RX; - ugeth->p_rx_glbl_pram->rxgstpack = temp; + out_8(&ugeth->p_rx_glbl_pram->rxgstpack, temp); /* Keep issuing command and checking acknowledge bit until it is asserted, according to spec */ @@ -1692,7 +1696,7 @@ static int ugeth_graceful_stop_rx(struct ucc_geth_private * ugeth) qe_issue_cmd(QE_GRACEFUL_STOP_RX, cecr_subblock, QE_CR_PROTOCOL_ETHERNET, 0); - temp = ugeth->p_rx_glbl_pram->rxgstpack; + temp = in_8(&ugeth->p_rx_glbl_pram->rxgstpack); } while (!(temp & GRACEFUL_STOP_ACKNOWLEDGE_RX)); uccf->stopped_rx = 1; @@ -1991,19 +1995,20 @@ static int ugeth_82xx_filtering_clear_all_addr_in_hash(struct ucc_geth_private * enum enet_addr_type enet_addr_type) { - struct ucc_geth_82xx_address_filtering_pram *p_82xx_addr_filt; + struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt; struct ucc_fast_private *uccf; enum comm_dir comm_dir; struct list_head *p_lh; u16 i, num; - u32 *addr_h, *addr_l; + u32 __iomem *addr_h; + u32 __iomem *addr_l; u8 *p_counter; uccf = ugeth->uccf; p_82xx_addr_filt = - (struct ucc_geth_82xx_address_filtering_pram *) ugeth->p_rx_glbl_pram-> - addressfiltering; + (struct ucc_geth_82xx_address_filtering_pram __iomem *) + ugeth->p_rx_glbl_pram->addressfiltering; if (enet_addr_type == ENET_ADDR_TYPE_GROUP) { addr_h = &(p_82xx_addr_filt->gaddr_h); @@ -2079,7 +2084,7 @@ static int ugeth_82xx_filtering_clear_addr_in_paddr(struct ucc_geth_private *uge static void ucc_geth_memclean(struct ucc_geth_private *ugeth) { u16 i, j; - u8 *bd; + u8 __iomem *bd; if (!ugeth) return; @@ -2154,8 +2159,8 @@ static void ucc_geth_memclean(struct ucc_geth_private *ugeth) for (j = 0; j < ugeth->ug_info->bdRingLenTx[i]; j++) { if (ugeth->tx_skbuff[i][j]) { dma_unmap_single(NULL, - ((struct qe_bd *)bd)->buf, - (in_be32((u32 *)bd) & + in_be32(&((struct qe_bd __iomem *)bd)->buf), + (in_be32((u32 __iomem *)bd) & BD_LENGTH_MASK), DMA_TO_DEVICE); dev_kfree_skb_any(ugeth->tx_skbuff[i][j]); @@ -2182,7 +2187,7 @@ static void ucc_geth_memclean(struct ucc_geth_private *ugeth) for (j = 0; j < ugeth->ug_info->bdRingLenRx[i]; j++) { if (ugeth->rx_skbuff[i][j]) { dma_unmap_single(NULL, - ((struct qe_bd *)bd)->buf, + in_be32(&((struct qe_bd __iomem *)bd)->buf), ugeth->ug_info-> uf_info.max_rx_buf_length + UCC_GETH_RX_DATA_BUF_ALIGNMENT, @@ -2218,8 +2223,8 @@ static void ucc_geth_set_multi(struct net_device *dev) { struct ucc_geth_private *ugeth; struct dev_mc_list *dmi; - struct ucc_fast *uf_regs; - struct ucc_geth_82xx_address_filtering_pram *p_82xx_addr_filt; + struct ucc_fast __iomem *uf_regs; + struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt; int i; ugeth = netdev_priv(dev); @@ -2228,14 +2233,14 @@ static void ucc_geth_set_multi(struct net_device *dev) if (dev->flags & IFF_PROMISC) { - uf_regs->upsmr |= UPSMR_PRO; + out_be32(&uf_regs->upsmr, 
in_be32(&uf_regs->upsmr) | UPSMR_PRO); } else { - uf_regs->upsmr &= ~UPSMR_PRO; + out_be32(&uf_regs->upsmr, in_be32(&uf_regs->upsmr)&~UPSMR_PRO); p_82xx_addr_filt = - (struct ucc_geth_82xx_address_filtering_pram *) ugeth-> + (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth-> p_rx_glbl_pram->addressfiltering; if (dev->flags & IFF_ALLMULTI) { @@ -2270,7 +2275,7 @@ static void ucc_geth_set_multi(struct net_device *dev) static void ucc_geth_stop(struct ucc_geth_private *ugeth) { - struct ucc_geth *ug_regs = ugeth->ug_regs; + struct ucc_geth __iomem *ug_regs = ugeth->ug_regs; struct phy_device *phydev = ugeth->phydev; u32 tempval; @@ -2419,20 +2424,20 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth) return -ENOMEM; } - ugeth->ug_regs = (struct ucc_geth *) ioremap(uf_info->regs, sizeof(struct ucc_geth)); + ugeth->ug_regs = (struct ucc_geth __iomem *) ioremap(uf_info->regs, sizeof(struct ucc_geth)); return 0; } static int ucc_geth_startup(struct ucc_geth_private *ugeth) { - struct ucc_geth_82xx_address_filtering_pram *p_82xx_addr_filt; - struct ucc_geth_init_pram *p_init_enet_pram; + struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt; + struct ucc_geth_init_pram __iomem *p_init_enet_pram; struct ucc_fast_private *uccf; struct ucc_geth_info *ug_info; struct ucc_fast_info *uf_info; - struct ucc_fast *uf_regs; - struct ucc_geth *ug_regs; + struct ucc_fast __iomem *uf_regs; + struct ucc_geth __iomem *ug_regs; int ret_val = -EINVAL; u32 remoder = UCC_GETH_REMODER_INIT; u32 init_enet_pram_offset, cecr_subblock, command, maccfg1; @@ -2440,7 +2445,8 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) u16 temoder = UCC_GETH_TEMODER_INIT; u16 test; u8 function_code = 0; - u8 *bd, *endOfRing; + u8 __iomem *bd; + u8 __iomem *endOfRing; u8 numThreadsRxNumerical, numThreadsTxNumerical; ugeth_vdbg("%s: IN", __FUNCTION__); @@ -2602,11 +2608,11 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) if (UCC_GETH_TX_BD_RING_ALIGNMENT > 4) align = UCC_GETH_TX_BD_RING_ALIGNMENT; ugeth->tx_bd_ring_offset[j] = - kmalloc((u32) (length + align), GFP_KERNEL); + (u32) kmalloc((u32) (length + align), GFP_KERNEL); if (ugeth->tx_bd_ring_offset[j] != 0) ugeth->p_tx_bd_ring[j] = - (void*)((ugeth->tx_bd_ring_offset[j] + + (u8 __iomem *)((ugeth->tx_bd_ring_offset[j] + align) & ~(align - 1)); } else if (uf_info->bd_mem_part == MEM_PART_MURAM) { ugeth->tx_bd_ring_offset[j] = @@ -2614,7 +2620,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) UCC_GETH_TX_BD_RING_ALIGNMENT); if (!IS_ERR_VALUE(ugeth->tx_bd_ring_offset[j])) ugeth->p_tx_bd_ring[j] = - (u8 *) qe_muram_addr(ugeth-> + (u8 __iomem *) qe_muram_addr(ugeth-> tx_bd_ring_offset[j]); } if (!ugeth->p_tx_bd_ring[j]) { @@ -2626,8 +2632,8 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) return -ENOMEM; } /* Zero unused end of bd ring, according to spec */ - memset(ugeth->p_tx_bd_ring[j] + - ug_info->bdRingLenTx[j] * sizeof(struct qe_bd), 0, + memset_io((void __iomem *)(ugeth->p_tx_bd_ring[j] + + ug_info->bdRingLenTx[j] * sizeof(struct qe_bd)), 0, length - ug_info->bdRingLenTx[j] * sizeof(struct qe_bd)); } @@ -2639,10 +2645,10 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) if (UCC_GETH_RX_BD_RING_ALIGNMENT > 4) align = UCC_GETH_RX_BD_RING_ALIGNMENT; ugeth->rx_bd_ring_offset[j] = - kmalloc((u32) (length + align), GFP_KERNEL); + (u32) kmalloc((u32) (length + align), GFP_KERNEL); if (ugeth->rx_bd_ring_offset[j] != 0) ugeth->p_rx_bd_ring[j] = - (void*)((ugeth->rx_bd_ring_offset[j] + 
+ (u8 __iomem *)((ugeth->rx_bd_ring_offset[j] + align) & ~(align - 1)); } else if (uf_info->bd_mem_part == MEM_PART_MURAM) { ugeth->rx_bd_ring_offset[j] = @@ -2650,7 +2656,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) UCC_GETH_RX_BD_RING_ALIGNMENT); if (!IS_ERR_VALUE(ugeth->rx_bd_ring_offset[j])) ugeth->p_rx_bd_ring[j] = - (u8 *) qe_muram_addr(ugeth-> + (u8 __iomem *) qe_muram_addr(ugeth-> rx_bd_ring_offset[j]); } if (!ugeth->p_rx_bd_ring[j]) { @@ -2685,14 +2691,14 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) bd = ugeth->confBd[j] = ugeth->txBd[j] = ugeth->p_tx_bd_ring[j]; for (i = 0; i < ug_info->bdRingLenTx[j]; i++) { /* clear bd buffer */ - out_be32(&((struct qe_bd *)bd)->buf, 0); + out_be32(&((struct qe_bd __iomem *)bd)->buf, 0); /* set bd status and length */ - out_be32((u32 *)bd, 0); + out_be32((u32 __iomem *)bd, 0); bd += sizeof(struct qe_bd); } bd -= sizeof(struct qe_bd); /* set bd status and length */ - out_be32((u32 *)bd, T_W); /* for last BD set Wrap bit */ + out_be32((u32 __iomem *)bd, T_W); /* for last BD set Wrap bit */ } /* Init Rx bds */ @@ -2717,14 +2723,14 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) bd = ugeth->rxBd[j] = ugeth->p_rx_bd_ring[j]; for (i = 0; i < ug_info->bdRingLenRx[j]; i++) { /* set bd status and length */ - out_be32((u32 *)bd, R_I); + out_be32((u32 __iomem *)bd, R_I); /* clear bd buffer */ - out_be32(&((struct qe_bd *)bd)->buf, 0); + out_be32(&((struct qe_bd __iomem *)bd)->buf, 0); bd += sizeof(struct qe_bd); } bd -= sizeof(struct qe_bd); /* set bd status and length */ - out_be32((u32 *)bd, R_W); /* for last BD set Wrap bit */ + out_be32((u32 __iomem *)bd, R_W); /* for last BD set Wrap bit */ } /* @@ -2744,10 +2750,10 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) return -ENOMEM; } ugeth->p_tx_glbl_pram = - (struct ucc_geth_tx_global_pram *) qe_muram_addr(ugeth-> + (struct ucc_geth_tx_global_pram __iomem *) qe_muram_addr(ugeth-> tx_glbl_pram_offset); /* Zero out p_tx_glbl_pram */ - memset(ugeth->p_tx_glbl_pram, 0, sizeof(struct ucc_geth_tx_global_pram)); + memset_io((void __iomem *)ugeth->p_tx_glbl_pram, 0, sizeof(struct ucc_geth_tx_global_pram)); /* Fill global PRAM */ @@ -2768,7 +2774,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) } ugeth->p_thread_data_tx = - (struct ucc_geth_thread_data_tx *) qe_muram_addr(ugeth-> + (struct ucc_geth_thread_data_tx __iomem *) qe_muram_addr(ugeth-> thread_dat_tx_offset); out_be32(&ugeth->p_tx_glbl_pram->tqptr, ugeth->thread_dat_tx_offset); @@ -2779,7 +2785,8 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) /* iphoffset */ for (i = 0; i < TX_IP_OFFSET_ENTRY_MAX; i++) - ugeth->p_tx_glbl_pram->iphoffset[i] = ug_info->iphoffset[i]; + out_8(&ugeth->p_tx_glbl_pram->iphoffset[i], + ug_info->iphoffset[i]); /* SQPTR */ /* Size varies with number of Tx queues */ @@ -2797,7 +2804,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) } ugeth->p_send_q_mem_reg = - (struct ucc_geth_send_queue_mem_region *) qe_muram_addr(ugeth-> + (struct ucc_geth_send_queue_mem_region __iomem *) qe_muram_addr(ugeth-> send_q_mem_reg_offset); out_be32(&ugeth->p_tx_glbl_pram->sqptr, ugeth->send_q_mem_reg_offset); @@ -2841,25 +2848,26 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) } ugeth->p_scheduler = - (struct ucc_geth_scheduler *) qe_muram_addr(ugeth-> + (struct ucc_geth_scheduler __iomem *) qe_muram_addr(ugeth-> scheduler_offset); out_be32(&ugeth->p_tx_glbl_pram->schedulerbasepointer, ugeth->scheduler_offset); /* Zero 
out p_scheduler */ - memset(ugeth->p_scheduler, 0, sizeof(struct ucc_geth_scheduler)); + memset_io((void __iomem *)ugeth->p_scheduler, 0, sizeof(struct ucc_geth_scheduler)); /* Set values in scheduler */ out_be32(&ugeth->p_scheduler->mblinterval, ug_info->mblinterval); out_be16(&ugeth->p_scheduler->nortsrbytetime, ug_info->nortsrbytetime); - ugeth->p_scheduler->fracsiz = ug_info->fracsiz; - ugeth->p_scheduler->strictpriorityq = ug_info->strictpriorityq; - ugeth->p_scheduler->txasap = ug_info->txasap; - ugeth->p_scheduler->extrabw = ug_info->extrabw; + out_8(&ugeth->p_scheduler->fracsiz, ug_info->fracsiz); + out_8(&ugeth->p_scheduler->strictpriorityq, + ug_info->strictpriorityq); + out_8(&ugeth->p_scheduler->txasap, ug_info->txasap); + out_8(&ugeth->p_scheduler->extrabw, ug_info->extrabw); for (i = 0; i < NUM_TX_QUEUES; i++) - ugeth->p_scheduler->weightfactor[i] = - ug_info->weightfactor[i]; + out_8(&ugeth->p_scheduler->weightfactor[i], + ug_info->weightfactor[i]); /* Set pointers to cpucount registers in scheduler */ ugeth->p_cpucount[0] = &(ugeth->p_scheduler->cpucount0); @@ -2890,10 +2898,10 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) return -ENOMEM; } ugeth->p_tx_fw_statistics_pram = - (struct ucc_geth_tx_firmware_statistics_pram *) + (struct ucc_geth_tx_firmware_statistics_pram __iomem *) qe_muram_addr(ugeth->tx_fw_statistics_pram_offset); /* Zero out p_tx_fw_statistics_pram */ - memset(ugeth->p_tx_fw_statistics_pram, + memset_io((void __iomem *)ugeth->p_tx_fw_statistics_pram, 0, sizeof(struct ucc_geth_tx_firmware_statistics_pram)); } @@ -2930,10 +2938,10 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) return -ENOMEM; } ugeth->p_rx_glbl_pram = - (struct ucc_geth_rx_global_pram *) qe_muram_addr(ugeth-> + (struct ucc_geth_rx_global_pram __iomem *) qe_muram_addr(ugeth-> rx_glbl_pram_offset); /* Zero out p_rx_glbl_pram */ - memset(ugeth->p_rx_glbl_pram, 0, sizeof(struct ucc_geth_rx_global_pram)); + memset_io((void __iomem *)ugeth->p_rx_glbl_pram, 0, sizeof(struct ucc_geth_rx_global_pram)); /* Fill global PRAM */ @@ -2953,7 +2961,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) } ugeth->p_thread_data_rx = - (struct ucc_geth_thread_data_rx *) qe_muram_addr(ugeth-> + (struct ucc_geth_thread_data_rx __iomem *) qe_muram_addr(ugeth-> thread_dat_rx_offset); out_be32(&ugeth->p_rx_glbl_pram->rqptr, ugeth->thread_dat_rx_offset); @@ -2976,10 +2984,10 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) return -ENOMEM; } ugeth->p_rx_fw_statistics_pram = - (struct ucc_geth_rx_firmware_statistics_pram *) + (struct ucc_geth_rx_firmware_statistics_pram __iomem *) qe_muram_addr(ugeth->rx_fw_statistics_pram_offset); /* Zero out p_rx_fw_statistics_pram */ - memset(ugeth->p_rx_fw_statistics_pram, 0, + memset_io((void __iomem *)ugeth->p_rx_fw_statistics_pram, 0, sizeof(struct ucc_geth_rx_firmware_statistics_pram)); } @@ -3000,7 +3008,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) } ugeth->p_rx_irq_coalescing_tbl = - (struct ucc_geth_rx_interrupt_coalescing_table *) + (struct ucc_geth_rx_interrupt_coalescing_table __iomem *) qe_muram_addr(ugeth->rx_irq_coalescing_tbl_offset); out_be32(&ugeth->p_rx_glbl_pram->intcoalescingptr, ugeth->rx_irq_coalescing_tbl_offset); @@ -3069,11 +3077,11 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) } ugeth->p_rx_bd_qs_tbl = - (struct ucc_geth_rx_bd_queues_entry *) qe_muram_addr(ugeth-> + (struct ucc_geth_rx_bd_queues_entry __iomem *) qe_muram_addr(ugeth-> rx_bd_qs_tbl_offset); 
out_be32(&ugeth->p_rx_glbl_pram->rbdqptr, ugeth->rx_bd_qs_tbl_offset); /* Zero out p_rx_bd_qs_tbl */ - memset(ugeth->p_rx_bd_qs_tbl, + memset_io((void __iomem *)ugeth->p_rx_bd_qs_tbl, 0, ug_info->numQueuesRx * (sizeof(struct ucc_geth_rx_bd_queues_entry) + sizeof(struct ucc_geth_rx_prefetched_bds))); @@ -3133,7 +3141,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) &ugeth->p_rx_glbl_pram->remoder); /* function code register */ - ugeth->p_rx_glbl_pram->rstate = function_code; + out_8(&ugeth->p_rx_glbl_pram->rstate, function_code); /* initialize extended filtering */ if (ug_info->rxExtendedFiltering) { @@ -3160,7 +3168,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) } ugeth->p_exf_glbl_param = - (struct ucc_geth_exf_global_pram *) qe_muram_addr(ugeth-> + (struct ucc_geth_exf_global_pram __iomem *) qe_muram_addr(ugeth-> exf_glbl_param_offset); out_be32(&ugeth->p_rx_glbl_pram->exfGlobalParam, ugeth->exf_glbl_param_offset); @@ -3175,7 +3183,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) ugeth_82xx_filtering_clear_addr_in_paddr(ugeth, (u8) j); p_82xx_addr_filt = - (struct ucc_geth_82xx_address_filtering_pram *) ugeth-> + (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth-> p_rx_glbl_pram->addressfiltering; ugeth_82xx_filtering_clear_all_addr_in_hash(ugeth, @@ -3307,17 +3315,21 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) return -ENOMEM; } p_init_enet_pram = - (struct ucc_geth_init_pram *) qe_muram_addr(init_enet_pram_offset); + (struct ucc_geth_init_pram __iomem *) qe_muram_addr(init_enet_pram_offset); /* Copy shadow InitEnet command parameter structure into PRAM */ - p_init_enet_pram->resinit1 = ugeth->p_init_enet_param_shadow->resinit1; - p_init_enet_pram->resinit2 = ugeth->p_init_enet_param_shadow->resinit2; - p_init_enet_pram->resinit3 = ugeth->p_init_enet_param_shadow->resinit3; - p_init_enet_pram->resinit4 = ugeth->p_init_enet_param_shadow->resinit4; + out_8(&p_init_enet_pram->resinit1, + ugeth->p_init_enet_param_shadow->resinit1); + out_8(&p_init_enet_pram->resinit2, + ugeth->p_init_enet_param_shadow->resinit2); + out_8(&p_init_enet_pram->resinit3, + ugeth->p_init_enet_param_shadow->resinit3); + out_8(&p_init_enet_pram->resinit4, + ugeth->p_init_enet_param_shadow->resinit4); out_be16(&p_init_enet_pram->resinit5, ugeth->p_init_enet_param_shadow->resinit5); - p_init_enet_pram->largestexternallookupkeysize = - ugeth->p_init_enet_param_shadow->largestexternallookupkeysize; + out_8(&p_init_enet_pram->largestexternallookupkeysize, + ugeth->p_init_enet_param_shadow->largestexternallookupkeysize); out_be32(&p_init_enet_pram->rgftgfrxglobal, ugeth->p_init_enet_param_shadow->rgftgfrxglobal); for (i = 0; i < ENET_INIT_PARAM_MAX_ENTRIES_RX; i++) @@ -3371,7 +3383,7 @@ static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev) #ifdef CONFIG_UGETH_TX_ON_DEMAND struct ucc_fast_private *uccf; #endif - u8 *bd; /* BD pointer */ + u8 __iomem *bd; /* BD pointer */ u32 bd_status; u8 txQ = 0; @@ -3383,7 +3395,7 @@ static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev) /* Start from the next BD that should be filled */ bd = ugeth->txBd[txQ]; - bd_status = in_be32((u32 *)bd); + bd_status = in_be32((u32 __iomem *)bd); /* Save the skb pointer so we can free it later */ ugeth->tx_skbuff[txQ][ugeth->skb_curtx[txQ]] = skb; @@ -3393,7 +3405,7 @@ static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev) 1) & TX_RING_MOD_MASK(ugeth->ug_info->bdRingLenTx[txQ]); /* set up the buffer 
descriptor */ - out_be32(&((struct qe_bd *)bd)->buf, + out_be32(&((struct qe_bd __iomem *)bd)->buf, dma_map_single(NULL, skb->data, skb->len, DMA_TO_DEVICE)); /* printk(KERN_DEBUG"skb->data is 0x%x\n",skb->data); */ @@ -3401,7 +3413,7 @@ static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev) bd_status = (bd_status & T_W) | T_R | T_I | T_L | skb->len; /* set bd status and length */ - out_be32((u32 *)bd, bd_status); + out_be32((u32 __iomem *)bd, bd_status); dev->trans_start = jiffies; @@ -3441,7 +3453,7 @@ static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev) static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit) { struct sk_buff *skb; - u8 *bd; + u8 __iomem *bd; u16 length, howmany = 0; u32 bd_status; u8 *bdBuffer; @@ -3454,11 +3466,11 @@ static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit /* collect received buffers */ bd = ugeth->rxBd[rxQ]; - bd_status = in_be32((u32 *)bd); + bd_status = in_be32((u32 __iomem *)bd); /* while there are received buffers and BD is full (~R_E) */ while (!((bd_status & (R_E)) || (--rx_work_limit < 0))) { - bdBuffer = (u8 *) in_be32(&((struct qe_bd *)bd)->buf); + bdBuffer = (u8 *) in_be32(&((struct qe_bd __iomem *)bd)->buf); length = (u16) ((bd_status & BD_LENGTH_MASK) - 4); skb = ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]]; @@ -3516,7 +3528,7 @@ static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit else bd += sizeof(struct qe_bd); - bd_status = in_be32((u32 *)bd); + bd_status = in_be32((u32 __iomem *)bd); } ugeth->rxBd[rxQ] = bd; @@ -3527,11 +3539,11 @@ static int ucc_geth_tx(struct net_device *dev, u8 txQ) { /* Start from the next BD that should be filled */ struct ucc_geth_private *ugeth = netdev_priv(dev); - u8 *bd; /* BD pointer */ + u8 __iomem *bd; /* BD pointer */ u32 bd_status; bd = ugeth->confBd[txQ]; - bd_status = in_be32((u32 *)bd); + bd_status = in_be32((u32 __iomem *)bd); /* Normal processing. 
*/ while ((bd_status & T_R) == 0) { @@ -3561,7 +3573,7 @@ static int ucc_geth_tx(struct net_device *dev, u8 txQ) bd += sizeof(struct qe_bd); else bd = ugeth->p_tx_bd_ring[txQ]; - bd_status = in_be32((u32 *)bd); + bd_status = in_be32((u32 __iomem *)bd); } ugeth->confBd[txQ] = bd; return 0; @@ -3910,7 +3922,7 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma return -EINVAL; } } else { - prop = of_get_property(np, "rx-clock", NULL); + prop = of_get_property(np, "tx-clock", NULL); if (!prop) { printk(KERN_ERR "ucc_geth: mising tx-clock-name property\n"); diff --git a/drivers/net/ucc_geth.h b/drivers/net/ucc_geth.h index 9f8b7580a3a..abc0e224263 100644 --- a/drivers/net/ucc_geth.h +++ b/drivers/net/ucc_geth.h @@ -700,8 +700,8 @@ struct ucc_geth_82xx_address_filtering_pram { u32 iaddr_l; /* individual address filter, low */ u32 gaddr_h; /* group address filter, high */ u32 gaddr_l; /* group address filter, low */ - struct ucc_geth_82xx_enet_address taddr; - struct ucc_geth_82xx_enet_address paddr[NUM_OF_PADDRS]; + struct ucc_geth_82xx_enet_address __iomem taddr; + struct ucc_geth_82xx_enet_address __iomem paddr[NUM_OF_PADDRS]; u8 res0[0x40 - 0x38]; } __attribute__ ((packed)); @@ -1186,40 +1186,40 @@ struct ucc_geth_private { struct ucc_fast_private *uccf; struct net_device *dev; struct napi_struct napi; - struct ucc_geth *ug_regs; + struct ucc_geth __iomem *ug_regs; struct ucc_geth_init_pram *p_init_enet_param_shadow; - struct ucc_geth_exf_global_pram *p_exf_glbl_param; + struct ucc_geth_exf_global_pram __iomem *p_exf_glbl_param; u32 exf_glbl_param_offset; - struct ucc_geth_rx_global_pram *p_rx_glbl_pram; + struct ucc_geth_rx_global_pram __iomem *p_rx_glbl_pram; u32 rx_glbl_pram_offset; - struct ucc_geth_tx_global_pram *p_tx_glbl_pram; + struct ucc_geth_tx_global_pram __iomem *p_tx_glbl_pram; u32 tx_glbl_pram_offset; - struct ucc_geth_send_queue_mem_region *p_send_q_mem_reg; + struct ucc_geth_send_queue_mem_region __iomem *p_send_q_mem_reg; u32 send_q_mem_reg_offset; - struct ucc_geth_thread_data_tx *p_thread_data_tx; + struct ucc_geth_thread_data_tx __iomem *p_thread_data_tx; u32 thread_dat_tx_offset; - struct ucc_geth_thread_data_rx *p_thread_data_rx; + struct ucc_geth_thread_data_rx __iomem *p_thread_data_rx; u32 thread_dat_rx_offset; - struct ucc_geth_scheduler *p_scheduler; + struct ucc_geth_scheduler __iomem *p_scheduler; u32 scheduler_offset; - struct ucc_geth_tx_firmware_statistics_pram *p_tx_fw_statistics_pram; + struct ucc_geth_tx_firmware_statistics_pram __iomem *p_tx_fw_statistics_pram; u32 tx_fw_statistics_pram_offset; - struct ucc_geth_rx_firmware_statistics_pram *p_rx_fw_statistics_pram; + struct ucc_geth_rx_firmware_statistics_pram __iomem *p_rx_fw_statistics_pram; u32 rx_fw_statistics_pram_offset; - struct ucc_geth_rx_interrupt_coalescing_table *p_rx_irq_coalescing_tbl; + struct ucc_geth_rx_interrupt_coalescing_table __iomem *p_rx_irq_coalescing_tbl; u32 rx_irq_coalescing_tbl_offset; - struct ucc_geth_rx_bd_queues_entry *p_rx_bd_qs_tbl; + struct ucc_geth_rx_bd_queues_entry __iomem *p_rx_bd_qs_tbl; u32 rx_bd_qs_tbl_offset; - u8 *p_tx_bd_ring[NUM_TX_QUEUES]; + u8 __iomem *p_tx_bd_ring[NUM_TX_QUEUES]; u32 tx_bd_ring_offset[NUM_TX_QUEUES]; - u8 *p_rx_bd_ring[NUM_RX_QUEUES]; + u8 __iomem *p_rx_bd_ring[NUM_RX_QUEUES]; u32 rx_bd_ring_offset[NUM_RX_QUEUES]; - u8 *confBd[NUM_TX_QUEUES]; - u8 *txBd[NUM_TX_QUEUES]; - u8 *rxBd[NUM_RX_QUEUES]; + u8 __iomem *confBd[NUM_TX_QUEUES]; + u8 __iomem *txBd[NUM_TX_QUEUES]; + u8 __iomem *rxBd[NUM_RX_QUEUES]; int 
badFrame[NUM_RX_QUEUES]; u16 cpucount[NUM_TX_QUEUES]; - volatile u16 *p_cpucount[NUM_TX_QUEUES]; + u16 __iomem *p_cpucount[NUM_TX_QUEUES]; int indAddrRegUsed[NUM_OF_PADDRS]; u8 paddr[NUM_OF_PADDRS][ENET_NUM_OCTETS_PER_ADDRESS]; /* ethernet address */ u8 numGroupAddrInHash; @@ -1251,4 +1251,12 @@ struct ucc_geth_private { int oldlink; }; +void uec_set_ethtool_ops(struct net_device *netdev); +int init_flow_control_params(u32 automatic_flow_control_mode, + int rx_flow_control_enable, int tx_flow_control_enable, + u16 pause_period, u16 extension_field, + u32 __iomem *upsmr_register, u32 __iomem *uempr_register, + u32 __iomem *maccfg1_register); + + #endif /* __UCC_GETH_H__ */ diff --git a/drivers/net/ucc_geth_ethtool.c b/drivers/net/ucc_geth_ethtool.c index 33200038a14..5f176f2b1c1 100644 --- a/drivers/net/ucc_geth_ethtool.c +++ b/drivers/net/ucc_geth_ethtool.c @@ -108,12 +108,6 @@ static char rx_fw_stat_gstrings[][ETH_GSTRING_LEN] = { #define UEC_TX_FW_STATS_LEN ARRAY_SIZE(tx_fw_stat_gstrings) #define UEC_RX_FW_STATS_LEN ARRAY_SIZE(rx_fw_stat_gstrings) -extern int init_flow_control_params(u32 automatic_flow_control_mode, - int rx_flow_control_enable, - int tx_flow_control_enable, u16 pause_period, - u16 extension_field, volatile u32 *upsmr_register, - volatile u32 *uempr_register, volatile u32 *maccfg1_register); - static int uec_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) { diff --git a/drivers/net/ucc_geth_mii.c b/drivers/net/ucc_geth_mii.c index 2af49078100..94047473692 100644 --- a/drivers/net/ucc_geth_mii.c +++ b/drivers/net/ucc_geth_mii.c @@ -104,7 +104,7 @@ int uec_mdio_read(struct mii_bus *bus, int mii_id, int regnum) } /* Reset the MIIM registers, and wait for the bus to free */ -int uec_mdio_reset(struct mii_bus *bus) +static int uec_mdio_reset(struct mii_bus *bus) { struct ucc_mii_mng __iomem *regs = (void __iomem *)bus->priv; unsigned int timeout = PHY_INIT_TIMEOUT; @@ -240,7 +240,7 @@ reg_map_fail: return err; } -int uec_mdio_remove(struct of_device *ofdev) +static int uec_mdio_remove(struct of_device *ofdev) { struct device *device = &ofdev->dev; struct mii_bus *bus = dev_get_drvdata(device); diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c index 6f245cfb662..dc6f097062d 100644 --- a/drivers/net/usb/asix.c +++ b/drivers/net/usb/asix.c @@ -1381,6 +1381,10 @@ static const struct usb_device_id products [] = { USB_DEVICE (0x0411, 0x003d), .driver_info = (unsigned long) &ax8817x_info, }, { + // Buffalo LUA-U2-GT 10/100/1000 + USB_DEVICE (0x0411, 0x006e), + .driver_info = (unsigned long) &ax88178_info, +}, { // Sitecom LN-029 "USB 2.0 10/100 Ethernet adapter" USB_DEVICE (0x6189, 0x182d), .driver_info = (unsigned long) &ax8817x_info, diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig index 8005dd16fb4..d5140aed7b7 100644 --- a/drivers/net/wan/Kconfig +++ b/drivers/net/wan/Kconfig @@ -150,11 +150,9 @@ config HDLC_FR config HDLC_PPP tristate "Synchronous Point-to-Point Protocol (PPP) support" - depends on HDLC && BROKEN + depends on HDLC help Generic HDLC driver supporting PPP over WAN connections. - This module is currently broken and will cause a kernel panic - when a device configured in PPP mode is activated. It will be replaced by new PPP implementation in Linux 2.6.26. 
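A note on the TSO header cache added to drivers/net/sfc/tx.c above: headers that fit in TSOH_STD_SIZE (128) bytes are carved out of a DMA-coherent page by efx_tsoh_block_alloc() and threaded onto the per-queue tso_headers_free list, while oversize headers fall back to kmalloc() plus pci_map_single(); the union in struct efx_tso_header makes unmap_len == 0 the discriminator for "came from the free list". The user-space sketch below models only the free-list threading, not the DMA mapping, and all names (hdr_block, hdr_get, hdr_put) are hypothetical:

#include <stdio.h>
#include <stdlib.h>

#define POOL_SIZE 4096   /* stands in for one DMA-coherent page */
#define BLK 128          /* stands in for TSOH_STD_SIZE */

struct hdr_block {
        struct hdr_block *next;   /* embedded free-list link */
};

static struct hdr_block *free_list;

/* Carve the pool into BLK-sized blocks and push each onto the free
 * list, as efx_tsoh_block_alloc() does with a freshly allocated page. */
static void pool_init(char *pool)
{
        char *p;

        for (p = pool; p + BLK <= pool + POOL_SIZE; p += BLK) {
                struct hdr_block *b = (struct hdr_block *)p;

                b->next = free_list;
                free_list = b;
        }
}

static struct hdr_block *hdr_get(void)
{
        struct hdr_block *b = free_list;

        if (b)
                free_list = b->next;   /* pop */
        return b;
}

static void hdr_put(struct hdr_block *b)
{
        b->next = free_list;           /* push back, like efx_tsoh_free() */
        free_list = b;
}

int main(void)
{
        char *pool = malloc(POOL_SIZE);
        struct hdr_block *b;

        if (!pool)
                return 1;
        pool_init(pool);
        b = hdr_get();
        printf("got block %p\n", (void *)b);
        hdr_put(b);
        free(pool);
        return 0;
}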
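tso_start_new_packet() above copies the original headers once per output segment and then patches them: the TCP sequence number advances by gso_size per segment, the IPv4 ID increments by one each time (the "suitable gaps in the IP ID space" the comment mentions), tot_len is recomputed from the payload actually carried, and FIN/PSH are suppressed on every segment but the last. A self-contained sketch of that arithmetic, with hypothetical names and example header sizes:

#include <stdio.h>

/* Walk pay_len bytes of payload in mss-sized packets, reproducing the
 * per-segment header fixups: seq += mss, ip_id++, tot_len covers the IP
 * and TCP headers plus this segment's payload, and only the final
 * segment may keep FIN/PSH from the original header. */
static void tso_walk(unsigned hdr_len, unsigned eth_len, unsigned mss,
                     unsigned pay_len, unsigned seq0, unsigned short id0)
{
        unsigned remaining = pay_len, seq = seq0;
        unsigned short id = id0;

        while (remaining) {
                unsigned payload = remaining < mss ? remaining : mss;
                /* IP total length excludes the Ethernet header */
                unsigned short tot_len =
                        (unsigned short)(hdr_len - eth_len + payload);
                int last = (payload == remaining);

                printf("seq=%u id=%u tot_len=%u last=%d\n",
                       seq, id, tot_len, last);

                seq += mss;     /* matches st->seqnum += gso_size */
                id++;           /* matches st->p.ipv4_id++ */
                remaining -= payload;
        }
}

int main(void)
{
        /* e.g. 14-byte Ethernet + 20-byte IP + 20-byte TCP headers,
         * 1448-byte MSS, 4000 bytes of payload: three segments. */
        tso_walk(54, 14, 1448, 4000, 1000, 42);
        return 0;
}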
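The uli526x change above fixes the setup frame on big-endian machines: each 16-bit half of a MAC address occupies one 32-bit word of the filter frame, and on big-endian hosts it has to be shifted into the upper halfword, hence a FLT_SHIFT of 16 there and 0 on little-endian. A user-space model of the same idea (fill_filter_words is hypothetical, and the endianness test uses glibc's <endian.h> rather than the kernel's __BIG_ENDIAN):

#include <stdio.h>
#include <stdint.h>
#include <endian.h>

#if __BYTE_ORDER == __BIG_ENDIAN
#define FLT_SHIFT 16
#else
#define FLT_SHIFT 0
#endif

/* Place each 16-bit MAC halfword into its 32-bit setup-frame word,
 * shifted so the hardware sees it in the same byte lanes regardless
 * of host endianness. */
static void fill_filter_words(uint32_t *frame, const uint16_t mac[3])
{
        int i;

        for (i = 0; i < 3; i++)
                frame[i] = (uint32_t)mac[i] << FLT_SHIFT;
}

int main(void)
{
        uint16_t mac[3] = { 0x0011, 0x2233, 0x4455 };
        uint32_t frame[3];
        int i;

        fill_filter_words(frame, mac);
        for (i = 0; i < 3; i++)
                printf("word %d: 0x%08x\n", i, frame[i]);
        return 0;
}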
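Most of the ucc_geth churn above is one pattern applied everywhere: pointers into the QE's memory-mapped parameter RAM gain the __iomem annotation, and direct structure stores become accessor calls (out_8, out_be16, out_be32, in_be32, memset_io). The annotation lets sparse flag stray dereferences, and the accessors guarantee big-endian, un-reordered device access. A minimal kernel-style sketch of the convention, assuming a powerpc build where out_8/out_be32 come from asm/io.h (demo_regs and demo_program are hypothetical, not part of this driver):

#include <linux/io.h>

/* Hypothetical register block living in device memory. */
struct demo_regs {
        u32 ctrl;
        u8 mode;
} __attribute__ ((packed));

static void demo_program(struct demo_regs __iomem *regs, u32 ctrl, u8 mode)
{
        /* "regs->ctrl = ctrl" would be wrong three ways: no __iomem
         * checking, host byte order, and the compiler may reorder it. */
        out_be32(&regs->ctrl, ctrl);
        out_8(&regs->mode, mode);
}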
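The cosa.c, hdlc_ppp.c and hostess_sv11.c hunks that follow all make the same move: sync-PPP channel state migrates from dev->priv to the new dev->ml_priv pointer, because the generic HDLC layer already owns dev->priv for its own state and the two users were clobbering each other. A minimal sketch of the resulting convention (demo_channel and the demo_* handlers are hypothetical):

#include <linux/netdevice.h>

/* Hypothetical channel state for a syncppp-style WAN driver.  It is
 * reached through dev->ml_priv; dev->priv stays reserved for the
 * generic HDLC layer's own bookkeeping. */
struct demo_channel {
        struct net_device_stats stats;
        int rxtx_busy;
};

static void demo_bind(struct net_device *dev, struct demo_channel *chan)
{
        dev->ml_priv = chan;    /* mirrors "d->ml_priv = chan" in cosa.c */
}

static int demo_open(struct net_device *dev)
{
        struct demo_channel *chan = dev->ml_priv;   /* not dev->priv */

        chan->rxtx_busy = 0;
        netif_start_queue(dev);
        return 0;
}

static struct net_device_stats *demo_get_stats(struct net_device *dev)
{
        struct demo_channel *chan = dev->ml_priv;

        return &chan->stats;
}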
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
index 45ddfc9763c..b0fce1387ea 100644
--- a/drivers/net/wan/cosa.c
+++ b/drivers/net/wan/cosa.c
@@ -629,7 +629,7 @@ static void sppp_channel_init(struct channel_data *chan)
d->base_addr = chan->cosa->datareg;
d->irq = chan->cosa->irq;
d->dma = chan->cosa->dma;
- d->priv = chan;
+ d->ml_priv = chan;
sppp_attach(&chan->pppdev);
if (register_netdev(d)) {
printk(KERN_WARNING "%s: register_netdev failed.\n", d->name);
@@ -650,7 +650,7 @@ static void sppp_channel_delete(struct channel_data *chan)
static int cosa_sppp_open(struct net_device *d)
{
- struct channel_data *chan = d->priv;
+ struct channel_data *chan = d->ml_priv;
int err;
unsigned long flags;
@@ -690,7 +690,7 @@ static int cosa_sppp_open(struct net_device *d)
static int cosa_sppp_tx(struct sk_buff *skb, struct net_device *dev)
{
- struct channel_data *chan = dev->priv;
+ struct channel_data *chan = dev->ml_priv;
netif_stop_queue(dev);
@@ -701,7 +701,7 @@ static int cosa_sppp_tx(struct sk_buff *skb, struct net_device *dev)
static void cosa_sppp_timeout(struct net_device *dev)
{
- struct channel_data *chan = dev->priv;
+ struct channel_data *chan = dev->ml_priv;
if (test_bit(RXBIT, &chan->cosa->rxtx)) {
chan->stats.rx_errors++;
@@ -720,7 +720,7 @@ static void cosa_sppp_timeout(struct net_device *dev)
static int cosa_sppp_close(struct net_device *d)
{
- struct channel_data *chan = d->priv;
+ struct channel_data *chan = d->ml_priv;
unsigned long flags;
netif_stop_queue(d);
@@ -800,7 +800,7 @@ static int sppp_tx_done(struct channel_data *chan, int size)
static struct net_device_stats *cosa_net_stats(struct net_device *dev)
{
- struct channel_data *chan = dev->priv;
+ struct channel_data *chan = dev->ml_priv;
return &chan->stats;
}
@@ -1217,7 +1217,7 @@ static int cosa_sppp_ioctl(struct net_device *dev, struct ifreq *ifr,
int cmd)
{
int rv;
- struct channel_data *chan = dev->priv;
+ struct channel_data *chan = dev->ml_priv;
rv = cosa_ioctl_common(chan->cosa, chan, cmd, (unsigned long)ifr->ifr_data);
if (rv == -ENOIOCTLCMD) {
return sppp_do_ioctl(dev, ifr, cmd);
diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c
index 10396d9686f..00308337928 100644
--- a/drivers/net/wan/hdlc_ppp.c
+++ b/drivers/net/wan/hdlc_ppp.c
@@ -45,7 +45,7 @@ static int ppp_open(struct net_device *dev)
int (*old_ioctl)(struct net_device *, struct ifreq *, int);
int result;
- dev->priv = &state(hdlc)->syncppp_ptr;
+ dev->ml_priv = &state(hdlc)->syncppp_ptr;
state(hdlc)->syncppp_ptr = &state(hdlc)->pppdev;
state(hdlc)->pppdev.dev = dev;
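Every WAN hunk in this stretch, cosa.c and hdlc_ppp.c above and hostess_sv11.c, lmc_main.c and sealevel.c below, makes the same mechanical change: the syncppp protocol state moves from the general-purpose dev->priv pointer to the dedicated dev->ml_priv ("mid-layer private") field, so the protocol layer and the driver stop competing for one pointer. A sketch of the consumer side, with a hypothetical channel structure (demo_channel and demo_open are illustrative only):

#include <linux/netdevice.h>

/* Hypothetical per-channel state, playing the role of channel_data
 * in cosa.c or sv11_device in hostess_sv11.c. */
struct demo_channel {
	int opened;
};

static int demo_open(struct net_device *dev)
{
	/* The protocol mid-layer owns ml_priv; driver-private data can
	 * keep using netdev_priv(dev) without colliding with it. */
	struct demo_channel *chan = dev->ml_priv;

	chan->opened = 1;
	netif_start_queue(dev);
	return 0;
}

The setup side is then a single assignment before register_netdev(), exactly as the sv11_init() and lmc_init_one() hunks below do.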
diff --git a/drivers/net/wan/hostess_sv11.c b/drivers/net/wan/hostess_sv11.c
index 83dbc924fcb..f3065d3473f 100644
--- a/drivers/net/wan/hostess_sv11.c
+++ b/drivers/net/wan/hostess_sv11.c
@@ -75,7 +75,7 @@ static void hostess_input(struct z8530_channel *c, struct sk_buff *skb)
static int hostess_open(struct net_device *d)
{
- struct sv11_device *sv11=d->priv;
+ struct sv11_device *sv11=d->ml_priv;
int err = -1;
/*
@@ -128,7 +128,7 @@ static int hostess_open(struct net_device *d)
static int hostess_close(struct net_device *d)
{
- struct sv11_device *sv11=d->priv;
+ struct sv11_device *sv11=d->ml_priv;
/*
* Discard new frames
*/
@@ -159,14 +159,14 @@ static int hostess_close(struct net_device *d)
static int hostess_ioctl(struct net_device *d, struct ifreq *ifr, int cmd)
{
- /* struct sv11_device *sv11=d->priv;
+ /* struct sv11_device *sv11=d->ml_priv;
z8530_ioctl(d,&sv11->sync.chanA,ifr,cmd) */
return sppp_do_ioctl(d, ifr,cmd);
}
static struct net_device_stats *hostess_get_stats(struct net_device *d)
{
- struct sv11_device *sv11=d->priv;
+ struct sv11_device *sv11=d->ml_priv;
if(sv11)
return z8530_get_stats(&sv11->sync.chanA);
else
@@ -179,7 +179,7 @@ static struct net_device_stats *hostess_get_stats(struct net_device *d)
static int hostess_queue_xmit(struct sk_buff *skb, struct net_device *d)
{
- struct sv11_device *sv11=d->priv;
+ struct sv11_device *sv11=d->ml_priv;
return z8530_queue_xmit(&sv11->sync.chanA, skb);
}
@@ -325,6 +325,7 @@ static struct sv11_device *sv11_init(int iobase, int irq)
/*
* Initialise the PPP components
*/
+ d->ml_priv = sv;
sppp_attach(&sv->netdev);
/*
@@ -333,7 +334,6 @@ static struct sv11_device *sv11_init(int iobase, int irq)
d->base_addr = iobase;
d->irq = irq;
- d->priv = sv;
if(register_netdev(d))
{
diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c
index b5860b97a93..24fd613466b 100644
--- a/drivers/net/wan/lapbether.c
+++ b/drivers/net/wan/lapbether.c
@@ -459,6 +459,7 @@ static void __exit lapbeth_cleanup_driver(void)
list_for_each_safe(entry, tmp, &lapbeth_devices) {
lapbeth = list_entry(entry, struct lapbethdev, node);
+ dev_put(lapbeth->ethdev);
unregister_netdevice(lapbeth->axdev);
}
rtnl_unlock();
diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c
index 6635ecef36e..62133cee446 100644
--- a/drivers/net/wan/lmc/lmc_main.c
+++ b/drivers/net/wan/lmc/lmc_main.c
@@ -891,6 +891,7 @@ static int __devinit lmc_init_one(struct pci_dev *pdev,
/* Initialize the sppp layer */
/* An ioctl can cause a subsequent detach for raw frame interface */
+ dev->ml_priv = sc;
sc->if_type = LMC_PPP;
sc->check = 0xBEAFCAFE;
dev->base_addr = pci_resource_start(pdev, 0);
diff --git a/drivers/net/wan/sealevel.c b/drivers/net/wan/sealevel.c
index 11276bf3149..44a89df1b8b 100644
--- a/drivers/net/wan/sealevel.c
+++ b/drivers/net/wan/sealevel.c
@@ -241,6 +241,7 @@ static inline struct slvl_device *slvl_alloc(int iobase, int irq)
return NULL;
sv = d->priv;
+ d->ml_priv = sv;
sv->if_ptr = &sv->pppdev;
sv->pppdev.dev = d;
d->base_addr = iobase;
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig
index cc0006900b3..995dd537083 100644
--- a/drivers/net/wireless/iwlwifi/Kconfig
+++ b/drivers/net/wireless/iwlwifi/Kconfig
@@ -1,6 +1,5 @@
config IWLWIFI
- bool
- default n
+ tristate
config IWLCORE
tristate "Intel Wireless Wifi Core"
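The lapbether.c hunk above is a reference-count fix: the driver holds a reference on the underlying Ethernet device while bound to it, so module cleanup must drop that reference with dev_put() before unregistering its own device, or the lower device's refcount never reaches zero and it can never be freed (the classic symptom being the "unregister_netdevice: waiting for ... to become free" message on unload). The pairing, sketched with hypothetical helpers (demo_bind/demo_unbind are not kernel APIs):

#include <linux/netdevice.h>

static struct net_device *demo_bind(struct net *net, const char *name)
{
	/* dev_get_by_name() returns the device with its refcount
	 * already raised, i.e. an implicit dev_hold(). */
	return dev_get_by_name(net, name);	/* may be NULL */
}

static void demo_unbind(struct net_device *lower)
{
	/* Every hold must be matched by exactly one dev_put(). */
	dev_put(lower);
}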
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
index 8464397f781..9df48b33f0c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
@@ -666,7 +666,7 @@ static void iwl3945_rx_reply_rx(struct iwl3945_priv *priv,
rx_status.flag = 0;
rx_status.mactime = le64_to_cpu(rx_end->timestamp);
rx_status.freq =
- ieee80211_frequency_to_channel(le16_to_cpu(rx_hdr->channel));
+ ieee80211_channel_to_frequency(le16_to_cpu(rx_hdr->channel));
rx_status.band = (rx_hdr->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965-rs.c b/drivers/net/wireless/iwlwifi/iwl-4965-rs.c
index 67356d9a0c5..24dee00b0e8 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-4965-rs.c
@@ -168,8 +168,8 @@ struct iwl4965_lq_sta {
struct dentry *rs_sta_dbgfs_tx_agg_tid_en_file;
#endif
u32 dbg_fixed_rate;
- struct iwl_priv *drv;
#endif
+ struct iwl_priv *drv;
};
static void rs_rate_scale_perform(struct iwl_priv *priv,
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
index 9546582e983..5d675e39bab 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
@@ -3128,7 +3128,7 @@ static void iwl4965_rx_reply_rx(struct iwl_priv *priv,
rx_status.mactime = le64_to_cpu(rx_start->timestamp);
rx_status.freq =
- ieee80211_frequency_to_channel(le16_to_cpu(rx_start->channel));
+ ieee80211_channel_to_frequency(le16_to_cpu(rx_start->channel));
rx_status.band = (rx_start->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
rx_status.rate_idx =
diff --git a/drivers/net/wireless/prism54/islpci_dev.c b/drivers/net/wireless/prism54/islpci_dev.c
index 04c2638d75a..9196825ed1b 100644
--- a/drivers/net/wireless/prism54/islpci_dev.c
+++ b/drivers/net/wireless/prism54/islpci_dev.c
@@ -388,8 +388,15 @@ islpci_open(struct net_device *ndev)
netif_start_queue(ndev);
- /* Turn off carrier unless we know we have associated */
- netif_carrier_off(ndev);
+ /* Turn off carrier if in STA or Ad-hoc mode. It will be turned on
+ * once the firmware receives a trap of being associated
+ * (GEN_OID_LINKSTATE). In other modes (AP or WDS or monitor) we
+ * should just leave the carrier on as it's expected the firmware
+ * won't send us a trigger. */
+ if (priv->iw_mode == IW_MODE_INFRA || priv->iw_mode == IW_MODE_ADHOC)
+ netif_carrier_off(ndev);
+ else
+ netif_carrier_on(ndev);
return 0;
}
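The two iwlwifi hunks above fix a transposed helper: the hardware RX descriptor reports a channel number, while mac80211's rx_status.freq expects a frequency in MHz, so the correct call is ieee80211_channel_to_frequency(), not the inverse. The mapping itself is simple arithmetic; a standalone sketch of the standard channel plan (plain C, independent of the kernel helper):

#include <stdio.h>

/* Standard IEEE 802.11 channel number -> centre frequency (MHz),
 * the same mapping ieee80211_channel_to_frequency() implements. */
static int channel_to_frequency(int chan)
{
	if (chan == 14)
		return 2484;		/* the odd one out in 2.4 GHz */
	if (chan < 14)
		return 2407 + chan * 5;	/* 2.4 GHz band */
	return 5000 + chan * 5;		/* 5 GHz band */
}

int main(void)
{
	printf("channel 6  -> %d MHz\n", channel_to_frequency(6));	/* 2437 */
	printf("channel 36 -> %d MHz\n", channel_to_frequency(36));	/* 5180 */
	return 0;
}

With the transposed call, a channel number was handed to mac80211 where it expected MHz, so every received frame was reported on a nonsense frequency.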
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index 9929b15e28b..61510c51935 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -1028,8 +1028,10 @@ static int rt2x00lib_initialize(struct rt2x00_dev *rt2x00dev)
* Initialize the device.
*/
status = rt2x00dev->ops->lib->initialize(rt2x00dev);
- if (status)
- goto exit;
+ if (status) {
+ rt2x00queue_uninitialize(rt2x00dev);
+ return status;
+ }
__set_bit(DEVICE_INITIALIZED, &rt2x00dev->flags);
@@ -1039,11 +1041,6 @@ static int rt2x00lib_initialize(struct rt2x00_dev *rt2x00dev)
rt2x00rfkill_register(rt2x00dev);
return 0;
-
-exit:
- rt2x00lib_uninitialize(rt2x00dev);
-
- return status;
}
int rt2x00lib_start(struct rt2x00_dev *rt2x00dev)
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.c b/drivers/net/wireless/rt2x00/rt2x00pci.c
index 9e5d94e44c5..c17078eac19 100644
--- a/drivers/net/wireless/rt2x00/rt2x00pci.c
+++ b/drivers/net/wireless/rt2x00/rt2x00pci.c
@@ -314,13 +314,14 @@ int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev)
if (status) {
ERROR(rt2x00dev, "IRQ %d allocation failed (error %d).\n",
pci_dev->irq, status);
- return status;
+ goto exit;
}
return 0;
exit:
- rt2x00pci_uninitialize(rt2x00dev);
+ queue_for_each(rt2x00dev, queue)
+ rt2x00pci_free_queue_dma(rt2x00dev, queue);
return status;
}
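Both rt2x00 hunks above tighten error-path hygiene: a failing init function should unwind exactly the steps that already succeeded, nothing more, whereas the old code jumped to a label that ran the full uninitialize path against a half-initialized device. The conventional shape, sketched with hypothetical demo_* helpers (declarations included so the fragment stands alone):

struct demo_dev;

int demo_alloc_queues(struct demo_dev *dev);
int demo_request_irq(struct demo_dev *dev);
int demo_start_hw(struct demo_dev *dev);
void demo_free_irq(struct demo_dev *dev);
void demo_free_queues(struct demo_dev *dev);

/* Staged init: each successful step has one matching unwind label,
 * and failures fall through the labels in reverse order. */
int demo_initialize(struct demo_dev *dev)
{
	int err;

	err = demo_alloc_queues(dev);
	if (err)
		return err;		/* nothing to undo yet */

	err = demo_request_irq(dev);
	if (err)
		goto err_free_queues;	/* undo step 1 only */

	err = demo_start_hw(dev);
	if (err)
		goto err_free_irq;	/* undo step 2, then step 1 */

	return 0;

err_free_irq:
	demo_free_irq(dev);
err_free_queues:
	demo_free_queues(dev);
	return err;
}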
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index 98af4d26583..b64f2f5d0d5 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -2362,6 +2362,7 @@ static int rt61pci_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb,
{
struct rt2x00_dev *rt2x00dev = hw->priv;
struct rt2x00_intf *intf = vif_to_intf(control->vif);
+ struct queue_entry_priv_pci_tx *priv_tx;
struct skb_frame_desc *skbdesc;
unsigned int beacon_base;
u32 reg;
@@ -2369,21 +2370,8 @@ static int rt61pci_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb,
if (unlikely(!intf->beacon))
return -ENOBUFS;
- /*
- * We need to append the descriptor in front of the
- * beacon frame.
- */
- if (skb_headroom(skb) < intf->beacon->queue->desc_size) {
- if (pskb_expand_head(skb, intf->beacon->queue->desc_size,
- 0, GFP_ATOMIC))
- return -ENOMEM;
- }
-
- /*
- * Add the descriptor in front of the skb.
- */
- skb_push(skb, intf->beacon->queue->desc_size);
- memset(skb->data, 0, intf->beacon->queue->desc_size);
+ priv_tx = intf->beacon->priv_data;
+ memset(priv_tx->desc, 0, intf->beacon->queue->desc_size);
/*
* Fill in skb descriptor
@@ -2391,9 +2379,9 @@ static int rt61pci_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb,
skbdesc = get_skb_frame_desc(skb);
memset(skbdesc, 0, sizeof(*skbdesc));
skbdesc->flags |= FRAME_DESC_DRIVER_GENERATED;
- skbdesc->data = skb->data + intf->beacon->queue->desc_size;
- skbdesc->data_len = skb->len - intf->beacon->queue->desc_size;
- skbdesc->desc = skb->data;
+ skbdesc->data = skb->data;
+ skbdesc->data_len = skb->len;
+ skbdesc->desc = priv_tx->desc;
skbdesc->desc_len = intf->beacon->queue->desc_size;
skbdesc->entry = intf->beacon;
@@ -2414,7 +2402,10 @@ static int rt61pci_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb,
rt2x00lib_write_tx_desc(rt2x00dev, skb, control);
beacon_base = HW_BEACON_OFFSET(intf->beacon->entry_idx);
rt2x00pci_register_multiwrite(rt2x00dev, beacon_base,
- skb->data, skb->len);
+ skbdesc->desc, skbdesc->desc_len);
+ rt2x00pci_register_multiwrite(rt2x00dev,
+ beacon_base + skbdesc->desc_len,
+ skbdesc->data, skbdesc->data_len);
rt61pci_kick_tx_queue(rt2x00dev, QID_BEACON);
return 0;
@@ -2479,7 +2470,7 @@ static const struct data_queue_desc rt61pci_queue_tx = {
static const struct data_queue_desc rt61pci_queue_bcn = {
.entry_num = 4 * BEACON_ENTRIES,
- .data_size = MGMT_FRAME_SIZE,
+ .data_size = 0, /* No DMA required for beacons */
.desc_size = TXINFO_SIZE,
.priv_size = sizeof(struct queue_entry_priv_pci_tx),
};
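The rt61pci rewrite above drops the trick of prepending the TX descriptor to the beacon skb (which forced headroom checks and a possible pskb_expand_head() allocation) and instead keeps the descriptor in the queue entry's private data, pushing descriptor and frame into the on-chip beacon window as two consecutive register writes; .data_size = 0 then records that beacon entries need no DMA buffer at all. The write ordering, reduced to a sketch (demo_* names are hypothetical; only the offset arithmetic mirrors the hunk):

struct demo_beacon {
	const void *desc;	/* driver-built TX descriptor */
	unsigned int desc_len;
	const void *frame;	/* the 802.11 beacon frame itself */
	unsigned int frame_len;
};

void demo_multiwrite(unsigned int offset, const void *data,
		     unsigned int len);

/* Descriptor first, frame immediately after it, the same layout the
 * two rt2x00pci_register_multiwrite() calls above produce. */
static void demo_write_beacon(unsigned int beacon_base,
			      const struct demo_beacon *bcn)
{
	demo_multiwrite(beacon_base, bcn->desc, bcn->desc_len);
	demo_multiwrite(beacon_base + bcn->desc_len,
			bcn->frame, bcn->frame_len);
}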
*/
- if (tty->ops->write == NULL)
+ if (tty->ops->write == NULL || tty->ops->set_termios == NULL)
return -EOPNOTSUPP;
/*
diff --git a/drivers/net/wireless/wavelan.c b/drivers/net/wireless/wavelan.c
index 03384a43186..49ae9700395 100644
--- a/drivers/net/wireless/wavelan.c
+++ b/drivers/net/wireless/wavelan.c
@@ -908,9 +908,9 @@ static void wv_psa_show(psa_t * p)
p->psa_call_code[3], p->psa_call_code[4], p->psa_call_code[5],
p->psa_call_code[6], p->psa_call_code[7]);
#ifdef DEBUG_SHOW_UNUSED
- printk(KERN_DEBUG "psa_reserved[]: %02X:%02X:%02X:%02X\n",
+ printk(KERN_DEBUG "psa_reserved[]: %02X:%02X\n",
p->psa_reserved[0],
- p->psa_reserved[1], p->psa_reserved[2], p->psa_reserved[3]);
+ p->psa_reserved[1]);
#endif /* DEBUG_SHOW_UNUSED */
printk(KERN_DEBUG "psa_conf_status: %d, ", p->psa_conf_status);
printk("psa_crc: 0x%02x%02x, ", p->psa_crc[0], p->psa_crc[1]);
diff --git a/drivers/net/wireless/wavelan_cs.c b/drivers/net/wireless/wavelan_cs.c
index baf74015751..b584c0ecc62 100644
--- a/drivers/net/wireless/wavelan_cs.c
+++ b/drivers/net/wireless/wavelan_cs.c
@@ -1074,11 +1074,9 @@ wv_psa_show(psa_t * p)
p->psa_call_code[6], p->psa_call_code[7]);
#ifdef DEBUG_SHOW_UNUSED
- printk(KERN_DEBUG "psa_reserved[]: %02X:%02X:%02X:%02X\n",
+ printk(KERN_DEBUG "psa_reserved[]: %02X:%02X\n",
p->psa_reserved[0],
- p->psa_reserved[1],
- p->psa_reserved[2],
- p->psa_reserved[3]);
+ p->psa_reserved[1]);
#endif /* DEBUG_SHOW_UNUSED */
printk(KERN_DEBUG "psa_conf_status: %d, ", p->psa_conf_status);
printk("psa_crc: 0x%02x%02x, ", p->psa_crc[0], p->psa_crc[1]);
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index 5316074f39f..12e24f04ddd 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -889,9 +889,13 @@ static void tx_urb_complete(struct urb *urb)
}
free_urb:
skb = (struct sk_buff *)urb->context;
- zd_mac_tx_to_dev(skb, urb->status);
+ /*
+ * grab 'usb' pointer before handing off the skb (since
+ * it might be freed by zd_mac_tx_to_dev or mac80211)
+ */
cb = (struct zd_tx_skb_control_block *)skb->cb;
usb = &zd_hw_mac(cb->hw)->chip.usb;
+ zd_mac_tx_to_dev(skb, urb->status);
free_tx_urb(usb, urb);
tx_dec_submitted_urbs(usb);
return;
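The zd1211rw hunk is a textbook use-after-free fix: zd_mac_tx_to_dev() hands the skb to mac80211, which may free it, so the completion handler must pull everything it still needs out of skb->cb before making that call. The ordering rule, reduced to a runnable sketch (plain C; struct packet stands in for the skb and hand_off() for zd_mac_tx_to_dev()):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct packet {
	char owner[16];		/* stand-in for driver data in skb->cb */
};

/* Consumes the packet: after this returns, pkt must not be touched. */
static void hand_off(struct packet *pkt)
{
	free(pkt);
}

int main(void)
{
	struct packet *pkt = malloc(sizeof(*pkt));
	char owner[16];

	if (!pkt)
		return 1;
	strcpy(pkt->owner, "usb0");

	/* Copy out what we still need BEFORE ownership transfers;
	 * reading pkt->owner after hand_off() is a use-after-free. */
	strcpy(owner, pkt->owner);
	hand_off(pkt);

	printf("completing on %s\n", owner);
	return 0;
}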