Diffstat (limited to 'drivers/net/skge.c')
-rw-r--r-- drivers/net/skge.c | 206
1 files changed, 98 insertions(+), 108 deletions(-)
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index b3d6fa3d6df..9142d91355b 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -43,7 +43,7 @@
 #include "skge.h"
 
 #define DRV_NAME		"skge"
-#define DRV_VERSION		"1.6"
+#define DRV_VERSION		"1.8"
 #define PFX			DRV_NAME " "
 
 #define DEFAULT_TX_RING_SIZE	128
@@ -91,7 +91,7 @@ MODULE_DEVICE_TABLE(pci, skge_id_table);
 static int skge_up(struct net_device *dev);
 static int skge_down(struct net_device *dev);
 static void skge_phy_reset(struct skge_port *skge);
-static void skge_tx_clean(struct skge_port *skge);
+static void skge_tx_clean(struct net_device *dev);
 static int xm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val);
 static int gm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val);
 static void genesis_get_stats(struct skge_port *skge, u64 *data);
@@ -105,6 +105,7 @@ static const int txqaddr[] = { Q_XA1, Q_XA2 };
 static const int rxqaddr[] = { Q_R1, Q_R2 };
 static const u32 rxirqmask[] = { IS_R1_F, IS_R2_F };
 static const u32 txirqmask[] = { IS_XA1_F, IS_XA2_F };
+static const u32 irqmask[] = { IS_R1_F|IS_XA1_F, IS_R2_F|IS_XA2_F };
 
 static int skge_get_regs_len(struct net_device *dev)
 {
@@ -690,7 +691,7 @@ static int skge_phys_id(struct net_device *dev, u32 data)
 	return 0;
 }
 
-static struct ethtool_ops skge_ethtool_ops = {
+static const struct ethtool_ops skge_ethtool_ops = {
 	.get_settings	= skge_get_settings,
 	.set_settings	= skge_set_settings,
 	.get_drvinfo	= skge_get_drvinfo,
@@ -818,8 +819,9 @@ static void skge_rx_clean(struct skge_port *skge)
 /* Allocate buffers for receive ring
  * For receive: to_clean is next received frame.
  */
-static int skge_rx_fill(struct skge_port *skge)
+static int skge_rx_fill(struct net_device *dev)
 {
+	struct skge_port *skge = netdev_priv(dev);
 	struct skge_ring *ring = &skge->rx_ring;
 	struct skge_element *e;
 
@@ -827,7 +829,8 @@ static int skge_rx_fill(struct skge_port *skge)
 	do {
 		struct sk_buff *skb;
 
-		skb = alloc_skb(skge->rx_buf_size + NET_IP_ALIGN, GFP_KERNEL);
+		skb = __netdev_alloc_skb(dev, skge->rx_buf_size + NET_IP_ALIGN,
+					 GFP_KERNEL);
 		if (!skb)
 			return -ENOMEM;
 
@@ -2178,7 +2181,7 @@ static int skge_up(struct net_device *dev)
 	if (err)
 		goto free_pci_mem;
 
-	err = skge_rx_fill(skge);
+	err = skge_rx_fill(dev);
 	if (err)
 		goto free_rx_ring;
 
@@ -2281,7 +2284,7 @@ static int skge_down(struct net_device *dev)
 	skge_led(skge, LED_MODE_OFF);
 
 	netif_poll_disable(dev);
-	skge_tx_clean(skge);
+	skge_tx_clean(dev);
 	skge_rx_clean(skge);
 
 	kfree(skge->rx_ring.start);
@@ -2306,25 +2309,12 @@ static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev)
 	int i;
 	u32 control, len;
 	u64 map;
-	unsigned long flags;
 
 	if (skb_padto(skb, ETH_ZLEN))
 		return NETDEV_TX_OK;
 
-	if (!spin_trylock_irqsave(&skge->tx_lock, flags))
-		/* Collision - tell upper layer to requeue */
-		return NETDEV_TX_LOCKED;
-
-	if (unlikely(skge_avail(&skge->tx_ring) < skb_shinfo(skb)->nr_frags + 1)) {
-		if (!netif_queue_stopped(dev)) {
-			netif_stop_queue(dev);
-
-			printk(KERN_WARNING PFX "%s: ring full when queue awake!\n",
-			       dev->name);
-		}
-		spin_unlock_irqrestore(&skge->tx_lock, flags);
+	if (unlikely(skge_avail(&skge->tx_ring) < skb_shinfo(skb)->nr_frags + 1))
 		return NETDEV_TX_BUSY;
-	}
 
 	e = skge->tx_ring.to_use;
 	td = e->desc;
@@ -2399,8 +2389,6 @@ static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev)
 		netif_stop_queue(dev);
 	}
 
-	spin_unlock_irqrestore(&skge->tx_lock, flags);
-
 	dev->trans_start = jiffies;
 
 	return NETDEV_TX_OK;
@@ -2430,18 +2418,18 @@ static void skge_tx_free(struct skge_port *skge, struct skge_element *e,
 			printk(KERN_DEBUG PFX "%s: tx done slot %td\n",
 			       skge->netdev->name, e - skge->tx_ring.start);
 
-		dev_kfree_skb_any(e->skb);
+		dev_kfree_skb(e->skb);
 	}
 	e->skb = NULL;
 }
 
 /* Free all buffers in transmit ring */
-static void skge_tx_clean(struct skge_port *skge)
+static void skge_tx_clean(struct net_device *dev)
 {
+	struct skge_port *skge = netdev_priv(dev);
 	struct skge_element *e;
-	unsigned long flags;
 
-	spin_lock_irqsave(&skge->tx_lock, flags);
+	netif_tx_lock_bh(dev);
 	for (e = skge->tx_ring.to_clean; e != skge->tx_ring.to_use; e = e->next) {
 		struct skge_tx_desc *td = e->desc;
 		skge_tx_free(skge, e, td->control);
@@ -2449,8 +2437,8 @@ static void skge_tx_clean(struct skge_port *skge)
 	}
 	skge->tx_ring.to_clean = e;
 
-	netif_wake_queue(skge->netdev);
-	spin_unlock_irqrestore(&skge->tx_lock, flags);
+	netif_wake_queue(dev);
+	netif_tx_unlock_bh(dev);
 }
 
 static void skge_tx_timeout(struct net_device *dev)
@@ -2461,7 +2449,7 @@
 		printk(KERN_DEBUG PFX "%s: tx timeout\n", dev->name);
 
 	skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_STOP);
-	skge_tx_clean(skge);
+	skge_tx_clean(dev);
 }
 
 static int skge_change_mtu(struct net_device *dev, int new_mtu)
@@ -2584,16 +2572,17 @@ static inline int bad_phy_status(const struct skge_hw *hw, u32 status)
 /* Get receive buffer from descriptor.
  * Handles copy of small buffers and reallocation failures
  */
-static inline struct sk_buff *skge_rx_get(struct skge_port *skge,
-					  struct skge_element *e,
-					  u32 control, u32 status, u16 csum)
+static struct sk_buff *skge_rx_get(struct net_device *dev,
+				   struct skge_element *e,
+				   u32 control, u32 status, u16 csum)
 {
+	struct skge_port *skge = netdev_priv(dev);
 	struct sk_buff *skb;
 	u16 len = control & BMU_BBC;
 
 	if (unlikely(netif_msg_rx_status(skge)))
 		printk(KERN_DEBUG PFX "%s: rx slot %td status 0x%x len %d\n",
-		       skge->netdev->name, e - skge->rx_ring.start,
+		       dev->name, e - skge->rx_ring.start,
 		       status, len);
 
 	if (len > skge->rx_buf_size)
@@ -2609,7 +2598,7 @@ static inline struct sk_buff *skge_rx_get(struct skge_port *skge,
 		goto error;
 
 	if (len < RX_COPY_THRESHOLD) {
-		skb = alloc_skb(len + 2, GFP_ATOMIC);
+		skb = netdev_alloc_skb(dev, len + 2);
 		if (!skb)
 			goto resubmit;
 
@@ -2624,7 +2613,7 @@ static inline struct sk_buff *skge_rx_get(struct skge_port *skge,
 		skge_rx_reuse(e, skge->rx_buf_size);
 	} else {
 		struct sk_buff *nskb;
 
-		nskb = alloc_skb(skge->rx_buf_size + NET_IP_ALIGN, GFP_ATOMIC);
+		nskb = netdev_alloc_skb(dev, skge->rx_buf_size + NET_IP_ALIGN);
 		if (!nskb)
 			goto resubmit;
 
@@ -2639,20 +2628,19 @@ static inline struct sk_buff *skge_rx_get(struct skge_port *skge,
 	}
 
 	skb_put(skb, len);
-	skb->dev = skge->netdev;
 	if (skge->rx_csum) {
 		skb->csum = csum;
 		skb->ip_summed = CHECKSUM_COMPLETE;
 	}
 
-	skb->protocol = eth_type_trans(skb, skge->netdev);
+	skb->protocol = eth_type_trans(skb, dev);
 
 	return skb;
 
 error:
 	if (netif_msg_rx_err(skge))
 		printk(KERN_DEBUG PFX "%s: rx err, slot %td control 0x%x status 0x%x\n",
-		       skge->netdev->name, e - skge->rx_ring.start,
+		       dev->name, e - skge->rx_ring.start,
 		       control, status);
 
 	if (skge->hw->chip_id == CHIP_ID_GENESIS) {
@@ -2677,15 +2665,15 @@ resubmit:
 }
 
 /* Free all buffers in Tx ring which are no longer owned by device */
-static void skge_txirq(struct net_device *dev)
+static void skge_tx_done(struct net_device *dev)
 {
 	struct skge_port *skge = netdev_priv(dev);
 	struct skge_ring *ring = &skge->tx_ring;
 	struct skge_element *e;
 
-	rmb();
+	skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F);
 
-	spin_lock(&skge->tx_lock);
+	netif_tx_lock(dev);
 	for (e = ring->to_clean; e != ring->to_use; e = e->next) {
 		struct skge_tx_desc *td = e->desc;
 
@@ -2696,11 +2684,10 @@ static void skge_txirq(struct net_device *dev)
 	}
 	skge->tx_ring.to_clean = e;
 
-	if (netif_queue_stopped(skge->netdev)
-	    && skge_avail(&skge->tx_ring) > TX_LOW_WATER)
-		netif_wake_queue(skge->netdev);
+	if (skge_avail(&skge->tx_ring) > TX_LOW_WATER)
+		netif_wake_queue(dev);
 
-	spin_unlock(&skge->tx_lock);
+	netif_tx_unlock(dev);
 }
 
 static int skge_poll(struct net_device *dev, int *budget)
@@ -2712,6 +2699,10 @@ static int skge_poll(struct net_device *dev, int *budget)
 	int to_do = min(dev->quota, *budget);
 	int work_done = 0;
 
+	skge_tx_done(dev);
+
+	skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F);
+
 	for (e = ring->to_clean; prefetch(e->next), work_done < to_do; e = e->next) {
 		struct skge_rx_desc *rd = e->desc;
 		struct sk_buff *skb;
@@ -2722,7 +2713,7 @@ static int skge_poll(struct net_device *dev, int *budget)
 		if (control & BMU_OWN)
 			break;
 
-		skb = skge_rx_get(skge, e, control, rd->status, rd->csum2);
+		skb = skge_rx_get(dev, e, control, rd->status, rd->csum2);
 		if (likely(skb)) {
 			dev->last_rx = jiffies;
 			netif_receive_skb(skb);
@@ -2742,12 +2733,11 @@ static int skge_poll(struct net_device *dev, int *budget)
 	if (work_done >= to_do)
 		return 1; /* not done */
 
-	netif_rx_complete(dev);
-
 	spin_lock_irq(&hw->hw_lock);
-	hw->intr_mask |= rxirqmask[skge->port];
+	__netif_rx_complete(dev);
+	hw->intr_mask |= irqmask[skge->port];
 	skge_write32(hw, B0_IMSK, hw->intr_mask);
-	mmiowb();
+	skge_read32(hw, B0_IMSK);
 	spin_unlock_irq(&hw->hw_lock);
 
 	return 0;
@@ -2881,6 +2871,7 @@ static void skge_extirq(void *arg)
 	spin_lock_irq(&hw->hw_lock);
 	hw->intr_mask |= IS_EXT_REG;
 	skge_write32(hw, B0_IMSK, hw->intr_mask);
+	skge_read32(hw, B0_IMSK);
 	spin_unlock_irq(&hw->hw_lock);
 }
 
@@ -2888,27 +2879,23 @@ static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs)
 {
 	struct skge_hw *hw = dev_id;
 	u32 status;
+	int handled = 0;
 
+	spin_lock(&hw->hw_lock);
 	/* Reading this register masks IRQ */
 	status = skge_read32(hw, B0_SP_ISRC);
-	if (status == 0)
-		return IRQ_NONE;
+	if (status == 0 || status == ~0)
+		goto out;
 
-	spin_lock(&hw->hw_lock);
+	handled = 1;
 	status &= hw->intr_mask;
 	if (status & IS_EXT_REG) {
 		hw->intr_mask &= ~IS_EXT_REG;
 		schedule_work(&hw->phy_work);
 	}
 
-	if (status & IS_XA1_F) {
-		skge_write8(hw, Q_ADDR(Q_XA1, Q_CSR), CSR_IRQ_CL_F);
-		skge_txirq(hw->dev[0]);
-	}
-
-	if (status & IS_R1_F) {
-		skge_write8(hw, Q_ADDR(Q_R1, Q_CSR), CSR_IRQ_CL_F);
-		hw->intr_mask &= ~IS_R1_F;
+	if (status & (IS_XA1_F|IS_R1_F)) {
+		hw->intr_mask &= ~(IS_XA1_F|IS_R1_F);
 		netif_rx_schedule(hw->dev[0]);
 	}
 
@@ -2927,14 +2914,8 @@ static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs)
 		skge_mac_intr(hw, 0);
 
 	if (hw->dev[1]) {
-		if (status & IS_XA2_F) {
-			skge_write8(hw, Q_ADDR(Q_XA2, Q_CSR), CSR_IRQ_CL_F);
-			skge_txirq(hw->dev[1]);
-		}
-
-		if (status & IS_R2_F) {
-			skge_write8(hw, Q_ADDR(Q_R2, Q_CSR), CSR_IRQ_CL_F);
-			hw->intr_mask &= ~IS_R2_F;
+		if (status & (IS_XA2_F|IS_R2_F)) {
+			hw->intr_mask &= ~(IS_XA2_F|IS_R2_F);
 			netif_rx_schedule(hw->dev[1]);
 		}
 
@@ -2955,9 +2936,11 @@ static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs)
 		skge_error_irq(hw);
 
 	skge_write32(hw, B0_IMSK, hw->intr_mask);
+	skge_read32(hw, B0_IMSK);
+out:
 	spin_unlock(&hw->hw_lock);
 
-	return IRQ_HANDLED;
+	return IRQ_RETVAL(handled);
 }
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
@@ -3106,7 +3089,6 @@ static int skge_reset(struct skge_hw *hw)
 	else
 		hw->ram_size = t8 * 4096;
 
-	spin_lock_init(&hw->hw_lock);
 	hw->intr_mask = IS_HW_ERR | IS_EXT_REG | IS_PORT_1;
 	if (hw->ports > 1)
 		hw->intr_mask |= IS_PORT_2;
@@ -3222,7 +3204,7 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port,
 	dev->poll_controller = skge_netpoll;
 #endif
 	dev->irq = hw->pdev->irq;
-	dev->features = NETIF_F_LLTX;
+
 	if (highmem)
 		dev->features |= NETIF_F_HIGHDMA;
 
@@ -3244,8 +3226,6 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port,
 	skge->port = port;
 
-	spin_lock_init(&skge->tx_lock);
-
 	if (hw->chip_id != CHIP_ID_GENESIS) {
 		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
 		skge->rx_csum = 1;
 	}
@@ -3332,6 +3312,7 @@ static int __devinit skge_probe(struct pci_dev *pdev,
 	hw->pdev = pdev;
 	mutex_init(&hw->phy_mutex);
 	INIT_WORK(&hw->phy_work, skge_extirq, hw);
+	spin_lock_init(&hw->hw_lock);
 
 	hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);
 	if (!hw->regs) {
@@ -3340,23 +3321,16 @@ static int __devinit skge_probe(struct pci_dev *pdev,
 		goto err_out_free_hw;
 	}
 
-	err = request_irq(pdev->irq, skge_intr, IRQF_SHARED, DRV_NAME, hw);
-	if (err) {
-		printk(KERN_ERR PFX "%s: cannot assign irq %d\n",
-		       pci_name(pdev), pdev->irq);
-		goto err_out_iounmap;
-	}
-	pci_set_drvdata(pdev, hw);
-
 	err = skge_reset(hw);
 	if (err)
-		goto err_out_free_irq;
+		goto err_out_iounmap;
 
 	printk(KERN_INFO PFX DRV_VERSION " addr 0x%llx irq %d chip %s rev %d\n",
 	       (unsigned long long)pci_resource_start(pdev, 0), pdev->irq,
 	       skge_board_name(hw), hw->chip_rev);
 
-	if ((dev = skge_devinit(hw, 0, using_dac)) == NULL)
+	dev = skge_devinit(hw, 0, using_dac);
+	if (!dev)
 		goto err_out_led_off;
 
 	if (!is_valid_ether_addr(dev->dev_addr)) {
@@ -3366,7 +3340,6 @@ static int __devinit skge_probe(struct pci_dev *pdev,
 		goto err_out_free_netdev;
 	}
 
-
 	err = register_netdev(dev);
 	if (err) {
 		printk(KERN_ERR PFX "%s: cannot register net device\n",
@@ -3374,6 +3347,12 @@
 		goto err_out_free_netdev;
 	}
 
+	err = request_irq(pdev->irq, skge_intr, IRQF_SHARED, dev->name, hw);
+	if (err) {
+		printk(KERN_ERR PFX "%s: cannot assign irq %d\n",
+		       dev->name, pdev->irq);
+		goto err_out_unregister;
+	}
 	skge_show_addr(dev);
 
 	if (hw->ports > 1 && (dev1 = skge_devinit(hw, 1, using_dac))) {
@@ -3386,15 +3365,16 @@
 			free_netdev(dev1);
 		}
 	}
+	pci_set_drvdata(pdev, hw);
 
 	return 0;
 
+err_out_unregister:
+	unregister_netdev(dev);
 err_out_free_netdev:
 	free_netdev(dev);
 err_out_led_off:
 	skge_write16(hw, B0_LED, LED_STAT_OFF);
-err_out_free_irq:
-	free_irq(pdev->irq, hw);
 err_out_iounmap:
 	iounmap(hw->regs);
 err_out_free_hw:
@@ -3424,6 +3404,7 @@ static void __devexit skge_remove(struct pci_dev *pdev)
 	spin_lock_irq(&hw->hw_lock);
 	hw->intr_mask = 0;
 	skge_write32(hw, B0_IMSK, 0);
+	skge_read32(hw, B0_IMSK);
 	spin_unlock_irq(&hw->hw_lock);
 
 	skge_write16(hw, B0_LED, LED_STAT_OFF);
@@ -3449,26 +3430,25 @@ static int skge_suspend(struct pci_dev *pdev, pm_message_t state)
 	struct skge_hw *hw = pci_get_drvdata(pdev);
 	int i, wol = 0;
 
-	for (i = 0; i < 2; i++) {
+	pci_save_state(pdev);
+	for (i = 0; i < hw->ports; i++) {
 		struct net_device *dev = hw->dev[i];
-		if (dev) {
+		if (netif_running(dev)) {
 			struct skge_port *skge = netdev_priv(dev);
-			if (netif_running(dev)) {
-				netif_carrier_off(dev);
-				if (skge->wol)
-					netif_stop_queue(dev);
-				else
-					skge_down(dev);
-			}
-			netif_device_detach(dev);
+
+			netif_carrier_off(dev);
+			if (skge->wol)
+				netif_stop_queue(dev);
+			else
+				skge_down(dev);
 			wol |= skge->wol;
 		}
+		netif_device_detach(dev);
 	}
 
-	pci_save_state(pdev);
+
 	skge_write32(hw, B0_IMSK, 0);
 	pci_enable_wake(pdev, pci_choose_state(pdev, state), wol);
-	pci_disable_device(pdev);
 	pci_set_power_state(pdev, pci_choose_state(pdev, state));
 
 	return 0;
@@ -3477,23 +3457,33 @@ static int skge_resume(struct pci_dev *pdev)
 {
 	struct skge_hw *hw = pci_get_drvdata(pdev);
-	int i;
+	int i, err;
 
 	pci_set_power_state(pdev, PCI_D0);
 	pci_restore_state(pdev);
 	pci_enable_wake(pdev, PCI_D0, 0);
 
-	skge_reset(hw);
+	err = skge_reset(hw);
+	if (err)
+		goto out;
 
-	for (i = 0; i < 2; i++) {
+	for (i = 0; i < hw->ports; i++) {
 		struct net_device *dev = hw->dev[i];
-		if (dev) {
-			netif_device_attach(dev);
-			if (netif_running(dev) && skge_up(dev))
+
+		netif_device_attach(dev);
+		if (netif_running(dev)) {
+			err = skge_up(dev);
+
+			if (err) {
+				printk(KERN_ERR PFX "%s: could not up: %d\n",
+				       dev->name, err);
 				dev_close(dev);
+				goto out;
+			}
		}
 	}
 
-	return 0;
+out:
+	return err;
 }
 
 #endif
@@ -3510,7 +3500,7 @@ static struct pci_driver skge_driver = {
 
 static int __init skge_init_module(void)
 {
-	return pci_module_init(&skge_driver);
+	return pci_register_driver(&skge_driver);
 }
 
 static void __exit skge_cleanup_module(void)