author | David S. Miller <davem@davemloft.net> | 2009-09-24 15:13:11 -0700
---|---|---
committer | David S. Miller <davem@davemloft.net> | 2009-09-24 15:13:11 -0700
commit | 8b3f6af86378d0a10ca2f1ded1da124aef13b62c (patch) |
tree | de6ca90295730343c495be8d98be8efa322140ef /drivers/net |
parent | 139d6065c83071d5f66cd013a274a43699f8e2c1 (diff) |
parent | 94e0fb086fc5663c38bbc0fe86d698be8314f82f (diff) |
Merge branch 'master' of /home/davem/src/GIT/linux-2.6/
Conflicts:
	drivers/staging/Kconfig
	drivers/staging/Makefile
	drivers/staging/cpc-usb/TODO
	drivers/staging/cpc-usb/cpc-usb_drv.c
	drivers/staging/cpc-usb/cpc.h
	drivers/staging/cpc-usb/cpc_int.h
	drivers/staging/cpc-usb/cpcusb.h
Diffstat (limited to 'drivers/net')
43 files changed, 2386 insertions, 158 deletions
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index f9fbd52b799..2bea67c134f 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -209,7 +209,7 @@ config MII
 
 config MACB
 	tristate "Atmel MACB support"
-	depends on AVR32 || ARCH_AT91SAM9260 || ARCH_AT91SAM9263 || ARCH_AT91SAM9G20 || ARCH_AT91CAP9
+	depends on AVR32 || ARCH_AT91SAM9260 || ARCH_AT91SAM9263 || ARCH_AT91SAM9G20 || ARCH_AT91SAM9G45 || ARCH_AT91CAP9
 	select PHYLIB
 	help
 	  The Atmel MACB ethernet interface is found on many AT32 and AT91
@@ -1934,6 +1934,15 @@ config XILINX_EMACLITE
 	help
 	  This driver supports the 10/100 Ethernet Lite from Xilinx.
 
+config BCM63XX_ENET
+	tristate "Broadcom 63xx internal mac support"
+	depends on BCM63XX
+	select MII
+	select PHYLIB
+	help
+	  This driver supports the ethernet MACs in the Broadcom 63xx
+	  MIPS chipset family (BCM63XX).
+
 source "drivers/net/fs_enet/Kconfig"
 
 endif # NET_ETHERNET
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 99ae6d7fe6a..ae8cd30f13d 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -137,6 +137,7 @@ obj-$(CONFIG_B44) += b44.o
 obj-$(CONFIG_FORCEDETH) += forcedeth.o
 obj-$(CONFIG_NE_H8300) += ne-h8300.o 8390.o
 obj-$(CONFIG_AX88796) += ax88796.o
+obj-$(CONFIG_BCM63XX_ENET) += bcm63xx_enet.o
 
 obj-$(CONFIG_TSI108_ETH) += tsi108_eth.o
 obj-$(CONFIG_MV643XX_ETH) += mv643xx_eth.o
diff --git a/drivers/net/arcnet/arc-rawmode.c b/drivers/net/arcnet/arc-rawmode.c
index 646dfc5f50c..8ea9c7545c1 100644
--- a/drivers/net/arcnet/arc-rawmode.c
+++ b/drivers/net/arcnet/arc-rawmode.c
@@ -123,7 +123,6 @@ static void rx(struct net_device *dev, int bufnum,
 
 	BUGLVL(D_SKB) arcnet_dump_skb(dev, skb, "rx");
 
 	skb->protocol = cpu_to_be16(ETH_P_ARCNET);
-;
 	netif_rx(skb);
 }
diff --git a/drivers/net/arcnet/capmode.c b/drivers/net/arcnet/capmode.c
index 083e21094b2..66bcbbb6bab 100644
--- a/drivers/net/arcnet/capmode.c
+++ b/drivers/net/arcnet/capmode.c
@@ -149,7 +149,6 @@ static void rx(struct net_device *dev, int bufnum,
 
 	BUGLVL(D_SKB) arcnet_dump_skb(dev, skb, "rx");
 
 	skb->protocol = cpu_to_be16(ETH_P_ARCNET);
-;
 	netif_rx(skb);
 }
diff --git a/drivers/net/bcm63xx_enet.c b/drivers/net/bcm63xx_enet.c
new file mode 100644
index 00000000000..09d270913c5
--- /dev/null
+++ b/drivers/net/bcm63xx_enet.c
@@ -0,0 +1,1971 @@
+/*
+ * Driver for BCM963xx builtin Ethernet mac
+ *
+ * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+#include <linux/ethtool.h>
+#include <linux/crc32.h>
+#include <linux/err.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/if_vlan.h>
+
+#include <bcm63xx_dev_enet.h>
+#include "bcm63xx_enet.h"
+
+static char bcm_enet_driver_name[] = "bcm63xx_enet";
+static char bcm_enet_driver_version[] = "1.0";
+
+static int copybreak __read_mostly = 128;
+module_param(copybreak, int, 0);
+MODULE_PARM_DESC(copybreak, "Receive copy threshold");
+
+/* io memory shared between all devices */
+static void __iomem *bcm_enet_shared_base;
+
+/*
+ * io helpers to access mac registers
+ */
+static inline u32 enet_readl(struct bcm_enet_priv *priv, u32 off)
+{
+	return bcm_readl(priv->base + off);
+}
+
+static inline void enet_writel(struct bcm_enet_priv *priv,
+			       u32 val, u32 off)
+{
+	bcm_writel(val, priv->base + off);
+}
+
+/*
+ * io helpers to access shared registers
+ */
+static inline u32 enet_dma_readl(struct bcm_enet_priv *priv, u32 off)
+{
+	return bcm_readl(bcm_enet_shared_base + off);
+}
+
+static inline void enet_dma_writel(struct bcm_enet_priv *priv,
+				   u32 val, u32 off)
+{
+	bcm_writel(val, bcm_enet_shared_base + off);
+}
+
+/*
+ * write given data into mii register and wait for transfer to end
+ * with timeout (average measured transfer time is 25us)
+ */
+static int do_mdio_op(struct bcm_enet_priv *priv, unsigned int data)
+{
+	int limit;
+
+	/* make sure mii interrupt status is cleared */
+	enet_writel(priv, ENET_IR_MII, ENET_IR_REG);
+
+	enet_writel(priv, data, ENET_MIIDATA_REG);
+	wmb();
+
+	/* busy wait on mii interrupt bit, with timeout */
+	limit = 1000;
+	do {
+		if (enet_readl(priv, ENET_IR_REG) & ENET_IR_MII)
+			break;
+		udelay(1);
+	} while (limit-- >= 0);
+
+	return (limit < 0) ? 1 : 0;
+}
+
+/*
+ * MII internal read callback
+ */
+static int bcm_enet_mdio_read(struct bcm_enet_priv *priv, int mii_id,
+			      int regnum)
+{
+	u32 tmp, val;
+
+	tmp = regnum << ENET_MIIDATA_REG_SHIFT;
+	tmp |= 0x2 << ENET_MIIDATA_TA_SHIFT;
+	tmp |= mii_id << ENET_MIIDATA_PHYID_SHIFT;
+	tmp |= ENET_MIIDATA_OP_READ_MASK;
+
+	if (do_mdio_op(priv, tmp))
+		return -1;
+
+	val = enet_readl(priv, ENET_MIIDATA_REG);
+	val &= 0xffff;
+	return val;
+}
+
+/*
+ * MII internal write callback
+ */
+static int bcm_enet_mdio_write(struct bcm_enet_priv *priv, int mii_id,
+			       int regnum, u16 value)
+{
+	u32 tmp;
+
+	tmp = (value & 0xffff) << ENET_MIIDATA_DATA_SHIFT;
+	tmp |= 0x2 << ENET_MIIDATA_TA_SHIFT;
+	tmp |= regnum << ENET_MIIDATA_REG_SHIFT;
+	tmp |= mii_id << ENET_MIIDATA_PHYID_SHIFT;
+	tmp |= ENET_MIIDATA_OP_WRITE_MASK;
+
+	(void)do_mdio_op(priv, tmp);
+	return 0;
+}
+
+/*
+ * MII read callback from phylib
+ */
+static int bcm_enet_mdio_read_phylib(struct mii_bus *bus, int mii_id,
+				     int regnum)
+{
+	return bcm_enet_mdio_read(bus->priv, mii_id, regnum);
+}
+
+/*
+ * MII write callback from phylib
+ */
+static int bcm_enet_mdio_write_phylib(struct mii_bus *bus, int mii_id,
+				      int regnum, u16 value)
+{
+	return bcm_enet_mdio_write(bus->priv, mii_id, regnum, value);
+}
+
+/*
+ * MII read callback from mii core
+ */
+static int bcm_enet_mdio_read_mii(struct net_device *dev, int mii_id,
+				  int regnum)
+{
+	return bcm_enet_mdio_read(netdev_priv(dev), mii_id, regnum);
+}
+
+/*
+ * MII write callback from mii core
+ */
+static void bcm_enet_mdio_write_mii(struct net_device *dev, int mii_id,
+				    int regnum, int value)
+{
+	bcm_enet_mdio_write(netdev_priv(dev), mii_id, regnum, value);
+}
+
+/*
+ * refill rx queue
+ */
+static int bcm_enet_refill_rx(struct net_device *dev)
+{
+	struct bcm_enet_priv *priv;
+
+	priv = netdev_priv(dev);
+
+	while (priv->rx_desc_count < priv->rx_ring_size) {
+		struct bcm_enet_desc *desc;
+		struct sk_buff *skb;
+		dma_addr_t p;
+		int desc_idx;
+		u32 len_stat;
+
+		desc_idx = priv->rx_dirty_desc;
+		desc = &priv->rx_desc_cpu[desc_idx];
+
+		if (!priv->rx_skb[desc_idx]) {
+			skb = netdev_alloc_skb(dev, priv->rx_skb_size);
+			if (!skb)
+				break;
+			priv->rx_skb[desc_idx] = skb;
+
+			p = dma_map_single(&priv->pdev->dev, skb->data,
+					   priv->rx_skb_size,
+					   DMA_FROM_DEVICE);
+			desc->address = p;
+		}
+
+		len_stat = priv->rx_skb_size << DMADESC_LENGTH_SHIFT;
+		len_stat |= DMADESC_OWNER_MASK;
+		if (priv->rx_dirty_desc == priv->rx_ring_size - 1) {
+			len_stat |= DMADESC_WRAP_MASK;
+			priv->rx_dirty_desc = 0;
+		} else {
+			priv->rx_dirty_desc++;
+		}
+		wmb();
+		desc->len_stat = len_stat;
+
+		priv->rx_desc_count++;
+
+		/* tell dma engine we allocated one buffer */
+		enet_dma_writel(priv, 1, ENETDMA_BUFALLOC_REG(priv->rx_chan));
+	}
+
+	/* If rx ring is still empty, set a timer to try allocating
+	 * again at a later time. */
+	if (priv->rx_desc_count == 0 && netif_running(dev)) {
+		dev_warn(&priv->pdev->dev, "unable to refill rx ring\n");
+		priv->rx_timeout.expires = jiffies + HZ;
+		add_timer(&priv->rx_timeout);
+	}
+
+	return 0;
+}
+
+/*
+ * timer callback to defer refill rx queue in case we're OOM
+ */
+static void bcm_enet_refill_rx_timer(unsigned long data)
+{
+	struct net_device *dev;
+	struct bcm_enet_priv *priv;
+
+	dev = (struct net_device *)data;
+	priv = netdev_priv(dev);
+
+	spin_lock(&priv->rx_lock);
+	bcm_enet_refill_rx((struct net_device *)data);
+	spin_unlock(&priv->rx_lock);
+}
+
+/*
+ * extract packet from rx queue
+ */
+static int bcm_enet_receive_queue(struct net_device *dev, int budget)
+{
+	struct bcm_enet_priv *priv;
+	struct device *kdev;
+	int processed;
+
+	priv = netdev_priv(dev);
+	kdev = &priv->pdev->dev;
+	processed = 0;
+
+	/* don't scan ring further than number of refilled
+	 * descriptor */
+	if (budget > priv->rx_desc_count)
+		budget = priv->rx_desc_count;
+
+	do {
+		struct bcm_enet_desc *desc;
+		struct sk_buff *skb;
+		int desc_idx;
+		u32 len_stat;
+		unsigned int len;
+
+		desc_idx = priv->rx_curr_desc;
+		desc = &priv->rx_desc_cpu[desc_idx];
+
+		/* make sure we actually read the descriptor status at
+		 * each loop */
+		rmb();
+
+		len_stat = desc->len_stat;
+
+		/* break if dma ownership belongs to hw */
+		if (len_stat & DMADESC_OWNER_MASK)
+			break;
+
+		processed++;
+		priv->rx_curr_desc++;
+		if (priv->rx_curr_desc == priv->rx_ring_size)
+			priv->rx_curr_desc = 0;
+		priv->rx_desc_count--;
+
+		/* if the packet does not have start of packet _and_
+		 * end of packet flag set, then just recycle it */
+		if ((len_stat & DMADESC_ESOP_MASK) != DMADESC_ESOP_MASK) {
+			priv->stats.rx_dropped++;
+			continue;
+		}
+
+		/* recycle packet if it's marked as bad */
+		if (unlikely(len_stat & DMADESC_ERR_MASK)) {
+			priv->stats.rx_errors++;
+
+			if (len_stat & DMADESC_OVSIZE_MASK)
+				priv->stats.rx_length_errors++;
+			if (len_stat & DMADESC_CRC_MASK)
+				priv->stats.rx_crc_errors++;
+			if (len_stat & DMADESC_UNDER_MASK)
+				priv->stats.rx_frame_errors++;
+			if (len_stat & DMADESC_OV_MASK)
+				priv->stats.rx_fifo_errors++;
+			continue;
+		}
+
+		/* valid packet */
+		skb = priv->rx_skb[desc_idx];
+		len = (len_stat & DMADESC_LENGTH_MASK) >> DMADESC_LENGTH_SHIFT;
+		/* don't include FCS */
+		len -= 4;
+
+		if (len < copybreak) {
+			struct sk_buff *nskb;
+
+			nskb = netdev_alloc_skb(dev, len + NET_IP_ALIGN);
+			if (!nskb) {
+				/* forget packet, just rearm desc */
+				priv->stats.rx_dropped++;
+				continue;
+			}
+
+			/* since we're copying the data, we can align
+			 * them properly */
+			skb_reserve(nskb, NET_IP_ALIGN);
+			dma_sync_single_for_cpu(kdev, desc->address,
+						len, DMA_FROM_DEVICE);
+			memcpy(nskb->data, skb->data, len);
+			dma_sync_single_for_device(kdev, desc->address,
+						   len, DMA_FROM_DEVICE);
+			skb = nskb;
+		} else {
+			dma_unmap_single(&priv->pdev->dev, desc->address,
+					 priv->rx_skb_size, DMA_FROM_DEVICE);
+			priv->rx_skb[desc_idx] = NULL;
+		}
+
+		skb_put(skb, len);
+		skb->dev = dev;
+		skb->protocol = eth_type_trans(skb, dev);
+		priv->stats.rx_packets++;
+		priv->stats.rx_bytes += len;
+		dev->last_rx = jiffies;
+		netif_receive_skb(skb);
+
+	} while (--budget > 0);
+
+	if (processed || !priv->rx_desc_count) {
+		bcm_enet_refill_rx(dev);
+
+		/* kick rx dma */
+		enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK,
+				ENETDMA_CHANCFG_REG(priv->rx_chan));
+	}
+
+	return processed;
+}
+
+
+/*
+ * try to or force reclaim of transmitted buffers
+ */
+static int bcm_enet_tx_reclaim(struct net_device *dev, int force)
+{
+	struct bcm_enet_priv *priv;
+	int released;
+
+	priv = netdev_priv(dev);
+	released = 0;
+
+	while (priv->tx_desc_count < priv->tx_ring_size) {
+		struct bcm_enet_desc *desc;
+		struct sk_buff *skb;
+
+		/* We run in a bh and fight against start_xmit, which
+		 * is called with bh disabled */
+		spin_lock(&priv->tx_lock);
+
+		desc = &priv->tx_desc_cpu[priv->tx_dirty_desc];
+
+		if (!force && (desc->len_stat & DMADESC_OWNER_MASK)) {
+			spin_unlock(&priv->tx_lock);
+			break;
+		}
+
+		/* ensure other field of the descriptor were not read
+		 * before we checked ownership */
+		rmb();
+
+		skb = priv->tx_skb[priv->tx_dirty_desc];
+		priv->tx_skb[priv->tx_dirty_desc] = NULL;
+		dma_unmap_single(&priv->pdev->dev, desc->address, skb->len,
+				 DMA_TO_DEVICE);
+
+		priv->tx_dirty_desc++;
+		if (priv->tx_dirty_desc == priv->tx_ring_size)
+			priv->tx_dirty_desc = 0;
+		priv->tx_desc_count++;
+
+		spin_unlock(&priv->tx_lock);
+
+		if (desc->len_stat & DMADESC_UNDER_MASK)
+			priv->stats.tx_errors++;
+
+		dev_kfree_skb(skb);
+		released++;
+	}
+
+	if (netif_queue_stopped(dev) && released)
+		netif_wake_queue(dev);
+
+	return released;
+}
+
+/*
+ * poll func, called by network core
+ */
+static int bcm_enet_poll(struct napi_struct *napi, int budget)
+{
+	struct bcm_enet_priv *priv;
+	struct net_device *dev;
+	int tx_work_done, rx_work_done;
+
+	priv = container_of(napi, struct bcm_enet_priv, napi);
+	dev = priv->net_dev;
+
+	/* ack interrupts */
+	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
+			ENETDMA_IR_REG(priv->rx_chan));
+	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
+			ENETDMA_IR_REG(priv->tx_chan));
+
+	/* reclaim sent skb */
+	tx_work_done = bcm_enet_tx_reclaim(dev, 0);
+
+	spin_lock(&priv->rx_lock);
+	rx_work_done = bcm_enet_receive_queue(dev, budget);
+	spin_unlock(&priv->rx_lock);
+
+	if (rx_work_done >= budget || tx_work_done > 0) {
+		/* rx/tx queue is not yet empty/clean */
+		return rx_work_done;
+	}
+
+	/* no more packet in rx/tx queue, remove device from poll
+	 * queue */
+	napi_complete(napi);
+
+	/* restore rx/tx interrupt */
+	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
+			ENETDMA_IRMASK_REG(priv->rx_chan));
+	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
+			ENETDMA_IRMASK_REG(priv->tx_chan));
+
+	return rx_work_done;
+}
+
+/*
+ * mac interrupt handler
+ */
+static irqreturn_t bcm_enet_isr_mac(int irq, void *dev_id)
+{
+	struct net_device *dev;
+	struct bcm_enet_priv *priv;
+	u32 stat;
+
+	dev = dev_id;
+	priv = netdev_priv(dev);
+
+	stat = enet_readl(priv, ENET_IR_REG);
+	if (!(stat & ENET_IR_MIB))
+		return IRQ_NONE;
+
+	/* clear & mask interrupt */
+	enet_writel(priv, ENET_IR_MIB, ENET_IR_REG);
+	enet_writel(priv, 0, ENET_IRMASK_REG);
+
+	/* read mib registers in workqueue */
+	schedule_work(&priv->mib_update_task);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * rx/tx dma interrupt handler
+ */
+static irqreturn_t bcm_enet_isr_dma(int irq, void *dev_id)
+{
+	struct net_device *dev;
+	struct bcm_enet_priv *priv;
+
+	dev = dev_id;
+	priv = netdev_priv(dev);
+
+	/* mask rx/tx interrupts */
+	enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan));
+	enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan));
+
+	napi_schedule(&priv->napi);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * tx request callback
+ */
+static int bcm_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct bcm_enet_priv *priv;
+	struct bcm_enet_desc *desc;
+	u32 len_stat;
+	int ret;
+
+	priv = netdev_priv(dev);
+
+	/* lock against tx reclaim */
+	spin_lock(&priv->tx_lock);
+
+	/* make sure the tx hw queue is not full, should not happen
+	 * since we stop queue before it's the case */
+	if (unlikely(!priv->tx_desc_count)) {
+		netif_stop_queue(dev);
+		dev_err(&priv->pdev->dev, "xmit called with no tx desc "
+			"available?\n");
+		ret = NETDEV_TX_BUSY;
+		goto out_unlock;
+	}
+
+	/* point to the next available desc */
+	desc = &priv->tx_desc_cpu[priv->tx_curr_desc];
+	priv->tx_skb[priv->tx_curr_desc] = skb;
+
+	/* fill descriptor */
+	desc->address = dma_map_single(&priv->pdev->dev, skb->data, skb->len,
+				       DMA_TO_DEVICE);
+
+	len_stat = (skb->len << DMADESC_LENGTH_SHIFT) & DMADESC_LENGTH_MASK;
+	len_stat |= DMADESC_ESOP_MASK |
+		DMADESC_APPEND_CRC |
+		DMADESC_OWNER_MASK;
+
+	priv->tx_curr_desc++;
+	if (priv->tx_curr_desc == priv->tx_ring_size) {
+		priv->tx_curr_desc = 0;
+		len_stat |= DMADESC_WRAP_MASK;
+	}
+	priv->tx_desc_count--;
+
+	/* dma might be already polling, make sure we update desc
+	 * fields in correct order */
+	wmb();
+	desc->len_stat = len_stat;
+	wmb();
+
+	/* kick tx dma */
+	enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK,
+			ENETDMA_CHANCFG_REG(priv->tx_chan));
+
+	/* stop queue if no more desc available */
+	if (!priv->tx_desc_count)
+		netif_stop_queue(dev);
+
+	priv->stats.tx_bytes += skb->len;
+	priv->stats.tx_packets++;
+	dev->trans_start = jiffies;
+	ret = NETDEV_TX_OK;
+
+out_unlock:
+	spin_unlock(&priv->tx_lock);
+	return ret;
+}
+
+/*
+ * Change the interface's mac address.
+ */
+static int bcm_enet_set_mac_address(struct net_device *dev, void *p)
+{
+	struct bcm_enet_priv *priv;
+	struct sockaddr *addr = p;
+	u32 val;
+
+	priv = netdev_priv(dev);
+	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
+
+	/* use perfect match register 0 to store my mac address */
+	val = (dev->dev_addr[2] << 24) | (dev->dev_addr[3] << 16) |
+		(dev->dev_addr[4] << 8) | dev->dev_addr[5];
+	enet_writel(priv, val, ENET_PML_REG(0));
+
+	val = (dev->dev_addr[0] << 8 | dev->dev_addr[1]);
+	val |= ENET_PMH_DATAVALID_MASK;
+	enet_writel(priv, val, ENET_PMH_REG(0));
+
+	return 0;
+}
+
+/*
+ * Change rx mode (promiscous/allmulti) and update multicast list
+ */
+static void bcm_enet_set_multicast_list(struct net_device *dev)
+{
+	struct bcm_enet_priv *priv;
+	struct dev_mc_list *mc_list;
+	u32 val;
+	int i;
+
+	priv = netdev_priv(dev);
+
+	val = enet_readl(priv, ENET_RXCFG_REG);
+
+	if (dev->flags & IFF_PROMISC)
+		val |= ENET_RXCFG_PROMISC_MASK;
+	else
+		val &= ~ENET_RXCFG_PROMISC_MASK;
+
+	/* only 3 perfect match registers left, first one is used for
+	 * own mac address */
+	if ((dev->flags & IFF_ALLMULTI) || dev->mc_count > 3)
+		val |= ENET_RXCFG_ALLMCAST_MASK;
+	else
+		val &= ~ENET_RXCFG_ALLMCAST_MASK;
+
+	/* no need to set perfect match registers if we catch all
+	 * multicast */
+	if (val & ENET_RXCFG_ALLMCAST_MASK) {
+		enet_writel(priv, val, ENET_RXCFG_REG);
+		return;
+	}
+
+	for (i = 0, mc_list = dev->mc_list;
+	     (mc_list != NULL) && (i < dev->mc_count) && (i < 3);
+	     i++, mc_list = mc_list->next) {
+		u8 *dmi_addr;
+		u32 tmp;
+
+		/* filter non ethernet address */
+		if (mc_list->dmi_addrlen != 6)
+			continue;
+
+		/* update perfect match registers */
+		dmi_addr = mc_list->dmi_addr;
+		tmp = (dmi_addr[2] << 24) | (dmi_addr[3] << 16) |
+			(dmi_addr[4] << 8) | dmi_addr[5];
+		enet_writel(priv, tmp, ENET_PML_REG(i + 1));
+
+		tmp = (dmi_addr[0] << 8 | dmi_addr[1]);
+		tmp |= ENET_PMH_DATAVALID_MASK;
+		enet_writel(priv, tmp, ENET_PMH_REG(i + 1));
+	}
+
+	for (; i < 3; i++) {
+		enet_writel(priv, 0, ENET_PML_REG(i + 1));
+		enet_writel(priv, 0, ENET_PMH_REG(i + 1));
+	}
+
+	enet_writel(priv, val, ENET_RXCFG_REG);
+}
+
+/*
+ * set mac duplex parameters
+ */
+static void bcm_enet_set_duplex(struct bcm_enet_priv *priv, int fullduplex)
+{
+	u32 val;
+
+	val = enet_readl(priv, ENET_TXCTL_REG);
+	if (fullduplex)
+		val |= ENET_TXCTL_FD_MASK;
+	else
+		val &= ~ENET_TXCTL_FD_MASK;
+	enet_writel(priv, val, ENET_TXCTL_REG);
+}
+
+/*
+ * set mac flow control parameters
+ */
+static void bcm_enet_set_flow(struct bcm_enet_priv *priv, int rx_en, int tx_en)
+{
+	u32 val;
+
+	/* rx flow control (pause frame handling) */
+	val = enet_readl(priv, ENET_RXCFG_REG);
+	if (rx_en)
+		val |= ENET_RXCFG_ENFLOW_MASK;
+	else
+		val &= ~ENET_RXCFG_ENFLOW_MASK;
+	enet_writel(priv, val, ENET_RXCFG_REG);
+
+	/* tx flow control (pause frame generation) */
+	val = enet_dma_readl(priv, ENETDMA_CFG_REG);
+	if (tx_en)
+		val |= ENETDMA_CFG_FLOWCH_MASK(priv->rx_chan);
+	else
+		val &= ~ENETDMA_CFG_FLOWCH_MASK(priv->rx_chan);
+	enet_dma_writel(priv, val, ENETDMA_CFG_REG);
+}
+
+/*
+ * link changed callback (from phylib)
+ */
+static void bcm_enet_adjust_phy_link(struct net_device *dev)
+{
+	struct bcm_enet_priv *priv;
+	struct phy_device *phydev;
+	int status_changed;
+
+	priv = netdev_priv(dev);
+	phydev = priv->phydev;
+	status_changed = 0;
+
+	if (priv->old_link != phydev->link) {
+		status_changed = 1;
+		priv->old_link = phydev->link;
+	}
+
+	/* reflect duplex change in mac configuration */
+	if (phydev->link && phydev->duplex != priv->old_duplex) {
+		bcm_enet_set_duplex(priv,
+				    (phydev->duplex == DUPLEX_FULL) ? 1 : 0);
+		status_changed = 1;
+		priv->old_duplex = phydev->duplex;
+	}
+
+	/* enable flow control if remote advertise it (trust phylib to
+	 * check that duplex is full */
+	if (phydev->link && phydev->pause != priv->old_pause) {
+		int rx_pause_en, tx_pause_en;
+
+		if (phydev->pause) {
+			/* pause was advertised by lpa and us */
+			rx_pause_en = 1;
+			tx_pause_en = 1;
+		} else if (!priv->pause_auto) {
+			/* pause setting overrided by user */
+			rx_pause_en = priv->pause_rx;
+			tx_pause_en = priv->pause_tx;
+		} else {
+			rx_pause_en = 0;
+			tx_pause_en = 0;
+		}
+
+		bcm_enet_set_flow(priv, rx_pause_en, tx_pause_en);
+		status_changed = 1;
+		priv->old_pause = phydev->pause;