Diffstat (limited to 'drivers/net/ethernet/cadence')
 drivers/net/ethernet/cadence/Kconfig      |   44
 drivers/net/ethernet/cadence/Makefile     |    6
 drivers/net/ethernet/cadence/at91_ether.c |  483
 drivers/net/ethernet/cadence/macb.c       | 2067
 drivers/net/ethernet/cadence/macb.h       |  621
 5 files changed, 3221 insertions(+), 0 deletions(-)
diff --git a/drivers/net/ethernet/cadence/Kconfig b/drivers/net/ethernet/cadence/Kconfig
new file mode 100644
index 00000000000..9e089d24466
--- /dev/null
+++ b/drivers/net/ethernet/cadence/Kconfig
@@ -0,0 +1,44 @@
+#
+# Atmel device configuration
+#
+
+config NET_CADENCE
+ bool "Cadence devices"
+ depends on HAS_IOMEM && (ARM || AVR32 || MICROBLAZE || COMPILE_TEST)
+ default y
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y.
+ Make sure you know the name of your card. Read the Ethernet-HOWTO,
+ available from <http://www.tldp.org/docs.html#howto>.
+
+ If unsure, say Y.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the remaining Atmel network card questions. If you say Y, you will be
+ asked for your specific card in the following questions.
+
+if NET_CADENCE
+
+config ARM_AT91_ETHER
+ tristate "AT91RM9200 Ethernet support"
+ depends on HAS_DMA && (ARCH_AT91RM9200 || COMPILE_TEST)
+ select MACB
+ ---help---
+ If you wish to compile a kernel for the AT91RM9200 and enable
+ ethernet support, then you should always answer Y to this.
+
+config MACB
+ tristate "Cadence MACB/GEM support"
+ depends on HAS_DMA && (PLATFORM_AT32AP || ARCH_AT91 || ARCH_PICOXCELL || ARCH_ZYNQ || MICROBLAZE || COMPILE_TEST)
+ select PHYLIB
+ ---help---
+ The Cadence MACB ethernet interface is found on many Atmel AT32 and
+ AT91 parts. This driver also supports the Cadence GEM (Gigabit
+ Ethernet MAC found in some ARM SoC devices). Note: the Gigabit mode
+ is not yet supported. Say Y to include support for the MACB/GEM chip.
+
+ To compile this driver as a module, choose M here: the module
+ will be called macb.
+
+endif # NET_CADENCE
diff --git a/drivers/net/ethernet/cadence/Makefile b/drivers/net/ethernet/cadence/Makefile
new file mode 100644
index 00000000000..9068b8331ed
--- /dev/null
+++ b/drivers/net/ethernet/cadence/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for the Atmel network device drivers.
+#
+
+obj-$(CONFIG_ARM_AT91_ETHER) += at91_ether.o
+obj-$(CONFIG_MACB) += macb.o
diff --git a/drivers/net/ethernet/cadence/at91_ether.c b/drivers/net/ethernet/cadence/at91_ether.c
new file mode 100644
index 00000000000..4a79edaf388
--- /dev/null
+++ b/drivers/net/ethernet/cadence/at91_ether.c
@@ -0,0 +1,483 @@
+/*
+ * Ethernet driver for the Atmel AT91RM9200 (Thunder)
+ *
+ * Copyright (C) 2003 SAN People (Pty) Ltd
+ *
+ * Based on an earlier Atmel EMAC macrocell driver by Atmel and Lineo Inc.
+ * Initial version by Rick Bronson 01/11/2003
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/dma-mapping.h>
+#include <linux/ethtool.h>
+#include <linux/platform_data/macb.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/gfp.h>
+#include <linux/phy.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_net.h>
+
+#include "macb.h"
+
+/* 1518 rounded up */
+#define MAX_RBUFF_SZ 0x600
+/* max number of receive buffers */
+#define MAX_RX_DESCR 9
+
+/* Initialize and start the Receiver and Transmit subsystems */
+static int at91ether_start(struct net_device *dev)
+{
+ struct macb *lp = netdev_priv(dev);
+ dma_addr_t addr;
+ u32 ctl;
+ int i;
+
+ lp->rx_ring = dma_alloc_coherent(&lp->pdev->dev,
+ (MAX_RX_DESCR *
+ sizeof(struct macb_dma_desc)),
+ &lp->rx_ring_dma, GFP_KERNEL);
+ if (!lp->rx_ring)
+ return -ENOMEM;
+
+ lp->rx_buffers = dma_alloc_coherent(&lp->pdev->dev,
+ MAX_RX_DESCR * MAX_RBUFF_SZ,
+ &lp->rx_buffers_dma, GFP_KERNEL);
+ if (!lp->rx_buffers) {
+ dma_free_coherent(&lp->pdev->dev,
+ MAX_RX_DESCR * sizeof(struct macb_dma_desc),
+ lp->rx_ring, lp->rx_ring_dma);
+ lp->rx_ring = NULL;
+ return -ENOMEM;
+ }
+
+ addr = lp->rx_buffers_dma;
+ for (i = 0; i < MAX_RX_DESCR; i++) {
+ lp->rx_ring[i].addr = addr;
+ lp->rx_ring[i].ctrl = 0;
+ addr += MAX_RBUFF_SZ;
+ }
+
+ /* Set the Wrap bit on the last descriptor */
+ lp->rx_ring[MAX_RX_DESCR - 1].addr |= MACB_BIT(RX_WRAP);
+
+ /* Reset buffer index */
+ lp->rx_tail = 0;
+
+ /* Program address of descriptor list in Rx Buffer Queue register */
+ macb_writel(lp, RBQP, lp->rx_ring_dma);
+
+ /* Enable Receive and Transmit */
+ ctl = macb_readl(lp, NCR);
+ macb_writel(lp, NCR, ctl | MACB_BIT(RE) | MACB_BIT(TE));
+
+ return 0;
+}
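
A note on the ring layout above: the hardware packs flags into the low two bits of each descriptor's addr word — the ownership bit (RX_USED) in bit 0 and the wrap marker (RX_WRAP) in bit 1 — so the buffer pointer effectively occupies the upper 30 bits and buffers must be at least word-aligned. A minimal standalone sketch of splitting such a word (the helper names are illustrative, not the driver's macros):

#include <stdint.h>

#define RX_USED_FLAG (1u << 0)  /* ownership: 1 = software owns the buffer */
#define RX_WRAP_FLAG (1u << 1)  /* marks the last descriptor in the ring */

/* Strip the flag bits to recover the DMA buffer address. */
static inline uint32_t rx_buf_addr(uint32_t addr_word)
{
        return addr_word & ~3u;
}

/* Nonzero once the MAC has handed the descriptor back to software. */
static inline int rx_desc_used(uint32_t addr_word)
{
        return (addr_word & RX_USED_FLAG) != 0;
}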
+
+/* Open the ethernet interface */
+static int at91ether_open(struct net_device *dev)
+{
+ struct macb *lp = netdev_priv(dev);
+ u32 ctl;
+ int ret;
+
+ /* Clear internal statistics */
+ ctl = macb_readl(lp, NCR);
+ macb_writel(lp, NCR, ctl | MACB_BIT(CLRSTAT));
+
+ macb_set_hwaddr(lp);
+
+ ret = at91ether_start(dev);
+ if (ret)
+ return ret;
+
+ /* Enable MAC interrupts */
+ macb_writel(lp, IER, MACB_BIT(RCOMP) |
+ MACB_BIT(RXUBR) |
+ MACB_BIT(ISR_TUND) |
+ MACB_BIT(ISR_RLE) |
+ MACB_BIT(TCOMP) |
+ MACB_BIT(ISR_ROVR) |
+ MACB_BIT(HRESP));
+
+ /* schedule a link state check */
+ phy_start(lp->phy_dev);
+
+ netif_start_queue(dev);
+
+ return 0;
+}
+
+/* Close the interface */
+static int at91ether_close(struct net_device *dev)
+{
+ struct macb *lp = netdev_priv(dev);
+ u32 ctl;
+
+ /* Disable Receiver and Transmitter */
+ ctl = macb_readl(lp, NCR);
+ macb_writel(lp, NCR, ctl & ~(MACB_BIT(TE) | MACB_BIT(RE)));
+
+ /* Disable MAC interrupts */
+ macb_writel(lp, IDR, MACB_BIT(RCOMP) |
+ MACB_BIT(RXUBR) |
+ MACB_BIT(ISR_TUND) |
+ MACB_BIT(ISR_RLE) |
+ MACB_BIT(TCOMP) |
+ MACB_BIT(ISR_ROVR) |
+ MACB_BIT(HRESP));
+
+ netif_stop_queue(dev);
+
+ dma_free_coherent(&lp->pdev->dev,
+ MAX_RX_DESCR * sizeof(struct macb_dma_desc),
+ lp->rx_ring, lp->rx_ring_dma);
+ lp->rx_ring = NULL;
+
+ dma_free_coherent(&lp->pdev->dev,
+ MAX_RX_DESCR * MAX_RBUFF_SZ,
+ lp->rx_buffers, lp->rx_buffers_dma);
+ lp->rx_buffers = NULL;
+
+ return 0;
+}
+
+/* Transmit packet */
+static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct macb *lp = netdev_priv(dev);
+
+ if (macb_readl(lp, TSR) & MACB_BIT(RM9200_BNQ)) {
+ netif_stop_queue(dev);
+
+ /* Store packet information (to free when Tx completed) */
+ lp->skb = skb;
+ lp->skb_length = skb->len;
+ lp->skb_physaddr = dma_map_single(NULL, skb->data, skb->len,
+ DMA_TO_DEVICE);
+
+ /* Set address of the data in the Transmit Address register */
+ macb_writel(lp, TAR, lp->skb_physaddr);
+ /* Set length of the packet in the Transmit Control register */
+ macb_writel(lp, TCR, skb->len);
+
+ } else {
+ netdev_err(dev, "%s called, but device is busy!\n", __func__);
+ return NETDEV_TX_BUSY;
+ }
+
+ return NETDEV_TX_OK;
+}
+
+/* Extract received frames from the buffer descriptors and send them to the upper layers.
+ * (Called from interrupt context)
+ */
+static void at91ether_rx(struct net_device *dev)
+{
+ struct macb *lp = netdev_priv(dev);
+ unsigned char *p_recv;
+ struct sk_buff *skb;
+ unsigned int pktlen;
+
+ while (lp->rx_ring[lp->rx_tail].addr & MACB_BIT(RX_USED)) {
+ p_recv = lp->rx_buffers + lp->rx_tail * MAX_RBUFF_SZ;
+ pktlen = MACB_BF(RX_FRMLEN, lp->rx_ring[lp->rx_tail].ctrl);
+ skb = netdev_alloc_skb(dev, pktlen + 2);
+ if (skb) {
+ skb_reserve(skb, 2);
+ memcpy(skb_put(skb, pktlen), p_recv, pktlen);
+
+ skb->protocol = eth_type_trans(skb, dev);
+ lp->stats.rx_packets++;
+ lp->stats.rx_bytes += pktlen;
+ netif_rx(skb);
+ } else {
+ lp->stats.rx_dropped++;
+ }
+
+ if (lp->rx_ring[lp->rx_tail].ctrl & MACB_BIT(RX_MHASH_MATCH))
+ lp->stats.multicast++;
+
+ /* reset ownership bit */
+ lp->rx_ring[lp->rx_tail].addr &= ~MACB_BIT(RX_USED);
+
+ /* wrap after last buffer */
+ if (lp->rx_tail == MAX_RX_DESCR - 1)
+ lp->rx_tail = 0;
+ else
+ lp->rx_tail++;
+ }
+}
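
The pktlen + 2 / skb_reserve(skb, 2) pair above is the usual NET_IP_ALIGN trick: with two pad bytes in front, the 14-byte Ethernet header ends at offset 16, leaving the IP header word-aligned. A tiny self-contained check of that arithmetic:

#include <assert.h>

int main(void)
{
        const int pad = 2;       /* skb_reserve(skb, 2) */
        const int eth_hlen = 14; /* dst MAC + src MAC + ethertype */

        /* IP header lands on a 4-byte boundary thanks to the pad */
        assert((pad + eth_hlen) % 4 == 0);
        return 0;
}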
+
+/* MAC interrupt handler */
+static irqreturn_t at91ether_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct macb *lp = netdev_priv(dev);
+ u32 intstatus, ctl;
+
+ /* MAC Interrupt Status register indicates what interrupts are pending.
+ * It is automatically cleared once read.
+ */
+ intstatus = macb_readl(lp, ISR);
+
+ /* Receive complete */
+ if (intstatus & MACB_BIT(RCOMP))
+ at91ether_rx(dev);
+
+ /* Transmit complete */
+ if (intstatus & MACB_BIT(TCOMP)) {
+ /* The TCOMP bit is set even if the transmission failed */
+ if (intstatus & (MACB_BIT(ISR_TUND) | MACB_BIT(ISR_RLE)))
+ lp->stats.tx_errors++;
+
+ if (lp->skb) {
+ dev_kfree_skb_irq(lp->skb);
+ lp->skb = NULL;
+ dma_unmap_single(NULL, lp->skb_physaddr, lp->skb_length, DMA_TO_DEVICE);
+ lp->stats.tx_packets++;
+ lp->stats.tx_bytes += lp->skb_length;
+ }
+ netif_wake_queue(dev);
+ }
+
+ /* Work-around for EMAC Errata section 41.3.1 */
+ if (intstatus & MACB_BIT(RXUBR)) {
+ ctl = macb_readl(lp, NCR);
+ macb_writel(lp, NCR, ctl & ~MACB_BIT(RE));
+ macb_writel(lp, NCR, ctl | MACB_BIT(RE));
+ }
+
+ if (intstatus & MACB_BIT(ISR_ROVR))
+ netdev_err(dev, "ROVR error\n");
+
+ return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void at91ether_poll_controller(struct net_device *dev)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ at91ether_interrupt(dev->irq, dev);
+ local_irq_restore(flags);
+}
+#endif
+
+static const struct net_device_ops at91ether_netdev_ops = {
+ .ndo_open = at91ether_open,
+ .ndo_stop = at91ether_close,
+ .ndo_start_xmit = at91ether_start_xmit,
+ .ndo_get_stats = macb_get_stats,
+ .ndo_set_rx_mode = macb_set_rx_mode,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_do_ioctl = macb_ioctl,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_change_mtu = eth_change_mtu,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = at91ether_poll_controller,
+#endif
+};
+
+#if defined(CONFIG_OF)
+static const struct of_device_id at91ether_dt_ids[] = {
+ { .compatible = "cdns,at91rm9200-emac" },
+ { .compatible = "cdns,emac" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, at91ether_dt_ids);
+#endif
+
+/* Detect MAC & PHY and perform ethernet interface initialization */
+static int __init at91ether_probe(struct platform_device *pdev)
+{
+ struct macb_platform_data *board_data = dev_get_platdata(&pdev->dev);
+ struct resource *regs;
+ struct net_device *dev;
+ struct phy_device *phydev;
+ struct macb *lp;
+ int res;
+ u32 reg;
+ const char *mac;
+
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!regs)
+ return -ENOENT;
+
+ dev = alloc_etherdev(sizeof(struct macb));
+ if (!dev)
+ return -ENOMEM;
+
+ lp = netdev_priv(dev);
+ lp->pdev = pdev;
+ lp->dev = dev;
+ spin_lock_init(&lp->lock);
+
+ /* physical base address */
+ dev->base_addr = regs->start;
+ lp->regs = devm_ioremap(&pdev->dev, regs->start, resource_size(regs));
+ if (!lp->regs) {
+ res = -ENOMEM;
+ goto err_free_dev;
+ }
+
+ /* Clock */
+ lp->pclk = devm_clk_get(&pdev->dev, "ether_clk");
+ if (IS_ERR(lp->pclk)) {
+ res = PTR_ERR(lp->pclk);
+ goto err_free_dev;
+ }
+ clk_enable(lp->pclk);
+
+ lp->hclk = ERR_PTR(-ENOENT);
+ lp->tx_clk = ERR_PTR(-ENOENT);
+
+ /* Install the interrupt handler */
+ dev->irq = platform_get_irq(pdev, 0);
+ res = devm_request_irq(&pdev->dev, dev->irq, at91ether_interrupt, 0, dev->name, dev);
+ if (res)
+ goto err_disable_clock;
+
+ ether_setup(dev);
+ dev->netdev_ops = &at91ether_netdev_ops;
+ dev->ethtool_ops = &macb_ethtool_ops;
+ platform_set_drvdata(pdev, dev);
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ mac = of_get_mac_address(pdev->dev.of_node);
+ if (mac)
+ memcpy(lp->dev->dev_addr, mac, ETH_ALEN);
+ else
+ macb_get_hwaddr(lp);
+
+ res = of_get_phy_mode(pdev->dev.of_node);
+ if (res < 0) {
+ if (board_data && board_data->is_rmii)
+ lp->phy_interface = PHY_INTERFACE_MODE_RMII;
+ else
+ lp->phy_interface = PHY_INTERFACE_MODE_MII;
+ } else {
+ lp->phy_interface = res;
+ }
+
+ macb_writel(lp, NCR, 0);
+
+ reg = MACB_BF(CLK, MACB_CLK_DIV32) | MACB_BIT(BIG);
+ if (lp->phy_interface == PHY_INTERFACE_MODE_RMII)
+ reg |= MACB_BIT(RM9200_RMII);
+
+ macb_writel(lp, NCFGR, reg);
+
+ /* Register the network interface */
+ res = register_netdev(dev);
+ if (res)
+ goto err_disable_clock;
+
+ res = macb_mii_init(lp);
+ if (res)
+ goto err_out_unregister_netdev;
+
+ /* will be enabled in open() */
+ netif_carrier_off(dev);
+
+ phydev = lp->phy_dev;
+ netdev_info(dev, "attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
+ phydev->drv->name, dev_name(&phydev->dev),
+ phydev->irq);
+
+ /* Display ethernet banner */
+ netdev_info(dev, "AT91 ethernet at 0x%08lx int=%d (%pM)\n",
+ dev->base_addr, dev->irq, dev->dev_addr);
+
+ return 0;
+
+err_out_unregister_netdev:
+ unregister_netdev(dev);
+err_disable_clock:
+ clk_disable(lp->pclk);
+err_free_dev:
+ free_netdev(dev);
+ return res;
+}
+
+static int at91ether_remove(struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ struct macb *lp = netdev_priv(dev);
+
+ if (lp->phy_dev)
+ phy_disconnect(lp->phy_dev);
+
+ mdiobus_unregister(lp->mii_bus);
+ kfree(lp->mii_bus->irq);
+ mdiobus_free(lp->mii_bus);
+ unregister_netdev(dev);
+ clk_disable(lp->pclk);
+ free_netdev(dev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int at91ether_suspend(struct platform_device *pdev, pm_message_t mesg)
+{
+ struct net_device *net_dev = platform_get_drvdata(pdev);
+ struct macb *lp = netdev_priv(net_dev);
+
+ if (netif_running(net_dev)) {
+ netif_stop_queue(net_dev);
+ netif_device_detach(net_dev);
+
+ clk_disable(lp->pclk);
+ }
+ return 0;
+}
+
+static int at91ether_resume(struct platform_device *pdev)
+{
+ struct net_device *net_dev = platform_get_drvdata(pdev);
+ struct macb *lp = netdev_priv(net_dev);
+
+ if (netif_running(net_dev)) {
+ clk_enable(lp->pclk);
+
+ netif_device_attach(net_dev);
+ netif_start_queue(net_dev);
+ }
+ return 0;
+}
+#else
+#define at91ether_suspend NULL
+#define at91ether_resume NULL
+#endif
+
+static struct platform_driver at91ether_driver = {
+ .remove = at91ether_remove,
+ .suspend = at91ether_suspend,
+ .resume = at91ether_resume,
+ .driver = {
+ .name = "at91_ether",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(at91ether_dt_ids),
+ },
+};
+
+module_platform_driver_probe(at91ether_driver, at91ether_probe);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("AT91RM9200 EMAC Ethernet driver");
+MODULE_AUTHOR("Andrew Victor");
+MODULE_ALIAS("platform:at91_ether");
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
new file mode 100644
index 00000000000..e9daa072ebb
--- /dev/null
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -0,0 +1,2067 @@
+/*
+ * Cadence MACB/GEM Ethernet Controller driver
+ *
+ * Copyright (C) 2004-2006 Atmel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/circ_buf.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_data/macb.h>
+#include <linux/platform_device.h>
+#include <linux/phy.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/pinctrl/consumer.h>
+
+#include "macb.h"
+
+#define MACB_RX_BUFFER_SIZE 128
+#define RX_BUFFER_MULTIPLE 64 /* bytes */
+#define RX_RING_SIZE 512 /* must be power of 2 */
+#define RX_RING_BYTES (sizeof(struct macb_dma_desc) * RX_RING_SIZE)
+
+#define TX_RING_SIZE 128 /* must be power of 2 */
+#define TX_RING_BYTES (sizeof(struct macb_dma_desc) * TX_RING_SIZE)
+
+/* level of occupied TX descriptors under which we wake up TX process */
+#define MACB_TX_WAKEUP_THRESH (3 * TX_RING_SIZE / 4)
+
+#define MACB_RX_INT_FLAGS (MACB_BIT(RCOMP) | MACB_BIT(RXUBR) \
+ | MACB_BIT(ISR_ROVR))
+#define MACB_TX_ERR_FLAGS (MACB_BIT(ISR_TUND) \
+ | MACB_BIT(ISR_RLE) \
+ | MACB_BIT(TXERR))
+#define MACB_TX_INT_FLAGS (MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
+
+/*
+ * Graceful stop timeouts in us. We should allow up to
+ * 1 frame time (10 Mbits/s, full-duplex, ignoring collisions)
+ */
+#define MACB_HALT_TIMEOUT 1230
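
The 1230 figure is one maximum-length frame time at 10 Mbit/s including preamble, start-of-frame delimiter and inter-frame gap — a back-of-the-envelope check (our arithmetic, not spelled out in the patch):

(1518 frame + 7 preamble + 1 SFD + 12 IFG) bytes = 1538 bytes = 12304 bits
12304 bits / 10 Mbit/s ≈ 1230 us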
+
+/* Ring buffer accessors */
+static unsigned int macb_tx_ring_wrap(unsigned int index)
+{
+ return index & (TX_RING_SIZE - 1);
+}
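
Because the ring sizes are powers of two, masking with size - 1 is exactly the modulo the ring needs, which lets tx_head/tx_tail run as free counters that never need explicit wrapping. A standalone illustration:

#include <assert.h>

#define RING_SIZE 128 /* power of 2, as the defines above require */

int main(void)
{
        unsigned int index = 130; /* free-running counter past one wrap */

        /* mask and modulo agree: both give 2 */
        assert((index & (RING_SIZE - 1)) == index % RING_SIZE);
        return 0;
}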
+
+static struct macb_dma_desc *macb_tx_desc(struct macb *bp, unsigned int index)
+{
+ return &bp->tx_ring[macb_tx_ring_wrap(index)];
+}
+
+static struct macb_tx_skb *macb_tx_skb(struct macb *bp, unsigned int index)
+{
+ return &bp->tx_skb[macb_tx_ring_wrap(index)];
+}
+
+static dma_addr_t macb_tx_dma(struct macb *bp, unsigned int index)
+{
+ dma_addr_t offset;
+
+ offset = macb_tx_ring_wrap(index) * sizeof(struct macb_dma_desc);
+
+ return bp->tx_ring_dma + offset;
+}
+
+static unsigned int macb_rx_ring_wrap(unsigned int index)
+{
+ return index & (RX_RING_SIZE - 1);
+}
+
+static struct macb_dma_desc *macb_rx_desc(struct macb *bp, unsigned int index)
+{
+ return &bp->rx_ring[macb_rx_ring_wrap(index)];
+}
+
+static void *macb_rx_buffer(struct macb *bp, unsigned int index)
+{
+ return bp->rx_buffers + bp->rx_buffer_size * macb_rx_ring_wrap(index);
+}
+
+void macb_set_hwaddr(struct macb *bp)
+{
+ u32 bottom;
+ u16 top;
+
+ bottom = cpu_to_le32(*((u32 *)bp->dev->dev_addr));
+ macb_or_gem_writel(bp, SA1B, bottom);
+ top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4)));
+ macb_or_gem_writel(bp, SA1T, top);
+
+ /* Clear unused address register sets */
+ macb_or_gem_writel(bp, SA2B, 0);
+ macb_or_gem_writel(bp, SA2T, 0);
+ macb_or_gem_writel(bp, SA3B, 0);
+ macb_or_gem_writel(bp, SA3T, 0);
+ macb_or_gem_writel(bp, SA4B, 0);
+ macb_or_gem_writel(bp, SA4T, 0);
+}
+EXPORT_SYMBOL_GPL(macb_set_hwaddr);
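
Concretely: for a MAC address aa:bb:cc:dd:ee:ff stored as the byte array {0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff}, the code above puts 0xddccbbaa into SA1B and 0xffee into SA1T — the cpu_to_le32/cpu_to_le16 conversions make that register layout hold on both little- and big-endian CPUs. An endianness-independent sketch of the same packing:

#include <stdint.h>

/* Pack the low four MAC bytes as SA1B expects: addr[0] in the LSB. */
static inline uint32_t mac_bottom(const uint8_t *addr)
{
        return (uint32_t)addr[0] | ((uint32_t)addr[1] << 8) |
               ((uint32_t)addr[2] << 16) | ((uint32_t)addr[3] << 24);
}

/* And the top two bytes for SA1T. */
static inline uint16_t mac_top(const uint8_t *addr)
{
        return (uint16_t)(addr[4] | (addr[5] << 8));
}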
+
+void macb_get_hwaddr(struct macb *bp)
+{
+ struct macb_platform_data *pdata;
+ u32 bottom;
+ u16 top;
+ u8 addr[6];
+ int i;
+
+ pdata = dev_get_platdata(&bp->pdev->dev);
+
+ /* Check all 4 address registers for a valid address */
+ for (i = 0; i < 4; i++) {
+ bottom = macb_or_gem_readl(bp, SA1B + i * 8);
+ top = macb_or_gem_readl(bp, SA1T + i * 8);
+
+ if (pdata && pdata->rev_eth_addr) {
+ addr[5] = bottom & 0xff;
+ addr[4] = (bottom >> 8) & 0xff;
+ addr[3] = (bottom >> 16) & 0xff;
+ addr[2] = (bottom >> 24) & 0xff;
+ addr[1] = top & 0xff;
+ addr[0] = (top & 0xff00) >> 8;
+ } else {
+ addr[0] = bottom & 0xff;
+ addr[1] = (bottom >> 8) & 0xff;
+ addr[2] = (bottom >> 16) & 0xff;
+ addr[3] = (bottom >> 24) & 0xff;
+ addr[4] = top & 0xff;
+ addr[5] = (top >> 8) & 0xff;
+ }
+
+ if (is_valid_ether_addr(addr)) {
+ memcpy(bp->dev->dev_addr, addr, sizeof(addr));
+ return;
+ }
+ }
+
+ netdev_info(bp->dev, "invalid hw address, using random\n");
+ eth_hw_addr_random(bp->dev);
+}
+EXPORT_SYMBOL_GPL(macb_get_hwaddr);
+
+static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
+{
+ struct macb *bp = bus->priv;
+ int value;
+
+ macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
+ | MACB_BF(RW, MACB_MAN_READ)
+ | MACB_BF(PHYA, mii_id)
+ | MACB_BF(REGA, regnum)
+ | MACB_BF(CODE, MACB_MAN_CODE)));
+
+ /* wait for end of transfer */
+ while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR)))
+ cpu_relax();
+
+ value = MACB_BFEXT(DATA, macb_readl(bp, MAN));
+
+ return value;
+}
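
The MAN write above assembles an IEEE 802.3 Clause 22 management frame in a single register; macb_mdio_write below does the same with the opcode and data fields changed. Assuming the conventional MACB field layout (SOF in bits 31:30, RW in 29:28, PHY address in 27:23, register in 22:18, code in 17:16, data in 15:0 — our reading of the MACB_BF() macros, not spelled out in this patch), a read of register 1 on PHY 0 composes like this:

#include <stdint.h>

/* Compose the MAN word for a Clause 22 read. The field offsets are an
 * assumption matching standard Clause 22 framing, not the driver's macros. */
static inline uint32_t man_read_word(unsigned int phy, unsigned int reg)
{
        return (1u << 30)           /* SOF: start of frame (01) */
             | (2u << 28)           /* RW: 10 = read */
             | ((phy & 0x1f) << 23)
             | ((reg & 0x1f) << 18)
             | (2u << 16);          /* CODE: must be 10 */
}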
+
+static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
+ u16 value)
+{
+ struct macb *bp = bus->priv;
+
+ macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
+ | MACB_BF(RW, MACB_MAN_WRITE)
+ | MACB_BF(PHYA, mii_id)
+ | MACB_BF(REGA, regnum)
+ | MACB_BF(CODE, MACB_MAN_CODE)
+ | MACB_BF(DATA, value)));
+
+ /* wait for end of transfer */
+ while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR)))
+ cpu_relax();
+
+ return 0;
+}
+
+/**
+ * macb_set_tx_clk() - Set the tx clock to a rate matching the link speed
+ * @clk: Pointer to the clock to change
+ * @speed: Link speed (SPEED_10, SPEED_100 or SPEED_1000)
+ * @dev: Pointer to the struct net_device
+ */
+static void macb_set_tx_clk(struct clk *clk, int speed, struct net_device *dev)
+{
+ long ferr, rate, rate_rounded;
+
+ switch (speed) {
+ case SPEED_10:
+ rate = 2500000;
+ break;
+ case SPEED_100:
+ rate = 25000000;
+ break;
+ case SPEED_1000:
+ rate = 125000000;
+ break;
+ default:
+ return;
+ }
+
+ rate_rounded = clk_round_rate(clk, rate);
+ if (rate_rounded < 0)
+ return;
+
+ /* RGMII allows 50 ppm frequency error. Test and warn if this limit
+ * is not satisfied.
+ */
+ ferr = abs(rate_rounded - rate);
+ ferr = DIV_ROUND_UP(ferr, rate / 100000);
+ if (ferr > 5)
+ netdev_warn(dev, "unable to generate target frequency: %ld Hz\n",
+ rate);
+
+ if (clk_set_rate(clk, rate_rounded))
+ netdev_err(dev, "adjusting tx_clk failed.\n");
+}
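
The ferr test works in units of 10 ppm: dividing the absolute error by rate / 100000 expresses it in parts per hundred thousand, so ferr > 5 means the rounded rate is more than 50 ppm off. A worked example (our numbers):

ferr = |25,001,500 - 25,000,000| = 1500 Hz
rate / 100000 = 250 Hz per step of 10 ppm
DIV_ROUND_UP(1500, 250) = 6  ->  60 ppm > 50 ppm, so the warning fires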
+
+static void macb_handle_link_change(struct net_device *dev)
+{
+ struct macb *bp = netdev_priv(dev);
+ struct phy_device *phydev = bp->phy_dev;
+ unsigned long flags;
+
+ int status_change = 0;
+
+ spin_lock_irqsave(&bp->lock, flags);
+
+ if (phydev->link) {
+ if ((bp->speed != phydev->speed) ||
+ (bp->duplex != phydev->duplex)) {
+ u32 reg;
+
+ reg = macb_readl(bp, NCFGR);
+ reg &= ~(MACB_BIT(SPD) | MACB_BIT(FD));
+ if (macb_is_gem(bp))
+ reg &= ~GEM_BIT(GBE);
+
+ if (phydev->duplex)
+ reg |= MACB_BIT(FD);
+ if (phydev->speed == SPEED_100)
+ reg |= MACB_BIT(SPD);
+ if (phydev->speed == SPEED_1000)
+ reg |= GEM_BIT(GBE);
+
+ macb_or_gem_writel(bp, NCFGR, reg);
+
+ bp->speed = phydev->speed;
+ bp->duplex = phydev->duplex;
+ status_change = 1;
+ }
+ }
+
+ if (phydev->link != bp->link) {
+ if (!phydev->link) {
+ bp->speed = 0;
+ bp->duplex = -1;
+ }
+ bp->link = phydev->link;
+
+ status_change = 1;
+ }
+
+ spin_unlock_irqrestore(&bp->lock, flags);
+
+ if (!IS_ERR(bp->tx_clk))
+ macb_set_tx_clk(bp->tx_clk, phydev->speed, dev);
+
+ if (status_change) {
+ if (phydev->link) {
+ netif_carrier_on(dev);
+ netdev_info(dev, "link up (%d/%s)\n",
+ phydev->speed,
+ phydev->duplex == DUPLEX_FULL ?
+ "Full" : "Half");
+ } else {
+ netif_carrier_off(dev);
+ netdev_info(dev, "link down\n");
+ }
+ }
+}
+
+/* based on au1000_eth.c */
+static int macb_mii_probe(struct net_device *dev)
+{
+ struct macb *bp = netdev_priv(dev);
+ struct macb_platform_data *pdata;
+ struct phy_device *phydev;
+ int phy_irq;
+ int ret;
+
+ phydev = phy_find_first(bp->mii_bus);
+ if (!phydev) {
+ netdev_err(dev, "no PHY found\n");
+ return -ENXIO;
+ }
+
+ pdata = dev_get_platdata(&bp->pdev->dev);
+ if (pdata && gpio_is_valid(pdata->phy_irq_pin)) {
+ ret = devm_gpio_request(&bp->pdev->dev, pdata->phy_irq_pin, "phy int");
+ if (!ret) {
+ phy_irq = gpio_to_irq(pdata->phy_irq_pin);
+ phydev->irq = (phy_irq < 0) ? PHY_POLL : phy_irq;
+ }
+ }
+
+ /* attach the mac to the phy */
+ ret = phy_connect_direct(dev, phydev, &macb_handle_link_change,
+ bp->phy_interface);
+ if (ret) {
+ netdev_err(dev, "Could not attach to PHY\n");
+ return ret;
+ }
+
+ /* mask with MAC supported features */
+ if (macb_is_gem(bp))
+ phydev->supported &= PHY_GBIT_FEATURES;
+ else
+ phydev->supported &= PHY_BASIC_FEATURES;
+
+ phydev->advertising = phydev->supported;
+
+ bp->link = 0;
+ bp->speed = 0;
+ bp->duplex = -1;
+ bp->phy_dev = phydev;
+
+ return 0;
+}
+
+int macb_mii_init(struct macb *bp)
+{
+ struct macb_platform_data *pdata;
+ struct device_node *np;
+ int err = -ENXIO, i;
+
+ /* Enable management port */
+ macb_writel(bp, NCR, MACB_BIT(MPE));
+
+ bp->mii_bus = mdiobus_alloc();
+ if (bp->mii_bus == NULL) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ bp->mii_bus->name = "MACB_mii_bus";
+ bp->mii_bus->read = &macb_mdio_read;
+ bp->mii_bus->write = &macb_mdio_write;
+ snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
+ bp->pdev->name, bp->pdev->id);
+ bp->mii_bus->priv = bp;
+ bp->mii_bus->parent = &bp->dev->dev;
+ pdata = dev_get_platdata(&bp->pdev->dev);
+
+ bp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
+ if (!bp->mii_bus->irq) {
+ err = -ENOMEM;
+ goto err_out_free_mdiobus;
+ }
+
+ dev_set_drvdata(&bp->dev->dev, bp->mii_bus);
+
+ np = bp->pdev->dev.of_node;
+ if (np) {
+ /* try dt phy registration */
+ err = of_mdiobus_register(bp->mii_bus, np);
+
+ /* fall back to standard PHY registration if no PHY was
+ * found during DT PHY registration */
+ if (!err && !phy_find_first(bp->mii_bus)) {
+ for (i = 0; i < PHY_MAX_ADDR; i++) {
+ struct phy_device *phydev;
+
+ phydev = mdiobus_scan(bp->mii_bus, i);
+ if (IS_ERR(phydev)) {
+ err = PTR_ERR(phydev);
+ break;
+ }
+ }
+
+ if (err)
+ goto err_out_unregister_bus;
+ }
+ } else {
+ for (i = 0; i < PHY_MAX_ADDR; i++)
+ bp->mii_bus->irq[i] = PHY_POLL;
+
+ if (pdata)
+ bp->mii_bus->phy_mask = pdata->phy_mask;
+
+ err = mdiobus_register(bp->mii_bus);
+ }
+
+ if (err)
+ goto err_out_free_mdio_irq;
+
+ err = macb_mii_probe(bp->dev);
+ if (err)
+ goto err_out_unregister_bus;
+
+ return 0;
+
+err_out_unregister_bus:
+ mdiobus_unregister(bp->mii_bus);
+err_out_free_mdio_irq:
+ kfree(bp->mii_bus->irq);
+err_out_free_mdiobus:
+ mdiobus_free(bp->mii_bus);
+err_out:
+ return err;
+}
+EXPORT_SYMBOL_GPL(macb_mii_init);
+
+static void macb_update_stats(struct macb *bp)
+{
+ u32 __iomem *reg = bp->regs + MACB_PFR;
+ u32 *p = &bp->hw_stats.macb.rx_pause_frames;
+ u32 *end = &bp->hw_stats.macb.tx_pause_frames + 1;
+
+ WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4);
+
+ for (; p < end; p++, reg++)
+ *p += __raw_readl(reg);
+}
+
+static int macb_halt_tx(struct macb *bp)
+{
+ unsigned long halt_time, timeout;
+ u32 status;
+
+ macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(THALT));
+
+ timeout = jiffies + usecs_to_jiffies(MACB_HALT_TIMEOUT);
+ do {
+ halt_time = jiffies;
+ status = macb_readl(bp, TSR);
+ if (!(status & MACB_BIT(TGO)))
+ return 0;
+
+ usleep_range(10, 250);
+ } while (time_before(halt_time, timeout));
+
+ return -ETIMEDOUT;
+}
+
+static void macb_tx_error_task(struct work_struct *work)
+{
+ struct macb *bp = container_of(work, struct macb, tx_error_task);
+ struct macb_tx_skb *tx_skb;
+ struct sk_buff *skb;
+ unsigned int tail;
+
+ netdev_vdbg(bp->dev, "macb_tx_error_task: t = %u, h = %u\n",
+ bp->tx_tail, bp->tx_head);
+
+ /* Make sure nobody is trying to queue up new packets */
+ netif_stop_queue(bp->dev);
+
+ /*
+ * Stop transmission now
+ * (in case we have just queued new packets)
+ */
+ if (macb_halt_tx(bp))
+ /* Just complain for now, reinitializing TX path can be good */
+ netdev_err(bp->dev, "BUG: halt tx timed out\n");
+
+ /* No need for the lock here as nobody will interrupt us anymore */
+
+ /*
+ * Treat frames in TX queue including the ones that caused the error.
+ * Free transmit buffers in upper layer.
+ */
+ for (tail = bp->tx_tail; tail != bp->tx_head; tail++) {
+ struct macb_dma_desc *desc;
+ u32 ctrl;
+
+ desc = macb_tx_desc(bp, tail);
+ ctrl = desc->ctrl;
+ tx_skb = macb_tx_skb(bp, tail);
+ skb = tx_skb->skb;
+
+ if (ctrl & MACB_BIT(TX_USED)) {
+ netdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n",
+ macb_tx_ring_wrap(tail), skb->data);
+ bp->stats.tx_packets++;
+ bp->stats.tx_bytes += skb->len;
+ } else {
+ /*
+ * "Buffers exhausted mid-frame" errors may only happen
+ * if the driver is buggy, so complain loudly about those.
+ * Statistics are updated by hardware.
+ */
+ if (ctrl & MACB_BIT(TX_BUF_EXHAUSTED))
+ netdev_err(bp->dev,
+ "BUG: TX buffers exhausted mid-frame\n");
+
+ desc->ctrl = ctrl | MACB_BIT(TX_USED);
+ }
+
+ dma_unmap_single(&bp->pdev->dev, tx_skb->mapping, skb->len,
+ DMA_TO_DEVICE);
+ tx_skb->skb = NULL;
+ dev_kfree_skb(skb);
+ }
+
+ /* Make descriptor updates visible to hardware */
+ wmb();
+
+ /* Reinitialize the TX desc queue */
+ macb_writel(bp, TBQP, bp->tx_ring_dma);
+ /* Make TX ring reflect state of hardware */
+ bp->tx_head = bp->tx_tail = 0;
+
+ /* Now we are ready to start transmission again */
+ netif_wake_queue(bp->dev);
+
+ /* Housework before enabling TX IRQ */
+ macb_writel(bp, TSR, macb_readl(bp, TSR));
+ macb_writel(bp, IER, MACB_TX_INT_FLAGS);
+}
+
+static void macb_tx_interrupt(struct macb *bp)
+{
+ unsigned int tail;
+ unsigned int head;
+ u32 status;
+
+ status = macb_readl(bp, TSR);
+ macb_writel(bp, TSR, status);
+
+ if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
+ macb_writel(bp, ISR, MACB_BIT(TCOMP));
+
+ netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n",
+ (unsigned long)status);
+
+ head = bp->tx_head;
+ for (tail = bp->tx_tail; tail != head; tail++) {
+ struct macb_tx_skb *tx_skb;
+ struct sk_buff *skb;
+ struct macb_dma_desc *desc;
+ u32 ctrl;
+
+ desc = macb_tx_desc(bp, tail);
+
+ /* Make hw descriptor updates visible to CPU */
+ rmb();
+
+ ctrl = desc->ctrl;
+
+ if (!(ctrl & MACB_BIT(TX_USED)))
+ break;
+
+ tx_skb = macb_tx_skb(bp, tail);
+ skb = tx_skb->skb;
+
+ netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n",
+ macb_tx_ring_wrap(tail), skb->data);
+ dma_unmap_single(&bp->pdev->dev, tx_skb->mapping, skb->len,
+ DMA_TO_DEVICE);
+ bp->stats.tx_packets++;
+ bp->stats.tx_bytes += skb->len;
+ tx_skb->skb = NULL;
+ dev_kfree_skb_irq(skb);
+ }
+
+ bp->tx_tail = tail;
+ if (netif_queue_stopped(bp->dev)
+ && CIRC_CNT(bp->tx_head, bp->tx_tail,
+ TX_RING_SIZE) <= MACB_TX_WAKEUP_THRESH)
+ netif_wake_queue(bp->dev);
+}
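
The head/tail bookkeeping here (and in macb_start_xmit further down) relies on the <linux/circ_buf.h> helpers, which for a power-of-two ring reduce to masked subtraction. Their definitions, as found in that header:

#define CIRC_CNT(head, tail, size)   (((head) - (tail)) & ((size) - 1))
#define CIRC_SPACE(head, tail, size) CIRC_CNT((tail), ((head) + 1), (size))

CIRC_SPACE deliberately keeps one slot unused (the head + 1), so that head == tail unambiguously means the ring is empty rather than full.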
+
+static void gem_rx_refill(struct macb *bp)
+{
+ unsigned int entry;
+ struct sk_buff *skb;
+ dma_addr_t paddr;
+
+ while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail, RX_RING_SIZE) > 0) {
+ entry = macb_rx_ring_wrap(bp->rx_prepared_head);
+
+ /* Make hw descriptor updates visible to CPU */
+ rmb();
+
+ bp->rx_prepared_head++;
+
+ if (bp->rx_skbuff[entry] == NULL) {
+ /* allocate sk_buff for this free entry in ring */
+ skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size);
+ if (unlikely(skb == NULL)) {
+ netdev_err(bp->dev,
+ "Unable to allocate sk_buff\n");
+ break;
+ }
+
+ /* now fill corresponding descriptor entry */
+ paddr = dma_map_single(&bp->pdev->dev, skb->data,
+ bp->rx_buffer_size, DMA_FROM_DEVICE);
+ if (dma_mapping_error(&bp->pdev->dev, paddr)) {
+ dev_kfree_skb(skb);
+ break;
+ }
+
+ bp->rx_skbuff[entry] = skb;
+
+ if (entry == RX_RING_SIZE - 1)
+ paddr |= MACB_BIT(RX_WRAP);
+ bp->rx_ring[entry].addr = paddr;
+ bp->rx_ring[entry].ctrl = 0;
+
+ /* properly align Ethernet header */
+ skb_reserve(skb, NET_IP_ALIGN);
+ }
+ }
+
+ /* Make descriptor updates visible to hardware */
+ wmb();
+
+ netdev_vdbg(bp->dev, "rx ring: prepared head %d, tail %d\n",
+ bp->rx_prepared_head, bp->rx_tail);
+}
+
+/* Mark DMA descriptors from begin up to and not including end as unused */
+static void discard_partial_frame(struct macb *bp, unsigned int begin,
+ unsigned int end)
+{
+ unsigned int frag;
+
+ for (frag = begin; frag != end; frag++) {
+ struct macb_dma_desc *desc = macb_rx_desc(bp, frag);
+ desc->addr &= ~MACB_BIT(RX_USED);
+ }
+
+ /* Make descriptor updates visible to hardware */
+ wmb();
+
+ /*
+ * When this happens, the hardware stats registers for
+ * whatever caused this are updated, so we don't have to record
+ * anything.
+ */
+}
+
+static int gem_rx(struct macb *bp, int budget)
+{
+ unsigned int len;
+ unsigned int entry;
+ struct sk_buff *skb;
+ struct macb_dma_desc *desc;
+ int count = 0;
+
+ while (count < budget) {
+ u32 addr, ctrl;
+
+ entry = macb_rx_ring_wrap(bp->rx_tail);
+ desc = &bp->rx_ring[entry];
+
+ /* Make hw descriptor updates visible to CPU */
+ rmb();
+
+ addr = desc->addr;
+ ctrl = desc->ctrl;
+
+ if (!(addr & MACB_BIT(RX_USED)))
+ break;
+
+ bp->rx_tail++;
+ count++;
+
+ if (!(ctrl & MACB_BIT(RX_SOF) && ctrl & MACB_BIT(RX_EOF))) {
+ netdev_err(bp->dev,
+ "not whole frame pointed by descriptor\n");
+ bp->stats.rx_dropped++;
+ break;
+ }
+ skb = bp->rx_skbuff[entry];
+ if (unlikely(!skb)) {
+ netdev_err(bp->dev,
+ "inconsistent Rx descriptor chain\n");
+ bp->stats.rx_dropped++;
+ break;
+ }
+ /* now everything is ready for receiving the packet */
+ bp->rx_skbuff[entry] = NULL;
+ len = MACB_BFEXT(RX_FRMLEN, ctrl);
+
+ netdev_vdbg(bp->dev, "gem_rx %u (len %u)\n", entry, len);
+
+ skb_put(skb, len);
+ addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, addr));
+ dma_unmap_single(&bp->pdev->dev, addr,
+ bp->rx_buffer_size, DMA_FROM_DEVICE);
+
+ skb->protocol = eth_type_trans(skb, bp->dev);
+ skb_checksum_none_assert(skb);
+
+ bp->stats.rx_packets++;
+ bp->stats.rx_bytes += skb->len;
+
+#if defined(DEBUG) && defined(VERBOSE_DEBUG)
+ netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
+ skb->len, skb->csum);
+ print_hex_dump(KERN_DEBUG, " mac: ", DUMP_PREFIX_ADDRESS, 16, 1,
+ skb->mac_header, 16, true);
+ print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_ADDRESS, 16, 1,
+ skb->data, 32, true);
+#endif
+
+ netif_receive_skb(skb);
+ }
+
+ gem_rx_refill(bp);
+
+ return count;
+}
+
+static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
+ unsigned int last_frag)
+{
+ unsigned int len;
+ unsigned int frag;
+ unsigned int offset;
+ struct sk_buff *skb;
+ struct macb_dma_desc *desc;
+
+ desc = macb_rx_desc(bp, last_frag);
+ len = MACB_BFEXT(RX_FRMLEN, desc->ctrl);
+
+ netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n",
+ macb_rx_ring_wrap(first_frag),
+ macb_rx_ring_wrap(last_frag), len);
+
+ /*
+ * The ethernet header starts NET_IP_ALIGN bytes into the
+ * first buffer. Since the header is 14 bytes, this makes the
+ * payload word-aligned.
+ *
+ * Instead of calling skb_reserve(NET_IP_ALIGN), we just copy
+ * the two padding bytes into the skb so that we avoid hitting
+ * the slowpath in memcpy(), and pull them off afterwards.
+ */
+ skb = netdev_alloc_skb(bp->dev, len + NET_IP_ALIGN);
+ if (!skb) {
+ bp->stats.rx_dropped++;
+ for (frag = first_frag; ; frag++) {
+ desc = macb_rx_desc(bp, frag);
+ desc->addr &= ~MACB_BIT(RX_USED);
+ if (frag == last_frag)
+ break;
+ }
+
+ /* Make descriptor updates visible to hardware */
+ wmb();
+
+ return 1;
+ }
+
+ offset = 0;
+ len += NET_IP_ALIGN;
+ skb_checksum_none_assert(skb);
+ skb_put(skb, len);
+
+ for (frag = first_frag; ; frag++) {
+ unsigned int frag_len = bp->rx_buffer_size;
+
+ if (offset + frag_len > len) {
+ BUG_ON(frag != last_frag);
+ frag_len = len - offset;
+ }
+ skb_copy_to_linear_data_offset(skb, offset,
+ macb_rx_buffer(bp, frag), frag_len);
+ offset += bp->rx_buffer_size;
+ desc = macb_rx_desc(bp, frag);
+ desc->addr &= ~MACB_BIT(RX_USED);
+
+ if (frag == last_frag)
+ break;
+ }
+
+ /* Make descriptor updates visible to hardware */
+ wmb();
+
+ __skb_pull(skb, NET_IP_ALIGN);
+ skb->protocol = eth_type_trans(skb, bp->dev);
+
+ bp->stats.rx_packets++;
+ bp->stats.rx_bytes += skb->len;
+ netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
+ skb->len, skb->csum);
+ netif_receive_skb(skb);
+
+ return 0;
+}
+
+static int macb_rx(struct macb *bp, int budget)
+{
+ int received = 0;
+ unsigned int tail;
+ int first_frag = -1;
+
+ for (tail = bp->rx_tail; budget > 0; tail++) {
+ struct macb_dma_desc *desc = macb_rx_desc(bp, tail);
+ u32 addr, ctrl;
+
+ /* Make hw descriptor updates visible to CPU */
+ rmb();
+
+ addr = desc->addr;
+ ctrl = desc->ctrl;
+
+ if (!(addr & MACB_BIT(RX_USED)))
+ break;
+
+ if (ctrl & MACB_BIT(RX_SOF)) {
+ if (first_frag != -1)
+ discard_partial_frame(bp, first_frag, tail);
+ first_frag = tail;
+ }
+
+ if (ctrl & MACB_BIT(RX_EOF)) {
+ int dropped;
+ BUG_ON(first_frag == -1);
+
+ dropped = macb_rx_frame(bp, first_frag, tail);
+ first_frag = -1;
+ if (!dropped) {
+ received++;
+ budget--;
+ }
+ }
+ }
+
+ if (first_frag != -1)
+ bp->rx_tail = first_frag;
+ else
+ bp->rx_tail = tail;
+
+ return received;
+}
+
+static int macb_poll(struct napi_struct *napi, int budget)
+{
+ struct macb *bp = container_of(napi, struct macb, napi);
+ int work_done;
+ u32 status;
+
+ status = macb_readl(bp, RSR);
+ macb_writel(bp, RSR, status);
+
+ work_done = 0;
+
+ netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n",
+ (unsigned long)status, budget);
+
+ work_done = bp->macbgem_ops.mog_rx(bp, budget);
+ if (work_done < budget) {
+ napi_complete(napi);
+
+ /* Packets received while interrupts were disabled */
+ status = macb_readl(bp, RSR);
+ if (status) {
+ if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
+ macb_writel(bp, ISR, MACB_BIT(RCOMP));
+ napi_reschedule(napi);
+ } else {
+ macb_writel(bp, IER, MACB_RX_INT_FLAGS);
+ }
+ }
+
+ /* TODO: Handle errors */
+
+ return work_done;
+}
+
+static irqreturn_t macb_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct macb *bp = netdev_priv(dev);
+ u32 status;
+
+ status = macb_readl(bp, ISR);
+
+ if (unlikely(!status))
+ return IRQ_NONE;
+
+ spin_lock(&bp->lock);
+
+ while (status) {
+ /* close possible race with dev_close */
+ if (unlikely(!netif_running(dev))) {
+ macb_writel(bp, IDR, -1);
+ break;
+ }
+
+ netdev_vdbg(bp->dev, "isr = 0x%08lx\n", (unsigned long)status);
+
+ if (status & MACB_RX_INT_FLAGS) {
+ /*
+ * There's no point taking any more interrupts
+ * until we have processed the buffers. The
+ * scheduling call may fail if the poll routine
+ * is already scheduled, so disable interrupts
+ * now.
+ */
+ macb_writel(bp, IDR, MACB_RX_INT_FLAGS);
+ if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
+ macb_writel(bp, ISR, MACB_BIT(RCOMP));
+
+ if (napi_schedule_prep(&bp->napi)) {
+ netdev_vdbg(bp->dev, "scheduling RX softirq\n");
+ __napi_schedule(&bp->napi);
+ }
+ }
+
+ if (unlikely(status & (MACB_TX_ERR_FLAGS))) {
+ macb_writel(bp, IDR, MACB_TX_INT_FLAGS);
+ schedule_work(&bp->tx_error_task);
+
+ if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
+ macb_writel(bp, ISR, MACB_TX_ERR_FLAGS);
+
+ break;
+ }
+
+ if (status & MACB_BIT(TCOMP))
+ macb_tx_interrupt(bp);
+
+ /*
+ * Link change detection isn't possible with RMII, so we'll
+ * add that if/when we get our hands on a full-blown MII PHY.
+ */
+
+ if (status & MACB_BIT(ISR_ROVR)) {
+ /* We missed at least one packet */
+ if (macb_is_gem(bp))
+ bp->hw_stats.gem.rx_overruns++;
+ else
+ bp->hw_stats.macb.rx_overruns++;
+
+ if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
+ macb_writel(bp, ISR, MACB_BIT(ISR_ROVR));
+ }
+
+ if (status & MACB_BIT(HRESP)) {
+ /*
+ * TODO: Reset the hardware, and maybe move the
+ * netdev_err to a lower-priority context as well
+ * (work queue?)
+ */
+ netdev_err(dev, "DMA bus error: HRESP not OK\n");
+
+ if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
+ macb_writel(bp, ISR, MACB_BIT(HRESP));
+ }
+
+ status = macb_readl(bp, ISR);
+ }
+
+ spin_unlock(&bp->lock);
+
+ return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/*
+ * Polling receive - used by netconsole and other diagnostic tools
+ * to allow network i/o with interrupts disabled.
+ */
+static void macb_poll_controller(struct net_device *dev)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ macb_interrupt(dev->irq, dev);
+ local_irq_restore(flags);
+}
+#endif
+
+static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct macb *bp = netdev_priv(dev);
+ dma_addr_t mapping;
+ unsigned int len, entry;
+ struct macb_dma_desc *desc;
+ struct macb_tx_skb *tx_skb;
+ u32 ctrl;
+ unsigned long flags;
+
+#if defined(DEBUG) && defined(VERBOSE_DEBUG)
+ netdev_vdbg(bp->dev,
+ "start_xmit: len %u head %p data %p tail %p end %p\n",
+ skb->len, skb->head, skb->data,
+ skb_tail_pointer(skb), skb_end_pointer(skb));
+ print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_OFFSET, 16, 1,
+ skb->data, 16, true);
+#endif
+
+ len = skb->len;
+ spin_lock_irqsave(&bp->lock, flags);
+
+ /* This is a hard error, log it. */
+ if (CIRC_SPACE(bp->tx_head, bp->tx_tail, TX_RING_SIZE) < 1) {
+ netif_stop_queue(dev);
+ spin_unlock_irqrestore(&bp->lock, flags);
+ netdev_err(bp->dev, "BUG! Tx Ring full when queue awake!\n");
+ netdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n",
+ bp->tx_head, bp->tx_tail);
+ return NETDEV_TX_BUSY;
+ }
+
+ entry = macb_tx_ring_wrap(bp->tx_head);
+ netdev_vdbg(bp->dev, "Allocated ring entry %u\n", entry);
+ mapping = dma_map_single(&bp->pdev->dev, skb->data,
+ len, DMA_TO_DEVICE);
+ if (dma_mapping_error(&bp->pdev->dev, mapping)) {
+ dev_kfree_skb_any(skb);
+ goto unlock;
+ }
+
+ bp->tx_head++;
+ tx_skb = &bp->tx_skb[entry];
+ tx_skb->skb = skb;
+ tx_skb->mapping = mapping;
+ netdev_vdbg(bp->dev, "Mapped skb data %p to DMA addr %08lx\n",
+ skb->data, (unsigned long)mapping);
+
+ ctrl = MACB_BF(TX_FRMLEN, len);
+ ctrl |= MACB_BIT(TX_LAST);
+ if (entry == (TX_RING_SIZE - 1))
+ ctrl |= MACB_BIT(TX_WRAP);
+
+ desc = &bp->tx_ring[entry];
+ desc->addr = mapping;
+ desc->ctrl = ctrl;
+
+ /* Make newly initialized descriptor visible to hardware */
+ wmb();
+
+ skb_tx_timestamp(skb);
+
+ macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
+
+ if (CIRC_SPACE(bp->tx_head, bp->tx_tail, TX_RING_SIZE) < 1)
+ netif_stop_queue(dev);
+
+unlock:
+ spin_unlock_irqrestore(&bp->lock, flags);
+
+ return NETDEV_TX_OK;
+}
+
+static void macb_init_rx_buffer_size(struct macb *bp, size_t size)
+{
+ if (!macb_is_gem(bp)) {
+ bp->rx_buffer_size = MACB_RX_BUFFER_SIZE;
+ } else {
+ bp->rx_buffer_size = size;
+
+ if (bp->rx_buffer_size % RX_BUFFER_MULTIPLE) {
+ netdev_dbg(bp->dev,
+ "RX buffer must be multiple of %d bytes, expanding\n",
+ RX_BUFFER_MULTIPLE);
+ bp->rx_buffer_size =
+ roundup(bp->rx_buffer_size, RX_BUFFER_MULTIPLE);
+ }
+ }
+
+ netdev_dbg(bp->dev, "mtu [%u] rx_buffer_size [%Zu]\n",
+ bp->dev->mtu, bp->rx_buffer_size);
+}
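
For the default 1500-byte MTU, the size passed in from macb_open() below works out as follows (our arithmetic):

bufsz = 1500 (MTU) + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) + 2 (NET_IP_ALIGN) = 1520
roundup(1520, RX_BUFFER_MULTIPLE = 64) = 1536  ->  GEM rx_buffer_size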
+
+static void gem_free_rx_buffers(struct macb *bp)
+{
+ struct sk_buff *skb;
+ struct macb_dma_desc *desc;
+ dma_addr_t addr;
+ int i;
+
+ if (!bp->rx_skbuff)
+ return;
+
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ skb = bp->rx_skbuff[i];
+
+ if (skb == NULL)
+ continue;
+
+ desc = &bp->rx_ring[i];
+ addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
+ dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size,
+ DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
+ skb = NULL;
+ }
+
+ kfree(bp->rx_skbuff);
+ bp->rx_skbuff = NULL;
+}
+
+static void macb_free_rx_buffers(struct macb *bp)
+{
+ if (bp->rx_buffers) {
+ dma_free_coherent(&bp->pdev->dev,
+ RX_RING_SIZE * bp->rx_buffer_size,
+ bp->rx_buffers, bp->rx_buffers_dma);
+ bp->rx_buffers = NULL;
+ }
+}
+
+static void macb_free_consistent(struct macb *bp)
+{
+ if (bp->tx_skb) {
+ kfree(bp->tx_skb);
+ bp->tx_skb = NULL;
+ }
+ bp->macbgem_ops.mog_free_rx_buffers(bp);
+ if (bp->rx_ring) {
+ dma_free_coherent(&bp->pdev->dev, RX_RING_BYTES,
+ bp->rx_ring, bp->rx_ring_dma);
+ bp->rx_ring = NULL;
+ }
+ if (bp->tx_ring) {
+ dma_free_coherent(&bp->pdev->dev, TX_RING_BYTES,
+ bp->tx_ring, bp->tx_ring_dma);
+ bp->tx_ring = NULL;
+ }
+}
+
+static int gem_alloc_rx_buffers(struct macb *bp)
+{
+ int size;
+
+ size = RX_RING_SIZE * sizeof(struct sk_buff *);
+ bp->rx_skbuff = kzalloc(size, GFP_KERNEL);
+ if (!bp->rx_skbuff)
+ return -ENOMEM;
+ else
+ netdev_dbg(bp->dev,
+ "Allocated %d RX struct sk_buff entries at %p\n",
+ RX_RING_SIZE, bp->rx_skbuff);
+ return 0;
+}
+
+static int macb_alloc_rx_buffers(struct macb *bp)
+{
+ int size;
+
+ size = RX_RING_SIZE * bp->rx_buffer_size;
+ bp->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size,
+ &bp->rx_buffers_dma, GFP_KERNEL);
+ if (!bp->rx_buffers)
+ return -ENOMEM;
+ else
+ netdev_dbg(bp->dev,
+ "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n",
+ size, (unsigned long)bp->rx_buffers_dma, bp->rx_buffers);
+ return 0;
+}
+
+static int macb_alloc_consistent(struct macb *bp)
+{
+ int size;
+
+ size = TX_RING_SIZE * sizeof(struct macb_tx_skb);
+ bp->tx_skb = kmalloc(size, GFP_KERNEL);
+ if (!bp->tx_skb)
+ goto out_err;
+
+ size = RX_RING_BYTES;
+ bp->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
+ &bp->rx_ring_dma, GFP_KERNEL);
+ if (!bp->rx_ring)
+ goto out_err;
+ netdev_dbg(bp->dev,
+ "Allocated RX ring of %d bytes at %08lx (mapped %p)\n",
+ size, (unsigned long)bp->rx_ring_dma, bp->rx_ring);
+
+ size = TX_RING_BYTES;
+ bp->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
+ &bp->tx_ring_dma, GFP_KERNEL);
+ if (!bp->tx_ring)
+ goto out_err;
+ netdev_dbg(bp->dev,
+ "Allocated TX ring of %d bytes at %08lx (mapped %p)\n",
+ size, (unsigned long)bp->tx_ring_dma, bp->tx_ring);
+
+ if (bp->macbgem_ops.mog_alloc_rx_buffers(bp))
+ goto out_err;
+
+ return 0;
+
+out_err:
+ macb_free_consistent(bp);
+ return -ENOMEM;
+}
+
+static void gem_init_rings(struct macb *bp)
+{
+ int i;
+
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ bp->tx_ring[i].addr = 0;
+ bp->tx_ring[i].ctrl = MACB_BIT(TX_USED);
+ }
+ bp->tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP);
+
+ bp->rx_tail = bp->rx_prepared_head = bp->tx_head = bp->tx_tail = 0;
+
+ gem_rx_refill(bp);
+}
+
+static void macb_init_rings(struct macb *bp)
+{
+ int i;
+ dma_addr_t addr;
+
+ addr = bp->rx_buffers_dma;
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ bp->rx_ring[i].addr = addr;
+ bp->rx_ring[i].ctrl = 0;
+ addr += bp->rx_buffer_size;
+ }
+ bp->rx_ring[RX_RING_SIZE - 1].addr |= MACB_BIT(RX_WRAP);
+
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ bp->tx_ring[i].addr = 0;
+ bp->tx_ring[i].ctrl = MACB_BIT(TX_USED);
+ }
+ bp->tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP);
+
+ bp->rx_tail = bp->tx_head = bp->tx_tail = 0;
+}
+
+static void macb_reset_hw(struct macb *bp)
+{
+ /*
+ * Disable RX and TX (XXX: Should we halt the transmission
+ * more gracefully?)
+ */
+ macb_writel(bp, NCR, 0);
+
+ /* Clear the stats registers (XXX: Update stats first?) */
+ macb_writel(bp, NCR, MACB_BIT(CLRSTAT));
+
+ /* Clear all status flags */
+ macb_writel(bp, TSR, -1);
+ macb_writel(bp, RSR, -1);
+
+ /* Disable all interrupts */
+ macb_writel(bp, IDR, -1);
+ macb_readl(bp, ISR);
+}
+
+static u32 gem_mdc_clk_div(struct macb *bp)
+{
+ u32 config;
+ unsigned long pclk_hz = clk_get_rate(bp->pclk);
+
+ if (pclk_hz <= 20000000)
+ config = GEM_BF(CLK, GEM_CLK_DIV8);
+ else if (pclk_hz <= 40000000)
+ config = GEM_BF(CLK, GEM_CLK_DIV16);
+ else if (pclk_hz <= 80000000)
+ config = GEM_BF(CLK, GEM_CLK_DIV32);
+ else if (pclk_hz <= 120000000)
+ config = GEM_BF(CLK, GEM_CLK_DIV48);
+ else if (pclk_hz <= 160000000)
+ config = GEM_BF(CLK, GEM_CLK_DIV64);
+ else
+ config = GEM_BF(CLK, GEM_CLK_DIV96);
+
+ return config;
+}
+
+static u32 macb_mdc_clk_div(struct macb *bp)
+{
+ u32 config;
+ unsigned long pclk_hz;
+
+ if (macb_is_gem(bp))
+ return gem_mdc_clk_div(bp);
+
+ pclk_hz = clk_get_rate(bp->pclk);
+ if (pclk_hz <= 20000000)
+ config = MACB_BF(CLK, MACB_CLK_DIV8);
+ else if (pclk_hz <= 40000000)
+ config = MACB_BF(CLK, MACB_CLK_DIV16);
+ else if (pclk_hz <= 80000000)
+ config = MACB_BF(CLK, MACB_CLK_DIV32);
+ else
+ config = MACB_BF(CLK, MACB_CLK_DIV64);
+
+ return config;
+}
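
In both divider tables the thresholds are chosen so that MDC = pclk / divider stays at or below the 2.5 MHz ceiling that IEEE 802.3 sets for the management clock; each boundary lands exactly on it:

20 MHz / 8  = 2.5 MHz      80 MHz / 32  = 2.5 MHz
40 MHz / 16 = 2.5 MHz     120 MHz / 48  = 2.5 MHz (GEM only)
                          160 MHz / 64  = 2.5 MHz (GEM only)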
+
+/*
+ * Get the DMA bus width field of the network configuration register that we
+ * should program. We find the width from decoding the design configuration
+ * register to find the maximum supported data bus width.
+ */
+static u32 macb_dbw(struct macb *bp)
+{
+ if (!macb_is_gem(bp))
+ return 0;
+
+ switch (GEM_BFEXT(DBWDEF, gem_readl(bp, DCFG1))) {
+ case 4:
+ return GEM_BF(DBW, GEM_DBW128);
+ case 2:
+ return GEM_BF(DBW, GEM_DBW64);
+ case 1:
+ default:
+ return GEM_BF(DBW, GEM_DBW32);
+ }
+}
+
+/*
+ * Configure the receive DMA engine
+ * - use the correct receive buffer size
+ * - set the possibility to use INCR16 bursts
+ * (if not supported by the FIFO, it will fall back to the default)
+ * - set both rx/tx packet buffers to full memory size
+ * These are configurable parameters for GEM.
+ */
+static void macb_configure_dma(struct macb *bp)
+{
+ u32 dmacfg;
+
+ if (macb_is_gem(bp)) {
+ dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L);
+ dmacfg |= GEM_BF(RXBS, bp->rx_buffer_size / RX_BUFFER_MULTIPLE);
+ dmacfg |= GEM_BF(FBLDO, 16);
+ dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L);
+ dmacfg &= ~GEM_BIT(ENDIA);
+ gem_writel(bp, DMACFG, dmacfg);
+ }
+}
+
+/*
+ * Configure peripheral capabilities according to the integration options used
+ */
+static void macb_configure_caps(struct macb *bp)
+{
+ if (macb_is_gem(bp)) {
+ if (GEM_BFEXT(IRQCOR, gem_readl(bp, DCFG1)) == 0)
+ bp->caps |= MACB_CAPS_ISR_CLEAR_ON_WRITE;
+ }
+}
+
+static void macb_init_hw(struct macb *bp)
+{
+ u32 config;
+
+ macb_reset_hw(bp);
+ macb_set_hwaddr(bp);
+
+ config = macb_mdc_clk_div(bp);
+ config |= MACB_BF(RBOF, NET_IP_ALIGN); /* Make eth data aligned */
+ config |= MACB_BIT(PAE); /* PAuse Enable */
+ config |= MACB_BIT(DRFCS); /* Discard Rx FCS */
+ config |= MACB_BIT(BIG); /* Receive oversized frames */
+ if (bp->dev->flags & IFF_PROMISC)
+ config |= MACB_BIT(CAF); /* Copy All Frames */
+ if (!(bp->dev->flags & IFF_BROADCAST))
+ config |= MACB_BIT(NBC); /* No BroadCast */
+ config |= macb_dbw(bp);
+ macb_writel(bp, NCFGR, config);
+ bp->speed = SPEED_10;
+ bp->duplex = DUPLEX_HALF;
+
+ macb_configure_dma(bp);
+ macb_configure_caps(bp);
+
+ /* Initialize TX and RX buffers */
+ macb_writel(bp, RBQP, bp->rx_ring_dma);
+ macb_writel(bp, TBQP, bp->tx_ring_dma);
+
+ /* Enable TX and RX */
+ macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE) | MACB_BIT(MPE));
+
+ /* Enable interrupts */
+ macb_writel(bp, IER, (MACB_RX_INT_FLAGS
+ | MACB_TX_INT_FLAGS
+ | MACB_BIT(HRESP)));
+
+}
+
+/*
+ * The hash address register is 64 bits long and takes up two
+ * locations in the memory map. The least significant bits are stored
+ * in EMAC_HSL and the most significant bits in EMAC_HSH.
+ *
+ * The unicast hash enable and the multicast hash enable bits in the
+ * network configuration register enable the reception of hash matched
+ * frames. The destination address is reduced to a 6 bit index into
+ * the 64 bit hash register using the following hash function. The
+ * hash function is an exclusive or of every sixth bit of the
+ * destination address.
+ *
+ * hi[5] = da[5] ^ da[11] ^ da[17] ^ da[23] ^ da[29] ^ da[35] ^ da[41] ^ da[47]
+ * hi[4] = da[4] ^ da[10] ^ da[16] ^ da[22] ^ da[28] ^ da[34] ^ da[40] ^ da[46]
+ * hi[3] = da[3] ^ da[09] ^ da[15] ^ da[21] ^ da[27] ^ da[33] ^ da[39] ^ da[45]
+ * hi[2] = da[2] ^ da[08] ^ da[14] ^ da[20] ^ da[26] ^ da[32] ^ da[38] ^ da[44]
+ * hi[1] = da[1] ^ da[07] ^ da[13] ^ da[19] ^ da[25] ^ da[31] ^ da[37] ^ da[43]
+ * hi[0] = da[0] ^ da[06] ^ da[12] ^ da[18] ^ da[24] ^ da[30] ^ da[36] ^ da[42]
+ *
+ * da[0] represents the least significant bit of the first byte
+ * received, that is, the multicast/unicast indicator, and da[47]
+ * represents the most significant bit of the last byte received. If
+ * the hash index, hi[n], points to a bit that is set in the hash
+ * register then the frame will be matched according to whether the
+ * frame is multicast or unicast. A multicast match will be signalled
+ * if the multicast hash enable bit is set, da[0] is 1 and the hash
+ * index points to a bit set in the hash register. A unicast match
+ * will be signalled if the unicast hash enable bit is set, da[0] is 0
+ * and the hash index points to a bit set in the hash register. To
+ * receive all multicast frames, the hash register should be set with
+ * all ones and the multicast hash enable bit should be set in the
+ * network configuration register.
+ */
+
+static inline int hash_bit_value(int bitnr, __u8 *addr)
+{
+ if (addr[bitnr / 8] & (1 << (bitnr % 8)))
+ return 1;
+ return 0;
+}
+
+/*
+ * Return the hash index value for the specified address.
+ */
+static int hash_get_index(__u8 *addr)
+{
+ int i, j, bitval;
+ int hash_index = 0;
+
+ for (j = 0; j < 6; j++) {
+ for (i = 0, bitval = 0; i < 8; i++)
+ bitval ^= hash_bit_value(i*6 + j, addr);
+
+ hash_index |= (bitval << j);
+ }
+
+ return hash_index;
+}
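
A worked example (our computation, not from the patch): for the IPv4 multicast MAC 01:00:5e:00:00:01, the set bits of the address bit stream sit at positions {0, 17, 18, 19, 20, 22, 40}; XOR-ing each residue class mod 6 gives hi[5..0] = 100110b, i.e. hash index 38, which macb_sethashtable() below turns into bit 6 of the upper hash register. A standalone harness reusing the same logic:

#include <stdio.h>

static int hash_bit_value(int bitnr, const unsigned char *addr)
{
        return (addr[bitnr / 8] >> (bitnr % 8)) & 1;
}

int main(void)
{
        const unsigned char addr[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
        int i, j, bitval, hash_index = 0;

        for (j = 0; j < 6; j++) {
                for (i = 0, bitval = 0; i < 8; i++)
                        bitval ^= hash_bit_value(i * 6 + j, addr);
                hash_index |= bitval << j;
        }

        /* prints 38: mc_filter[38 >> 5] gets bit 38 & 31 = 6, i.e. HRT bit 6 */
        printf("hash index = %d\n", hash_index);
        return 0;
}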
+
+/*
+ * Add multicast addresses to the internal multicast-hash table.
+ */
+static void macb_sethashtable(struct net_device *dev)
+{
+ struct netdev_hw_addr *ha;
+ unsigned long mc_filter[2];
+ unsigned int bitnr;
+ struct macb *bp = netdev_priv(dev);
+
+ mc_filter[0] = mc_filter[1] = 0;
+
+ netdev_for_each_mc_addr(ha, dev) {
+ bitnr = hash_get_index(ha->addr);
+ mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
+ }
+
+ macb_or_gem_writel(bp, HRB, mc_filter[0]);
+ macb_or_gem_writel(bp, HRT, mc_filter[1]);
+}
+
+/*
+ * Enable/Disable promiscuous and multicast modes.
+ */
+void macb_set_rx_mode(struct net_device *dev)
+{
+ unsigned long cfg;
+ struct macb *bp = netdev_priv(dev);
+
+ cfg = macb_readl(bp, NCFGR);
+
+ if (dev->flags & IFF_PROMISC)
+ /* Enable promiscuous mode */
+ cfg |= MACB_BIT(CAF);
+ else if (dev->flags & (~IFF_PROMISC))
+ /* Disable promiscuous mode */
+ cfg &= ~MACB_BIT(CAF);
+
+ if (dev->flags & IFF_ALLMULTI) {
+ /* Enable all multicast mode */
+ macb_or_gem_writel(bp, HRB, -1);
+ macb_or_gem_writel(bp, HRT, -1);
+ cfg |= MACB_BIT(NCFGR_MTI);
+ } else if (!netdev_mc_empty(dev)) {
+ /* Enable specific multicasts */
+ macb_sethashtable(dev);
+ cfg |= MACB_BIT(NCFGR_MTI);
+ } else if (dev->flags & (~IFF_ALLMULTI)) {
+ /* Disable all multicast mode */
+ macb_or_gem_writel(bp, HRB, 0);
+ macb_or_gem_writel(bp, HRT, 0);
+ cfg &= ~MACB_BIT(NCFGR_MTI);
+ }
+
+ macb_writel(bp, NCFGR, cfg);
+}
+EXPORT_SYMBOL_GPL(macb_set_rx_mode);
+
+static int macb_open(struct net_device *dev)
+{
+ struct macb *bp = netdev_priv(dev);
+ size_t bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN;
+ int err;
+
+ netdev_dbg(bp->dev, "open\n");
+
+ /* carrier starts down */
+ netif_carrier_off(dev);
+
+ /* if the PHY is not yet registered, retry later */
+ if (!bp->phy_dev)
+ return -EAGAIN;
+
+ /* RX buffers initialization */
+ macb_init_rx_buffer_size(bp, bufsz);
+
+ err = macb_alloc_consistent(bp);
+ if (err) {
+ netdev_err(dev, "Unable to allocate DMA memory (error %d)\n",
+ err);
+ return err;
+ }
+
+ napi_enable(&bp->napi);
+
+ bp->macbgem_ops.mog_init_rings(bp);
+ macb_init_hw(bp);
+
+ /* schedule a link state check */
+ phy_start(bp->phy_dev);
+
+ netif_start_queue(dev);
+
+ return 0;
+}
+
+static int macb_close(struct net_device *dev)
+{
+ struct macb *bp = netdev_priv(dev);
+ unsigned long flags;
+
+ netif_stop_queue(dev);
+ napi_disable(&bp->napi);
+
+ if (bp->phy_dev)
+ phy_stop(bp->phy_dev);
+
+ spin_lock_irqsave(&bp->lock, flags);
+ macb_reset_hw(bp);
+ netif_carrier_off(dev);
+ spin_unlock_irqrestore(&bp->lock, flags);
+
+ macb_free_consistent(bp);
+
+ return 0;
+}
+
+static void gem_update_stats(struct macb *bp)
+{
+ u32 __iomem *reg = bp->regs + GEM_OTX;
+ u32 *p = &bp->hw_stats.gem.tx_octets_31_0;
+ u32 *end = &bp->hw_stats.gem.rx_udp_checksum_errors + 1;
+
+ for (; p < end; p++, reg++)
+ *p += __raw_readl(reg);
+}
+
+static struct net_device_stats *gem_get_stats(struct macb *bp)
+{
+ struct gem_stats *hwstat = &bp->hw_stats.gem;
+ struct net_device_stats *nstat = &bp->stats;
+
+ gem_update_stats(bp);
+
+ nstat->rx_errors = (hwstat->rx_frame_check_sequence_errors +
+ hwstat->rx_alignment_errors +
+ hwstat->rx_resource_errors +
+ hwstat->rx_overruns +
+ hwstat->rx_oversize_frames +
+ hwstat->rx_jabbers +
+ hwstat->rx_undersized_frames +
+ hwstat->rx_length_field_frame_errors);
+ nstat->tx_errors = (hwstat->tx_late_collisions +
+ hwstat->tx_excessive_collisions +
+ hwstat->tx_underrun +
+ hwstat->tx_carrier_sense_errors);
+ nstat->multicast = hwstat->rx_multicast_frames;
+ nstat->collisions = (hwstat->tx_single_collision_frames +
+ hwstat->tx_multiple_collision_frames +
+ hwstat->tx_excessive_collisions);
+ nstat->rx_length_errors = (hwstat->rx_oversize_frames +
+ hwstat->rx_jabbers +
+ hwstat->rx_undersized_frames +
+ hwstat->rx_length_field_frame_errors);
+ nstat->rx_over_errors = hwstat->rx_resource_errors;
+ nstat->rx_crc_errors = hwstat->rx_frame_check_sequence_errors;
+ nstat->rx_frame_errors = hwstat->rx_alignment_errors;
+ nstat->rx_fifo_errors = hwstat->rx_overruns;
+ nstat->tx_aborted_errors = hwstat->tx_excessive_collisions;
+ nstat->tx_carrier_errors = hwstat->tx_carrier_sense_errors;
+ nstat->tx_fifo_errors = hwstat->tx_underrun;
+
+ return nstat;
+}
+
+struct net_device_stats *macb_get_stats(struct net_device *dev)
+{
+ struct macb *bp = netdev_priv(dev);
+ struct net_device_stats *nstat = &bp->stats;
+ struct macb_stats *hwstat = &bp->hw_stats.macb;
+
+ if (macb_is_gem(bp))
+ return gem_get_stats(bp);
+
+ /* read stats from hardware */
+ macb_update_stats(bp);
+
+ /* Convert HW stats into netdevice stats */
+ nstat->rx_errors = (hwstat->rx_fcs_errors +
+ hwstat->rx_align_errors +
+ hwstat->rx_resource_errors +
+ hwstat->rx_overruns +
+ hwstat->rx_oversize_pkts +
+ hwstat->rx_jabbers +
+ hwstat->rx_undersize_pkts +
+ hwstat->sqe_test_errors +
+ hwstat->rx_length_mismatch);
+ nstat->tx_errors = (hwstat->tx_late_cols +
+ hwstat->tx_excessive_cols +
+ hwstat->tx_underruns +
+ hwstat->tx_carrier_errors);
+ nstat->collisions = (hwstat->tx_single_cols +
+ hwstat->tx_multiple_cols +
+ hwstat->tx_excessive_cols);
+ nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
+ hwstat->rx_jabbers +
+ hwstat->rx_undersize_pkts +
+ hwstat->rx_length_mismatch);
+ nstat->rx_over_errors = hwstat->rx_resource_errors +
+ hwstat->rx_overruns;
+ nstat->rx_crc_errors = hwstat->rx_fcs_errors;
+ nstat->rx_frame_errors = hwstat->rx_align_errors;
+ nstat->rx_fifo_errors = hwstat->rx_overruns;
+ /* XXX: What does "missed" mean? */
+ nstat->tx_aborted_errors = hwstat->tx_excessive_cols;
+ nstat->tx_carrier_errors = hwstat->tx_carrier_errors;
+ nstat->tx_fifo_errors = hwstat->tx_underruns;
+ /* Don't know about heartbeat or window errors... */
+
+ return nstat;
+}
+EXPORT_SYMBOL_GPL(macb_get_stats);
+
+static int macb_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct macb *bp = netdev_priv(dev);
+ struct phy_device *phydev = bp->phy_dev;
+
+ if (!phydev)
+ return -ENODEV;
+
+ return phy_ethtool_gset(phydev, cmd);
+}
+
+static int macb_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct macb *bp = netdev_priv(dev);
+ struct phy_device *phydev = bp->phy_dev;
+
+ if (!phydev)
+ return -ENODEV;
+
+ return phy_ethtool_sset(phydev, cmd);
+}
+
+static int macb_get_regs_len(struct net_device *netdev)
+{
+ return MACB_GREGS_NBR * sizeof(u32);
+}
+
+static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs,
+ void *p)
+{
+ struct macb *bp = netdev_priv(dev);
+ unsigned int tail, head;
+ u32 *regs_buff = p;
+
+ regs->version = (macb_readl(bp, MID) & ((1 << MACB_REV_SIZE) - 1))
+ | MACB_GREGS_VERSION;
+
+ tail = macb_tx_ring_wrap(bp->tx_tail);
+ head = macb_tx_ring_wrap(bp->tx_head);
+
+ regs_buff[0] = macb_readl(bp, NCR);
+ regs_buff[1] = macb_or_gem_readl(bp, NCFGR);
+ regs_buff[2] = macb_readl(bp, NSR);
+ regs_buff[3] = macb_readl(bp, TSR);
+ regs_buff[4] = macb_readl(bp, RBQP);
+ regs_buff[5] = macb_readl(bp, TBQP);
+ regs_buff[6] = macb_readl(bp, RSR);
+ regs_buff[7] = macb_readl(bp, IMR);
+
+ regs_buff[8] = tail;
+ regs_buff[9] = head;
+ regs_buff[10] = macb_tx_dma(bp, tail);
+ regs_buff[11] = macb_tx_dma(bp, head);
+
+ if (macb_is_gem(bp)) {
+ regs_buff[12] = gem_readl(bp, USRIO);
+ regs_buff[13] = gem_readl(bp, DMACFG);
+ }
+}
+
+const struct ethtool_ops macb_ethtool_ops = {
+ .get_settings = macb_get_settings,
+ .set_settings = macb_set_settings,
+ .get_regs_len = macb_get_regs_len,
+ .get_regs = macb_get_regs,
+ .get_link = ethtool_op_get_link,
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+EXPORT_SYMBOL_GPL(macb_ethtool_ops);
+
+int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct macb *bp = netdev_priv(dev);
+ struct phy_device *phydev = bp->phy_dev;
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ if (!phydev)
+ return -ENODEV;
+
+ return phy_mii_ioctl(phydev, rq, cmd);
+}
+EXPORT_SYMBOL_GPL(macb_ioctl);
+
+static const struct net_device_ops macb_netdev_ops = {
+ .ndo_open = macb_open,
+ .ndo_stop = macb_close,
+ .ndo_start_xmit = macb_start_xmit,
+ .ndo_set_rx_mode = macb_set_rx_mode,
+ .ndo_get_stats = macb_get_stats,
+ .ndo_do_ioctl = macb_ioctl,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = macb_poll_controller,
+#endif
+};
+
+#if defined(CONFIG_OF)
+static const struct of_device_id macb_dt_ids[] = {
+ { .compatible = "cdns,at32ap7000-macb" },
+ { .compatible = "cdns,at91sam9260-macb" },
+ { .compatible = "cdns,macb" },
+ { .compatible = "cdns,pc302-gem" },
+ { .compatible = "cdns,gem" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, macb_dt_ids);
+#endif
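+
+/*
+ * An illustrative device tree node matched by the table above (the
+ * label, addresses and MAC address are an example, not taken from a
+ * specific board):
+ *
+ *	macb0: ethernet@fffc4000 {
+ *		compatible = "cdns,at32ap7000-macb";
+ *		reg = <0xfffc4000 0x4000>;
+ *		interrupts = <21>;
+ *		phy-mode = "rmii";
+ *		local-mac-address = [3a 0e 03 04 05 06];
+ *	};
+ */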
+
+static int __init macb_probe(struct platform_device *pdev)
+{
+ struct macb_platform_data *pdata;
+ struct resource *regs;
+ struct net_device *dev;
+ struct macb *bp;
+ struct phy_device *phydev;
+ u32 config;
+ int err = -ENXIO;
+ struct pinctrl *pinctrl;
+ const char *mac;
+
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!regs) {
+ dev_err(&pdev->dev, "no mmio resource defined\n");
+ goto err_out;
+ }
+
+ pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
+ if (IS_ERR(pinctrl)) {
+ err = PTR_ERR(pinctrl);
+ if (err == -EPROBE_DEFER)
+ goto err_out;
+
+ dev_warn(&pdev->dev, "No pinctrl provided\n");
+ }
+
+ err = -ENOMEM;
+ dev = alloc_etherdev(sizeof(*bp));
+ if (!dev)
+ goto err_out;
+
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ /* TODO: Actually, we have some interesting features... */
+ dev->features |= 0;
+
+ bp = netdev_priv(dev);
+ bp->pdev = pdev;
+ bp->dev = dev;
+
+ spin_lock_init(&bp->lock);
+ INIT_WORK(&bp->tx_error_task, macb_tx_error_task);
+
+ bp->pclk = devm_clk_get(&pdev->dev, "pclk");
+ if (IS_ERR(bp->pclk)) {
+ err = PTR_ERR(bp->pclk);
+		dev_err(&pdev->dev, "failed to get pclk (%d)\n", err);
+ goto err_out_free_dev;
+ }
+
+ bp->hclk = devm_clk_get(&pdev->dev, "hclk");
+ if (IS_ERR(bp->hclk)) {
+ err = PTR_ERR(bp->hclk);
+		dev_err(&pdev->dev, "failed to get hclk (%d)\n", err);
+ goto err_out_free_dev;
+ }
+
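+	/* the tx clock is optional; every use below is guarded by IS_ERR() */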
+ bp->tx_clk = devm_clk_get(&pdev->dev, "tx_clk");
+
+ err = clk_prepare_enable(bp->pclk);
+ if (err) {
+		dev_err(&pdev->dev, "failed to enable pclk (%d)\n", err);
+ goto err_out_free_dev;
+ }
+
+ err = clk_prepare_enable(bp->hclk);
+ if (err) {
+		dev_err(&pdev->dev, "failed to enable hclk (%d)\n", err);
+ goto err_out_disable_pclk;
+ }
+
+ if (!IS_ERR(bp->tx_clk)) {
+ err = clk_prepare_enable(bp->tx_clk);
+ if (err) {
+			dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n",
+				err);
+ goto err_out_disable_hclk;
+ }
+ }
+
+ bp->regs = devm_ioremap(&pdev->dev, regs->start, resource_size(regs));
+ if (!bp->regs) {
+ dev_err(&pdev->dev, "failed to map registers, aborting.\n");
+ err = -ENOMEM;
+ goto err_out_disable_clocks;
+ }
+
+	dev->irq = platform_get_irq(pdev, 0);
+	if (dev->irq < 0) {
+		err = dev->irq;
+		goto err_out_disable_clocks;
+	}
+
+ err = devm_request_irq(&pdev->dev, dev->irq, macb_interrupt, 0,
+ dev->name, dev);
+ if (err) {
+ dev_err(&pdev->dev, "Unable to request IRQ %d (error %d)\n",
+ dev->irq, err);
+ goto err_out_disable_clocks;
+ }
+
+ dev->netdev_ops = &macb_netdev_ops;
+ netif_napi_add(dev, &bp->napi, macb_poll, 64);
+ dev->ethtool_ops = &macb_ethtool_ops;
+
+ dev->base_addr = regs->start;
+
+	/* set up the appropriate routines according to the adapter type */
+ if (macb_is_gem(bp)) {
+ bp->macbgem_ops.mog_alloc_rx_buffers = gem_alloc_rx_buffers;
+ bp->macbgem_ops.mog_free_rx_buffers = gem_free_rx_buffers;
+ bp->macbgem_ops.mog_init_rings = gem_init_rings;
+ bp->macbgem_ops.mog_rx = gem_rx;
+ } else {
+ bp->macbgem_ops.mog_alloc_rx_buffers = macb_alloc_rx_buffers;
+ bp->macbgem_ops.mog_free_rx_buffers = macb_free_rx_buffers;
+ bp->macbgem_ops.mog_init_rings = macb_init_rings;
+ bp->macbgem_ops.mog_rx = macb_rx;
+ }
+
+ /* Set MII management clock divider */
+ config = macb_mdc_clk_div(bp);
+ config |= macb_dbw(bp);
+ macb_writel(bp, NCFGR, config);
+
+ mac = of_get_mac_address(pdev->dev.of_node);
+ if (mac)
+ memcpy(bp->dev->dev_addr, mac, ETH_ALEN);
+ else
+ macb_get_hwaddr(bp);
+
+ err = of_get_phy_mode(pdev->dev.of_node);
+ if (err < 0) {
+ pdata = dev_get_platdata(&pdev->dev);
+ if (pdata && pdata->is_rmii)
+ bp->phy_interface = PHY_INTERFACE_MODE_RMII;
+ else
+ bp->phy_interface = PHY_INTERFACE_MODE_MII;
+ } else {
+ bp->phy_interface = err;
+ }
+
+ if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII)
+ macb_or_gem_writel(bp, USRIO, GEM_BIT(RGMII));
+ else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII)
+#if defined(CONFIG_ARCH_AT91)
+ macb_or_gem_writel(bp, USRIO, (MACB_BIT(RMII) |
+ MACB_BIT(CLKEN)));
+#else
+ macb_or_gem_writel(bp, USRIO, 0);
+#endif
+ else
+#if defined(CONFIG_ARCH_AT91)
+ macb_or_gem_writel(bp, USRIO, MACB_BIT(CLKEN));
+#else
+ macb_or_gem_writel(bp, USRIO, MACB_BIT(MII));
+#endif
+
+ err = register_netdev(dev);
+ if (err) {
+ dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
+ goto err_out_disable_clocks;
+ }
+
+ err = macb_mii_init(bp);
+ if (err)
+ goto err_out_unregister_netdev;
+
+ platform_set_drvdata(pdev, dev);
+
+ netif_carrier_off(dev);
+
+ netdev_info(dev, "Cadence %s at 0x%08lx irq %d (%pM)\n",
+ macb_is_gem(bp) ? "GEM" : "MACB", dev->base_addr,
+ dev->irq, dev->dev_addr);
+
+ phydev = bp->phy_dev;
+ netdev_info(dev, "attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
+ phydev->drv->name, dev_name(&phydev->dev), phydev->irq);
+
+ return 0;
+
+err_out_unregister_netdev:
+ unregister_netdev(dev);
+err_out_disable_clocks:
+ if (!IS_ERR(bp->tx_clk))
+ clk_disable_unprepare(bp->tx_clk);
+err_out_disable_hclk:
+ clk_disable_unprepare(bp->hclk);
+err_out_disable_pclk:
+ clk_disable_unprepare(bp->pclk);
+err_out_free_dev:
+ free_netdev(dev);
+err_out:
+ return err;
+}
+
+static int __exit macb_remove(struct platform_device *pdev)
+{
+ struct net_device *dev;
+ struct macb *bp;
+
+ dev = platform_get_drvdata(pdev);
+
+ if (dev) {
+ bp = netdev_priv(dev);
+ if (bp->phy_dev)
+ phy_disconnect(bp->phy_dev);
+ mdiobus_unregister(bp->mii_bus);
+ kfree(bp->mii_bus->irq);
+ mdiobus_free(bp->mii_bus);
+ unregister_netdev(dev);
+ if (!IS_ERR(bp->tx_clk))
+ clk_disable_unprepare(bp->tx_clk);
+ clk_disable_unprepare(bp->hclk);
+ clk_disable_unprepare(bp->pclk);
+ free_netdev(dev);
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int macb_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct net_device *netdev = platform_get_drvdata(pdev);
+ struct macb *bp = netdev_priv(netdev);
+
+ netif_carrier_off(netdev);
+ netif_device_detach(netdev);
+
+ if (!IS_ERR(bp->tx_clk))
+ clk_disable_unprepare(bp->tx_clk);
+ clk_disable_unprepare(bp->hclk);
+ clk_disable_unprepare(bp->pclk);
+
+ return 0;
+}
+
+static int macb_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct net_device *netdev = platform_get_drvdata(pdev);
+ struct macb *bp = netdev_priv(netdev);
+
+ clk_prepare_enable(bp->pclk);
+ clk_prepare_enable(bp->hclk);
+ if (!IS_ERR(bp->tx_clk))
+ clk_prepare_enable(bp->tx_clk);
+
+ netif_device_attach(netdev);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(macb_pm_ops, macb_suspend, macb_resume);
+
+static struct platform_driver macb_driver = {
+ .remove = __exit_p(macb_remove),
+ .driver = {
+ .name = "macb",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(macb_dt_ids),
+ .pm = &macb_pm_ops,
+ },
+};
+
+module_platform_driver_probe(macb_driver, macb_probe);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Cadence MACB/GEM Ethernet driver");
+MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
+MODULE_ALIAS("platform:macb");
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
new file mode 100644
index 00000000000..51c02442160
--- /dev/null
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -0,0 +1,621 @@
+/*
+ * Atmel MACB Ethernet Controller driver
+ *
+ * Copyright (C) 2004-2006 Atmel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _MACB_H
+#define _MACB_H
+
+#define MACB_GREGS_NBR 16
+#define MACB_GREGS_VERSION 1
+
+/* MACB register offsets */
+#define MACB_NCR 0x0000
+#define MACB_NCFGR 0x0004
+#define MACB_NSR 0x0008
+#define MACB_TAR 0x000c /* AT91RM9200 only */
+#define MACB_TCR 0x0010 /* AT91RM9200 only */
+#define MACB_TSR 0x0014
+#define MACB_RBQP 0x0018
+#define MACB_TBQP 0x001c
+#define MACB_RSR 0x0020
+#define MACB_ISR 0x0024
+#define MACB_IER 0x0028
+#define MACB_IDR 0x002c
+#define MACB_IMR 0x0030
+#define MACB_MAN 0x0034
+#define MACB_PTR 0x0038
+#define MACB_PFR 0x003c
+#define MACB_FTO 0x0040
+#define MACB_SCF 0x0044
+#define MACB_MCF 0x0048
+#define MACB_FRO 0x004c
+#define MACB_FCSE 0x0050
+#define MACB_ALE 0x0054
+#define MACB_DTF 0x0058
+#define MACB_LCOL 0x005c
+#define MACB_EXCOL 0x0060
+#define MACB_TUND 0x0064
+#define MACB_CSE 0x0068
+#define MACB_RRE 0x006c
+#define MACB_ROVR 0x0070
+#define MACB_RSE 0x0074
+#define MACB_ELE 0x0078
+#define MACB_RJA 0x007c
+#define MACB_USF 0x0080
+#define MACB_STE 0x0084
+#define MACB_RLE 0x0088
+#define MACB_TPF 0x008c
+#define MACB_HRB 0x0090
+#define MACB_HRT 0x0094
+#define MACB_SA1B 0x0098
+#define MACB_SA1T 0x009c
+#define MACB_SA2B 0x00a0
+#define MACB_SA2T 0x00a4
+#define MACB_SA3B 0x00a8
+#define MACB_SA3T 0x00ac
+#define MACB_SA4B 0x00b0
+#define MACB_SA4T 0x00b4
+#define MACB_TID 0x00b8
+#define MACB_TPQ 0x00bc
+#define MACB_USRIO 0x00c0
+#define MACB_WOL 0x00c4
+#define MACB_MID 0x00fc
+
+/* GEM register offsets. */
+#define GEM_NCFGR 0x0004
+#define GEM_USRIO 0x000c
+#define GEM_DMACFG 0x0010
+#define GEM_HRB 0x0080
+#define GEM_HRT 0x0084
+#define GEM_SA1B 0x0088
+#define GEM_SA1T 0x008C
+#define GEM_SA2B 0x0090
+#define GEM_SA2T 0x0094
+#define GEM_SA3B 0x0098
+#define GEM_SA3T 0x009C
+#define GEM_SA4B 0x00A0
+#define GEM_SA4T 0x00A4
+#define GEM_OTX 0x0100
+#define GEM_DCFG1 0x0280
+#define GEM_DCFG2 0x0284
+#define GEM_DCFG3 0x0288
+#define GEM_DCFG4 0x028c
+#define GEM_DCFG5 0x0290
+#define GEM_DCFG6 0x0294
+#define GEM_DCFG7 0x0298
+
+/* Bitfields in NCR */
+#define MACB_LB_OFFSET 0
+#define MACB_LB_SIZE 1
+#define MACB_LLB_OFFSET 1
+#define MACB_LLB_SIZE 1
+#define MACB_RE_OFFSET 2
+#define MACB_RE_SIZE 1
+#define MACB_TE_OFFSET 3
+#define MACB_TE_SIZE 1
+#define MACB_MPE_OFFSET 4
+#define MACB_MPE_SIZE 1
+#define MACB_CLRSTAT_OFFSET 5
+#define MACB_CLRSTAT_SIZE 1
+#define MACB_INCSTAT_OFFSET 6
+#define MACB_INCSTAT_SIZE 1
+#define MACB_WESTAT_OFFSET 7
+#define MACB_WESTAT_SIZE 1
+#define MACB_BP_OFFSET 8
+#define MACB_BP_SIZE 1
+#define MACB_TSTART_OFFSET 9
+#define MACB_TSTART_SIZE 1
+#define MACB_THALT_OFFSET 10
+#define MACB_THALT_SIZE 1
+#define MACB_NCR_TPF_OFFSET 11
+#define MACB_NCR_TPF_SIZE 1
+#define MACB_TZQ_OFFSET 12
+#define MACB_TZQ_SIZE 1
+
+/* Bitfields in NCFGR */
+#define MACB_SPD_OFFSET 0
+#define MACB_SPD_SIZE 1
+#define MACB_FD_OFFSET 1
+#define MACB_FD_SIZE 1
+#define MACB_BIT_RATE_OFFSET 2
+#define MACB_BIT_RATE_SIZE 1
+#define MACB_JFRAME_OFFSET 3
+#define MACB_JFRAME_SIZE 1
+#define MACB_CAF_OFFSET 4
+#define MACB_CAF_SIZE 1
+#define MACB_NBC_OFFSET 5
+#define MACB_NBC_SIZE 1
+#define MACB_NCFGR_MTI_OFFSET 6
+#define MACB_NCFGR_MTI_SIZE 1
+#define MACB_UNI_OFFSET 7
+#define MACB_UNI_SIZE 1
+#define MACB_BIG_OFFSET 8
+#define MACB_BIG_SIZE 1
+#define MACB_EAE_OFFSET 9
+#define MACB_EAE_SIZE 1
+#define MACB_CLK_OFFSET 10
+#define MACB_CLK_SIZE 2
+#define MACB_RTY_OFFSET 12
+#define MACB_RTY_SIZE 1
+#define MACB_PAE_OFFSET 13
+#define MACB_PAE_SIZE 1
+#define MACB_RM9200_RMII_OFFSET 13 /* AT91RM9200 only */
+#define MACB_RM9200_RMII_SIZE 1 /* AT91RM9200 only */
+#define MACB_RBOF_OFFSET 14
+#define MACB_RBOF_SIZE 2
+#define MACB_RLCE_OFFSET 16
+#define MACB_RLCE_SIZE 1
+#define MACB_DRFCS_OFFSET 17
+#define MACB_DRFCS_SIZE 1
+#define MACB_EFRHD_OFFSET 18
+#define MACB_EFRHD_SIZE 1
+#define MACB_IRXFCS_OFFSET 19
+#define MACB_IRXFCS_SIZE 1
+
+/* GEM specific NCFGR bitfields. */
+#define GEM_GBE_OFFSET 10
+#define GEM_GBE_SIZE 1
+#define GEM_CLK_OFFSET 18
+#define GEM_CLK_SIZE 3
+#define GEM_DBW_OFFSET 21
+#define GEM_DBW_SIZE 2
+
+/* Constants for data bus width. */
+#define GEM_DBW32 0
+#define GEM_DBW64 1
+#define GEM_DBW128 2
+
+/* Bitfields in DMACFG. */
+#define GEM_FBLDO_OFFSET 0
+#define GEM_FBLDO_SIZE 5
+#define GEM_ENDIA_OFFSET 7
+#define GEM_ENDIA_SIZE 1
+#define GEM_RXBMS_OFFSET 8
+#define GEM_RXBMS_SIZE 2
+#define GEM_TXPBMS_OFFSET 10
+#define GEM_TXPBMS_SIZE 1
+#define GEM_TXCOEN_OFFSET 11
+#define GEM_TXCOEN_SIZE 1
+#define GEM_RXBS_OFFSET 16
+#define GEM_RXBS_SIZE 8
+#define GEM_DDRP_OFFSET 24
+#define GEM_DDRP_SIZE 1
+
+/* Bitfields in NSR */
+#define MACB_NSR_LINK_OFFSET 0
+#define MACB_NSR_LINK_SIZE 1
+#define MACB_MDIO_OFFSET 1
+#define MACB_MDIO_SIZE 1
+#define MACB_IDLE_OFFSET 2
+#define MACB_IDLE_SIZE 1
+
+/* Bitfields in TSR */
+#define MACB_UBR_OFFSET 0
+#define MACB_UBR_SIZE 1
+#define MACB_COL_OFFSET 1
+#define MACB_COL_SIZE 1
+#define MACB_TSR_RLE_OFFSET 2
+#define MACB_TSR_RLE_SIZE 1
+#define MACB_TGO_OFFSET 3
+#define MACB_TGO_SIZE 1
+#define MACB_BEX_OFFSET 4
+#define MACB_BEX_SIZE 1
+#define MACB_RM9200_BNQ_OFFSET 4 /* AT91RM9200 only */
+#define MACB_RM9200_BNQ_SIZE 1 /* AT91RM9200 only */
+#define MACB_COMP_OFFSET 5
+#define MACB_COMP_SIZE 1
+#define MACB_UND_OFFSET 6
+#define MACB_UND_SIZE 1
+
+/* Bitfields in RSR */
+#define MACB_BNA_OFFSET 0
+#define MACB_BNA_SIZE 1
+#define MACB_REC_OFFSET 1
+#define MACB_REC_SIZE 1
+#define MACB_OVR_OFFSET 2
+#define MACB_OVR_SIZE 1
+
+/* Bitfields in ISR/IER/IDR/IMR */
+#define MACB_MFD_OFFSET 0
+#define MACB_MFD_SIZE 1
+#define MACB_RCOMP_OFFSET 1
+#define MACB_RCOMP_SIZE 1
+#define MACB_RXUBR_OFFSET 2
+#define MACB_RXUBR_SIZE 1
+#define MACB_TXUBR_OFFSET 3
+#define MACB_TXUBR_SIZE 1
+#define MACB_ISR_TUND_OFFSET 4
+#define MACB_ISR_TUND_SIZE 1
+#define MACB_ISR_RLE_OFFSET 5
+#define MACB_ISR_RLE_SIZE 1
+#define MACB_TXERR_OFFSET 6
+#define MACB_TXERR_SIZE 1
+#define MACB_TCOMP_OFFSET 7
+#define MACB_TCOMP_SIZE 1
+#define MACB_ISR_LINK_OFFSET 9
+#define MACB_ISR_LINK_SIZE 1
+#define MACB_ISR_ROVR_OFFSET 10
+#define MACB_ISR_ROVR_SIZE 1
+#define MACB_HRESP_OFFSET 11
+#define MACB_HRESP_SIZE 1
+#define MACB_PFR_OFFSET 12
+#define MACB_PFR_SIZE 1
+#define MACB_PTZ_OFFSET 13
+#define MACB_PTZ_SIZE 1
+
+/* Bitfields in MAN */
+#define MACB_DATA_OFFSET 0
+#define MACB_DATA_SIZE 16
+#define MACB_CODE_OFFSET 16
+#define MACB_CODE_SIZE 2
+#define MACB_REGA_OFFSET 18
+#define MACB_REGA_SIZE 5
+#define MACB_PHYA_OFFSET 23
+#define MACB_PHYA_SIZE 5
+#define MACB_RW_OFFSET 28
+#define MACB_RW_SIZE 2
+#define MACB_SOF_OFFSET 30
+#define MACB_SOF_SIZE 2
+
+/* Bitfields in USRIO (AVR32) */
+#define MACB_MII_OFFSET 0
+#define MACB_MII_SIZE 1
+#define MACB_EAM_OFFSET 1
+#define MACB_EAM_SIZE 1
+#define MACB_TX_PAUSE_OFFSET 2
+#define MACB_TX_PAUSE_SIZE 1
+#define MACB_TX_PAUSE_ZERO_OFFSET 3
+#define MACB_TX_PAUSE_ZERO_SIZE 1
+
+/* Bitfields in USRIO (AT91) */
+#define MACB_RMII_OFFSET 0
+#define MACB_RMII_SIZE 1
+#define GEM_RGMII_OFFSET 0 /* GEM gigabit mode */
+#define GEM_RGMII_SIZE 1
+#define MACB_CLKEN_OFFSET 1
+#define MACB_CLKEN_SIZE 1
+
+/* Bitfields in WOL */
+#define MACB_IP_OFFSET 0
+#define MACB_IP_SIZE 16
+#define MACB_MAG_OFFSET 16
+#define MACB_MAG_SIZE 1
+#define MACB_ARP_OFFSET 17
+#define MACB_ARP_SIZE 1
+#define MACB_SA1_OFFSET 18
+#define MACB_SA1_SIZE 1
+#define MACB_WOL_MTI_OFFSET 19
+#define MACB_WOL_MTI_SIZE 1
+
+/* Bitfields in MID */
+#define MACB_IDNUM_OFFSET 16
+#define MACB_IDNUM_SIZE 16
+#define MACB_REV_OFFSET 0
+#define MACB_REV_SIZE 16
+
+/* Bitfields in DCFG1. */
+#define GEM_IRQCOR_OFFSET 23
+#define GEM_IRQCOR_SIZE 1
+#define GEM_DBWDEF_OFFSET 25
+#define GEM_DBWDEF_SIZE 3
+
+/* Constants for CLK */
+#define MACB_CLK_DIV8 0
+#define MACB_CLK_DIV16 1
+#define MACB_CLK_DIV32 2
+#define MACB_CLK_DIV64 3
+
+/* GEM specific constants for CLK. */
+#define GEM_CLK_DIV8 0
+#define GEM_CLK_DIV16 1
+#define GEM_CLK_DIV32 2
+#define GEM_CLK_DIV48 3
+#define GEM_CLK_DIV64 4
+#define GEM_CLK_DIV96 5
+
+/* Constants for MAN register */
+#define MACB_MAN_SOF 1
+#define MACB_MAN_WRITE 1
+#define MACB_MAN_READ 2
+#define MACB_MAN_CODE 2
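+
+/*
+ * Illustrative MDIO use: a clause-22 read of register 'regnum' on PHY
+ * 'mii_id' composes a management frame from the MAN bitfields above,
+ *
+ *	macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
+ *			      | MACB_BF(RW, MACB_MAN_READ)
+ *			      | MACB_BF(PHYA, mii_id)
+ *			      | MACB_BF(REGA, regnum)
+ *			      | MACB_BF(CODE, MACB_MAN_CODE)));
+ *
+ * then polls the NSR IDLE bit and extracts the result with
+ * MACB_BFEXT(DATA, macb_readl(bp, MAN)).
+ */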
+
+/* Capability mask bits */
+#define MACB_CAPS_ISR_CLEAR_ON_WRITE 0x1
+
+/* Bit manipulation macros */
+#define MACB_BIT(name) \
+ (1 << MACB_##name##_OFFSET)
+#define MACB_BF(name,value) \
+ (((value) & ((1 << MACB_##name##_SIZE) - 1)) \
+ << MACB_##name##_OFFSET)
+#define MACB_BFEXT(name,value)\
+ (((value) >> MACB_##name##_OFFSET) \
+ & ((1 << MACB_##name##_SIZE) - 1))
+#define MACB_BFINS(name,value,old) \
+ (((old) & ~(((1 << MACB_##name##_SIZE) - 1) \
+ << MACB_##name##_OFFSET)) \
+ | MACB_BF(name,value))
+
+#define GEM_BIT(name) \
+ (1 << GEM_##name##_OFFSET)
+#define GEM_BF(name, value) \
+ (((value) & ((1 << GEM_##name##_SIZE) - 1)) \
+ << GEM_##name##_OFFSET)
+#define GEM_BFEXT(name, value)\
+ (((value) >> GEM_##name##_OFFSET) \
+ & ((1 << GEM_##name##_SIZE) - 1))
+#define GEM_BFINS(name, value, old) \
+ (((old) & ~(((1 << GEM_##name##_SIZE) - 1) \
+ << GEM_##name##_OFFSET)) \
+ | GEM_BF(name, value))
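+
+/*
+ * For example, MACB_BF(RBOF, 2) places the value 2 in the two
+ * receive-buffer-offset bits of NCFGR ((2 & 0x3) << 14 == 0x8000),
+ * and MACB_BFEXT(RBOF, ncfgr) extracts it again.
+ */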
+
+/* Register access macros */
+#define macb_readl(port,reg) \
+ __raw_readl((port)->regs + MACB_##reg)
+#define macb_writel(port,reg,value) \
+ __raw_writel((value), (port)->regs + MACB_##reg)
+#define gem_readl(port, reg) \
+ __raw_readl((port)->regs + GEM_##reg)
+#define gem_writel(port, reg, value) \
+ __raw_writel((value), (port)->regs + GEM_##reg)
+
+/*
+ * Conditional GEM/MACB macros. These perform the operation on the
+ * correct register depending on whether the device is a GEM or a
+ * MACB. For registers and bitfields that are common to both devices,
+ * use macb_{read,write}l to avoid the cost of the conditional.
+ */
+#define macb_or_gem_writel(__bp, __reg, __value) \
+ ({ \
+ if (macb_is_gem((__bp))) \
+ gem_writel((__bp), __reg, __value); \
+ else \
+ macb_writel((__bp), __reg, __value); \
+ })
+
+#define macb_or_gem_readl(__bp, __reg) \
+ ({ \
+ u32 __v; \
+ if (macb_is_gem((__bp))) \
+ __v = gem_readl((__bp), __reg); \
+ else \
+ __v = macb_readl((__bp), __reg); \
+ __v; \
+ })
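+
+/*
+ * For example, macb_set_hwaddr() programs the first specific-address
+ * register with macb_or_gem_writel(bp, SA1B, bottom); the SA1B token
+ * resolves to GEM_SA1B or MACB_SA1B as appropriate.
+ */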
+
+/**
+ * struct macb_dma_desc - Hardware DMA descriptor
+ * @addr: DMA address of data buffer
+ * @ctrl: Control and status bits
+ */
+struct macb_dma_desc {
+ u32 addr;
+ u32 ctrl;
+};
+
+/* DMA descriptor bitfields */
+#define MACB_RX_USED_OFFSET 0
+#define MACB_RX_USED_SIZE 1
+#define MACB_RX_WRAP_OFFSET 1
+#define MACB_RX_WRAP_SIZE 1
+#define MACB_RX_WADDR_OFFSET 2
+#define MACB_RX_WADDR_SIZE 30
+
+#define MACB_RX_FRMLEN_OFFSET 0
+#define MACB_RX_FRMLEN_SIZE 12
+#define MACB_RX_OFFSET_OFFSET 12
+#define MACB_RX_OFFSET_SIZE 2
+#define MACB_RX_SOF_OFFSET 14
+#define MACB_RX_SOF_SIZE 1
+#define MACB_RX_EOF_OFFSET 15
+#define MACB_RX_EOF_SIZE 1
+#define MACB_RX_CFI_OFFSET 16
+#define MACB_RX_CFI_SIZE 1
+#define MACB_RX_VLAN_PRI_OFFSET 17
+#define MACB_RX_VLAN_PRI_SIZE 3
+#define MACB_RX_PRI_TAG_OFFSET 20
+#define MACB_RX_PRI_TAG_SIZE 1
+#define MACB_RX_VLAN_TAG_OFFSET 21
+#define MACB_RX_VLAN_TAG_SIZE 1
+#define MACB_RX_TYPEID_MATCH_OFFSET 22
+#define MACB_RX_TYPEID_MATCH_SIZE 1
+#define MACB_RX_SA4_MATCH_OFFSET 23
+#define MACB_RX_SA4_MATCH_SIZE 1
+#define MACB_RX_SA3_MATCH_OFFSET 24
+#define MACB_RX_SA3_MATCH_SIZE 1
+#define MACB_RX_SA2_MATCH_OFFSET 25
+#define MACB_RX_SA2_MATCH_SIZE 1
+#define MACB_RX_SA1_MATCH_OFFSET 26
+#define MACB_RX_SA1_MATCH_SIZE 1
+#define MACB_RX_EXT_MATCH_OFFSET 28
+#define MACB_RX_EXT_MATCH_SIZE 1
+#define MACB_RX_UHASH_MATCH_OFFSET 29
+#define MACB_RX_UHASH_MATCH_SIZE 1
+#define MACB_RX_MHASH_MATCH_OFFSET 30
+#define MACB_RX_MHASH_MATCH_SIZE 1
+#define MACB_RX_BROADCAST_OFFSET 31
+#define MACB_RX_BROADCAST_SIZE 1
+
+#define MACB_TX_FRMLEN_OFFSET 0
+#define MACB_TX_FRMLEN_SIZE 11
+#define MACB_TX_LAST_OFFSET 15
+#define MACB_TX_LAST_SIZE 1
+#define MACB_TX_NOCRC_OFFSET 16
+#define MACB_TX_NOCRC_SIZE 1
+#define MACB_TX_BUF_EXHAUSTED_OFFSET 27
+#define MACB_TX_BUF_EXHAUSTED_SIZE 1
+#define MACB_TX_UNDERRUN_OFFSET 28
+#define MACB_TX_UNDERRUN_SIZE 1
+#define MACB_TX_ERROR_OFFSET 29
+#define MACB_TX_ERROR_SIZE 1
+#define MACB_TX_WRAP_OFFSET 30
+#define MACB_TX_WRAP_SIZE 1
+#define MACB_TX_USED_OFFSET 31
+#define MACB_TX_USED_SIZE 1
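+
+/*
+ * Illustrative descriptor-field use: the receive path recovers the
+ * frame length from the control word of a frame's last descriptor
+ * with MACB_BFEXT(RX_FRMLEN, desc->ctrl), and detects frame
+ * boundaries by testing desc->ctrl against MACB_BIT(RX_SOF) and
+ * MACB_BIT(RX_EOF).
+ */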
+
+/**
+ * struct macb_tx_skb - data about an skb which is being transmitted
+ * @skb: skb currently being transmitted
+ * @mapping: DMA address of the skb's data buffer
+ */
+struct macb_tx_skb {
+ struct sk_buff *skb;
+ dma_addr_t mapping;
+};
+
+/*
+ * Hardware-collected statistics, accumulated into the network device
+ * stats whenever macb_get_stats() is called.
+ */
+struct macb_stats {
+ u32 rx_pause_frames;
+ u32 tx_ok;
+ u32 tx_single_cols;
+ u32 tx_multiple_cols;
+ u32 rx_ok;
+ u32 rx_fcs_errors;
+ u32 rx_align_errors;
+ u32 tx_deferred;
+ u32 tx_late_cols;
+ u32 tx_excessive_cols;
+ u32 tx_underruns;
+ u32 tx_carrier_errors;
+ u32 rx_resource_errors;
+ u32 rx_overruns;
+ u32 rx_symbol_errors;
+ u32 rx_oversize_pkts;
+ u32 rx_jabbers;
+ u32 rx_undersize_pkts;
+ u32 sqe_test_errors;
+ u32 rx_length_mismatch;
+ u32 tx_pause_frames;
+};
+
+struct gem_stats {
+ u32 tx_octets_31_0;
+ u32 tx_octets_47_32;
+ u32 tx_frames;
+ u32 tx_broadcast_frames;
+ u32 tx_multicast_frames;
+ u32 tx_pause_frames;
+ u32 tx_64_byte_frames;
+ u32 tx_65_127_byte_frames;
+ u32 tx_128_255_byte_frames;
+ u32 tx_256_511_byte_frames;
+ u32 tx_512_1023_byte_frames;
+ u32 tx_1024_1518_byte_frames;
+ u32 tx_greater_than_1518_byte_frames;
+ u32 tx_underrun;
+ u32 tx_single_collision_frames;
+ u32 tx_multiple_collision_frames;
+ u32 tx_excessive_collisions;
+ u32 tx_late_collisions;
+ u32 tx_deferred_frames;
+ u32 tx_carrier_sense_errors;
+ u32 rx_octets_31_0;
+ u32 rx_octets_47_32;
+ u32 rx_frames;
+ u32 rx_broadcast_frames;
+ u32 rx_multicast_frames;
+ u32 rx_pause_frames;
+ u32 rx_64_byte_frames;
+ u32 rx_65_127_byte_frames;
+ u32 rx_128_255_byte_frames;
+ u32 rx_256_511_byte_frames;
+ u32 rx_512_1023_byte_frames;
+ u32 rx_1024_1518_byte_frames;
+ u32 rx_greater_than_1518_byte_frames;
+ u32 rx_undersized_frames;
+ u32 rx_oversize_frames;
+ u32 rx_jabbers;
+ u32 rx_frame_check_sequence_errors;
+ u32 rx_length_field_frame_errors;
+ u32 rx_symbol_errors;
+ u32 rx_alignment_errors;
+ u32 rx_resource_errors;
+ u32 rx_overruns;
+ u32 rx_ip_header_checksum_errors;
+ u32 rx_tcp_checksum_errors;
+ u32 rx_udp_checksum_errors;
+};
+
+struct macb;
+
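+/*
+ * One set of buffer-management operations is selected at probe time
+ * via macb_is_gem(): GEM attaches an sk_buff to each RX descriptor,
+ * while MACB copies frames out of a single contiguous RX buffer area.
+ */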
+struct macb_or_gem_ops {
+ int (*mog_alloc_rx_buffers)(struct macb *bp);
+ void (*mog_free_rx_buffers)(struct macb *bp);
+ void (*mog_init_rings)(struct macb *bp);
+ int (*mog_rx)(struct macb *bp, int budget);
+};
+
+struct macb {
+ void __iomem *regs;
+
+ unsigned int rx_tail;
+ unsigned int rx_prepared_head;
+ struct macb_dma_desc *rx_ring;
+ struct sk_buff **rx_skbuff;
+ void *rx_buffers;
+ size_t rx_buffer_size;
+
+ unsigned int tx_head, tx_tail;
+ struct macb_dma_desc *tx_ring;
+ struct macb_tx_skb *tx_skb;
+
+ spinlock_t lock;
+ struct platform_device *pdev;
+ struct clk *pclk;
+ struct clk *hclk;
+ struct clk *tx_clk;
+ struct net_device *dev;
+ struct napi_struct napi;
+ struct work_struct tx_error_task;
+ struct net_device_stats stats;
+ union {
+ struct macb_stats macb;
+ struct gem_stats gem;
+ } hw_stats;
+
+ dma_addr_t rx_ring_dma;
+ dma_addr_t tx_ring_dma;
+ dma_addr_t rx_buffers_dma;
+
+ struct macb_or_gem_ops macbgem_ops;
+
+ struct mii_bus *mii_bus;
+ struct phy_device *phy_dev;
+ unsigned int link;
+ unsigned int speed;
+ unsigned int duplex;
+
+ u32 caps;
+
+ phy_interface_t phy_interface;
+
+ /* AT91RM9200 transmit */
+ struct sk_buff *skb; /* holds skb until xmit interrupt completes */
+	dma_addr_t skb_physaddr;	/* phys addr from dma_map_single */
+	int skb_length;			/* saved skb length for dma_unmap_single */
+};
+
+extern const struct ethtool_ops macb_ethtool_ops;
+
+int macb_mii_init(struct macb *bp);
+int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+struct net_device_stats *macb_get_stats(struct net_device *dev);
+void macb_set_rx_mode(struct net_device *dev);
+void macb_set_hwaddr(struct macb *bp);
+void macb_get_hwaddr(struct macb *bp);
+
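+/*
+ * GEM macrocells report module ID 0x2 in the MID register; this is
+ * how the driver tells GEM and MACB apart at run time.
+ */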
+static inline bool macb_is_gem(struct macb *bp)
+{
+ return MACB_BFEXT(IDNUM, macb_readl(bp, MID)) == 0x2;
+}
+
+#endif /* _MACB_H */