Diffstat (limited to 'drivers/net/ethernet/cadence/macb.c')
 drivers/net/ethernet/cadence/macb.c | 1182 ++++++++++++++++++++++----------
 1 file changed, 862 insertions(+), 320 deletions(-)
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index 23200680d4c..e9daa072ebb 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -14,8 +14,11 @@
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
+#include <linux/circ_buf.h>
#include <linux/slab.h>
#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
@@ -25,38 +28,77 @@
#include <linux/phy.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/of_mdio.h>
#include <linux/of_net.h>
+#include <linux/pinctrl/consumer.h>
#include "macb.h"
-#define RX_BUFFER_SIZE 128
-#define RX_RING_SIZE 512
-#define RX_RING_BYTES (sizeof(struct dma_desc) * RX_RING_SIZE)
+#define MACB_RX_BUFFER_SIZE 128
+#define RX_BUFFER_MULTIPLE 64 /* bytes */
+#define RX_RING_SIZE 512 /* must be power of 2 */
+#define RX_RING_BYTES (sizeof(struct macb_dma_desc) * RX_RING_SIZE)
-/* Make the IP header word-aligned (the ethernet header is 14 bytes) */
-#define RX_OFFSET 2
+#define TX_RING_SIZE 128 /* must be power of 2 */
+#define TX_RING_BYTES (sizeof(struct macb_dma_desc) * TX_RING_SIZE)
-#define TX_RING_SIZE 128
-#define DEF_TX_RING_PENDING (TX_RING_SIZE - 1)
-#define TX_RING_BYTES (sizeof(struct dma_desc) * TX_RING_SIZE)
+/* level of occupied TX descriptors under which we wake up TX process */
+#define MACB_TX_WAKEUP_THRESH (3 * TX_RING_SIZE / 4)
-#define TX_RING_GAP(bp) \
- (TX_RING_SIZE - (bp)->tx_pending)
-#define TX_BUFFS_AVAIL(bp) \
- (((bp)->tx_tail <= (bp)->tx_head) ? \
- (bp)->tx_tail + (bp)->tx_pending - (bp)->tx_head : \
- (bp)->tx_tail - (bp)->tx_head - TX_RING_GAP(bp))
-#define NEXT_TX(n) (((n) + 1) & (TX_RING_SIZE - 1))
+#define MACB_RX_INT_FLAGS (MACB_BIT(RCOMP) | MACB_BIT(RXUBR) \
+ | MACB_BIT(ISR_ROVR))
+#define MACB_TX_ERR_FLAGS (MACB_BIT(ISR_TUND) \
+ | MACB_BIT(ISR_RLE) \
+ | MACB_BIT(TXERR))
+#define MACB_TX_INT_FLAGS (MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
-#define NEXT_RX(n) (((n) + 1) & (RX_RING_SIZE - 1))
+/*
+ * Graceful stop timeout in us. We should allow up to
+ * 1 frame time (10 Mbit/s, full-duplex, ignoring collisions)
+ */
+#define MACB_HALT_TIMEOUT 1230
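
At 10 Mbit/s full duplex, a maximum-size frame plus preamble and inter-frame gap takes (1518 + 8 + 12) bytes × 8 bits / 10 Mbit/s ≈ 1230 µs, which matches the constant above.
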
-/* minimum number of free TX descriptors before waking up TX process */
-#define MACB_TX_WAKEUP_THRESH (TX_RING_SIZE / 4)
+/* Ring buffer accessors */
+static unsigned int macb_tx_ring_wrap(unsigned int index)
+{
+ return index & (TX_RING_SIZE - 1);
+}
-#define MACB_RX_INT_FLAGS (MACB_BIT(RCOMP) | MACB_BIT(RXUBR) \
- | MACB_BIT(ISR_ROVR))
+static struct macb_dma_desc *macb_tx_desc(struct macb *bp, unsigned int index)
+{
+ return &bp->tx_ring[macb_tx_ring_wrap(index)];
+}
+
+static struct macb_tx_skb *macb_tx_skb(struct macb *bp, unsigned int index)
+{
+ return &bp->tx_skb[macb_tx_ring_wrap(index)];
+}
-static void __macb_set_hwaddr(struct macb *bp)
+static dma_addr_t macb_tx_dma(struct macb *bp, unsigned int index)
+{
+ dma_addr_t offset;
+
+ offset = macb_tx_ring_wrap(index) * sizeof(struct macb_dma_desc);
+
+ return bp->tx_ring_dma + offset;
+}
+
+static unsigned int macb_rx_ring_wrap(unsigned int index)
+{
+ return index & (RX_RING_SIZE - 1);
+}
+
+static struct macb_dma_desc *macb_rx_desc(struct macb *bp, unsigned int index)
+{
+ return &bp->rx_ring[macb_rx_ring_wrap(index)];
+}
+
+static void *macb_rx_buffer(struct macb *bp, unsigned int index)
+{
+ return bp->rx_buffers + bp->rx_buffer_size * macb_rx_ring_wrap(index);
+}
+
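
The ring sizes are required to be powers of two so the wrap helpers above can reduce a free-running index with a single AND mask instead of a modulo. A minimal user-space sketch of the same idiom (standalone illustration, not driver code; RING_SIZE stands in for TX_RING_SIZE/RX_RING_SIZE):

    #include <assert.h>
    #include <stdio.h>

    #define RING_SIZE 128   /* must be a power of 2 */

    static unsigned int ring_wrap(unsigned int index)
    {
            /* One AND instead of a modulo; correct only for power-of-2 sizes */
            return index & (RING_SIZE - 1);
    }

    int main(void)
    {
            /* head and tail run freely and are only wrapped on access,
             * so "head - tail" is always the number of entries in flight */
            unsigned int tail = 125, head = 130;

            assert(ring_wrap(head) == 2);
            printf("entries in flight: %u\n", head - tail);  /* 5 */
            return 0;
    }
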
+void macb_set_hwaddr(struct macb *bp)
{
u32 bottom;
u16 top;
@@ -65,31 +107,58 @@ static void __macb_set_hwaddr(struct macb *bp)
macb_or_gem_writel(bp, SA1B, bottom);
top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4)));
macb_or_gem_writel(bp, SA1T, top);
+
+ /* Clear unused address register sets */
+ macb_or_gem_writel(bp, SA2B, 0);
+ macb_or_gem_writel(bp, SA2T, 0);
+ macb_or_gem_writel(bp, SA3B, 0);
+ macb_or_gem_writel(bp, SA3T, 0);
+ macb_or_gem_writel(bp, SA4B, 0);
+ macb_or_gem_writel(bp, SA4T, 0);
}
+EXPORT_SYMBOL_GPL(macb_set_hwaddr);
-static void __init macb_get_hwaddr(struct macb *bp)
+void macb_get_hwaddr(struct macb *bp)
{
+ struct macb_platform_data *pdata;
u32 bottom;
u16 top;
u8 addr[6];
+ int i;
- bottom = macb_or_gem_readl(bp, SA1B);
- top = macb_or_gem_readl(bp, SA1T);
-
- addr[0] = bottom & 0xff;
- addr[1] = (bottom >> 8) & 0xff;
- addr[2] = (bottom >> 16) & 0xff;
- addr[3] = (bottom >> 24) & 0xff;
- addr[4] = top & 0xff;
- addr[5] = (top >> 8) & 0xff;
+ pdata = dev_get_platdata(&bp->pdev->dev);
+
+ /* Check all 4 address registers for a valid address */
+ for (i = 0; i < 4; i++) {
+ bottom = macb_or_gem_readl(bp, SA1B + i * 8);
+ top = macb_or_gem_readl(bp, SA1T + i * 8);
+
+ if (pdata && pdata->rev_eth_addr) {
+ addr[5] = bottom & 0xff;
+ addr[4] = (bottom >> 8) & 0xff;
+ addr[3] = (bottom >> 16) & 0xff;
+ addr[2] = (bottom >> 24) & 0xff;
+ addr[1] = top & 0xff;
+ addr[0] = (top & 0xff00) >> 8;
+ } else {
+ addr[0] = bottom & 0xff;
+ addr[1] = (bottom >> 8) & 0xff;
+ addr[2] = (bottom >> 16) & 0xff;
+ addr[3] = (bottom >> 24) & 0xff;
+ addr[4] = top & 0xff;
+ addr[5] = (top >> 8) & 0xff;
+ }
- if (is_valid_ether_addr(addr)) {
- memcpy(bp->dev->dev_addr, addr, sizeof(addr));
- } else {
- netdev_info(bp->dev, "invalid hw address, using random\n");
- random_ether_addr(bp->dev->dev_addr);
+ if (is_valid_ether_addr(addr)) {
+ memcpy(bp->dev->dev_addr, addr, sizeof(addr));
+ return;
+ }
}
+
+ netdev_info(bp->dev, "invalid hw address, using random\n");
+ eth_hw_addr_random(bp->dev);
}
+EXPORT_SYMBOL_GPL(macb_get_hwaddr);
static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
@@ -130,9 +199,45 @@ static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
return 0;
}
-static int macb_mdio_reset(struct mii_bus *bus)
+/**
+ * macb_set_tx_clk() - Set a clock to a new frequency
+ * @clk Pointer to the clock to change
+ * @speed Link speed in Mbps, used to select the new tx_clk frequency
+ * @dev Pointer to the struct net_device
+ */
+static void macb_set_tx_clk(struct clk *clk, int speed, struct net_device *dev)
{
- return 0;
+ long ferr, rate, rate_rounded;
+
+ switch (speed) {
+ case SPEED_10:
+ rate = 2500000;
+ break;
+ case SPEED_100:
+ rate = 25000000;
+ break;
+ case SPEED_1000:
+ rate = 125000000;
+ break;
+ default:
+ return;
+ }
+
+ rate_rounded = clk_round_rate(clk, rate);
+ if (rate_rounded < 0)
+ return;
+
+ /* RGMII allows 50 ppm frequency error. Test and warn if this limit
+ * is not satisfied.
+ */
+ ferr = abs(rate_rounded - rate);
+ ferr = DIV_ROUND_UP(ferr, rate / 100000);
+ if (ferr > 5)
+ netdev_warn(dev, "unable to generate target frequency: %ld Hz\n",
+ rate);
+
+ if (clk_set_rate(clk, rate_rounded))
+ netdev_err(dev, "adjusting tx_clk failed.\n");
}
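
To make the 50 ppm check concrete: at SPEED_1000 the target rate is 125 MHz, so rate / 100000 is 1250 Hz, i.e. one unit of ferr per 10 ppm; ferr > 5 therefore warns whenever the rounded rate sits more than 50 ppm away from the target.
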
static void macb_handle_link_change(struct net_device *dev)
@@ -152,13 +257,17 @@ static void macb_handle_link_change(struct net_device *dev)
reg = macb_readl(bp, NCFGR);
reg &= ~(MACB_BIT(SPD) | MACB_BIT(FD));
+ if (macb_is_gem(bp))
+ reg &= ~GEM_BIT(GBE);
if (phydev->duplex)
reg |= MACB_BIT(FD);
if (phydev->speed == SPEED_100)
reg |= MACB_BIT(SPD);
+ if (phydev->speed == SPEED_1000)
+ reg |= GEM_BIT(GBE);
- macb_writel(bp, NCFGR, reg);
+ macb_or_gem_writel(bp, NCFGR, reg);
bp->speed = phydev->speed;
bp->duplex = phydev->duplex;
@@ -178,14 +287,20 @@ static void macb_handle_link_change(struct net_device *dev)
spin_unlock_irqrestore(&bp->lock, flags);
+ if (!IS_ERR(bp->tx_clk))
+ macb_set_tx_clk(bp->tx_clk, phydev->speed, dev);
+
if (status_change) {
- if (phydev->link)
+ if (phydev->link) {
+ netif_carrier_on(dev);
netdev_info(dev, "link up (%d/%s)\n",
phydev->speed,
phydev->duplex == DUPLEX_FULL ?
"Full" : "Half");
- else
+ } else {
+ netif_carrier_off(dev);
netdev_info(dev, "link down\n");
+ }
}
}
@@ -193,19 +308,28 @@ static void macb_handle_link_change(struct net_device *dev)
static int macb_mii_probe(struct net_device *dev)
{
struct macb *bp = netdev_priv(dev);
+ struct macb_platform_data *pdata;
struct phy_device *phydev;
+ int phy_irq;
int ret;
phydev = phy_find_first(bp->mii_bus);
if (!phydev) {
netdev_err(dev, "no PHY found\n");
- return -1;
+ return -ENXIO;
}
- /* TODO : add pin_irq */
+ pdata = dev_get_platdata(&bp->pdev->dev);
+ if (pdata && gpio_is_valid(pdata->phy_irq_pin)) {
+ ret = devm_gpio_request(&bp->pdev->dev, pdata->phy_irq_pin, "phy int");
+ if (!ret) {
+ phy_irq = gpio_to_irq(pdata->phy_irq_pin);
+ phydev->irq = (phy_irq < 0) ? PHY_POLL : phy_irq;
+ }
+ }
/* attach the mac to the phy */
- ret = phy_connect_direct(dev, phydev, &macb_handle_link_change, 0,
+ ret = phy_connect_direct(dev, phydev, &macb_handle_link_change,
bp->phy_interface);
if (ret) {
netdev_err(dev, "Could not attach to PHY\n");
@@ -213,7 +337,10 @@ static int macb_mii_probe(struct net_device *dev)
}
/* mask with MAC supported features */
- phydev->supported &= PHY_BASIC_FEATURES;
+ if (macb_is_gem(bp))
+ phydev->supported &= PHY_GBIT_FEATURES;
+ else
+ phydev->supported &= PHY_BASIC_FEATURES;
phydev->advertising = phydev->supported;
@@ -225,9 +352,10 @@ static int macb_mii_probe(struct net_device *dev)
return 0;
}
-static int macb_mii_init(struct macb *bp)
+int macb_mii_init(struct macb *bp)
{
struct macb_platform_data *pdata;
+ struct device_node *np;
int err = -ENXIO, i;
/* Enable management port */
@@ -242,15 +370,11 @@ static int macb_mii_init(struct macb *bp)
bp->mii_bus->name = "MACB_mii_bus";
bp->mii_bus->read = &macb_mdio_read;
bp->mii_bus->write = &macb_mdio_write;
- bp->mii_bus->reset = &macb_mdio_reset;
snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
bp->pdev->name, bp->pdev->id);
bp->mii_bus->priv = bp;
bp->mii_bus->parent = &bp->dev->dev;
- pdata = bp->pdev->dev.platform_data;
-
- if (pdata)
- bp->mii_bus->phy_mask = pdata->phy_mask;
+ pdata = dev_get_platdata(&bp->pdev->dev);
bp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
if (!bp->mii_bus->irq) {
@@ -258,17 +382,45 @@ static int macb_mii_init(struct macb *bp)
goto err_out_free_mdiobus;
}
- for (i = 0; i < PHY_MAX_ADDR; i++)
- bp->mii_bus->irq[i] = PHY_POLL;
-
dev_set_drvdata(&bp->dev->dev, bp->mii_bus);
- if (mdiobus_register(bp->mii_bus))
+ np = bp->pdev->dev.of_node;
+ if (np) {
+ /* try dt phy registration */
+ err = of_mdiobus_register(bp->mii_bus, np);
+
+ /* fallback to standard phy registration if no phy was
+ found during dt phy registration */
+ if (!err && !phy_find_first(bp->mii_bus)) {
+ for (i = 0; i < PHY_MAX_ADDR; i++) {
+ struct phy_device *phydev;
+
+ phydev = mdiobus_scan(bp->mii_bus, i);
+ if (IS_ERR(phydev)) {
+ err = PTR_ERR(phydev);
+ break;
+ }
+ }
+
+ if (err)
+ goto err_out_unregister_bus;
+ }
+ } else {
+ for (i = 0; i < PHY_MAX_ADDR; i++)
+ bp->mii_bus->irq[i] = PHY_POLL;
+
+ if (pdata)
+ bp->mii_bus->phy_mask = pdata->phy_mask;
+
+ err = mdiobus_register(bp->mii_bus);
+ }
+
+ if (err)
goto err_out_free_mdio_irq;
- if (macb_mii_probe(bp->dev) != 0) {
+ err = macb_mii_probe(bp->dev);
+ if (err)
goto err_out_unregister_bus;
- }
return 0;
@@ -281,6 +433,7 @@ err_out_free_mdiobus:
err_out:
return err;
}
+EXPORT_SYMBOL_GPL(macb_mii_init);
static void macb_update_stats(struct macb *bp)
{
@@ -294,184 +447,391 @@ static void macb_update_stats(struct macb *bp)
*p += __raw_readl(reg);
}
-static void macb_tx(struct macb *bp)
+static int macb_halt_tx(struct macb *bp)
{
- unsigned int tail;
- unsigned int head;
- u32 status;
+ unsigned long halt_time, timeout;
+ u32 status;
- status = macb_readl(bp, TSR);
- macb_writel(bp, TSR, status);
+ macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(THALT));
- netdev_dbg(bp->dev, "macb_tx status = %02lx\n", (unsigned long)status);
+ timeout = jiffies + usecs_to_jiffies(MACB_HALT_TIMEOUT);
+ do {
+ halt_time = jiffies;
+ status = macb_readl(bp, TSR);
+ if (!(status & MACB_BIT(TGO)))
+ return 0;
- if (status & (MACB_BIT(UND) | MACB_BIT(TSR_RLE))) {
- int i;
- netdev_err(bp->dev, "TX %s, resetting buffers\n",
- status & MACB_BIT(UND) ?
- "underrun" : "retry limit exceeded");
+ usleep_range(10, 250);
+ } while (time_before(halt_time, timeout));
- /* Transfer ongoing, disable transmitter, to avoid confusion */
- if (status & MACB_BIT(TGO))
- macb_writel(bp, NCR, macb_readl(bp, NCR) & ~MACB_BIT(TE));
+ return -ETIMEDOUT;
+}
- head = bp->tx_head;
+static void macb_tx_error_task(struct work_struct *work)
+{
+ struct macb *bp = container_of(work, struct macb, tx_error_task);
+ struct macb_tx_skb *tx_skb;
+ struct sk_buff *skb;
+ unsigned int tail;
- /*Mark all the buffer as used to avoid sending a lost buffer*/
- for (i = 0; i < TX_RING_SIZE; i++)
- bp->tx_ring[i].ctrl = MACB_BIT(TX_USED);
+ netdev_vdbg(bp->dev, "macb_tx_error_task: t = %u, h = %u\n",
+ bp->tx_tail, bp->tx_head);
- /* Add wrap bit */
- bp->tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP);
+ /* Make sure nobody is trying to queue up new packets */
+ netif_stop_queue(bp->dev);
- /* free transmit buffer in upper layer*/
- for (tail = bp->tx_tail; tail != head; tail = NEXT_TX(tail)) {
- struct ring_info *rp = &bp->tx_skb[tail];
- struct sk_buff *skb = rp->skb;
+ /*
+ * Stop transmission now
+ * (in case we have just queued new packets)
+ */
+ if (macb_halt_tx(bp))
+ /* Just complain for now; reinitializing the TX path could help */
+ netdev_err(bp->dev, "BUG: halt tx timed out\n");
- BUG_ON(skb == NULL);
+ /* No need for the lock here as nobody will interrupt us anymore */
- rmb();
+ /*
+ * Treat frames in TX queue including the ones that caused the error.
+ * Free transmit buffers in upper layer.
+ */
+ for (tail = bp->tx_tail; tail != bp->tx_head; tail++) {
+ struct macb_dma_desc *desc;
+ u32 ctrl;
+
+ desc = macb_tx_desc(bp, tail);
+ ctrl = desc->ctrl;
+ tx_skb = macb_tx_skb(bp, tail);
+ skb = tx_skb->skb;
+
+ if (ctrl & MACB_BIT(TX_USED)) {
+ netdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n",
+ macb_tx_ring_wrap(tail), skb->data);
+ bp->stats.tx_packets++;
+ bp->stats.tx_bytes += skb->len;
+ } else {
+ /*
+ * "Buffers exhausted mid-frame" errors may only happen
+ * if the driver is buggy, so complain loudly about those.
+ * Statistics are updated by hardware.
+ */
+ if (ctrl & MACB_BIT(TX_BUF_EXHAUSTED))
+ netdev_err(bp->dev,
+ "BUG: TX buffers exhausted mid-frame\n");
- dma_unmap_single(&bp->pdev->dev, rp->mapping, skb->len,
- DMA_TO_DEVICE);
- rp->skb = NULL;
- dev_kfree_skb_irq(skb);
+ desc->ctrl = ctrl | MACB_BIT(TX_USED);
}
- bp->tx_head = bp->tx_tail = 0;
-
- /* Enable the transmitter again */
- if (status & MACB_BIT(TGO))
- macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TE));
+ dma_unmap_single(&bp->pdev->dev, tx_skb->mapping, skb->len,
+ DMA_TO_DEVICE);
+ tx_skb->skb = NULL;
+ dev_kfree_skb(skb);
}
- if (!(status & MACB_BIT(COMP)))
- /*
- * This may happen when a buffer becomes complete
- * between reading the ISR and scanning the
- * descriptors. Nothing to worry about.
- */
- return;
+ /* Make descriptor updates visible to hardware */
+ wmb();
+
+ /* Reinitialize the TX desc queue */
+ macb_writel(bp, TBQP, bp->tx_ring_dma);
+ /* Make TX ring reflect state of hardware */
+ bp->tx_head = bp->tx_tail = 0;
+
+ /* Now we are ready to start transmission again */
+ netif_wake_queue(bp->dev);
+
+ /* Housework before enabling TX IRQ */
+ macb_writel(bp, TSR, macb_readl(bp, TSR));
+ macb_writel(bp, IER, MACB_TX_INT_FLAGS);
+}
+
+static void macb_tx_interrupt(struct macb *bp)
+{
+ unsigned int tail;
+ unsigned int head;
+ u32 status;
+
+ status = macb_readl(bp, TSR);
+ macb_writel(bp, TSR, status);
+
+ if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
+ macb_writel(bp, ISR, MACB_BIT(TCOMP));
+
+ netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n",
+ (unsigned long)status);
head = bp->tx_head;
- for (tail = bp->tx_tail; tail != head; tail = NEXT_TX(tail)) {
- struct ring_info *rp = &bp->tx_skb[tail];
- struct sk_buff *skb = rp->skb;
- u32 bufstat;
+ for (tail = bp->tx_tail; tail != head; tail++) {
+ struct macb_tx_skb *tx_skb;
+ struct sk_buff *skb;
+ struct macb_dma_desc *desc;
+ u32 ctrl;
- BUG_ON(skb == NULL);
+ desc = macb_tx_desc(bp, tail);
+ /* Make hw descriptor updates visible to CPU */
rmb();
- bufstat = bp->tx_ring[tail].ctrl;
- if (!(bufstat & MACB_BIT(TX_USED)))
+ ctrl = desc->ctrl;
+
+ if (!(ctrl & MACB_BIT(TX_USED)))
break;
- netdev_dbg(bp->dev, "skb %u (data %p) TX complete\n",
- tail, skb->data);
- dma_unmap_single(&bp->pdev->dev, rp->mapping, skb->len,
+ tx_skb = macb_tx_skb(bp, tail);
+ skb = tx_skb->skb;
+
+ netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n",
+ macb_tx_ring_wrap(tail), skb->data);
+ dma_unmap_single(&bp->pdev->dev, tx_skb->mapping, skb->len,
DMA_TO_DEVICE);
bp->stats.tx_packets++;
bp->stats.tx_bytes += skb->len;
- rp->skb = NULL;
+ tx_skb->skb = NULL;
dev_kfree_skb_irq(skb);
}
bp->tx_tail = tail;
- if (netif_queue_stopped(bp->dev) &&
- TX_BUFFS_AVAIL(bp) > MACB_TX_WAKEUP_THRESH)
+ if (netif_queue_stopped(bp->dev)
+ && CIRC_CNT(bp->tx_head, bp->tx_tail,
+ TX_RING_SIZE) <= MACB_TX_WAKEUP_THRESH)
netif_wake_queue(bp->dev);
}
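
The TX path now keeps free-running head/tail indices and leans on <linux/circ_buf.h> for the accounting. A small user-space sketch, with the two macros copied from their kernel definitions, shows the invariant that one slot always stays unused:

    #include <stdio.h>

    /* Definitions as in include/linux/circ_buf.h */
    #define CIRC_CNT(head, tail, size)   (((head) - (tail)) & ((size) - 1))
    #define CIRC_SPACE(head, tail, size) CIRC_CNT((tail), ((head) + 1), (size))

    #define TX_RING_SIZE 128    /* must be a power of 2 */

    int main(void)
    {
            unsigned int tail = 140, head = 200;    /* free-running indices */

            /* 60 descriptors in flight, 67 free: cnt + space == size - 1,
             * i.e. one slot is always kept empty to tell full from empty */
            printf("cnt=%u space=%u\n",
                   CIRC_CNT(head, tail, TX_RING_SIZE),
                   CIRC_SPACE(head, tail, TX_RING_SIZE));
            return 0;
    }
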
+static void gem_rx_refill(struct macb *bp)
+{
+ unsigned int entry;
+ struct sk_buff *skb;
+ dma_addr_t paddr;
+
+ while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail, RX_RING_SIZE) > 0) {
+ entry = macb_rx_ring_wrap(bp->rx_prepared_head);
+
+ /* Make hw descriptor updates visible to CPU */
+ rmb();
+
+ bp->rx_prepared_head++;
+
+ if (bp->rx_skbuff[entry] == NULL) {
+ /* allocate sk_buff for this free entry in ring */
+ skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size);
+ if (unlikely(skb == NULL)) {
+ netdev_err(bp->dev,
+ "Unable to allocate sk_buff\n");
+ break;
+ }
+
+ /* now fill corresponding descriptor entry */
+ paddr = dma_map_single(&bp->pdev->dev, skb->data,
+ bp->rx_buffer_size, DMA_FROM_DEVICE);
+ if (dma_mapping_error(&bp->pdev->dev, paddr)) {
+ dev_kfree_skb(skb);
+ break;
+ }
+
+ bp->rx_skbuff[entry] = skb;
+
+ if (entry == RX_RING_SIZE - 1)
+ paddr |= MACB_BIT(RX_WRAP);
+ bp->rx_ring[entry].addr = paddr;
+ bp->rx_ring[entry].ctrl = 0;
+
+ /* properly align Ethernet header */
+ skb_reserve(skb, NET_IP_ALIGN);
+ }
+ }
+
+ /* Make descriptor updates visible to hardware */
+ wmb();
+
+ netdev_vdbg(bp->dev, "rx ring: prepared head %d, tail %d\n",
+ bp->rx_prepared_head, bp->rx_tail);
+}
+
+/* Mark DMA descriptors from begin up to and not including end as unused */
+static void discard_partial_frame(struct macb *bp, unsigned int begin,
+ unsigned int end)
+{
+ unsigned int frag;
+
+ for (frag = begin; frag != end; frag++) {
+ struct macb_dma_desc *desc = macb_rx_desc(bp, frag);
+ desc->addr &= ~MACB_BIT(RX_USED);
+ }
+
+ /* Make descriptor updates visible to hardware */
+ wmb();
+
+ /*
+ * When this happens, the hardware stats registers for
+ * whatever caused this are updated, so we don't have to record
+ * anything.
+ */
+}
+
+static int gem_rx(struct macb *bp, int budget)
+{
+ unsigned int len;
+ unsigned int entry;
+ struct sk_buff *skb;
+ struct macb_dma_desc *desc;
+ int count = 0;
+
+ while (count < budget) {
+ u32 addr, ctrl;
+
+ entry = macb_rx_ring_wrap(bp->rx_tail);
+ desc = &bp->rx_ring[entry];
+
+ /* Make hw descriptor updates visible to CPU */
+ rmb();
+
+ addr = desc->addr;
+ ctrl = desc->ctrl;
+
+ if (!(addr & MACB_BIT(RX_USED)))
+ break;
+
+ bp->rx_tail++;
+ count++;
+
+ if (!(ctrl & MACB_BIT(RX_SOF) && ctrl & MACB_BIT(RX_EOF))) {
+ netdev_err(bp->dev,
+ "not whole frame pointed by descriptor\n");
+ bp->stats.rx_dropped++;
+ break;
+ }
+ skb = bp->rx_skbuff[entry];
+ if (unlikely(!skb)) {
+ netdev_err(bp->dev,
+ "inconsistent Rx descriptor chain\n");
+ bp->stats.rx_dropped++;
+ break;
+ }
+ /* now everything is ready for receiving the packet */
+ bp->rx_skbuff[entry] = NULL;
+ len = MACB_BFEXT(RX_FRMLEN, ctrl);
+
+ netdev_vdbg(bp->dev, "gem_rx %u (len %u)\n", entry, len);
+
+ skb_put(skb, len);
+ addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, addr));
+ dma_unmap_single(&bp->pdev->dev, addr,
+ bp->rx_buffer_size, DMA_FROM_DEVICE);
+
+ skb->protocol = eth_type_trans(skb, bp->dev);
+ skb_checksum_none_assert(skb);
+
+ bp->stats.rx_packets++;
+ bp->stats.rx_bytes += skb->len;
+
+#if defined(DEBUG) && defined(VERBOSE_DEBUG)
+ netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
+ skb->len, skb->csum);
+ print_hex_dump(KERN_DEBUG, " mac: ", DUMP_PREFIX_ADDRESS, 16, 1,
+ skb->mac_header, 16, true);
+ print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_ADDRESS, 16, 1,
+ skb->data, 32, true);
+#endif
+
+ netif_receive_skb(skb);
+ }
+
+ gem_rx_refill(bp);
+
+ return count;
+}
+
static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
unsigned int last_frag)
{
unsigned int len;
unsigned int frag;
- unsigned int offset = 0;
+ unsigned int offset;
struct sk_buff *skb;
+ struct macb_dma_desc *desc;
- len = MACB_BFEXT(RX_FRMLEN, bp->rx_ring[last_frag].ctrl);
+ desc = macb_rx_desc(bp, last_frag);
+ len = MACB_BFEXT(RX_FRMLEN, desc->ctrl);
- netdev_dbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n",
- first_frag, last_frag, len);
+ netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n",
+ macb_rx_ring_wrap(first_frag),
+ macb_rx_ring_wrap(last_frag), len);
- skb = dev_alloc_skb(len + RX_OFFSET);
+ /*
+ * The ethernet header starts NET_IP_ALIGN bytes into the
+ * first buffer. Since the header is 14 bytes, this makes the
+ * payload word-aligned.
+ *
+ * Instead of calling skb_reserve(NET_IP_ALIGN), we just copy
+ * the two padding bytes into the skb so that we avoid hitting
+ * the slowpath in memcpy(), and pull them off afterwards.
+ */
+ skb = netdev_alloc_skb(bp->dev, len + NET_IP_ALIGN);
if (!skb) {
bp->stats.rx_dropped++;
- for (frag = first_frag; ; frag = NEXT_RX(frag)) {
- bp->rx_ring[frag].addr &= ~MACB_BIT(RX_USED);
+ for (frag = first_frag; ; frag++) {
+ desc = macb_rx_desc(bp, frag);
+ desc->addr &= ~MACB_BIT(RX_USED);
if (frag == last_frag)
break;
}
+
+ /* Make descriptor updates visible to hardware */
wmb();
+
return 1;
}
- skb_reserve(skb, RX_OFFSET);
+ offset = 0;
+ len += NET_IP_ALIGN;
skb_checksum_none_assert(skb);
skb_put(skb, len);
- for (frag = first_frag; ; frag = NEXT_RX(frag)) {
- unsigned int frag_len = RX_BUFFER_SIZE;
+ for (frag = first_frag; ; frag++) {
+ unsigned int frag_len = bp->rx_buffer_size;
if (offset + frag_len > len) {
BUG_ON(frag != last_frag);
frag_len = len - offset;
}
skb_copy_to_linear_data_offset(skb, offset,
- (bp->rx_buffers +
- (RX_BUFFER_SIZE * frag)),
- frag_len);
- offset += RX_BUFFER_SIZE;
- bp->rx_ring[frag].addr &= ~MACB_BIT(RX_USED);
- wmb();
+ macb_rx_buffer(bp, frag), frag_len);
+ offset += bp->rx_buffer_size;
+ desc = macb_rx_desc(bp, frag);
+ desc->addr &= ~MACB_BIT(RX_USED);
if (frag == last_frag)
break;
}
+ /* Make descriptor updates visible to hardware */
+ wmb();
+
+ __skb_pull(skb, NET_IP_ALIGN);
skb->protocol = eth_type_trans(skb, bp->dev);
bp->stats.rx_packets++;
- bp->stats.rx_bytes += len;
- netdev_dbg(bp->dev, "received skb of length %u, csum: %08x\n",
+ bp->stats.rx_bytes += skb->len;
+ netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
skb->len, skb->csum);
netif_receive_skb(skb);
return 0;
}
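
The copy trick described in the comment above works because the controller writes each frame NET_IP_ALIGN bytes into its buffer (RBOF is set to NET_IP_ALIGN in macb_init_hw further down), so source and destination stay word-aligned for the bulk copy and the pad is pulled off afterwards. A minimal user-space sketch of the offset arithmetic (NET_IP_ALIGN assumed to be 2, as on most architectures):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define NET_IP_ALIGN 2      /* assumed; 2 on most architectures */
    #define ETH_HLEN     14

    int main(void)
    {
            uint8_t rx_buf[128] = { 0 };  /* HW wrote frame at offset 2 (RBOF) */
            uint8_t skb_head[128];
            size_t len = 60;              /* frame length */

            /* Both pointers are word-aligned, so memcpy takes its fast path;
             * the 2 pad bytes ride along and are dropped just below. */
            memcpy(skb_head, rx_buf, len + NET_IP_ALIGN);

            uint8_t *data = skb_head + NET_IP_ALIGN;    /* like __skb_pull() */

            /* Ethernet header at data[0..13]; IP header lands 4-byte aligned */
            printf("IP header offset in buffer: %zu\n",
                   (size_t)(data + ETH_HLEN - skb_head));   /* 16 */
            return 0;
    }
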
-/* Mark DMA descriptors from begin up to and not including end as unused */
-static void discard_partial_frame(struct macb *bp, unsigned int begin,
- unsigned int end)
-{
- unsigned int frag;
-
- for (frag = begin; frag != end; frag = NEXT_RX(frag))
- bp->rx_ring[frag].addr &= ~MACB_BIT(RX_USED);
- wmb();
-
- /*
- * When this happens, the hardware stats registers for
- * whatever caused this is updated, so we don't have to record
- * anything.
- */
-}
-
static int macb_rx(struct macb *bp, int budget)
{
int received = 0;
- unsigned int tail = bp->rx_tail;
+ unsigned int tail;
int first_frag = -1;
- for (; budget > 0; tail = NEXT_RX(tail)) {
+ for (tail = bp->rx_tail; budget > 0; tail++) {
+ struct macb_dma_desc *desc = macb_rx_desc(bp, tail);
u32 addr, ctrl;
+ /* Make hw descriptor updates visible to CPU */
rmb();
- addr = bp->rx_ring[tail].addr;
- ctrl = bp->rx_ring[tail].ctrl;
+
+ addr = desc->addr;
+ ctrl = desc->ctrl;
if (!(addr & MACB_BIT(RX_USED)))
break;
@@ -514,18 +874,22 @@ static int macb_poll(struct napi_struct *napi, int budget)
work_done = 0;
- netdev_dbg(bp->dev, "poll: status = %08lx, budget = %d\n",
+ netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n",
(unsigned long)status, budget);
- work_done = macb_rx(bp, budget);
+ work_done = bp->macbgem_ops.mog_rx(bp, budget);
if (work_done < budget) {
napi_complete(napi);
- /*
- * We've done what we can to clean the buffers. Make sure we
- * get notified when new packets arrive.
- */
- macb_writel(bp, IER, MACB_RX_INT_FLAGS);
+ /* Packets received while interrupts were disabled */
+ status = macb_readl(bp, RSR);
+ if (status) {
+ if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
+ macb_writel(bp, ISR, MACB_BIT(RCOMP));
+ napi_reschedule(napi);
+ } else {
+ macb_writel(bp, IER, MACB_RX_INT_FLAGS);
+ }
}
/* TODO: Handle errors */
@@ -549,10 +913,12 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
while (status) {
/* close possible race with dev_close */
if (unlikely(!netif_running(dev))) {
- macb_writel(bp, IDR, ~0UL);
+ macb_writel(bp, IDR, -1);
break;
}
+ netdev_vdbg(bp->dev, "isr = 0x%08lx\n", (unsigned long)status);
+
if (status & MACB_RX_INT_FLAGS) {
/*
* There's no point taking any more interrupts
@@ -562,16 +928,27 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
* now.
*/
macb_writel(bp, IDR, MACB_RX_INT_FLAGS);
+ if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
+ macb_writel(bp, ISR, MACB_BIT(RCOMP));
if (napi_schedule_prep(&bp->napi)) {
- netdev_dbg(bp->dev, "scheduling RX softirq\n");
+ netdev_vdbg(bp->dev, "scheduling RX softirq\n");
__napi_schedule(&bp->napi);
}
}
- if (status & (MACB_BIT(TCOMP) | MACB_BIT(ISR_TUND) |
- MACB_BIT(ISR_RLE)))
- macb_tx(bp);
+ if (unlikely(status & (MACB_TX_ERR_FLAGS))) {
+ macb_writel(bp, IDR, MACB_TX_INT_FLAGS);
+ schedule_work(&bp->tx_error_task);
+
+ if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
+ macb_writel(bp, ISR, MACB_TX_ERR_FLAGS);
+
+ break;
+ }
+
+ if (status & MACB_BIT(TCOMP))
+ macb_tx_interrupt(bp);
/*
* Link change detection isn't possible with RMII, so we'll
@@ -584,6 +961,9 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
bp->hw_stats.gem.rx_overruns++;
else
bp->hw_stats.macb.rx_overruns++;
+
+ if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
+ macb_writel(bp, ISR, MACB_BIT(ISR_ROVR));
}
if (status & MACB_BIT(HRESP)) {
@@ -593,6 +973,9 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
* (work queue?)
*/
netdev_err(dev, "DMA bus error: HRESP not OK\n");
+
+ if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
+ macb_writel(bp, ISR, MACB_BIT(HRESP));
}
status = macb_readl(bp, ISR);
@@ -623,11 +1006,13 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct macb *bp = netdev_priv(dev);
dma_addr_t mapping;
unsigned int len, entry;
+ struct macb_dma_desc *desc;
+ struct macb_tx_skb *tx_skb;
u32 ctrl;
unsigned long flags;
-#ifdef DEBUG
- netdev_dbg(bp->dev,
+#if defined(DEBUG) && defined(VERBOSE_DEBUG)
+ netdev_vdbg(bp->dev,
"start_xmit: len %u head %p data %p tail %p end %p\n",
skb->len, skb->head, skb->data,
skb_tail_pointer(skb), skb_end_pointer(skb));
@@ -639,7 +1024,7 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
spin_lock_irqsave(&bp->lock, flags);
/* This is a hard error, log it. */
- if (TX_BUFFS_AVAIL(bp) < 1) {
+ if (CIRC_SPACE(bp->tx_head, bp->tx_tail, TX_RING_SIZE) < 1) {
netif_stop_queue(dev);
spin_unlock_irqrestore(&bp->lock, flags);
netdev_err(bp->dev, "BUG! Tx Ring full when queue awake!\n");
@@ -648,13 +1033,20 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_BUSY;
}
- entry = bp->tx_head;
- netdev_dbg(bp->dev, "Allocated ring entry %u\n", entry);
+ entry = macb_tx_ring_wrap(bp->tx_head);
+ netdev_vdbg(bp->dev, "Allocated ring entry %u\n", entry);
mapping = dma_map_single(&bp->pdev->dev, skb->data,
len, DMA_TO_DEVICE);
- bp->tx_skb[entry].skb = skb;
- bp->tx_skb[entry].mapping = mapping;
- netdev_dbg(bp->dev, "Mapped skb data %p to DMA addr %08lx\n",
+ if (dma_mapping_error(&bp->pdev->dev, mapping)) {
+ dev_kfree_skb_any(skb);
+ goto unlock;
+ }
+
+ bp->tx_head++;
+ tx_skb = &bp->tx_skb[entry];
+ tx_skb->skb = skb;
+ tx_skb->mapping = mapping;
+ netdev_vdbg(bp->dev, "Mapped skb data %p to DMA addr %08lx\n",
skb->data, (unsigned long)mapping);
ctrl = MACB_BF(TX_FRMLEN, len);
@@ -662,31 +1054,91 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (entry == (TX_RING_SIZE - 1))
ctrl |= MACB_BIT(TX_WRAP);
- bp->tx_ring[entry].addr = mapping;
- bp->tx_ring[entry].ctrl = ctrl;
- wmb();
+ desc = &bp->tx_ring[entry];
+ desc->addr = mapping;
+ desc->ctrl = ctrl;
- entry = NEXT_TX(entry);
- bp->tx_head = entry;
+ /* Make newly initialized descriptor visible to hardware */
+ wmb();
skb_tx_timestamp(skb);
macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
- if (TX_BUFFS_AVAIL(bp) < 1)
+ if (CIRC_SPACE(bp->tx_head, bp->tx_tail, TX_RING_SIZE) < 1)
netif_stop_queue(dev);
+unlock:
spin_unlock_irqrestore(&bp->lock, flags);
return NETDEV_TX_OK;
}
+static void macb_init_rx_buffer_size(struct macb *bp, size_t size)
+{
+ if (!macb_is_gem(bp)) {
+ bp->rx_buffer_size = MACB_RX_BUFFER_SIZE;
+ } else {
+ bp->rx_buffer_size = size;
+
+ if (bp->rx_buffer_size % RX_BUFFER_MULTIPLE) {
+ netdev_dbg(bp->dev,
+ "RX buffer must be multiple of %d bytes, expanding\n",
+ RX_BUFFER_MULTIPLE);
+ bp->rx_buffer_size =
+ roundup(bp->rx_buffer_size, RX_BUFFER_MULTIPLE);
+ }
+ }
+
+ netdev_dbg(bp->dev, "mtu [%u] rx_buffer_size [%zu]\n",
+ bp->dev->mtu, bp->rx_buffer_size);
+}
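
For example, with the default MTU of 1500 the size requested from macb_open() below is 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) + 2 (NET_IP_ALIGN) = 1520 bytes, which roundup() expands to 1536, the next multiple of 64.
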
+
+static void gem_free_rx_buffers(struct macb *bp)
+{
+ struct sk_buff *skb;
+ struct macb_dma_desc *desc;
+ dma_addr_t addr;
+ int i;
+
+ if (!bp->rx_skbuff)
+ return;
+
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ skb = bp->rx_skbuff[i];
+
+ if (skb == NULL)
+ continue;
+
+ desc = &bp->rx_ring[i];
+ addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
+ dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size,
+ DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
+ skb = NULL;
+ }
+
+ kfree(bp->rx_skbuff);
+ bp->rx_skbuff = NULL;
+}
+
+static void macb_free_rx_buffers(struct macb *bp)
+{
+ if (bp->rx_buffers) {
+ dma_free_coherent(&bp->pdev->dev,
+ RX_RING_SIZE * bp->rx_buffer_size,
+ bp->rx_buffers, bp->rx_buffers_dma);
+ bp->rx_buffers = NULL;
+ }
+}
+
static void macb_free_consistent(struct macb *bp)
{
if (bp->tx_skb) {
kfree(bp->tx_skb);
bp->tx_skb = NULL;
}
+ bp->macbgem_ops.mog_free_rx_buffers(bp);
if (bp->rx_ring) {
dma_free_coherent(&bp->pdev->dev, RX_RING_BYTES,
bp->rx_ring, bp->rx_ring_dma);
@@ -697,19 +1149,44 @@ static void macb_free_consistent(struct macb *bp)
bp->tx_ring, bp->tx_ring_dma);
bp->tx_ring = NULL;
}
- if (bp->rx_buffers) {
- dma_free_coherent(&bp->pdev->dev,
- RX_RING_SIZE * RX_BUFFER_SIZE,
- bp->rx_buffers, bp->rx_buffers_dma);
- bp->rx_buffers = NULL;
- }
+}
+
+static int gem_alloc_rx_buffers(struct macb *bp)
+{
+ int size;
+
+ size = RX_RING_SIZE * sizeof(struct sk_buff *);
+ bp->rx_skbuff = kzalloc(size, GFP_KERNEL);
+ if (!bp->rx_skbuff)
+ return -ENOMEM;
+ else
+ netdev_dbg(bp->dev,
+ "Allocated %d RX struct sk_buff entries at %p\n",
+ RX_RING_SIZE, bp->rx_skbuff);
+ return 0;
+}
+
+static int macb_alloc_rx_buffers(struct macb *bp)
+{
+ int size;
+
+ size = RX_RING_SIZE * bp->rx_buffer_size;
+ bp->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size,
+ &bp->rx_buffers_dma, GFP_KERNEL);
+ if (!bp->rx_buffers)
+ return -ENOMEM;
+ else
+ netdev_dbg(bp->dev,
+ "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n",
+ size, (unsigned long)bp->rx_buffers_dma, bp->rx_buffers);
+ return 0;
}
static int macb_alloc_consistent(struct macb *bp)
{
int size;
- size = TX_RING_SIZE * sizeof(struct ring_info);
+ size = TX_RING_SIZE * sizeof(struct macb_tx_skb);
bp->tx_skb = kmalloc(size, GFP_KERNEL);
if (!bp->tx_skb)
goto out_err;
@@ -732,14 +1209,8 @@ static int macb_alloc_consistent(struct macb *bp)
"Allocated TX ring of %d bytes at %08lx (mapped %p)\n",
size, (unsigned long)bp->tx_ring_dma, bp->tx_ring);
- size = RX_RING_SIZE * RX_BUFFER_SIZE;
- bp->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size,
- &bp->rx_buffers_dma, GFP_KERNEL);
- if (!bp->rx_buffers)
+ if (bp->macbgem_ops.mog_alloc_rx_buffers(bp))
goto out_err;
- netdev_dbg(bp->dev,
- "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n",
- size, (unsigned long)bp->rx_buffers_dma, bp->rx_buffers);
return 0;
@@ -748,6 +1219,21 @@ out_err:
return -ENOMEM;
}
+static void gem_init_rings(struct macb *bp)
+{
+ int i;
+
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ bp->tx_ring[i].addr = 0;
+ bp->tx_ring[i].ctrl = MACB_BIT(TX_USED);
+ }
+ bp->tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP);
+
+ bp->rx_tail = bp->rx_prepared_head = bp->tx_head = bp->tx_tail = 0;
+
+ gem_rx_refill(bp);
+}
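
gem_init_rings() hands the whole ring to software by setting TX_USED in every descriptor (the controller halts when it reads a descriptor whose used bit is set, so it sees an empty ring) and marks only the last slot with TX_WRAP so the DMA engine loops back without knowing the ring length. An illustrative stand-alone sketch of that layout (field and bit names mirror the driver; the bit positions here are placeholders, not the hardware encoding):

    #include <stdint.h>

    #define TX_USED (1u << 31)      /* placeholder bit, not the HW encoding */
    #define TX_WRAP (1u << 30)      /* placeholder bit, not the HW encoding */
    #define TX_RING_SIZE 128

    struct dma_desc {
            uint32_t addr;
            uint32_t ctrl;
    };

    static void init_tx_ring(struct dma_desc *ring)
    {
            int i;

            /* Every slot "used": nothing queued, hardware has nothing to send */
            for (i = 0; i < TX_RING_SIZE; i++) {
                    ring[i].addr = 0;
                    ring[i].ctrl = TX_USED;
            }
            /* Last descriptor sends the DMA engine back to slot 0 */
            ring[TX_RING_SIZE - 1].ctrl |= TX_WRAP;
    }

    int main(void)
    {
            static struct dma_desc ring[TX_RING_SIZE];

            init_tx_ring(ring);
            return 0;
    }
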
+
static void macb_init_rings(struct macb *bp)
{
int i;
@@ -757,7 +1243,7 @@ static void macb_init_rings(struct macb *bp)
for (i = 0; i < RX_RING_SIZE; i++) {
bp->rx_ring[i].addr = addr;
bp->rx_ring[i].ctrl = 0;
- addr += RX_BUFFER_SIZE;
+ addr += bp->rx_buffer_size;
}
bp->rx_ring[RX_RING_SIZE - 1].addr |= MACB_BIT(RX_WRAP);
@@ -772,9 +1258,6 @@ static void macb_init_rings(struct macb *bp)
static void macb_reset_hw(struct macb *bp)
{
- /* Make sure we have the write buffer for ourselves */
- wmb();
-
/*
* Disable RX and TX (XXX: Should we halt the transmission
* more gracefully?)
@@ -785,11 +1268,11 @@ static void macb_reset_hw(struct macb *bp)
macb_writel(bp, NCR, MACB_BIT(CLRSTAT));
/* Clear all status flags */
- macb_writel(bp, TSR, ~0UL);
- macb_writel(bp, RSR, ~0UL);
+ macb_writel(bp, TSR, -1);
+ macb_writel(bp, RSR, -1);
/* Disable all interrupts */
- macb_writel(bp, IDR, ~0UL);
+ macb_writel(bp, IDR, -1);
macb_readl(bp, ISR);
}
@@ -857,8 +1340,12 @@ static u32 macb_dbw(struct macb *bp)
}
/*
- * Configure the receive DMA engine to use the correct receive buffer size.
- * This is a configurable parameter for GEM.
+ * Configure the receive DMA engine
+ * - use the correct receive buffer size
+ * - set the possibility to use INCR16 bursts
+ * (if not supported by FIFO, it will fallback to default)
+ * - set both rx/tx packet buffers to full memory size
+ * These are configurable parameters for GEM.
*/
static void macb_configure_dma(struct macb *bp)
{
@@ -866,19 +1353,34 @@ static void macb_configure_dma(struct macb *bp)
if (macb_is_gem(bp)) {
dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L);
- dmacfg |= GEM_BF(RXBS, RX_BUFFER_SIZE / 64);
+ dmacfg |= GEM_BF(RXBS, bp->rx_buffer_size / RX_BUFFER_MULTIPLE);
+ dmacfg |= GEM_BF(FBLDO, 16);
+ dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L);
+ dmacfg &= ~GEM_BIT(ENDIA);
gem_writel(bp, DMACFG, dmacfg);
}
}
+/*
+ * Configure peripheral capabilities according to the integration options used
+ */
+static void macb_configure_caps(struct macb *bp)
+{
+ if (macb_is_gem(bp)) {
+ if (GEM_BFEXT(IRQCOR, gem_readl(bp, DCFG1)) == 0)
+ bp->caps |= MACB_CAPS_ISR_CLEAR_ON_WRITE;
+ }
+}
+
static void macb_init_hw(struct macb *bp)
{
u32 config;
macb_reset_hw(bp);
- __macb_set_hwaddr(bp);
+ macb_set_hwaddr(bp);
config = macb_mdc_clk_div(bp);
+ config |= MACB_BF(RBOF, NET_IP_ALIGN); /* Make eth data aligned */
config |= MACB_BIT(PAE); /* PAuse Enable */
config |= MACB_BIT(DRFCS); /* Discard Rx FCS */
config |= MACB_BIT(BIG); /* Receive oversized frames */
@@ -888,8 +1390,11 @@ static void macb_init_hw(struct macb *bp)
config |= MACB_BIT(NBC); /* No BroadCast */
config |= macb_dbw(bp);
macb_writel(bp, NCFGR, config);
+ bp->speed = SPEED_10;
+ bp->duplex = DUPLEX_HALF;
macb_configure_dma(bp);
+ macb_configure_caps(bp);
/* Initialize TX and RX buffers */
macb_writel(bp, RBQP, bp->rx_ring_dma);
@@ -899,13 +1404,8 @@ static void macb_init_hw(struct macb *bp)
macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE) | MACB_BIT(MPE));
/* Enable interrupts */
- macb_writel(bp, IER, (MACB_BIT(RCOMP)
- | MACB_BIT(RXUBR)
- | MACB_BIT(ISR_TUND)
- | MACB_BIT(ISR_RLE)
- | MACB_BIT(TXERR)
- | MACB_BIT(TCOMP)
- | MACB_BIT(ISR_ROVR)
+ macb_writel(bp, IER, (MACB_RX_INT_FLAGS
+ | MACB_TX_INT_FLAGS
| MACB_BIT(HRESP)));
}
@@ -993,7 +1493,7 @@ static void macb_sethashtable(struct net_device *dev)
/*
* Enable/Disable promiscuous and multicast modes.
*/
-static void macb_set_rx_mode(struct net_device *dev)
+void macb_set_rx_mode(struct net_device *dev)
{
unsigned long cfg;
struct macb *bp = netdev_priv(dev);
@@ -1025,20 +1525,25 @@ static void macb_set_rx_mode(struct net_device *dev)
macb_writel(bp, NCFGR, cfg);
}
+EXPORT_SYMBOL_GPL(macb_set_rx_mode);
static int macb_open(struct net_device *dev)
{
struct macb *bp = netdev_priv(dev);
+ size_t bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN;
int err;
netdev_dbg(bp->dev, "open\n");
+ /* carrier starts down */
+ netif_carrier_off(dev);
+
/* if the phy is not yet register, retry later*/
if (!bp->phy_dev)
return -EAGAIN;
- if (!is_valid_ether_addr(dev->dev_addr))
- return -EADDRNOTAVAIL;
+ /* RX buffers initialization */
+ macb_init_rx_buffer_size(bp, bufsz);
err = macb_alloc_consistent(bp);
if (err) {
@@ -1049,7 +1554,7 @@ static int macb_open(struct net_device *dev)
napi_enable(&bp->napi);
- macb_init_rings(bp);
+ bp->macbgem_ops.mog_init_rings(bp);
macb_init_hw(bp);
/* schedule a link state check */
@@ -1129,7 +1634,7 @@ static struct net_device_stats *gem_get_stats(struct macb *bp)
return nstat;
}
-static struct net_device_stats *macb_get_stats(struct net_device *dev)
+struct net_device_stats *macb_get_stats(struct net_device *dev)
{
struct macb *bp = netdev_priv(dev);
struct net_device_stats *nstat = &bp->stats;
@@ -1175,6 +1680,7 @@ static struct net_device_stats *macb_get_stats(struct net_device *dev)
return nstat;
}
+EXPORT_SYMBOL_GPL(macb_get_stats);
static int macb_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
@@ -1198,24 +1704,55 @@ static int macb_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
return phy_ethtool_sset(phydev, cmd);
}
-static void macb_get_drvinfo(struct net_device *dev,
- struct ethtool_drvinfo *info)
+static int macb_get_regs_len(struct net_device *netdev)
+{
+ return MACB_GREGS_NBR * sizeof(u32);
+}
+
+static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs,
+ void *p)
{
struct macb *bp = netdev_priv(dev);
+ unsigned int tail, head;
+ u32 *regs_buff = p;
+
+ regs->version = (macb_readl(bp, MID) & ((1 << MACB_REV_SIZE) - 1))
+ | MACB_GREGS_VERSION;
+
+ tail = macb_tx_ring_wrap(bp->tx_tail);
+ head = macb_tx_ring_wrap(bp->tx_head);
+
+ regs_buff[0] = macb_readl(bp, NCR);
+ regs_buff[1] = macb_or_gem_readl(bp, NCFGR);
+ regs_buff[2] = macb_readl(bp, NSR);
+ regs_buff[3] = macb_readl(bp, TSR);
+ regs_buff[4] = macb_readl(bp, RBQP);
+ regs_buff[5] = macb_readl(bp, TBQP);
+ regs_buff[6] = macb_readl(bp, RSR);
+ regs_buff[7] = macb_readl(bp, IMR);
- strcpy(info->driver, bp->pdev->dev.driver->name);
- strcpy(info->version, "$Revision: 1.14 $");
- strcpy(info->bus_info, dev_name(&bp->pdev->dev));
+ regs_buff[8] = tail;
+ regs_buff[9] = head;
+ regs_buff[10] = macb_tx_dma(bp, tail);
+ regs_buff[11] = macb_tx_dma(bp, head);
+
+ if (macb_is_gem(bp)) {
+ regs_buff[12] = gem_readl(bp, USRIO);
+ regs_buff[13] = gem_readl(bp, DMACFG);
+ }
}
-static const struct ethtool_ops macb_ethtool_ops = {
+const struct ethtool_ops macb_ethtool_ops = {
.get_settings = macb_get_settings,
.set_settings = macb_set_settings,
- .get_drvinfo = macb_get_drvinfo,
+ .get_regs_len = macb_get_regs_len,
+ .get_regs = macb_get_regs,
.get_link = ethtool_op_get_link,
+ .get_ts_info = ethtool_op_get_ts_info,
};
+EXPORT_SYMBOL_GPL(macb_ethtool_ops);
-static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
struct macb *bp = netdev_priv(dev);
struct phy_device *phydev = bp->phy_dev;
@@ -1228,6 +1765,7 @@ static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
return phy_mii_ioctl(phydev, rq, cmd);
}
+EXPORT_SYMBOL_GPL(macb_ioctl);
static const struct net_device_ops macb_netdev_ops = {
.ndo_open = macb_open,
@@ -1253,41 +1791,7 @@ static const struct of_device_id macb_dt_ids[] = {
{ .compatible = "cdns,gem" },
{ /* sentinel */ }
};
-
MODULE_DEVICE_TABLE(of, macb_dt_ids);
-
-static int __devinit macb_get_phy_mode_dt(struct platform_device *pdev)
-{
- struct device_node *np = pdev->dev.of_node;
-
- if (np)
- return of_get_phy_mode(np);
-
- return -ENODEV;
-}
-
-static int __devinit macb_get_hwaddr_dt(struct macb *bp)
-{
- struct device_node *np = bp->pdev->dev.of_node;
- if (np) {
- const char *mac = of_get_mac_address(np);
- if (mac) {
- memcpy(bp->dev->dev_addr, mac, ETH_ALEN);
- return 0;
- }
- }
-
- return -ENODEV;
-}
-#else
-static int __devinit macb_get_phy_mode_dt(struct platform_device *pdev)
-{
- return -ENODEV;
-}
-static int __devinit macb_get_hwaddr_dt(struct macb *bp)
-{
- return -ENODEV;
-}
#endif
static int __init macb_probe(struct platform_device *pdev)
@@ -1299,6 +1803,8 @@ static int __init macb_probe(struct platform_device *pdev)
struct phy_device *phydev;
u32 config;
int err = -ENXIO;
+ struct pinctrl *pinctrl;
+ const char *mac;
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!regs) {
@@ -1306,12 +1812,19 @@ static int __init macb_probe(struct platform_device *pdev)
goto err_out;
}
+ pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
+ if (IS_ERR(pinctrl)) {
+ err = PTR_ERR(pinctrl);
+ if (err == -EPROBE_DEFER)
+ goto err_out;
+
+ dev_warn(&pdev->dev, "No pinctrl provided\n");
+ }
+
err = -ENOMEM;
dev = alloc_etherdev(sizeof(*bp));
- if (!dev) {
- dev_err(&pdev->dev, "etherdev alloc failed, aborting.\n");
+ if (!dev)
goto err_out;
- }
SET_NETDEV_DEV(dev, &pdev->dev);
@@ -1323,22 +1836,46 @@ static int __init macb_probe(struct platform_device *pdev)
bp->dev = dev;
spin_lock_init(&bp->lock);
+ INIT_WORK(&bp->tx_error_task, macb_tx_error_task);
- bp->pclk = clk_get(&pdev->dev, "pclk");
+ bp->pclk = devm_clk_get(&pdev->dev, "pclk");
if (IS_ERR(bp->pclk)) {
- dev_err(&pdev->dev, "failed to get macb_clk\n");
+ err = PTR_ERR(bp->pclk);
+ dev_err(&pdev->dev, "failed to get macb_clk (%u)\n", err);
goto err_out_free_dev;
}
- clk_enable(bp->pclk);
- bp->hclk = clk_get(&pdev->dev, "hclk");
+ bp->hclk = devm_clk_get(&pdev->dev, "hclk");
if (IS_ERR(bp->hclk)) {
- dev_err(&pdev->dev, "failed to get hclk\n");
- goto err_out_put_pclk;
+ err = PTR_ERR(bp->hclk);
+ dev_err(&pdev->dev, "failed to get hclk (%u)\n", err);
+ goto err_out_free_dev;
+ }
+
+ bp->tx_clk = devm_clk_get(&pdev->dev, "tx_clk");
+
+ err = clk_prepare_enable(bp->pclk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable pclk (%u)\n", err);
+ goto err_out_free_dev;
+ }
+
+ err = clk_prepare_enable(bp->hclk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable hclk (%u)\n", err);
+ goto err_out_disable_pclk;
+ }
+
+ if (!IS_ERR(bp->tx_clk)) {
+ err = clk_prepare_enable(bp->tx_clk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable tx_clk (%u)\n",
+ err);
+ goto err_out_disable_hclk;
+ }
}
- clk_enable(bp->hclk);
- bp->regs = ioremap(regs->start, resource_size(regs));
+ bp->regs = devm_ioremap(&pdev->dev, regs->start, resource_size(regs));
if (!bp->regs) {
dev_err(&pdev->dev, "failed to map registers, aborting.\n");
err = -ENOMEM;
@@ -1346,11 +1883,12 @@ static int __init macb_probe(struct platform_device *pdev)
}
dev->irq = platform_get_irq(pdev, 0);
- err = request_irq(dev->irq, macb_interrupt, 0, dev->name, dev);
+ err = devm_request_irq(&pdev->dev, dev->irq, macb_interrupt, 0,
+ dev->name, dev);
if (err) {
dev_err(&pdev->dev, "Unable to request IRQ %d (error %d)\n",
dev->irq, err);
- goto err_out_iounmap;
+ goto err_out_disable_clocks;
}
dev->netdev_ops = &macb_netdev_ops;
@@ -1359,18 +1897,33 @@ static int __init macb_probe(struct platform_device *pdev)
dev->base_addr = regs->start;
+ /* setup the appropriate routines according to adapter type */
+ if (macb_is_gem(bp)) {
+ bp->macbgem_ops.mog_alloc_rx_buffers = gem_alloc_rx_buffers;
+ bp->macbgem_ops.mog_free_rx_buffers = gem_free_rx_buffers;
+ bp->macbgem_ops.mog_init_rings = gem_init_rings;
+ bp->macbgem_ops.mog_rx = gem_rx;
+ } else {
+ bp->macbgem_ops.mog_alloc_rx_buffers = macb_alloc_rx_buffers;
+ bp->macbgem_ops.mog_free_rx_buffers = macb_free_rx_buffers;
+ bp->macbgem_ops.mog_init_rings = macb_init_rings;
+ bp->macbgem_ops.mog_rx = macb_rx;
+ }
+
/* Set MII management clock divider */
config = macb_mdc_clk_div(bp);
config |= macb_dbw(bp);
macb_writel(bp, NCFGR, config);
- err = macb_get_hwaddr_dt(bp);
- if (err < 0)
+ mac = of_get_mac_address(pdev->dev.of_node);
+ if (mac)
+ memcpy(bp->dev->dev_addr, mac, ETH_ALEN);
+ else
macb_get_hwaddr(bp);
- err = macb_get_phy_mode_dt(pdev);
+ err = of_get_phy_mode(pdev->dev.of_node);
if (err < 0) {
- pdata = pdev->dev.platform_data;
+ pdata = dev_get_platdata(&pdev->dev);
if (pdata && pdata->is_rmii)
bp->phy_interface = PHY_INTERFACE_MODE_RMII;
else
@@ -1379,7 +1932,9 @@ static int __init macb_probe(struct platform_device *pdev)
bp->phy_interface = err;
}
- if (bp->phy_interface == PHY_INTERFACE_MODE_RMII)
+ if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII)
+ macb_or_gem_writel(bp, USRIO, GEM_BIT(RGMII));
+ else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII)
#if defined(CONFIG_ARCH_AT91)
macb_or_gem_writel(bp, USRIO, (MACB_BIT(RMII) |
MACB_BIT(CLKEN)));
@@ -1393,20 +1948,20 @@ static int __init macb_probe(struct platform_device *pdev)
macb_or_gem_writel(bp, USRIO, MACB_BIT(MII));
#endif
- bp->tx_pending = DEF_TX_RING_PENDING;
-
err = register_netdev(dev);
if (err) {
dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
- goto err_out_free_irq;
+ goto err_out_disable_clocks;
}
- if (macb_mii_init(bp) != 0) {
+ err = macb_mii_init(bp);
+ if (err)
goto err_out_unregister_netdev;
- }
platform_set_drvdata(pdev, dev);
+ netif_carrier_off(dev);
+
netdev_info(dev, "Cadence %s at 0x%08lx irq %d (%pM)\n",
macb_is_gem(bp) ? "GEM" : "MACB", dev->base_addr,
dev->irq, dev->dev_addr);
@@ -1419,20 +1974,16 @@ static int __init macb_probe(struct platform_device *pdev)
err_out_unregister_netdev:
unregister_netdev(dev);
-err_out_free_irq:
- free_irq(dev->irq, dev);
-err_out_iounmap:
- iounmap(bp->regs);
err_out_disable_clocks:
- clk_disable(bp->hclk);
- clk_put(bp->hclk);
- clk_disable(bp->pclk);
-err_out_put_pclk:
- clk_put(bp->pclk);
+ if (!IS_ERR(bp->tx_clk))
+ clk_disable_unprepare(bp->tx_clk);
+err_out_disable_hclk:
+ clk_disable_unprepare(bp->hclk);
+err_out_disable_pclk:
+ clk_disable_unprepare(bp->pclk);
err_out_free_dev:
free_netdev(dev);
err_out:
- platform_set_drvdata(pdev, NULL);
return err;
}
@@ -1451,73 +2002,64 @@ static int __exit macb_remove(struct platform_device *pdev)
kfree(bp->mii_bus->irq);
mdiobus_free(bp->mii_bus);
unregister_netdev(dev);
- free_irq(dev->irq, dev);
- iounmap(bp->regs);
- clk_disable(bp->hclk);
- clk_put(bp->hclk);
- clk_disable(bp->pclk);
- clk_put(bp->pclk);
+ if (!IS_ERR(bp->tx_clk))
+ clk_disable_unprepare(bp->tx_clk);
+ clk_disable_unprepare(bp->hclk);
+ clk_disable_unprepare(bp->pclk);
free_netdev(dev);
- platform_set_drvdata(pdev, NULL);
}
return 0;
}
#ifdef CONFIG_PM
-static int macb_suspend(struct platform_device *pdev, pm_message_t state)
+static int macb_suspend(struct device *dev)
{
+ struct platform_device *pdev = to_platform_device(dev);
struct net_device *netdev = platform_get_drvdata(pdev);
struct macb *bp = netdev_priv(netdev);
+ netif_carrier_off(netdev);
netif_device_detach(netdev);
- clk_disable(bp->hclk);
- clk_disable(bp->pclk);
+ if (!IS_ERR(bp->tx_clk))
+ clk_disable_unprepare(bp->tx_clk);
+ clk_disable_unprepare(bp->hclk);
+ clk_disable_unprepare(bp->pclk);
return 0;
}
-static int macb_resume(struct platform_device *pdev)
+static int macb_resume(struct device *dev)
{
+ struct platform_device *pdev = to_platform_device(dev);
struct net_device *netdev = platform_get_drvdata(pdev);
struct macb *bp = netdev_priv(netdev);
- clk_enable(bp->pclk);
- clk_enable(bp->hclk);
+ clk_prepare_enable(bp->pclk);
+ clk_prepare_enable(bp->hclk);
+ if (!IS_ERR(bp->tx_clk))
+ clk_prepare_enable(bp->tx_clk);
netif_device_attach(netdev);
return 0;
}
-#else
-#define macb_suspend NULL
-#define macb_resume NULL
#endif
+static SIMPLE_DEV_PM_OPS(macb_pm_ops, macb_suspend, macb_resume);
+
static struct platform_driver macb_driver = {
.remove = __exit_p(macb_remove),
- .suspend = macb_suspend,
- .resume = macb_resume,
.driver = {
.name = "macb",
.owner = THIS_MODULE,
.of_match_table = of_match_ptr(macb_dt_ids),
+ .pm = &macb_pm_ops,
},
};
-static int __init macb_init(void)
-{
- return platform_driver_probe(&macb_driver, macb_probe);
-}
-
-static void __exit macb_exit(void)
-{
- platform_driver_unregister(&macb_driver);
-}
-
-module_init(macb_init);
-module_exit(macb_exit);
+module_platform_driver_probe(macb_driver, macb_probe);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Cadence MACB/GEM Ethernet driver");