Diffstat (limited to 'drivers/net/ethernet/freescale')
28 files changed, 6743 insertions, 4862 deletions
diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig index 3574e1499df..270308315d4 100644 --- a/drivers/net/ethernet/freescale/Kconfig +++ b/drivers/net/ethernet/freescale/Kconfig @@ -26,6 +26,7 @@ config FEC ARCH_MXC || SOC_IMX28) default ARCH_MXC || SOC_IMX28 if ARM select PHYLIB + select PTP_1588_CLOCK ---help--- Say Y here if you want to use the built-in 10/100 Fast ethernet controller on some Motorola ColdFire and Freescale i.MX processors. @@ -62,6 +63,14 @@ config FSL_PQ_MDIO ---help--- This driver supports the MDIO bus used by the gianfar and UCC drivers. +config FSL_XGMAC_MDIO + tristate "Freescale XGMAC MDIO" + depends on FSL_SOC + select PHYLIB + select OF_MDIO + ---help--- + This driver supports the MDIO bus on the Fman 10G Ethernet MACs. + config UCC_GETH tristate "Freescale QE Gigabit Ethernet" depends on QUICC_ENGINE diff --git a/drivers/net/ethernet/freescale/Makefile b/drivers/net/ethernet/freescale/Makefile index 1752488c9ee..71debd1c18c 100644 --- a/drivers/net/ethernet/freescale/Makefile +++ b/drivers/net/ethernet/freescale/Makefile @@ -3,16 +3,17 @@ # obj-$(CONFIG_FEC) += fec.o +fec-objs :=fec_main.o fec_ptp.o obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx.o ifeq ($(CONFIG_FEC_MPC52xx_MDIO),y) obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx_phy.o endif obj-$(CONFIG_FS_ENET) += fs_enet/ obj-$(CONFIG_FSL_PQ_MDIO) += fsl_pq_mdio.o +obj-$(CONFIG_FSL_XGMAC_MDIO) += xgmac_mdio.o obj-$(CONFIG_GIANFAR) += gianfar_driver.o obj-$(CONFIG_PTP_1588_CLOCK_GIANFAR) += gianfar_ptp.o gianfar_driver-objs := gianfar.o \ - gianfar_ethtool.o \ - gianfar_sysfs.o + gianfar_ethtool.o obj-$(CONFIG_UCC_GETH) += ucc_geth_driver.o ucc_geth_driver-objs := ucc_geth.o ucc_geth_ethtool.o diff --git a/drivers/net/ethernet/freescale/fec.c b/drivers/net/ethernet/freescale/fec.c deleted file mode 100644 index e92ef1bd732..00000000000 --- a/drivers/net/ethernet/freescale/fec.c +++ /dev/null @@ -1,1759 +0,0 @@ -/* - * Fast Ethernet Controller (FEC) driver for Motorola MPC8xx. - * Copyright (c) 1997 Dan Malek (dmalek@jlc.net) - * - * Right now, I am very wasteful with the buffers. I allocate memory - * pages and then divide them into 2K frame buffers. This way I know I - * have buffers large enough to hold one frame within one buffer descriptor. - * Once I get this working, I will use 64 or 128 byte CPM buffers, which - * will be much more memory efficient and will easily handle lots of - * small packets. - * - * Much better multiple PHY support by Magnus Damm. - * Copyright (c) 2000 Ericsson Radio Systems AB. - * - * Support for FEC controller of ColdFire processors. - * Copyright (c) 2001-2005 Greg Ungerer (gerg@snapgear.com) - * - * Bug fixes and cleanup by Philippe De Muyter (phdm@macqel.be) - * Copyright (c) 2004-2006 Macq Electronique SA. - * - * Copyright (C) 2010-2011 Freescale Semiconductor, Inc. 
- */ - -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/string.h> -#include <linux/ptrace.h> -#include <linux/errno.h> -#include <linux/ioport.h> -#include <linux/slab.h> -#include <linux/interrupt.h> -#include <linux/pci.h> -#include <linux/init.h> -#include <linux/delay.h> -#include <linux/netdevice.h> -#include <linux/etherdevice.h> -#include <linux/skbuff.h> -#include <linux/spinlock.h> -#include <linux/workqueue.h> -#include <linux/bitops.h> -#include <linux/io.h> -#include <linux/irq.h> -#include <linux/clk.h> -#include <linux/platform_device.h> -#include <linux/phy.h> -#include <linux/fec.h> -#include <linux/of.h> -#include <linux/of_device.h> -#include <linux/of_gpio.h> -#include <linux/of_net.h> - -#include <asm/cacheflush.h> - -#ifndef CONFIG_ARM -#include <asm/coldfire.h> -#include <asm/mcfsim.h> -#endif - -#include "fec.h" - -#if defined(CONFIG_ARM) -#define FEC_ALIGNMENT 0xf -#else -#define FEC_ALIGNMENT 0x3 -#endif - -#define DRIVER_NAME "fec" - -/* Controller is ENET-MAC */ -#define FEC_QUIRK_ENET_MAC (1 << 0) -/* Controller needs driver to swap frame */ -#define FEC_QUIRK_SWAP_FRAME (1 << 1) -/* Controller uses gasket */ -#define FEC_QUIRK_USE_GASKET (1 << 2) -/* Controller has GBIT support */ -#define FEC_QUIRK_HAS_GBIT (1 << 3) - -static struct platform_device_id fec_devtype[] = { - { - /* keep it for coldfire */ - .name = DRIVER_NAME, - .driver_data = 0, - }, { - .name = "imx25-fec", - .driver_data = FEC_QUIRK_USE_GASKET, - }, { - .name = "imx27-fec", - .driver_data = 0, - }, { - .name = "imx28-fec", - .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME, - }, { - .name = "imx6q-fec", - .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT, - }, { - /* sentinel */ - } -}; -MODULE_DEVICE_TABLE(platform, fec_devtype); - -enum imx_fec_type { - IMX25_FEC = 1, /* runs on i.mx25/50/53 */ - IMX27_FEC, /* runs on i.mx27/35/51 */ - IMX28_FEC, - IMX6Q_FEC, -}; - -static const struct of_device_id fec_dt_ids[] = { - { .compatible = "fsl,imx25-fec", .data = &fec_devtype[IMX25_FEC], }, - { .compatible = "fsl,imx27-fec", .data = &fec_devtype[IMX27_FEC], }, - { .compatible = "fsl,imx28-fec", .data = &fec_devtype[IMX28_FEC], }, - { .compatible = "fsl,imx6q-fec", .data = &fec_devtype[IMX6Q_FEC], }, - { /* sentinel */ } -}; -MODULE_DEVICE_TABLE(of, fec_dt_ids); - -static unsigned char macaddr[ETH_ALEN]; -module_param_array(macaddr, byte, NULL, 0); -MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address"); - -#if defined(CONFIG_M5272) -/* - * Some hardware gets it MAC address out of local flash memory. - * if this is non-zero then assume it is the address to get MAC from. - */ -#if defined(CONFIG_NETtel) -#define FEC_FLASHMAC 0xf0006006 -#elif defined(CONFIG_GILBARCONAP) || defined(CONFIG_SCALES) -#define FEC_FLASHMAC 0xf0006000 -#elif defined(CONFIG_CANCam) -#define FEC_FLASHMAC 0xf0020000 -#elif defined (CONFIG_M5272C3) -#define FEC_FLASHMAC (0xffe04000 + 4) -#elif defined(CONFIG_MOD5272) -#define FEC_FLASHMAC 0xffc0406b -#else -#define FEC_FLASHMAC 0 -#endif -#endif /* CONFIG_M5272 */ - -/* The number of Tx and Rx buffers. These are allocated from the page - * pool. The code may assume these are power of two, so it it best - * to keep them that size. - * We don't need to allocate pages for the transmitter. We just use - * the skbuffer directly. 
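
The power-of-two requirement called out above exists because the driver wraps ring indices with a bitwise AND instead of a modulo (see fep->skb_cur = (fep->skb_cur + 1) & TX_RING_MOD_MASK in the transmit path below). A minimal sketch of why the mask form only works for power-of-two sizes:

    /* TX_RING_SIZE = 16, TX_RING_MOD_MASK = 15 (0b1111):
     * (15 + 1) & 15 == 0, so the index wraps exactly at the ring
     * boundary. With a non-power-of-two size such as 12 (mask 11),
     * (11 + 1) & 11 == 8, not 0, and ring entries would be skipped.
     */
    next = (cur + 1) & TX_RING_MOD_MASK;
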
- */ -#define FEC_ENET_RX_PAGES 8 -#define FEC_ENET_RX_FRSIZE 2048 -#define FEC_ENET_RX_FRPPG (PAGE_SIZE / FEC_ENET_RX_FRSIZE) -#define RX_RING_SIZE (FEC_ENET_RX_FRPPG * FEC_ENET_RX_PAGES) -#define FEC_ENET_TX_FRSIZE 2048 -#define FEC_ENET_TX_FRPPG (PAGE_SIZE / FEC_ENET_TX_FRSIZE) -#define TX_RING_SIZE 16 /* Must be power of two */ -#define TX_RING_MOD_MASK 15 /* for this to work */ - -#if (((RX_RING_SIZE + TX_RING_SIZE) * 8) > PAGE_SIZE) -#error "FEC: descriptor ring size constants too large" -#endif - -/* Interrupt events/masks. */ -#define FEC_ENET_HBERR ((uint)0x80000000) /* Heartbeat error */ -#define FEC_ENET_BABR ((uint)0x40000000) /* Babbling receiver */ -#define FEC_ENET_BABT ((uint)0x20000000) /* Babbling transmitter */ -#define FEC_ENET_GRA ((uint)0x10000000) /* Graceful stop complete */ -#define FEC_ENET_TXF ((uint)0x08000000) /* Full frame transmitted */ -#define FEC_ENET_TXB ((uint)0x04000000) /* A buffer was transmitted */ -#define FEC_ENET_RXF ((uint)0x02000000) /* Full frame received */ -#define FEC_ENET_RXB ((uint)0x01000000) /* A buffer was received */ -#define FEC_ENET_MII ((uint)0x00800000) /* MII interrupt */ -#define FEC_ENET_EBERR ((uint)0x00400000) /* SDMA bus error */ - -#define FEC_DEFAULT_IMASK (FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII) - -/* The FEC stores dest/src/type, data, and checksum for receive packets. - */ -#define PKT_MAXBUF_SIZE 1518 -#define PKT_MINBUF_SIZE 64 -#define PKT_MAXBLR_SIZE 1520 - -/* This device has up to three irqs on some platforms */ -#define FEC_IRQ_NUM 3 - -/* - * The 5270/5271/5280/5282/532x RX control register also contains maximum frame - * size bits. Other FEC hardware does not, so we need to take that into - * account when setting it. - */ -#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ - defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) -#define OPT_FRAME_SIZE (PKT_MAXBUF_SIZE << 16) -#else -#define OPT_FRAME_SIZE 0 -#endif - -/* The FEC buffer descriptors track the ring buffers. The rx_bd_base and - * tx_bd_base always point to the base of the buffer descriptors. The - * cur_rx and cur_tx point to the currently available buffer. - * The dirty_tx tracks the current buffer that is being sent by the - * controller. The cur_tx and dirty_tx are equal under both completely - * empty and completely full conditions. The empty/ready indicator in - * the buffer descriptor determines the actual condition. - */ -struct fec_enet_private { - /* Hardware registers of the FEC device */ - void __iomem *hwp; - - struct net_device *netdev; - - struct clk *clk; - - /* The saved address of a sent-in-place packet/buffer, for skfree(). 
*/ - unsigned char *tx_bounce[TX_RING_SIZE]; - struct sk_buff* tx_skbuff[TX_RING_SIZE]; - struct sk_buff* rx_skbuff[RX_RING_SIZE]; - ushort skb_cur; - ushort skb_dirty; - - /* CPM dual port RAM relative addresses */ - dma_addr_t bd_dma; - /* Address of Rx and Tx buffers */ - struct bufdesc *rx_bd_base; - struct bufdesc *tx_bd_base; - /* The next free ring entry */ - struct bufdesc *cur_rx, *cur_tx; - /* The ring entries to be free()ed */ - struct bufdesc *dirty_tx; - - uint tx_full; - /* hold while accessing the HW like ringbuffer for tx/rx but not MAC */ - spinlock_t hw_lock; - - struct platform_device *pdev; - - int opened; - int dev_id; - - /* Phylib and MDIO interface */ - struct mii_bus *mii_bus; - struct phy_device *phy_dev; - int mii_timeout; - uint phy_speed; - phy_interface_t phy_interface; - int link; - int full_duplex; - struct completion mdio_done; - int irq[FEC_IRQ_NUM]; -}; - -/* FEC MII MMFR bits definition */ -#define FEC_MMFR_ST (1 << 30) -#define FEC_MMFR_OP_READ (2 << 28) -#define FEC_MMFR_OP_WRITE (1 << 28) -#define FEC_MMFR_PA(v) ((v & 0x1f) << 23) -#define FEC_MMFR_RA(v) ((v & 0x1f) << 18) -#define FEC_MMFR_TA (2 << 16) -#define FEC_MMFR_DATA(v) (v & 0xffff) - -#define FEC_MII_TIMEOUT 30000 /* us */ - -/* Transmitter timeout */ -#define TX_TIMEOUT (2 * HZ) - -static int mii_cnt; - -static void *swap_buffer(void *bufaddr, int len) -{ - int i; - unsigned int *buf = bufaddr; - - for (i = 0; i < (len + 3) / 4; i++, buf++) - *buf = cpu_to_be32(*buf); - - return bufaddr; -} - -static netdev_tx_t -fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev) -{ - struct fec_enet_private *fep = netdev_priv(ndev); - const struct platform_device_id *id_entry = - platform_get_device_id(fep->pdev); - struct bufdesc *bdp; - void *bufaddr; - unsigned short status; - unsigned long flags; - - if (!fep->link) { - /* Link is down or autonegotiation is in progress. */ - return NETDEV_TX_BUSY; - } - - spin_lock_irqsave(&fep->hw_lock, flags); - /* Fill in a Tx ring entry */ - bdp = fep->cur_tx; - - status = bdp->cbd_sc; - - if (status & BD_ENET_TX_READY) { - /* Ooops. All transmit buffers are full. Bail out. - * This should not happen, since ndev->tbusy should be set. - */ - printk("%s: tx queue full!.\n", ndev->name); - spin_unlock_irqrestore(&fep->hw_lock, flags); - return NETDEV_TX_BUSY; - } - - /* Clear all of the status flags */ - status &= ~BD_ENET_TX_STATS; - - /* Set buffer length and buffer pointer */ - bufaddr = skb->data; - bdp->cbd_datlen = skb->len; - - /* - * On some FEC implementations data must be aligned on - * 4-byte boundaries. Use bounce buffers to copy data - * and get it aligned. Ugh. - */ - if (((unsigned long) bufaddr) & FEC_ALIGNMENT) { - unsigned int index; - index = bdp - fep->tx_bd_base; - memcpy(fep->tx_bounce[index], skb->data, skb->len); - bufaddr = fep->tx_bounce[index]; - } - - /* - * Some design made an incorrect assumption on endian mode of - * the system that it's running on. As the result, driver has to - * swap every frame going to and coming from the controller. - */ - if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) - swap_buffer(bufaddr, skb->len); - - /* Save skb pointer */ - fep->tx_skbuff[fep->skb_cur] = skb; - - ndev->stats.tx_bytes += skb->len; - fep->skb_cur = (fep->skb_cur+1) & TX_RING_MOD_MASK; - - /* Push the data cache so the CPM does not get stale memory - * data. - */ - bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, bufaddr, - FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE); - - /* Send it on its way. 
Tell FEC it's ready, interrupt when done, - * it's the last BD of the frame, and to put the CRC on the end. - */ - status |= (BD_ENET_TX_READY | BD_ENET_TX_INTR - | BD_ENET_TX_LAST | BD_ENET_TX_TC); - bdp->cbd_sc = status; - - /* Trigger transmission start */ - writel(0, fep->hwp + FEC_X_DES_ACTIVE); - - /* If this was the last BD in the ring, start at the beginning again. */ - if (status & BD_ENET_TX_WRAP) - bdp = fep->tx_bd_base; - else - bdp++; - - if (bdp == fep->dirty_tx) { - fep->tx_full = 1; - netif_stop_queue(ndev); - } - - fep->cur_tx = bdp; - - skb_tx_timestamp(skb); - - spin_unlock_irqrestore(&fep->hw_lock, flags); - - return NETDEV_TX_OK; -} - -/* This function is called to start or restart the FEC during a link - * change. This only happens when switching between half and full - * duplex. - */ -static void -fec_restart(struct net_device *ndev, int duplex) -{ - struct fec_enet_private *fep = netdev_priv(ndev); - const struct platform_device_id *id_entry = - platform_get_device_id(fep->pdev); - int i; - u32 temp_mac[2]; - u32 rcntl = OPT_FRAME_SIZE | 0x04; - u32 ecntl = 0x2; /* ETHEREN */ - - /* Whack a reset. We should wait for this. */ - writel(1, fep->hwp + FEC_ECNTRL); - udelay(10); - - /* - * enet-mac reset will reset mac address registers too, - * so need to reconfigure it. - */ - if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) { - memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN); - writel(cpu_to_be32(temp_mac[0]), fep->hwp + FEC_ADDR_LOW); - writel(cpu_to_be32(temp_mac[1]), fep->hwp + FEC_ADDR_HIGH); - } - - /* Clear any outstanding interrupt. */ - writel(0xffc00000, fep->hwp + FEC_IEVENT); - - /* Reset all multicast. */ - writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH); - writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW); -#ifndef CONFIG_M5272 - writel(0, fep->hwp + FEC_HASH_TABLE_HIGH); - writel(0, fep->hwp + FEC_HASH_TABLE_LOW); -#endif - - /* Set maximum receive buffer size. */ - writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE); - - /* Set receive and transmit descriptor base. */ - writel(fep->bd_dma, fep->hwp + FEC_R_DES_START); - writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc) * RX_RING_SIZE, - fep->hwp + FEC_X_DES_START); - - fep->dirty_tx = fep->cur_tx = fep->tx_bd_base; - fep->cur_rx = fep->rx_bd_base; - - /* Reset SKB transmit buffers. */ - fep->skb_cur = fep->skb_dirty = 0; - for (i = 0; i <= TX_RING_MOD_MASK; i++) { - if (fep->tx_skbuff[i]) { - dev_kfree_skb_any(fep->tx_skbuff[i]); - fep->tx_skbuff[i] = NULL; - } - } - - /* Enable MII mode */ - if (duplex) { - /* FD enable */ - writel(0x04, fep->hwp + FEC_X_CNTRL); - } else { - /* No Rcv on Xmit */ - rcntl |= 0x02; - writel(0x0, fep->hwp + FEC_X_CNTRL); - } - - fep->full_duplex = duplex; - - /* Set MII speed */ - writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); - - /* - * The phy interface and speed need to get configured - * differently on enet-mac. 
- */ - if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) { - /* Enable flow control and length check */ - rcntl |= 0x40000000 | 0x00000020; - - /* RGMII, RMII or MII */ - if (fep->phy_interface == PHY_INTERFACE_MODE_RGMII) - rcntl |= (1 << 6); - else if (fep->phy_interface == PHY_INTERFACE_MODE_RMII) - rcntl |= (1 << 8); - else - rcntl &= ~(1 << 8); - - /* 1G, 100M or 10M */ - if (fep->phy_dev) { - if (fep->phy_dev->speed == SPEED_1000) - ecntl |= (1 << 5); - else if (fep->phy_dev->speed == SPEED_100) - rcntl &= ~(1 << 9); - else - rcntl |= (1 << 9); - } - } else { -#ifdef FEC_MIIGSK_ENR - if (id_entry->driver_data & FEC_QUIRK_USE_GASKET) { - u32 cfgr; - /* disable the gasket and wait */ - writel(0, fep->hwp + FEC_MIIGSK_ENR); - while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4) - udelay(1); - - /* - * configure the gasket: - * RMII, 50 MHz, no loopback, no echo - * MII, 25 MHz, no loopback, no echo - */ - cfgr = (fep->phy_interface == PHY_INTERFACE_MODE_RMII) - ? BM_MIIGSK_CFGR_RMII : BM_MIIGSK_CFGR_MII; - if (fep->phy_dev && fep->phy_dev->speed == SPEED_10) - cfgr |= BM_MIIGSK_CFGR_FRCONT_10M; - writel(cfgr, fep->hwp + FEC_MIIGSK_CFGR); - - /* re-enable the gasket */ - writel(2, fep->hwp + FEC_MIIGSK_ENR); - } -#endif - } - writel(rcntl, fep->hwp + FEC_R_CNTRL); - - if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) { - /* enable ENET endian swap */ - ecntl |= (1 << 8); - /* enable ENET store and forward mode */ - writel(1 << 8, fep->hwp + FEC_X_WMRK); - } - - /* And last, enable the transmit and receive processing */ - writel(ecntl, fep->hwp + FEC_ECNTRL); - writel(0, fep->hwp + FEC_R_DES_ACTIVE); - - /* Enable interrupts we wish to service */ - writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); -} - -static void -fec_stop(struct net_device *ndev) -{ - struct fec_enet_private *fep = netdev_priv(ndev); - const struct platform_device_id *id_entry = - platform_get_device_id(fep->pdev); - u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & (1 << 8); - - /* We cannot expect a graceful transmit stop without link !!! */ - if (fep->link) { - writel(1, fep->hwp + FEC_X_CNTRL); /* Graceful transmit stop */ - udelay(10); - if (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_GRA)) - printk("fec_stop : Graceful transmit stop did not complete !\n"); - } - - /* Whack a reset. We should wait for this. */ - writel(1, fep->hwp + FEC_ECNTRL); - udelay(10); - writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); - writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); - - /* We have to keep ENET enabled to have MII interrupt stay working */ - if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) { - writel(2, fep->hwp + FEC_ECNTRL); - writel(rmii_mode, fep->hwp + FEC_R_CNTRL); - } -} - - -static void -fec_timeout(struct net_device *ndev) -{ - struct fec_enet_private *fep = netdev_priv(ndev); - - ndev->stats.tx_errors++; - - fec_restart(ndev, fep->full_duplex); - netif_wake_queue(ndev); -} - -static void -fec_enet_tx(struct net_device *ndev) -{ - struct fec_enet_private *fep; - struct bufdesc *bdp; - unsigned short status; - struct sk_buff *skb; - - fep = netdev_priv(ndev); - spin_lock(&fep->hw_lock); - bdp = fep->dirty_tx; - - while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) { - if (bdp == fep->cur_tx && fep->tx_full == 0) - break; - - dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, - FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE); - bdp->cbd_bufaddr = 0; - - skb = fep->tx_skbuff[fep->skb_dirty]; - /* Check for errors. 
*/ - if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC | - BD_ENET_TX_RL | BD_ENET_TX_UN | - BD_ENET_TX_CSL)) { - ndev->stats.tx_errors++; - if (status & BD_ENET_TX_HB) /* No heartbeat */ - ndev->stats.tx_heartbeat_errors++; - if (status & BD_ENET_TX_LC) /* Late collision */ - ndev->stats.tx_window_errors++; - if (status & BD_ENET_TX_RL) /* Retrans limit */ - ndev->stats.tx_aborted_errors++; - if (status & BD_ENET_TX_UN) /* Underrun */ - ndev->stats.tx_fifo_errors++; - if (status & BD_ENET_TX_CSL) /* Carrier lost */ - ndev->stats.tx_carrier_errors++; - } else { - ndev->stats.tx_packets++; - } - - if (status & BD_ENET_TX_READY) - printk("HEY! Enet xmit interrupt and TX_READY.\n"); - - /* Deferred means some collisions occurred during transmit, - * but we eventually sent the packet OK. - */ - if (status & BD_ENET_TX_DEF) - ndev->stats.collisions++; - - /* Free the sk buffer associated with this last transmit */ - dev_kfree_skb_any(skb); - fep->tx_skbuff[fep->skb_dirty] = NULL; - fep->skb_dirty = (fep->skb_dirty + 1) & TX_RING_MOD_MASK; - - /* Update pointer to next buffer descriptor to be transmitted */ - if (status & BD_ENET_TX_WRAP) - bdp = fep->tx_bd_base; - else - bdp++; - - /* Since we have freed up a buffer, the ring is no longer full - */ - if (fep->tx_full) { - fep->tx_full = 0; - if (netif_queue_stopped(ndev)) - netif_wake_queue(ndev); - } - } - fep->dirty_tx = bdp; - spin_unlock(&fep->hw_lock); -} - - -/* During a receive, the cur_rx points to the current incoming buffer. - * When we update through the ring, if the next incoming buffer has - * not been given to the system, we just set the empty indicator, - * effectively tossing the packet. - */ -static void -fec_enet_rx(struct net_device *ndev) -{ - struct fec_enet_private *fep = netdev_priv(ndev); - const struct platform_device_id *id_entry = - platform_get_device_id(fep->pdev); - struct bufdesc *bdp; - unsigned short status; - struct sk_buff *skb; - ushort pkt_len; - __u8 *data; - -#ifdef CONFIG_M532x - flush_cache_all(); -#endif - - spin_lock(&fep->hw_lock); - - /* First, grab all of the stats for the incoming packet. - * These get messed up if we get called due to a busy condition. - */ - bdp = fep->cur_rx; - - while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) { - - /* Since we have allocated space to hold a complete frame, - * the last indicator should be set. - */ - if ((status & BD_ENET_RX_LAST) == 0) - printk("FEC ENET: rcv is not +last\n"); - - if (!fep->opened) - goto rx_processing_done; - - /* Check for errors. */ - if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO | - BD_ENET_RX_CR | BD_ENET_RX_OV)) { - ndev->stats.rx_errors++; - if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH)) { - /* Frame too long or too short. */ - ndev->stats.rx_length_errors++; - } - if (status & BD_ENET_RX_NO) /* Frame alignment */ - ndev->stats.rx_frame_errors++; - if (status & BD_ENET_RX_CR) /* CRC Error */ - ndev->stats.rx_crc_errors++; - if (status & BD_ENET_RX_OV) /* FIFO overrun */ - ndev->stats.rx_fifo_errors++; - } - - /* Report late collisions as a frame error. - * On this error, the BD is closed, but we don't know what we - * have in the buffer. So, just drop this frame on the floor. - */ - if (status & BD_ENET_RX_CL) { - ndev->stats.rx_errors++; - ndev->stats.rx_frame_errors++; - goto rx_processing_done; - } - - /* Process the incoming frame. 
*/ - ndev->stats.rx_packets++; - pkt_len = bdp->cbd_datlen; - ndev->stats.rx_bytes += pkt_len; - data = (__u8*)__va(bdp->cbd_bufaddr); - - dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, - FEC_ENET_TX_FRSIZE, DMA_FROM_DEVICE); - - if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) - swap_buffer(data, pkt_len); - - /* This does 16 byte alignment, exactly what we need. - * The packet length includes FCS, but we don't want to - * include that when passing upstream as it messes up - * bridging applications. - */ - skb = dev_alloc_skb(pkt_len - 4 + NET_IP_ALIGN); - - if (unlikely(!skb)) { - printk("%s: Memory squeeze, dropping packet.\n", - ndev->name); - ndev->stats.rx_dropped++; - } else { - skb_reserve(skb, NET_IP_ALIGN); - skb_put(skb, pkt_len - 4); /* Make room */ - skb_copy_to_linear_data(skb, data, pkt_len - 4); - skb->protocol = eth_type_trans(skb, ndev); - if (!skb_defer_rx_timestamp(skb)) - netif_rx(skb); - } - - bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, data, - FEC_ENET_TX_FRSIZE, DMA_FROM_DEVICE); -rx_processing_done: - /* Clear the status flags for this buffer */ - status &= ~BD_ENET_RX_STATS; - - /* Mark the buffer empty */ - status |= BD_ENET_RX_EMPTY; - bdp->cbd_sc = status; - - /* Update BD pointer to next entry */ - if (status & BD_ENET_RX_WRAP) - bdp = fep->rx_bd_base; - else - bdp++; - /* Doing this here will keep the FEC running while we process - * incoming frames. On a heavily loaded network, we should be - * able to keep up at the expense of system resources. - */ - writel(0, fep->hwp + FEC_R_DES_ACTIVE); - } - fep->cur_rx = bdp; - - spin_unlock(&fep->hw_lock); -} - -static irqreturn_t -fec_enet_interrupt(int irq, void *dev_id) -{ - struct net_device *ndev = dev_id; - struct fec_enet_private *fep = netdev_priv(ndev); - uint int_events; - irqreturn_t ret = IRQ_NONE; - - do { - int_events = readl(fep->hwp + FEC_IEVENT); - writel(int_events, fep->hwp + FEC_IEVENT); - - if (int_events & FEC_ENET_RXF) { - ret = IRQ_HANDLED; - fec_enet_rx(ndev); - } - - /* Transmit OK, or non-fatal error. Update the buffer - * descriptors. FEC handles all errors, we just discover - * them as part of the transmit process. 
- */ - if (int_events & FEC_ENET_TXF) { - ret = IRQ_HANDLED; - fec_enet_tx(ndev); - } - - if (int_events & FEC_ENET_MII) { - ret = IRQ_HANDLED; - complete(&fep->mdio_done); - } - } while (int_events); - - return ret; -} - - - -/* ------------------------------------------------------------------------- */ -static void __inline__ fec_get_mac(struct net_device *ndev) -{ - struct fec_enet_private *fep = netdev_priv(ndev); - struct fec_platform_data *pdata = fep->pdev->dev.platform_data; - unsigned char *iap, tmpaddr[ETH_ALEN]; - - /* - * try to get mac address in following order: - * - * 1) module parameter via kernel command line in form - * fec.macaddr=0x00,0x04,0x9f,0x01,0x30,0xe0 - */ - iap = macaddr; - -#ifdef CONFIG_OF - /* - * 2) from device tree data - */ - if (!is_valid_ether_addr(iap)) { - struct device_node *np = fep->pdev->dev.of_node; - if (np) { - const char *mac = of_get_mac_address(np); - if (mac) - iap = (unsigned char *) mac; - } - } -#endif - - /* - * 3) from flash or fuse (via platform data) - */ - if (!is_valid_ether_addr(iap)) { -#ifdef CONFIG_M5272 - if (FEC_FLASHMAC) - iap = (unsigned char *)FEC_FLASHMAC; -#else - if (pdata) - iap = (unsigned char *)&pdata->mac; -#endif - } - - /* - * 4) FEC mac registers set by bootloader - */ - if (!is_valid_ether_addr(iap)) { - *((unsigned long *) &tmpaddr[0]) = - be32_to_cpu(readl(fep->hwp + FEC_ADDR_LOW)); - *((unsigned short *) &tmpaddr[4]) = - be16_to_cpu(readl(fep->hwp + FEC_ADDR_HIGH) >> 16); - iap = &tmpaddr[0]; - } - - memcpy(ndev->dev_addr, iap, ETH_ALEN); - - /* Adjust MAC if using macaddr */ - if (iap == macaddr) - ndev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->dev_id; -} - -/* ------------------------------------------------------------------------- */ - -/* - * Phy section - */ -static void fec_enet_adjust_link(struct net_device *ndev) -{ - struct fec_enet_private *fep = netdev_priv(ndev); - struct phy_device *phy_dev = fep->phy_dev; - unsigned long flags; - - int status_change = 0; - - spin_lock_irqsave(&fep->hw_lock, flags); - - /* Prevent a state halted on mii error */ - if (fep->mii_timeout && phy_dev->state == PHY_HALTED) { - phy_dev->state = PHY_RESUMING; - goto spin_unlock; - } - - /* Duplex link change */ - if (phy_dev->link) { - if (fep->full_duplex != phy_dev->duplex) { - fec_restart(ndev, phy_dev->duplex); - /* prevent unnecessary second fec_restart() below */ - fep->link = phy_dev->link; - status_change = 1; - } - } - - /* Link on or off change */ - if (phy_dev->link != fep->link) { - fep->link = phy_dev->link; - if (phy_dev->link) - fec_restart(ndev, phy_dev->duplex); - else - fec_stop(ndev); - status_change = 1; - } - -spin_unlock: - spin_unlock_irqrestore(&fep->hw_lock, flags); - - if (status_change) - phy_print_status(phy_dev); -} - -static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum) -{ - struct fec_enet_private *fep = bus->priv; - unsigned long time_left; - - fep->mii_timeout = 0; - init_completion(&fep->mdio_done); - - /* start a read op */ - writel(FEC_MMFR_ST | FEC_MMFR_OP_READ | - FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) | - FEC_MMFR_TA, fep->hwp + FEC_MII_DATA); - - /* wait for end of transfer */ - time_left = wait_for_completion_timeout(&fep->mdio_done, - usecs_to_jiffies(FEC_MII_TIMEOUT)); - if (time_left == 0) { - fep->mii_timeout = 1; - printk(KERN_ERR "FEC: MDIO read timeout\n"); - return -ETIMEDOUT; - } - - /* return value */ - return FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA)); -} - -static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int 
regnum, - u16 value) -{ - struct fec_enet_private *fep = bus->priv; - unsigned long time_left; - - fep->mii_timeout = 0; - init_completion(&fep->mdio_done); - - /* start a write op */ - writel(FEC_MMFR_ST | FEC_MMFR_OP_WRITE | - FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) | - FEC_MMFR_TA | FEC_MMFR_DATA(value), - fep->hwp + FEC_MII_DATA); - - /* wait for end of transfer */ - time_left = wait_for_completion_timeout(&fep->mdio_done, - usecs_to_jiffies(FEC_MII_TIMEOUT)); - if (time_left == 0) { - fep->mii_timeout = 1; - printk(KERN_ERR "FEC: MDIO write timeout\n"); - return -ETIMEDOUT; - } - - return 0; -} - -static int fec_enet_mdio_reset(struct mii_bus *bus) -{ - return 0; -} - -static int fec_enet_mii_probe(struct net_device *ndev) -{ - struct fec_enet_private *fep = netdev_priv(ndev); - const struct platform_device_id *id_entry = - platform_get_device_id(fep->pdev); - struct phy_device *phy_dev = NULL; - char mdio_bus_id[MII_BUS_ID_SIZE]; - char phy_name[MII_BUS_ID_SIZE + 3]; - int phy_id; - int dev_id = fep->dev_id; - - fep->phy_dev = NULL; - - /* check for attached phy */ - for (phy_id = 0; (phy_id < PHY_MAX_ADDR); phy_id++) { - if ((fep->mii_bus->phy_mask & (1 << phy_id))) - continue; - if (fep->mii_bus->phy_map[phy_id] == NULL) - continue; - if (fep->mii_bus->phy_map[phy_id]->phy_id == 0) - continue; - if (dev_id--) - continue; - strncpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE); - break; - } - - if (phy_id >= PHY_MAX_ADDR) { - printk(KERN_INFO - "%s: no PHY, assuming direct connection to switch\n", - ndev->name); - strncpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE); - phy_id = 0; - } - - snprintf(phy_name, sizeof(phy_name), PHY_ID_FMT, mdio_bus_id, phy_id); - phy_dev = phy_connect(ndev, phy_name, &fec_enet_adjust_link, 0, - fep->phy_interface); - if (IS_ERR(phy_dev)) { - printk(KERN_ERR "%s: could not attach to PHY\n", ndev->name); - return PTR_ERR(phy_dev); - } - - /* mask with MAC supported features */ - if (id_entry->driver_data & FEC_QUIRK_HAS_GBIT) - phy_dev->supported &= PHY_GBIT_FEATURES; - else - phy_dev->supported &= PHY_BASIC_FEATURES; - - phy_dev->advertising = phy_dev->supported; - - fep->phy_dev = phy_dev; - fep->link = 0; - fep->full_duplex = 0; - - printk(KERN_INFO - "%s: Freescale FEC PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n", - ndev->name, - fep->phy_dev->drv->name, dev_name(&fep->phy_dev->dev), - fep->phy_dev->irq); - - return 0; -} - -static int fec_enet_mii_init(struct platform_device *pdev) -{ - static struct mii_bus *fec0_mii_bus; - struct net_device *ndev = platform_get_drvdata(pdev); - struct fec_enet_private *fep = netdev_priv(ndev); - const struct platform_device_id *id_entry = - platform_get_device_id(fep->pdev); - int err = -ENXIO, i; - - /* - * The dual fec interfaces are not equivalent with enet-mac. - * Here are the differences: - * - * - fec0 supports MII & RMII modes while fec1 only supports RMII - * - fec0 acts as the 1588 time master while fec1 is slave - * - external phys can only be configured by fec0 - * - * That is to say fec1 can not work independently. It only works - * when fec0 is working. The reason behind this design is that the - * second interface is added primarily for Switch mode. - * - * Because of the last point above, both phys are attached on fec0 - * mdio interface in board design, and need to be configured by - * fec0 mii_bus. 
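
The MII_SPEED divisor computed in fec_enet_mii_init() below follows the two formulas quoted in its comment; a worked example, with reference-clock figures that are illustrative rather than taken from this patch:

    /* Plain FEC at 50 MHz: DIV_ROUND_UP(50000000, 5000000) = 10,
     * so MDC = 50 MHz / (10 * 2) = 2.5 MHz exactly.
     * ENET-MAC at 66 MHz: DIV_ROUND_UP(66000000, 5000000) = 14,
     * minus 1 for the '+ 1' in its divisor formula gives 13,
     * so MDC = 66 MHz / ((13 + 1) * 2) = ~2.36 MHz, just under the
     * 2.5 MHz target. The final '<<= 1' moves the value into the
     * MII_SPEED register field, which does not start at bit 0.
     */
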
- */ - if ((id_entry->driver_data & FEC_QUIRK_ENET_MAC) && fep->dev_id > 0) { - /* fec1 uses fec0 mii_bus */ - if (mii_cnt && fec0_mii_bus) { - fep->mii_bus = fec0_mii_bus; - mii_cnt++; - return 0; - } - return -ENOENT; - } - - fep->mii_timeout = 0; - - /* - * Set MII speed to 2.5 MHz (= clk_get_rate() / 2 * phy_speed) - * - * The formula for FEC MDC is 'ref_freq / (MII_SPEED x 2)' while - * for ENET-MAC is 'ref_freq / ((MII_SPEED + 1) x 2)'. The i.MX28 - * Reference Manual has an error on this, and gets fixed on i.MX6Q - * document. - */ - fep->phy_speed = DIV_ROUND_UP(clk_get_rate(fep->clk), 5000000); - if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) - fep->phy_speed--; - fep->phy_speed <<= 1; - writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); - - fep->mii_bus = mdiobus_alloc(); - if (fep->mii_bus == NULL) { - err = -ENOMEM; - goto err_out; - } - - fep->mii_bus->name = "fec_enet_mii_bus"; - fep->mii_bus->read = fec_enet_mdio_read; - fep->mii_bus->write = fec_enet_mdio_write; - fep->mii_bus->reset = fec_enet_mdio_reset; - snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", - pdev->name, fep->dev_id + 1); - fep->mii_bus->priv = fep; - fep->mii_bus->parent = &pdev->dev; - - fep->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); - if (!fep->mii_bus->irq) { - err = -ENOMEM; - goto err_out_free_mdiobus; - } - - for (i = 0; i < PHY_MAX_ADDR; i++) - fep->mii_bus->irq[i] = PHY_POLL; - - if (mdiobus_register(fep->mii_bus)) - goto err_out_free_mdio_irq; - - mii_cnt++; - - /* save fec0 mii_bus */ - if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) - fec0_mii_bus = fep->mii_bus; - - return 0; - -err_out_free_mdio_irq: - kfree(fep->mii_bus->irq); -err_out_free_mdiobus: - mdiobus_free(fep->mii_bus); -err_out: - return err; -} - -static void fec_enet_mii_remove(struct fec_enet_private *fep) -{ - if (--mii_cnt == 0) { - mdiobus_unregister(fep->mii_bus); - kfree(fep->mii_bus->irq); - mdiobus_free(fep->mii_bus); - } -} - -static int fec_enet_get_settings(struct net_device *ndev, - struct ethtool_cmd *cmd) -{ - struct fec_enet_private *fep = netdev_priv(ndev); - struct phy_device *phydev = fep->phy_dev; - - if (!phydev) - return -ENODEV; - - return phy_ethtool_gset(phydev, cmd); -} - -static int fec_enet_set_settings(struct net_device *ndev, - struct ethtool_cmd *cmd) -{ - struct fec_enet_private *fep = netdev_priv(ndev); - struct phy_device *phydev = fep->phy_dev; - - if (!phydev) - return -ENODEV; - - return phy_ethtool_sset(phydev, cmd); -} - -static void fec_enet_get_drvinfo(struct net_device *ndev, - struct ethtool_drvinfo *info) -{ - struct fec_enet_private *fep = netdev_priv(ndev); - - strcpy(info->driver, fep->pdev->dev.driver->name); - strcpy(info->version, "Revision: 1.0"); - strcpy(info->bus_info, dev_name(&ndev->dev)); -} - -static const struct ethtool_ops fec_enet_ethtool_ops = { - .get_settings = fec_enet_get_settings, - .set_settings = fec_enet_set_settings, - .get_drvinfo = fec_enet_get_drvinfo, - .get_link = ethtool_op_get_link, -}; - -static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd) -{ - struct fec_enet_private *fep = netdev_priv(ndev); - struct phy_device *phydev = fep->phy_dev; - - if (!netif_running(ndev)) - return -EINVAL; - - if (!phydev) - return -ENODEV; - - return phy_mii_ioctl(phydev, rq, cmd); -} - -static void fec_enet_free_buffers(struct net_device *ndev) -{ - struct fec_enet_private *fep = netdev_priv(ndev); - int i; - struct sk_buff *skb; - struct bufdesc *bdp; - - bdp = fep->rx_bd_base; - for (i = 0; i < RX_RING_SIZE; i++) { - skb = 
fep->rx_skbuff[i]; - - if (bdp->cbd_bufaddr) - dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, - FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE); - if (skb) - dev_kfree_skb(skb); - bdp++; - } - - bdp = fep->tx_bd_base; - for (i = 0; i < TX_RING_SIZE; i++) - kfree(fep->tx_bounce[i]); -} - -static int fec_enet_alloc_buffers(struct net_device *ndev) -{ - struct fec_enet_private *fep = netdev_priv(ndev); - int i; - struct sk_buff *skb; - struct bufdesc *bdp; - - bdp = fep->rx_bd_base; - for (i = 0; i < RX_RING_SIZE; i++) { - skb = dev_alloc_skb(FEC_ENET_RX_FRSIZE); - if (!skb) { - fec_enet_free_buffers(ndev); - return -ENOMEM; - } - fep->rx_skbuff[i] = skb; - - bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, skb->data, - FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE); - bdp->cbd_sc = BD_ENET_RX_EMPTY; - bdp++; - } - - /* Set the last buffer to wrap. */ - bdp--; - bdp->cbd_sc |= BD_SC_WRAP; - - bdp = fep->tx_bd_base; - for (i = 0; i < TX_RING_SIZE; i++) { - fep->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL); - - bdp->cbd_sc = 0; - bdp->cbd_bufaddr = 0; - bdp++; - } - - /* Set the last buffer to wrap. */ - bdp--; - bdp->cbd_sc |= BD_SC_WRAP; - - return 0; -} - -static int -fec_enet_open(struct net_device *ndev) -{ - struct fec_enet_private *fep = netdev_priv(ndev); - int ret; - - /* I should reset the ring buffers here, but I don't yet know - * a simple way to do that. - */ - - ret = fec_enet_alloc_buffers(ndev); - if (ret) - return ret; - - /* Probe and connect to PHY when open the interface */ - ret = fec_enet_mii_probe(ndev); - if (ret) { - fec_enet_free_buffers(ndev); - return ret; - } - phy_start(fep->phy_dev); - netif_start_queue(ndev); - fep->opened = 1; - return 0; -} - -static int -fec_enet_close(struct net_device *ndev) -{ - struct fec_enet_private *fep = netdev_priv(ndev); - - /* Don't know what to do yet. */ - fep->opened = 0; - netif_stop_queue(ndev); - fec_stop(ndev); - - if (fep->phy_dev) { - phy_stop(fep->phy_dev); - phy_disconnect(fep->phy_dev); - } - - fec_enet_free_buffers(ndev); - - return 0; -} - -/* Set or clear the multicast filter for this adaptor. - * Skeleton taken from sunlance driver. - * The CPM Ethernet implementation allows Multicast as well as individual - * MAC address filtering. Some of the drivers check to make sure it is - * a group multicast address, and discard those that are not. I guess I - * will do the same for now, but just remove the test if you want - * individual filtering as well (do the upper net layers want or support - * this kind of feature?). 
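
The bit-by-bit CRC loop in set_multicast_list() below computes a standard reflected CRC-32 (polynomial 0xEDB88320, initial value ~0, no final inversion); the top HASH_BITS bits of the result then pick one of the 64 positions spread across the two 32-bit group hash registers. The same value should be obtainable from the kernel's own helper, a sketch assuming <linux/crc32.h>:

    #include <linux/crc32.h>

    /* Equivalent to the open-coded loop below: hash >= 32 selects a
     * bit in FEC_GRP_HASH_TABLE_HIGH, anything lower a bit in _LOW.
     */
    crc = ether_crc_le(ndev->addr_len, ha->addr);
    hash = (crc >> (32 - HASH_BITS)) & 0x3f;
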
- */ - -#define HASH_BITS 6 /* #bits in hash */ -#define CRC32_POLY 0xEDB88320 - -static void set_multicast_list(struct net_device *ndev) -{ - struct fec_enet_private *fep = netdev_priv(ndev); - struct netdev_hw_addr *ha; - unsigned int i, bit, data, crc, tmp; - unsigned char hash; - - if (ndev->flags & IFF_PROMISC) { - tmp = readl(fep->hwp + FEC_R_CNTRL); - tmp |= 0x8; - writel(tmp, fep->hwp + FEC_R_CNTRL); - return; - } - - tmp = readl(fep->hwp + FEC_R_CNTRL); - tmp &= ~0x8; - writel(tmp, fep->hwp + FEC_R_CNTRL); - - if (ndev->flags & IFF_ALLMULTI) { - /* Catch all multicast addresses, so set the - * filter to all 1's - */ - writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_HIGH); - writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_LOW); - - return; - } - - /* Clear filter and add the addresses in hash register - */ - writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH); - writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW); - - netdev_for_each_mc_addr(ha, ndev) { - /* calculate crc32 value of mac address */ - crc = 0xffffffff; - - for (i = 0; i < ndev->addr_len; i++) { - data = ha->addr[i]; - for (bit = 0; bit < 8; bit++, data >>= 1) { - crc = (crc >> 1) ^ - (((crc ^ data) & 1) ? CRC32_POLY : 0); - } - } - - /* only upper 6 bits (HASH_BITS) are used - * which point to specific bit in he hash registers - */ - hash = (crc >> (32 - HASH_BITS)) & 0x3f; - - if (hash > 31) { - tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_HIGH); - tmp |= 1 << (hash - 32); - writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_HIGH); - } else { - tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_LOW); - tmp |= 1 << hash; - writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_LOW); - } - } -} - -/* Set a MAC change in hardware. */ -static int -fec_set_mac_address(struct net_device *ndev, void *p) -{ - struct fec_enet_private *fep = netdev_priv(ndev); - struct sockaddr *addr = p; - - if (!is_valid_ether_addr(addr->sa_data)) - return -EADDRNOTAVAIL; - - memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len); - - writel(ndev->dev_addr[3] | (ndev->dev_addr[2] << 8) | - (ndev->dev_addr[1] << 16) | (ndev->dev_addr[0] << 24), - fep->hwp + FEC_ADDR_LOW); - writel((ndev->dev_addr[5] << 16) | (ndev->dev_addr[4] << 24), - fep->hwp + FEC_ADDR_HIGH); - return 0; -} - -#ifdef CONFIG_NET_POLL_CONTROLLER -/* - * fec_poll_controller: FEC Poll controller function - * @dev: The FEC network adapter - * - * Polled functionality used by netconsole and others in non interrupt mode - * - */ -void fec_poll_controller(struct net_device *dev) -{ - int i; - struct fec_enet_private *fep = netdev_priv(dev); - - for (i = 0; i < FEC_IRQ_NUM; i++) { - if (fep->irq[i] > 0) { - disable_irq(fep->irq[i]); - fec_enet_interrupt(fep->irq[i], dev); - enable_irq(fep->irq[i]); - } - } -} -#endif - -static const struct net_device_ops fec_netdev_ops = { - .ndo_open = fec_enet_open, - .ndo_stop = fec_enet_close, - .ndo_start_xmit = fec_enet_start_xmit, - .ndo_set_rx_mode = set_multicast_list, - .ndo_change_mtu = eth_change_mtu, - .ndo_validate_addr = eth_validate_addr, - .ndo_tx_timeout = fec_timeout, - .ndo_set_mac_address = fec_set_mac_address, - .ndo_do_ioctl = fec_enet_ioctl, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = fec_poll_controller, -#endif -}; - - /* - * XXX: We need to clean up on failure exits here. - * - */ -static int fec_enet_init(struct net_device *ndev) -{ - struct fec_enet_private *fep = netdev_priv(ndev); - struct bufdesc *cbd_base; - struct bufdesc *bdp; - int i; - - /* Allocate memory for buffer descriptors. 
*/ - cbd_base = dma_alloc_coherent(NULL, PAGE_SIZE, &fep->bd_dma, - GFP_KERNEL); - if (!cbd_base) { - printk("FEC: allocate descriptor memory failed?\n"); - return -ENOMEM; - } - - spin_lock_init(&fep->hw_lock); - - fep->netdev = ndev; - - /* Get the Ethernet address */ - fec_get_mac(ndev); - - /* Set receive and transmit descriptor base. */ - fep->rx_bd_base = cbd_base; - fep->tx_bd_base = cbd_base + RX_RING_SIZE; - - /* The FEC Ethernet specific entries in the device structure */ - ndev->watchdog_timeo = TX_TIMEOUT; - ndev->netdev_ops = &fec_netdev_ops; - ndev->ethtool_ops = &fec_enet_ethtool_ops; - - /* Initialize the receive buffer descriptors. */ - bdp = fep->rx_bd_base; - for (i = 0; i < RX_RING_SIZE; i++) { - - /* Initialize the BD for every fragment in the page. */ - bdp->cbd_sc = 0; - bdp++; - } - - /* Set the last buffer to wrap */ - bdp--; - bdp->cbd_sc |= BD_SC_WRAP; - - /* ...and the same for transmit */ - bdp = fep->tx_bd_base; - for (i = 0; i < TX_RING_SIZE; i++) { - - /* Initialize the BD for every fragment in the page. */ - bdp->cbd_sc = 0; - bdp->cbd_bufaddr = 0; - bdp++; - } - - /* Set the last buffer to wrap */ - bdp--; - bdp->cbd_sc |= BD_SC_WRAP; - - fec_restart(ndev, 0); - - return 0; -} - -#ifdef CONFIG_OF -static int __devinit fec_get_phy_mode_dt(struct platform_device *pdev) -{ - struct device_node *np = pdev->dev.of_node; - - if (np) - return of_get_phy_mode(np); - - return -ENODEV; -} - -static void __devinit fec_reset_phy(struct platform_device *pdev) -{ - int err, phy_reset; - struct device_node *np = pdev->dev.of_node; - - if (!np) - return; - - phy_reset = of_get_named_gpio(np, "phy-reset-gpios", 0); - err = gpio_request_one(phy_reset, GPIOF_OUT_INIT_LOW, "phy-reset"); - if (err) { - pr_debug("FEC: failed to get gpio phy-reset: %d\n", err); - return; - } - msleep(1); - gpio_set_value(phy_reset, 1); -} -#else /* CONFIG_OF */ -static inline int fec_get_phy_mode_dt(struct platform_device *pdev) -{ - return -ENODEV; -} - -static inline void fec_reset_phy(struct platform_device *pdev) -{ - /* - * In case of platform probe, the reset has been done - * by machine code. 
- */ -} -#endif /* CONFIG_OF */ - -static int __devinit -fec_probe(struct platform_device *pdev) -{ - struct fec_enet_private *fep; - struct fec_platform_data *pdata; - struct net_device *ndev; - int i, irq, ret = 0; - struct resource *r; - const struct of_device_id *of_id; - static int dev_id; - - of_id = of_match_device(fec_dt_ids, &pdev->dev); - if (of_id) - pdev->id_entry = of_id->data; - - r = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!r) - return -ENXIO; - - r = request_mem_region(r->start, resource_size(r), pdev->name); - if (!r) - return -EBUSY; - - /* Init network device */ - ndev = alloc_etherdev(sizeof(struct fec_enet_private)); - if (!ndev) { - ret = -ENOMEM; - goto failed_alloc_etherdev; - } - - SET_NETDEV_DEV(ndev, &pdev->dev); - - /* setup board info structure */ - fep = netdev_priv(ndev); - - fep->hwp = ioremap(r->start, resource_size(r)); - fep->pdev = pdev; - fep->dev_id = dev_id++; - - if (!fep->hwp) { - ret = -ENOMEM; - goto failed_ioremap; - } - - platform_set_drvdata(pdev, ndev); - - ret = fec_get_phy_mode_dt(pdev); - if (ret < 0) { - pdata = pdev->dev.platform_data; - if (pdata) - fep->phy_interface = pdata->phy; - else - fep->phy_interface = PHY_INTERFACE_MODE_MII; - } else { - fep->phy_interface = ret; - } - - fec_reset_phy(pdev); - - for (i = 0; i < FEC_IRQ_NUM; i++) { - irq = platform_get_irq(pdev, i); - if (irq < 0) { - if (i) - break; - ret = irq; - goto failed_irq; - } - ret = request_irq(irq, fec_enet_interrupt, IRQF_DISABLED, pdev->name, ndev); - if (ret) { - while (--i >= 0) { - irq = platform_get_irq(pdev, i); - free_irq(irq, ndev); - } - goto failed_irq; - } - } - - fep->clk = clk_get(&pdev->dev, NULL); - if (IS_ERR(fep->clk)) { - ret = PTR_ERR(fep->clk); - goto failed_clk; - } - clk_prepare_enable(fep->clk); - - ret = fec_enet_init(ndev); - if (ret) - goto failed_init; - - ret = fec_enet_mii_init(pdev); - if (ret) - goto failed_mii_init; - - /* Carrier starts down, phylib will bring it up */ - netif_carrier_off(ndev); - - ret = register_netdev(ndev); - if (ret) - goto failed_register; - - return 0; - -failed_register: - fec_enet_mii_remove(fep); -failed_mii_init: -failed_init: - clk_disable_unprepare(fep->clk); - clk_put(fep->clk); -failed_clk: - for (i = 0; i < FEC_IRQ_NUM; i++) { - irq = platform_get_irq(pdev, i); - if (irq > 0) - free_irq(irq, ndev); - } -failed_irq: - iounmap(fep->hwp); -failed_ioremap: - free_netdev(ndev); -failed_alloc_etherdev: - release_mem_region(r->start, resource_size(r)); - - return ret; -} - -static int __devexit -fec_drv_remove(struct platform_device *pdev) -{ - struct net_device *ndev = platform_get_drvdata(pdev); - struct fec_enet_private *fep = netdev_priv(ndev); - struct resource *r; - int i; - - unregister_netdev(ndev); - fec_enet_mii_remove(fep); - for (i = 0; i < FEC_IRQ_NUM; i++) { - int irq = platform_get_irq(pdev, i); - if (irq > 0) - free_irq(irq, ndev); - } - clk_disable_unprepare(fep->clk); - clk_put(fep->clk); - iounmap(fep->hwp); - free_netdev(ndev); - - r = platform_get_resource(pdev, IORESOURCE_MEM, 0); - BUG_ON(!r); - release_mem_region(r->start, resource_size(r)); - - platform_set_drvdata(pdev, NULL); - - return 0; -} - -#ifdef CONFIG_PM -static int -fec_suspend(struct device *dev) -{ - struct net_device *ndev = dev_get_drvdata(dev); - struct fec_enet_private *fep = netdev_priv(ndev); - - if (netif_running(ndev)) { - fec_stop(ndev); - netif_device_detach(ndev); - } - clk_disable_unprepare(fep->clk); - - return 0; -} - -static int -fec_resume(struct device *dev) -{ - struct net_device *ndev = 
dev_get_drvdata(dev); - struct fec_enet_private *fep = netdev_priv(ndev); - - clk_prepare_enable(fep->clk); - if (netif_running(ndev)) { - fec_restart(ndev, fep->full_duplex); - netif_device_attach(ndev); - } - - return 0; -} - -static const struct dev_pm_ops fec_pm_ops = { - .suspend = fec_suspend, - .resume = fec_resume, - .freeze = fec_suspend, - .thaw = fec_resume, - .poweroff = fec_suspend, - .restore = fec_resume, -}; -#endif - -static struct platform_driver fec_driver = { - .driver = { - .name = DRIVER_NAME, - .owner = THIS_MODULE, -#ifdef CONFIG_PM - .pm = &fec_pm_ops, -#endif - .of_match_table = fec_dt_ids, - }, - .id_table = fec_devtype, - .probe = fec_probe, - .remove = __devexit_p(fec_drv_remove), -}; - -static int __init -fec_enet_module_init(void) -{ - printk(KERN_INFO "FEC Ethernet Driver\n"); - - return platform_driver_register(&fec_driver); -} - -static void __exit -fec_enet_cleanup(void) -{ - platform_driver_unregister(&fec_driver); -} - -module_exit(fec_enet_cleanup); -module_init(fec_enet_module_init); - -MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h index 8408c627b19..671d080105a 100644 --- a/drivers/net/ethernet/freescale/fec.h +++ b/drivers/net/ethernet/freescale/fec.h @@ -13,6 +13,10 @@ #define FEC_H /****************************************************************************/ +#include <linux/clocksource.h> +#include <linux/net_tstamp.h> +#include <linux/ptp_clock_kernel.h> + #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ defined(CONFIG_M520x) || defined(CONFIG_M532x) || \ defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28) @@ -44,6 +48,11 @@ #define FEC_R_DES_START 0x180 /* Receive descriptor ring */ #define FEC_X_DES_START 0x184 /* Transmit descriptor ring */ #define FEC_R_BUFF_SIZE 0x188 /* Maximum receive buff size */ +#define FEC_R_FIFO_RSFL 0x190 /* Receive FIFO section full threshold */ +#define FEC_R_FIFO_RSEM 0x194 /* Receive FIFO section empty threshold */ +#define FEC_R_FIFO_RAEM 0x198 /* Receive FIFO almost empty threshold */ +#define FEC_R_FIFO_RAFL 0x19c /* Receive FIFO almost full threshold */ +#define FEC_RACC 0x1C4 /* Receive Accelerator function */ #define FEC_MIIGSK_CFGR 0x300 /* MIIGSK Configuration reg */ #define FEC_MIIGSK_ENR 0x308 /* MIIGSK Enable reg */ @@ -51,6 +60,61 @@ #define BM_MIIGSK_CFGR_RMII 0x01 #define BM_MIIGSK_CFGR_FRCONT_10M 0x40 +#define RMON_T_DROP 0x200 /* Count of frames not cntd correctly */ +#define RMON_T_PACKETS 0x204 /* RMON TX packet count */ +#define RMON_T_BC_PKT 0x208 /* RMON TX broadcast pkts */ +#define RMON_T_MC_PKT 0x20C /* RMON TX multicast pkts */ +#define RMON_T_CRC_ALIGN 0x210 /* RMON TX pkts with CRC align err */ +#define RMON_T_UNDERSIZE 0x214 /* RMON TX pkts < 64 bytes, good CRC */ +#define RMON_T_OVERSIZE 0x218 /* RMON TX pkts > MAX_FL bytes good CRC */ +#define RMON_T_FRAG 0x21C /* RMON TX pkts < 64 bytes, bad CRC */ +#define RMON_T_JAB 0x220 /* RMON TX pkts > MAX_FL bytes, bad CRC */ +#define RMON_T_COL 0x224 /* RMON TX collision count */ +#define RMON_T_P64 0x228 /* RMON TX 64 byte pkts */ +#define RMON_T_P65TO127 0x22C /* RMON TX 65 to 127 byte pkts */ +#define RMON_T_P128TO255 0x230 /* RMON TX 128 to 255 byte pkts */ +#define RMON_T_P256TO511 0x234 /* RMON TX 256 to 511 byte pkts */ +#define RMON_T_P512TO1023 0x238 /* RMON TX 512 to 1023 byte pkts */ +#define RMON_T_P1024TO2047 0x23C /* RMON TX 1024 to 2047 byte pkts */ +#define RMON_T_P_GTE2048 0x240 /* RMON TX pkts > 2048 bytes */ +#define 
RMON_T_OCTETS 0x244 /* RMON TX octets */ +#define IEEE_T_DROP 0x248 /* Count of frames not counted crtly */ +#define IEEE_T_FRAME_OK 0x24C /* Frames tx'd OK */ +#define IEEE_T_1COL 0x250 /* Frames tx'd with single collision */ +#define IEEE_T_MCOL 0x254 /* Frames tx'd with multiple collision */ +#define IEEE_T_DEF 0x258 /* Frames tx'd after deferral delay */ +#define IEEE_T_LCOL 0x25C /* Frames tx'd with late collision */ +#define IEEE_T_EXCOL 0x260 /* Frames tx'd with excesv collisions */ +#define IEEE_T_MACERR 0x264 /* Frames tx'd with TX FIFO underrun */ +#define IEEE_T_CSERR 0x268 /* Frames tx'd with carrier sense err */ +#define IEEE_T_SQE 0x26C /* Frames tx'd with SQE err */ +#define IEEE_T_FDXFC 0x270 /* Flow control pause frames tx'd */ +#define IEEE_T_OCTETS_OK 0x274 /* Octet count for frames tx'd w/o err */ +#define RMON_R_PACKETS 0x284 /* RMON RX packet count */ +#define RMON_R_BC_PKT 0x288 /* RMON RX broadcast pkts */ +#define RMON_R_MC_PKT 0x28C /* RMON RX multicast pkts */ +#define RMON_R_CRC_ALIGN 0x290 /* RMON RX pkts with CRC alignment err */ +#define RMON_R_UNDERSIZE 0x294 /* RMON RX pkts < 64 bytes, good CRC */ +#define RMON_R_OVERSIZE 0x298 /* RMON RX pkts > MAX_FL bytes good CRC */ +#define RMON_R_FRAG 0x29C /* RMON RX pkts < 64 bytes, bad CRC */ +#define RMON_R_JAB 0x2A0 /* RMON RX pkts > MAX_FL bytes, bad CRC */ +#define RMON_R_RESVD_O 0x2A4 /* Reserved */ +#define RMON_R_P64 0x2A8 /* RMON RX 64 byte pkts */ +#define RMON_R_P65TO127 0x2AC /* RMON RX 65 to 127 byte pkts */ +#define RMON_R_P128TO255 0x2B0 /* RMON RX 128 to 255 byte pkts */ +#define RMON_R_P256TO511 0x2B4 /* RMON RX 256 to 511 byte pkts */ +#define RMON_R_P512TO1023 0x2B8 /* RMON RX 512 to 1023 byte pkts */ +#define RMON_R_P1024TO2047 0x2BC /* RMON RX 1024 to 2047 byte pkts */ +#define RMON_R_P_GTE2048 0x2C0 /* RMON RX pkts > 2048 bytes */ +#define RMON_R_OCTETS 0x2C4 /* RMON RX octets */ +#define IEEE_R_DROP 0x2C8 /* Count frames not counted correctly */ +#define IEEE_R_FRAME_OK 0x2CC /* Frames rx'd OK */ +#define IEEE_R_CRC 0x2D0 /* Frames rx'd with CRC err */ +#define IEEE_R_ALIGN 0x2D4 /* Frames rx'd with alignment err */ +#define IEEE_R_MACERR 0x2D8 /* Receive FIFO overflow count */ +#define IEEE_R_FDXFC 0x2DC /* Flow control pause frames rx'd */ +#define IEEE_R_OCTETS_OK 0x2E0 /* Octet cnt for frames rx'd w/o err */ + #else #define FEC_ECNTRL 0x000 /* Ethernet control reg */ @@ -97,6 +161,15 @@ struct bufdesc { }; #endif +struct bufdesc_ex { + struct bufdesc desc; + unsigned long cbd_esc; + unsigned long cbd_prot; + unsigned long cbd_bdu; + unsigned long ts; + unsigned short res0[4]; +}; + /* * The following definitions courtesy of commproc.h, which where * Copyright (c) 1997 Dan Malek (dmalek@jlc.net). @@ -130,6 +203,9 @@ struct bufdesc { #define BD_ENET_RX_CL ((ushort)0x0001) #define BD_ENET_RX_STATS ((ushort)0x013f) /* All status bits */ +/* Enhanced buffer descriptor control/status used by Ethernet receive */ +#define BD_ENET_RX_VLAN 0x00000004 + /* Buffer descriptor control/status used by Ethernet transmit. 
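
Because struct bufdesc_ex above embeds the legacy descriptor and appends timestamp and offload words, ring walks can no longer advance by plain pointer increment once extended descriptors are enabled; the bufdesc_size field added to fec_enet_private later in this patch points at a byte-stride advance along these lines (helper name hypothetical):

    /* Step to the next descriptor regardless of layout: the stride is
     * sizeof(struct bufdesc_ex) when extended descriptors are in use,
     * sizeof(struct bufdesc) otherwise.
     */
    static struct bufdesc *fec_enet_next_bd(struct fec_enet_private *fep,
                                            struct bufdesc *bdp)
    {
        return (struct bufdesc *)((char *)bdp + fep->bufdesc_size);
    }
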
*/ #define BD_ENET_TX_READY ((ushort)0x8000) @@ -145,8 +221,133 @@ struct bufdesc { #define BD_ENET_TX_RCMASK ((ushort)0x003c) #define BD_ENET_TX_UN ((ushort)0x0002) #define BD_ENET_TX_CSL ((ushort)0x0001) -#define BD_ENET_TX_STATS ((ushort)0x03ff) /* All status bits */ +#define BD_ENET_TX_STATS ((ushort)0x0fff) /* All status bits */ + +/*enhanced buffer descriptor control/status used by Ethernet transmit*/ +#define BD_ENET_TX_INT 0x40000000 +#define BD_ENET_TX_TS 0x20000000 +#define BD_ENET_TX_PINS 0x10000000 +#define BD_ENET_TX_IINS 0x08000000 + + +/* This device has up to three irqs on some platforms */ +#define FEC_IRQ_NUM 3 + +/* The number of Tx and Rx buffers. These are allocated from the page + * pool. The code may assume these are power of two, so it it best + * to keep them that size. + * We don't need to allocate pages for the transmitter. We just use + * the skbuffer directly. + */ + +#define FEC_ENET_RX_PAGES 8 +#define FEC_ENET_RX_FRSIZE 2048 +#define FEC_ENET_RX_FRPPG (PAGE_SIZE / FEC_ENET_RX_FRSIZE) +#define RX_RING_SIZE (FEC_ENET_RX_FRPPG * FEC_ENET_RX_PAGES) +#define FEC_ENET_TX_FRSIZE 2048 +#define FEC_ENET_TX_FRPPG (PAGE_SIZE / FEC_ENET_TX_FRSIZE) +#define TX_RING_SIZE 512 /* Must be power of two */ +#define TX_RING_MOD_MASK 511 /* for this to work */ + +#define BD_ENET_RX_INT 0x00800000 +#define BD_ENET_RX_PTP ((ushort)0x0400) +#define BD_ENET_RX_ICE 0x00000020 +#define BD_ENET_RX_PCR 0x00000010 +#define FLAG_RX_CSUM_ENABLED (BD_ENET_RX_ICE | BD_ENET_RX_PCR) +#define FLAG_RX_CSUM_ERROR (BD_ENET_RX_ICE | BD_ENET_RX_PCR) + +struct fec_enet_delayed_work { + struct delayed_work delay_work; + bool timeout; + bool trig_tx; +}; + +/* The FEC buffer descriptors track the ring buffers. The rx_bd_base and + * tx_bd_base always point to the base of the buffer descriptors. The + * cur_rx and cur_tx point to the currently available buffer. + * The dirty_tx tracks the current buffer that is being sent by the + * controller. The cur_tx and dirty_tx are equal under both completely + * empty and completely full conditions. The empty/ready indicator in + * the buffer descriptor determines the actual condition. + */ +struct fec_enet_private { + /* Hardware registers of the FEC device */ + void __iomem *hwp; + + struct net_device *netdev; + + struct clk *clk_ipg; + struct clk *clk_ahb; + struct clk *clk_enet_out; + struct clk *clk_ptp; + + /* The saved address of a sent-in-place packet/buffer, for skfree(). 
*/ + unsigned char *tx_bounce[TX_RING_SIZE]; + struct sk_buff *tx_skbuff[TX_RING_SIZE]; + struct sk_buff *rx_skbuff[RX_RING_SIZE]; + + /* CPM dual port RAM relative addresses */ + dma_addr_t bd_dma; + /* Address of Rx and Tx buffers */ + struct bufdesc *rx_bd_base; + struct bufdesc *tx_bd_base; + /* The next free ring entry */ + struct bufdesc *cur_rx, *cur_tx; + /* The ring entries to be free()ed */ + struct bufdesc *dirty_tx; + + unsigned short bufdesc_size; + unsigned short tx_ring_size; + unsigned short rx_ring_size; + unsigned short tx_stop_threshold; + unsigned short tx_wake_threshold; + + /* Software TSO */ + char *tso_hdrs; + dma_addr_t tso_hdrs_dma; + + struct platform_device *pdev; + + int opened; + int dev_id; + + /* Phylib and MDIO interface */ + struct mii_bus *mii_bus; + struct phy_device *phy_dev; + int mii_timeout; + uint phy_speed; + phy_interface_t phy_interface; + int link; + int full_duplex; + int speed; + struct completion mdio_done; + int irq[FEC_IRQ_NUM]; + int bufdesc_ex; + int pause_flag; + + struct napi_struct napi; + int csum_flags; + + struct ptp_clock *ptp_clock; + struct ptp_clock_info ptp_caps; + unsigned long last_overflow_check; + spinlock_t tmreg_lock; + struct cyclecounter cc; + struct timecounter tc; + int rx_hwtstamp_filter; + u32 base_incval; + u32 cycle_speed; + int hwts_rx_en; + int hwts_tx_en; + struct timer_list time_keep; + struct fec_enet_delayed_work delay_work; + struct regulator *reg_phy; +}; +void fec_ptp_init(struct platform_device *pdev); +void fec_ptp_start_cyclecounter(struct net_device *ndev); +int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr); +int fec_ptp_get(struct net_device *ndev, struct ifreq *ifr); /****************************************************************************/ #endif /* FEC_H */ diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c new file mode 100644 index 00000000000..77037fd377b --- /dev/null +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -0,0 +1,2726 @@ +/* + * Fast Ethernet Controller (FEC) driver for Motorola MPC8xx. + * Copyright (c) 1997 Dan Malek (dmalek@jlc.net) + * + * Right now, I am very wasteful with the buffers. I allocate memory + * pages and then divide them into 2K frame buffers. This way I know I + * have buffers large enough to hold one frame within one buffer descriptor. + * Once I get this working, I will use 64 or 128 byte CPM buffers, which + * will be much more memory efficient and will easily handle lots of + * small packets. + * + * Much better multiple PHY support by Magnus Damm. + * Copyright (c) 2000 Ericsson Radio Systems AB. + * + * Support for FEC controller of ColdFire processors. + * Copyright (c) 2001-2005 Greg Ungerer (gerg@snapgear.com) + * + * Bug fixes and cleanup by Philippe De Muyter (phdm@macqel.be) + * Copyright (c) 2004-2006 Macq Electronique SA. + * + * Copyright (C) 2010-2011 Freescale Semiconductor, Inc. 
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <net/ip.h>
+#include <net/tso.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/icmp.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/bitops.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/phy.h>
+#include <linux/fec.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/of_net.h>
+#include <linux/regulator/consumer.h>
+#include <linux/if_vlan.h>
+#include <linux/pinctrl/consumer.h>
+
+#include <asm/cacheflush.h>
+
+#include "fec.h"
+
+static void set_multicast_list(struct net_device *ndev);
+
+#if defined(CONFIG_ARM)
+#define FEC_ALIGNMENT	0xf
+#else
+#define FEC_ALIGNMENT	0x3
+#endif
+
+#define DRIVER_NAME	"fec"
+
+/* Pause frame field and FIFO threshold */
+#define FEC_ENET_FCE	(1 << 5)
+#define FEC_ENET_RSEM_V	0x84
+#define FEC_ENET_RSFL_V	16
+#define FEC_ENET_RAEM_V	0x8
+#define FEC_ENET_RAFL_V	0x8
+#define FEC_ENET_OPD_V	0xFFF0
+
+/* Controller is ENET-MAC */
+#define FEC_QUIRK_ENET_MAC	(1 << 0)
+/* Controller needs driver to swap frame */
+#define FEC_QUIRK_SWAP_FRAME	(1 << 1)
+/* Controller uses gasket */
+#define FEC_QUIRK_USE_GASKET	(1 << 2)
+/* Controller has GBIT support */
+#define FEC_QUIRK_HAS_GBIT	(1 << 3)
+/* Controller has extended buffer descriptors */
+#define FEC_QUIRK_HAS_BUFDESC_EX	(1 << 4)
+/* Controller has hardware checksum support */
+#define FEC_QUIRK_HAS_CSUM	(1 << 5)
+/* Controller has hardware VLAN support */
+#define FEC_QUIRK_HAS_VLAN	(1 << 6)
+/* ENET IP errata ERR006358
+ *
+ * If the ready bit in the transmit buffer descriptor (TxBD[R]) is previously
+ * detected as not set during a prior frame transmission, then the
+ * ENET_TDAR[TDAR] bit is cleared at a later time, even if additional TxBDs
+ * were added to the ring and the ENET_TDAR[TDAR] bit is set. This results in
+ * frames not being transmitted until there is a 0-to-1 transition on
+ * ENET_TDAR[TDAR].
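+ *
+ * The driver's workaround: at submit time fec_enet_submit_work() checks
+ * whether the previous BD still has its ready bit clear and, if so,
+ * schedules delayed work that re-kicks ENET_TDAR to force the required
+ * 0-to-1 transition.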
+ */
+#define FEC_QUIRK_ERR006358	(1 << 7)
+
+static struct platform_device_id fec_devtype[] = {
+	{
+		/* keep it for coldfire */
+		.name = DRIVER_NAME,
+		.driver_data = 0,
+	}, {
+		.name = "imx25-fec",
+		.driver_data = FEC_QUIRK_USE_GASKET,
+	}, {
+		.name = "imx27-fec",
+		.driver_data = 0,
+	}, {
+		.name = "imx28-fec",
+		.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME,
+	}, {
+		.name = "imx6q-fec",
+		.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
+				FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
+				FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358,
+	}, {
+		.name = "mvf600-fec",
+		.driver_data = FEC_QUIRK_ENET_MAC,
+	}, {
+		/* sentinel */
+	}
+};
+MODULE_DEVICE_TABLE(platform, fec_devtype);
+
+enum imx_fec_type {
+	IMX25_FEC = 1,	/* runs on i.mx25/50/53 */
+	IMX27_FEC,	/* runs on i.mx27/35/51 */
+	IMX28_FEC,
+	IMX6Q_FEC,
+	MVF600_FEC,
+};
+
+static const struct of_device_id fec_dt_ids[] = {
+	{ .compatible = "fsl,imx25-fec", .data = &fec_devtype[IMX25_FEC], },
+	{ .compatible = "fsl,imx27-fec", .data = &fec_devtype[IMX27_FEC], },
+	{ .compatible = "fsl,imx28-fec", .data = &fec_devtype[IMX28_FEC], },
+	{ .compatible = "fsl,imx6q-fec", .data = &fec_devtype[IMX6Q_FEC], },
+	{ .compatible = "fsl,mvf600-fec", .data = &fec_devtype[MVF600_FEC], },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, fec_dt_ids);
+
+static unsigned char macaddr[ETH_ALEN];
+module_param_array(macaddr, byte, NULL, 0);
+MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
+
+#if defined(CONFIG_M5272)
+/*
+ * Some hardware gets its MAC address out of local flash memory.
+ * If this is non-zero, then assume it is the address to get the MAC from.
+ */
+#if defined(CONFIG_NETtel)
+#define FEC_FLASHMAC	0xf0006006
+#elif defined(CONFIG_GILBARCONAP) || defined(CONFIG_SCALES)
+#define FEC_FLASHMAC	0xf0006000
+#elif defined(CONFIG_CANCam)
+#define FEC_FLASHMAC	0xf0020000
+#elif defined(CONFIG_M5272C3)
+#define FEC_FLASHMAC	(0xffe04000 + 4)
+#elif defined(CONFIG_MOD5272)
+#define FEC_FLASHMAC	0xffc0406b
+#else
+#define FEC_FLASHMAC	0
+#endif
+#endif /* CONFIG_M5272 */
+
+/* Interrupt events/masks. */
+#define FEC_ENET_HBERR	((uint)0x80000000)	/* Heartbeat error */
+#define FEC_ENET_BABR	((uint)0x40000000)	/* Babbling receiver */
+#define FEC_ENET_BABT	((uint)0x20000000)	/* Babbling transmitter */
+#define FEC_ENET_GRA	((uint)0x10000000)	/* Graceful stop complete */
+#define FEC_ENET_TXF	((uint)0x08000000)	/* Full frame transmitted */
+#define FEC_ENET_TXB	((uint)0x04000000)	/* A buffer was transmitted */
+#define FEC_ENET_RXF	((uint)0x02000000)	/* Full frame received */
+#define FEC_ENET_RXB	((uint)0x01000000)	/* A buffer was received */
+#define FEC_ENET_MII	((uint)0x00800000)	/* MII interrupt */
+#define FEC_ENET_EBERR	((uint)0x00400000)	/* SDMA bus error */
+
+#define FEC_DEFAULT_IMASK (FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII)
+#define FEC_RX_DISABLED_IMASK (FEC_DEFAULT_IMASK & (~FEC_ENET_RXF))
+
+/* The FEC stores dest/src/type/vlan, data, and checksum for receive packets.
+ */
+#define PKT_MAXBUF_SIZE		1522
+#define PKT_MINBUF_SIZE		64
+#define PKT_MAXBLR_SIZE		1536
+
+/* FEC receive acceleration */
+#define FEC_RACC_IPDIS		(1 << 1)
+#define FEC_RACC_PRODIS		(1 << 2)
+#define FEC_RACC_OPTIONS	(FEC_RACC_IPDIS | FEC_RACC_PRODIS)
+
+/*
+ * The 5270/5271/5280/5282/532x RX control register also contains maximum frame
+ * size bits. Other FEC hardware does not, so we need to take that into
+ * account when setting it.
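+ * On those parts OPT_FRAME_SIZE below places PKT_MAXBUF_SIZE in the upper
+ * half of the register; elsewhere it is zero. Either way, fec_restart()
+ * simply ORs it into rcntl.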
+ */ +#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ + defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) +#define OPT_FRAME_SIZE (PKT_MAXBUF_SIZE << 16) +#else +#define OPT_FRAME_SIZE 0 +#endif + +/* FEC MII MMFR bits definition */ +#define FEC_MMFR_ST (1 << 30) +#define FEC_MMFR_OP_READ (2 << 28) +#define FEC_MMFR_OP_WRITE (1 << 28) +#define FEC_MMFR_PA(v) ((v & 0x1f) << 23) +#define FEC_MMFR_RA(v) ((v & 0x1f) << 18) +#define FEC_MMFR_TA (2 << 16) +#define FEC_MMFR_DATA(v) (v & 0xffff) + +#define FEC_MII_TIMEOUT 30000 /* us */ + +/* Transmitter timeout */ +#define TX_TIMEOUT (2 * HZ) + +#define FEC_PAUSE_FLAG_AUTONEG 0x1 +#define FEC_PAUSE_FLAG_ENABLE 0x2 + +#define TSO_HEADER_SIZE 128 +/* Max number of allowed TCP segments for software TSO */ +#define FEC_MAX_TSO_SEGS 100 +#define FEC_MAX_SKB_DESCS (FEC_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS) + +#define IS_TSO_HEADER(txq, addr) \ + ((addr >= txq->tso_hdrs_dma) && \ + (addr < txq->tso_hdrs_dma + txq->tx_ring_size * TSO_HEADER_SIZE)) + +static int mii_cnt; + +static inline +struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp, struct fec_enet_private *fep) +{ + struct bufdesc *new_bd = bdp + 1; + struct bufdesc_ex *ex_new_bd = (struct bufdesc_ex *)bdp + 1; + struct bufdesc_ex *ex_base; + struct bufdesc *base; + int ring_size; + + if (bdp >= fep->tx_bd_base) { + base = fep->tx_bd_base; + ring_size = fep->tx_ring_size; + ex_base = (struct bufdesc_ex *)fep->tx_bd_base; + } else { + base = fep->rx_bd_base; + ring_size = fep->rx_ring_size; + ex_base = (struct bufdesc_ex *)fep->rx_bd_base; + } + + if (fep->bufdesc_ex) + return (struct bufdesc *)((ex_new_bd >= (ex_base + ring_size)) ? + ex_base : ex_new_bd); + else + return (new_bd >= (base + ring_size)) ? + base : new_bd; +} + +static inline +struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp, struct fec_enet_private *fep) +{ + struct bufdesc *new_bd = bdp - 1; + struct bufdesc_ex *ex_new_bd = (struct bufdesc_ex *)bdp - 1; + struct bufdesc_ex *ex_base; + struct bufdesc *base; + int ring_size; + + if (bdp >= fep->tx_bd_base) { + base = fep->tx_bd_base; + ring_size = fep->tx_ring_size; + ex_base = (struct bufdesc_ex *)fep->tx_bd_base; + } else { + base = fep->rx_bd_base; + ring_size = fep->rx_ring_size; + ex_base = (struct bufdesc_ex *)fep->rx_bd_base; + } + + if (fep->bufdesc_ex) + return (struct bufdesc *)((ex_new_bd < ex_base) ? + (ex_new_bd + ring_size) : ex_new_bd); + else + return (new_bd < base) ? (new_bd + ring_size) : new_bd; +} + +static int fec_enet_get_bd_index(struct bufdesc *base, struct bufdesc *bdp, + struct fec_enet_private *fep) +{ + return ((const char *)bdp - (const char *)base) / fep->bufdesc_size; +} + +static int fec_enet_get_free_txdesc_num(struct fec_enet_private *fep) +{ + int entries; + + entries = ((const char *)fep->dirty_tx - + (const char *)fep->cur_tx) / fep->bufdesc_size - 1; + + return entries > 0 ? entries : entries + fep->tx_ring_size; +} + +static void *swap_buffer(void *bufaddr, int len) +{ + int i; + unsigned int *buf = bufaddr; + + for (i = 0; i < DIV_ROUND_UP(len, 4); i++, buf++) + *buf = cpu_to_be32(*buf); + + return bufaddr; +} + +static inline bool is_ipv4_pkt(struct sk_buff *skb) +{ + return skb->protocol == htons(ETH_P_IP) && ip_hdr(skb)->version == 4; +} + +static int +fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev) +{ + /* Only run for packets requiring a checksum. 
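+	 * The stack leaves the checksum field of a CHECKSUM_PARTIAL skb
+	 * unfilled, so zero it here; the controller inserts the final value
+	 * when BD_ENET_TX_IINS/BD_ENET_TX_PINS are set at submit time.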
*/ + if (skb->ip_summed != CHECKSUM_PARTIAL) + return 0; + + if (unlikely(skb_cow_head(skb, 0))) + return -1; + + if (is_ipv4_pkt(skb)) + ip_hdr(skb)->check = 0; + *(__sum16 *)(skb->head + skb->csum_start + skb->csum_offset) = 0; + + return 0; +} + +static void +fec_enet_submit_work(struct bufdesc *bdp, struct fec_enet_private *fep) +{ + const struct platform_device_id *id_entry = + platform_get_device_id(fep->pdev); + struct bufdesc *bdp_pre; + + bdp_pre = fec_enet_get_prevdesc(bdp, fep); + if ((id_entry->driver_data & FEC_QUIRK_ERR006358) && + !(bdp_pre->cbd_sc & BD_ENET_TX_READY)) { + fep->delay_work.trig_tx = true; + schedule_delayed_work(&(fep->delay_work.delay_work), + msecs_to_jiffies(1)); + } +} + +static int +fec_enet_txq_submit_frag_skb(struct sk_buff *skb, struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + const struct platform_device_id *id_entry = + platform_get_device_id(fep->pdev); + struct bufdesc *bdp = fep->cur_tx; + struct bufdesc_ex *ebdp; + int nr_frags = skb_shinfo(skb)->nr_frags; + int frag, frag_len; + unsigned short status; + unsigned int estatus = 0; + skb_frag_t *this_frag; + unsigned int index; + void *bufaddr; + int i; + + for (frag = 0; frag < nr_frags; frag++) { + this_frag = &skb_shinfo(skb)->frags[frag]; + bdp = fec_enet_get_nextdesc(bdp, fep); + ebdp = (struct bufdesc_ex *)bdp; + + status = bdp->cbd_sc; + status &= ~BD_ENET_TX_STATS; + status |= (BD_ENET_TX_TC | BD_ENET_TX_READY); + frag_len = skb_shinfo(skb)->frags[frag].size; + + /* Handle the last BD specially */ + if (frag == nr_frags - 1) { + status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST); + if (fep->bufdesc_ex) { + estatus |= BD_ENET_TX_INT; + if (unlikely(skb_shinfo(skb)->tx_flags & + SKBTX_HW_TSTAMP && fep->hwts_tx_en)) + estatus |= BD_ENET_TX_TS; + } + } + + if (fep->bufdesc_ex) { + if (skb->ip_summed == CHECKSUM_PARTIAL) + estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS; + ebdp->cbd_bdu = 0; + ebdp->cbd_esc = estatus; + } + + bufaddr = page_address(this_frag->page.p) + this_frag->page_offset; + + index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep); + if (((unsigned long) bufaddr) & FEC_ALIGNMENT || + id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) { + memcpy(fep->tx_bounce[index], bufaddr, frag_len); + bufaddr = fep->tx_bounce[index]; + + if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) + swap_buffer(bufaddr, frag_len); + } + + bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, bufaddr, + frag_len, DMA_TO_DEVICE); + if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) { + dev_kfree_skb_any(skb); + if (net_ratelimit()) + netdev_err(ndev, "Tx DMA memory map failed\n"); + goto dma_mapping_error; + } + + bdp->cbd_datlen = frag_len; + bdp->cbd_sc = status; + } + + fep->cur_tx = bdp; + + return 0; + +dma_mapping_error: + bdp = fep->cur_tx; + for (i = 0; i < frag; i++) { + bdp = fec_enet_get_nextdesc(bdp, fep); + dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, + bdp->cbd_datlen, DMA_TO_DEVICE); + } + return NETDEV_TX_OK; +} + +static int fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + const struct platform_device_id *id_entry = + platform_get_device_id(fep->pdev); + int nr_frags = skb_shinfo(skb)->nr_frags; + struct bufdesc *bdp, *last_bdp; + void *bufaddr; + unsigned short status; + unsigned short buflen; + unsigned int estatus = 0; + unsigned int index; + int entries_free; + int ret; + + entries_free = fec_enet_get_free_txdesc_num(fep); + if (entries_free < MAX_SKB_FRAGS + 1) { + 
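+		/* Worst case for a non-TSO frame: one BD for the linear
+		 * part of the skb plus one BD per page fragment, hence
+		 * MAX_SKB_FRAGS + 1 entries must be free.
+		 */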
dev_kfree_skb_any(skb); + if (net_ratelimit()) + netdev_err(ndev, "NOT enough BD for SG!\n"); + return NETDEV_TX_OK; + } + + /* Protocol checksum off-load for TCP and UDP. */ + if (fec_enet_clear_csum(skb, ndev)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + /* Fill in a Tx ring entry */ + bdp = fep->cur_tx; + status = bdp->cbd_sc; + status &= ~BD_ENET_TX_STATS; + + /* Set buffer length and buffer pointer */ + bufaddr = skb->data; + buflen = skb_headlen(skb); + + index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep); + if (((unsigned long) bufaddr) & FEC_ALIGNMENT || + id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) { + memcpy(fep->tx_bounce[index], skb->data, buflen); + bufaddr = fep->tx_bounce[index]; + + if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) + swap_buffer(bufaddr, buflen); + } + + /* Push the data cache so the CPM does not get stale memory + * data. + */ + bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, bufaddr, + buflen, DMA_TO_DEVICE); + if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) { + dev_kfree_skb_any(skb); + if (net_ratelimit()) + netdev_err(ndev, "Tx DMA memory map failed\n"); + return NETDEV_TX_OK; + } + + if (nr_frags) { + ret = fec_enet_txq_submit_frag_skb(skb, ndev); + if (ret) + return ret; + } else { + status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST); + if (fep->bufdesc_ex) { + estatus = BD_ENET_TX_INT; + if (unlikely(skb_shinfo(skb)->tx_flags & + SKBTX_HW_TSTAMP && fep->hwts_tx_en)) + estatus |= BD_ENET_TX_TS; + } + } + + if (fep->bufdesc_ex) { + + struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; + + if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP && + fep->hwts_tx_en)) + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + + if (skb->ip_summed == CHECKSUM_PARTIAL) + estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS; + + ebdp->cbd_bdu = 0; + ebdp->cbd_esc = estatus; + } + + last_bdp = fep->cur_tx; + index = fec_enet_get_bd_index(fep->tx_bd_base, last_bdp, fep); + /* Save skb pointer */ + fep->tx_skbuff[index] = skb; + + bdp->cbd_datlen = buflen; + + /* Send it on its way. Tell FEC it's ready, interrupt when done, + * it's the last BD of the frame, and to put the CRC on the end. + */ + status |= (BD_ENET_TX_READY | BD_ENET_TX_TC); + bdp->cbd_sc = status; + + fec_enet_submit_work(bdp, fep); + + /* If this was the last BD in the ring, start at the beginning again. 
*/ + bdp = fec_enet_get_nextdesc(last_bdp, fep); + + skb_tx_timestamp(skb); + + fep->cur_tx = bdp; + + /* Trigger transmission start */ + writel(0, fep->hwp + FEC_X_DES_ACTIVE); + + return 0; +} + +static int +fec_enet_txq_put_data_tso(struct sk_buff *skb, struct net_device *ndev, + struct bufdesc *bdp, int index, char *data, + int size, bool last_tcp, bool is_last) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + const struct platform_device_id *id_entry = + platform_get_device_id(fep->pdev); + struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; + unsigned short status; + unsigned int estatus = 0; + + status = bdp->cbd_sc; + status &= ~BD_ENET_TX_STATS; + + status |= (BD_ENET_TX_TC | BD_ENET_TX_READY); + bdp->cbd_datlen = size; + + if (((unsigned long) data) & FEC_ALIGNMENT || + id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) { + memcpy(fep->tx_bounce[index], data, size); + data = fep->tx_bounce[index]; + + if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) + swap_buffer(data, size); + } + + bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, data, + size, DMA_TO_DEVICE); + if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) { + dev_kfree_skb_any(skb); + if (net_ratelimit()) + netdev_err(ndev, "Tx DMA memory map failed\n"); + return NETDEV_TX_BUSY; + } + + if (fep->bufdesc_ex) { + if (skb->ip_summed == CHECKSUM_PARTIAL) + estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS; + ebdp->cbd_bdu = 0; + ebdp->cbd_esc = estatus; + } + + /* Handle the last BD specially */ + if (last_tcp) + status |= (BD_ENET_TX_LAST | BD_ENET_TX_TC); + if (is_last) { + status |= BD_ENET_TX_INTR; + if (fep->bufdesc_ex) + ebdp->cbd_esc |= BD_ENET_TX_INT; + } + + bdp->cbd_sc = status; + + return 0; +} + +static int +fec_enet_txq_put_hdr_tso(struct sk_buff *skb, struct net_device *ndev, + struct bufdesc *bdp, int index) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + const struct platform_device_id *id_entry = + platform_get_device_id(fep->pdev); + int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; + void *bufaddr; + unsigned long dmabuf; + unsigned short status; + unsigned int estatus = 0; + + status = bdp->cbd_sc; + status &= ~BD_ENET_TX_STATS; + status |= (BD_ENET_TX_TC | BD_ENET_TX_READY); + + bufaddr = fep->tso_hdrs + index * TSO_HEADER_SIZE; + dmabuf = fep->tso_hdrs_dma + index * TSO_HEADER_SIZE; + if (((unsigned long) bufaddr) & FEC_ALIGNMENT || + id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) { + memcpy(fep->tx_bounce[index], skb->data, hdr_len); + bufaddr = fep->tx_bounce[index]; + + if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) + swap_buffer(bufaddr, hdr_len); + + dmabuf = dma_map_single(&fep->pdev->dev, bufaddr, + hdr_len, DMA_TO_DEVICE); + if (dma_mapping_error(&fep->pdev->dev, dmabuf)) { + dev_kfree_skb_any(skb); + if (net_ratelimit()) + netdev_err(ndev, "Tx DMA memory map failed\n"); + return NETDEV_TX_BUSY; + } + } + + bdp->cbd_bufaddr = dmabuf; + bdp->cbd_datlen = hdr_len; + + if (fep->bufdesc_ex) { + if (skb->ip_summed == CHECKSUM_PARTIAL) + estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS; + ebdp->cbd_bdu = 0; + ebdp->cbd_esc = estatus; + } + + bdp->cbd_sc = status; + + return 0; +} + +static int fec_enet_txq_submit_tso(struct sk_buff *skb, struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + int total_len, data_left; + struct bufdesc *bdp = fep->cur_tx; + struct tso_t tso; + unsigned int index = 0; + int ret; + + if (tso_count_descs(skb) 
>= fec_enet_get_free_txdesc_num(fep)) { + dev_kfree_skb_any(skb); + if (net_ratelimit()) + netdev_err(ndev, "NOT enough BD for TSO!\n"); + return NETDEV_TX_OK; + } + + /* Protocol checksum off-load for TCP and UDP. */ + if (fec_enet_clear_csum(skb, ndev)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + /* Initialize the TSO handler, and prepare the first payload */ + tso_start(skb, &tso); + + total_len = skb->len - hdr_len; + while (total_len > 0) { + char *hdr; + + index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep); + data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len); + total_len -= data_left; + + /* prepare packet headers: MAC + IP + TCP */ + hdr = fep->tso_hdrs + index * TSO_HEADER_SIZE; + tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0); + ret = fec_enet_txq_put_hdr_tso(skb, ndev, bdp, index); + if (ret) + goto err_release; + + while (data_left > 0) { + int size; + + size = min_t(int, tso.size, data_left); + bdp = fec_enet_get_nextdesc(bdp, fep); + index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep); + ret = fec_enet_txq_put_data_tso(skb, ndev, bdp, index, tso.data, + size, size == data_left, + total_len == 0); + if (ret) + goto err_release; + + data_left -= size; + tso_build_data(skb, &tso, size); + } + + bdp = fec_enet_get_nextdesc(bdp, fep); + } + + /* Save skb pointer */ + fep->tx_skbuff[index] = skb; + + fec_enet_submit_work(bdp, fep); + + skb_tx_timestamp(skb); + fep->cur_tx = bdp; + + /* Trigger transmission start */ + writel(0, fep->hwp + FEC_X_DES_ACTIVE); + + return 0; + +err_release: + /* TODO: Release all used data descriptors for TSO */ + return ret; +} + +static netdev_tx_t +fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + int entries_free; + int ret; + + if (skb_is_gso(skb)) + ret = fec_enet_txq_submit_tso(skb, ndev); + else + ret = fec_enet_txq_submit_skb(skb, ndev); + if (ret) + return ret; + + entries_free = fec_enet_get_free_txdesc_num(fep); + if (entries_free <= fep->tx_stop_threshold) + netif_stop_queue(ndev); + + return NETDEV_TX_OK; +} + +/* Init RX & TX buffer descriptors + */ +static void fec_enet_bd_init(struct net_device *dev) +{ + struct fec_enet_private *fep = netdev_priv(dev); + struct bufdesc *bdp; + unsigned int i; + + /* Initialize the receive buffer descriptors. */ + bdp = fep->rx_bd_base; + for (i = 0; i < fep->rx_ring_size; i++) { + + /* Initialize the BD for every fragment in the page. */ + if (bdp->cbd_bufaddr) + bdp->cbd_sc = BD_ENET_RX_EMPTY; + else + bdp->cbd_sc = 0; + bdp = fec_enet_get_nextdesc(bdp, fep); + } + + /* Set the last buffer to wrap */ + bdp = fec_enet_get_prevdesc(bdp, fep); + bdp->cbd_sc |= BD_SC_WRAP; + + fep->cur_rx = fep->rx_bd_base; + + /* ...and the same for transmit */ + bdp = fep->tx_bd_base; + fep->cur_tx = bdp; + for (i = 0; i < fep->tx_ring_size; i++) { + + /* Initialize the BD for every fragment in the page. */ + bdp->cbd_sc = 0; + if (bdp->cbd_bufaddr && fep->tx_skbuff[i]) { + dev_kfree_skb_any(fep->tx_skbuff[i]); + fep->tx_skbuff[i] = NULL; + } + bdp->cbd_bufaddr = 0; + bdp = fec_enet_get_nextdesc(bdp, fep); + } + + /* Set the last buffer to wrap */ + bdp = fec_enet_get_prevdesc(bdp, fep); + bdp->cbd_sc |= BD_SC_WRAP; + fep->dirty_tx = bdp; +} + +/* This function is called to start or restart the FEC during a link + * change. This only happens when switching between half and full + * duplex. 
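+ * (fec_restart() is also invoked from the tx timeout worker and from
+ * fec_set_features(), both of which need the same full reinitialisation.)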
+ */ +static void +fec_restart(struct net_device *ndev, int duplex) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + const struct platform_device_id *id_entry = + platform_get_device_id(fep->pdev); + int i; + u32 val; + u32 temp_mac[2]; + u32 rcntl = OPT_FRAME_SIZE | 0x04; + u32 ecntl = 0x2; /* ETHEREN */ + + if (netif_running(ndev)) { + netif_device_detach(ndev); + napi_disable(&fep->napi); + netif_stop_queue(ndev); + netif_tx_lock_bh(ndev); + } + + /* Whack a reset. We should wait for this. */ + writel(1, fep->hwp + FEC_ECNTRL); + udelay(10); + + /* + * enet-mac reset will reset mac address registers too, + * so need to reconfigure it. + */ + if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) { + memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN); + writel(cpu_to_be32(temp_mac[0]), fep->hwp + FEC_ADDR_LOW); + writel(cpu_to_be32(temp_mac[1]), fep->hwp + FEC_ADDR_HIGH); + } + + /* Clear any outstanding interrupt. */ + writel(0xffc00000, fep->hwp + FEC_IEVENT); + + /* Set maximum receive buffer size. */ + writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE); + + fec_enet_bd_init(ndev); + + /* Set receive and transmit descriptor base. */ + writel(fep->bd_dma, fep->hwp + FEC_R_DES_START); + if (fep->bufdesc_ex) + writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc_ex) + * fep->rx_ring_size, fep->hwp + FEC_X_DES_START); + else + writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc) + * fep->rx_ring_size, fep->hwp + FEC_X_DES_START); + + + for (i = 0; i <= TX_RING_MOD_MASK; i++) { + if (fep->tx_skbuff[i]) { + dev_kfree_skb_any(fep->tx_skbuff[i]); + fep->tx_skbuff[i] = NULL; + } + } + + /* Enable MII mode */ + if (duplex) { + /* FD enable */ + writel(0x04, fep->hwp + FEC_X_CNTRL); + } else { + /* No Rcv on Xmit */ + rcntl |= 0x02; + writel(0x0, fep->hwp + FEC_X_CNTRL); + } + + fep->full_duplex = duplex; + + /* Set MII speed */ + writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); + +#if !defined(CONFIG_M5272) + /* set RX checksum */ + val = readl(fep->hwp + FEC_RACC); + if (fep->csum_flags & FLAG_RX_CSUM_ENABLED) + val |= FEC_RACC_OPTIONS; + else + val &= ~FEC_RACC_OPTIONS; + writel(val, fep->hwp + FEC_RACC); +#endif + + /* + * The phy interface and speed need to get configured + * differently on enet-mac. + */ + if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) { + /* Enable flow control and length check */ + rcntl |= 0x40000000 | 0x00000020; + + /* RGMII, RMII or MII */ + if (fep->phy_interface == PHY_INTERFACE_MODE_RGMII) + rcntl |= (1 << 6); + else if (fep->phy_interface == PHY_INTERFACE_MODE_RMII) + rcntl |= (1 << 8); + else + rcntl &= ~(1 << 8); + + /* 1G, 100M or 10M */ + if (fep->phy_dev) { + if (fep->phy_dev->speed == SPEED_1000) + ecntl |= (1 << 5); + else if (fep->phy_dev->speed == SPEED_100) + rcntl &= ~(1 << 9); + else + rcntl |= (1 << 9); + } + } else { +#ifdef FEC_MIIGSK_ENR + if (id_entry->driver_data & FEC_QUIRK_USE_GASKET) { + u32 cfgr; + /* disable the gasket and wait */ + writel(0, fep->hwp + FEC_MIIGSK_ENR); + while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4) + udelay(1); + + /* + * configure the gasket: + * RMII, 50 MHz, no loopback, no echo + * MII, 25 MHz, no loopback, no echo + */ + cfgr = (fep->phy_interface == PHY_INTERFACE_MODE_RMII) + ? 
BM_MIIGSK_CFGR_RMII : BM_MIIGSK_CFGR_MII; + if (fep->phy_dev && fep->phy_dev->speed == SPEED_10) + cfgr |= BM_MIIGSK_CFGR_FRCONT_10M; + writel(cfgr, fep->hwp + FEC_MIIGSK_CFGR); + + /* re-enable the gasket */ + writel(2, fep->hwp + FEC_MIIGSK_ENR); + } +#endif + } + +#if !defined(CONFIG_M5272) + /* enable pause frame*/ + if ((fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) || + ((fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) && + fep->phy_dev && fep->phy_dev->pause)) { + rcntl |= FEC_ENET_FCE; + + /* set FIFO threshold parameter to reduce overrun */ + writel(FEC_ENET_RSEM_V, fep->hwp + FEC_R_FIFO_RSEM); + writel(FEC_ENET_RSFL_V, fep->hwp + FEC_R_FIFO_RSFL); + writel(FEC_ENET_RAEM_V, fep->hwp + FEC_R_FIFO_RAEM); + writel(FEC_ENET_RAFL_V, fep->hwp + FEC_R_FIFO_RAFL); + + /* OPD */ + writel(FEC_ENET_OPD_V, fep->hwp + FEC_OPD); + } else { + rcntl &= ~FEC_ENET_FCE; + } +#endif /* !defined(CONFIG_M5272) */ + + writel(rcntl, fep->hwp + FEC_R_CNTRL); + + /* Setup multicast filter. */ + set_multicast_list(ndev); +#ifndef CONFIG_M5272 + writel(0, fep->hwp + FEC_HASH_TABLE_HIGH); + writel(0, fep->hwp + FEC_HASH_TABLE_LOW); +#endif + + if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) { + /* enable ENET endian swap */ + ecntl |= (1 << 8); + /* enable ENET store and forward mode */ + writel(1 << 8, fep->hwp + FEC_X_WMRK); + } + + if (fep->bufdesc_ex) + ecntl |= (1 << 4); + +#ifndef CONFIG_M5272 + /* Enable the MIB statistic event counters */ + writel(0 << 31, fep->hwp + FEC_MIB_CTRLSTAT); +#endif + + /* And last, enable the transmit and receive processing */ + writel(ecntl, fep->hwp + FEC_ECNTRL); + writel(0, fep->hwp + FEC_R_DES_ACTIVE); + + if (fep->bufdesc_ex) + fec_ptp_start_cyclecounter(ndev); + + /* Enable interrupts we wish to service */ + writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); + + if (netif_running(ndev)) { + netif_tx_unlock_bh(ndev); + netif_wake_queue(ndev); + napi_enable(&fep->napi); + netif_device_attach(ndev); + } +} + +static void +fec_stop(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + const struct platform_device_id *id_entry = + platform_get_device_id(fep->pdev); + u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & (1 << 8); + + /* We cannot expect a graceful transmit stop without link !!! */ + if (fep->link) { + writel(1, fep->hwp + FEC_X_CNTRL); /* Graceful transmit stop */ + udelay(10); + if (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_GRA)) + netdev_err(ndev, "Graceful transmit stop did not complete!\n"); + } + + /* Whack a reset. We should wait for this. 
*/ + writel(1, fep->hwp + FEC_ECNTRL); + udelay(10); + writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); + writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); + + /* We have to keep ENET enabled to have MII interrupt stay working */ + if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) { + writel(2, fep->hwp + FEC_ECNTRL); + writel(rmii_mode, fep->hwp + FEC_R_CNTRL); + } +} + + +static void +fec_timeout(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + + ndev->stats.tx_errors++; + + fep->delay_work.timeout = true; + schedule_delayed_work(&(fep->delay_work.delay_work), 0); +} + +static void fec_enet_work(struct work_struct *work) +{ + struct fec_enet_private *fep = + container_of(work, + struct fec_enet_private, + delay_work.delay_work.work); + + if (fep->delay_work.timeout) { + fep->delay_work.timeout = false; + fec_restart(fep->netdev, fep->full_duplex); + netif_wake_queue(fep->netdev); + } + + if (fep->delay_work.trig_tx) { + fep->delay_work.trig_tx = false; + writel(0, fep->hwp + FEC_X_DES_ACTIVE); + } +} + +static void +fec_enet_tx(struct net_device *ndev) +{ + struct fec_enet_private *fep; + struct bufdesc *bdp; + unsigned short status; + struct sk_buff *skb; + int index = 0; + int entries_free; + + fep = netdev_priv(ndev); + bdp = fep->dirty_tx; + + /* get next bdp of dirty_tx */ + bdp = fec_enet_get_nextdesc(bdp, fep); + + while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) { + + /* current queue is empty */ + if (bdp == fep->cur_tx) + break; + + index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep); + + skb = fep->tx_skbuff[index]; + if (!IS_TSO_HEADER(fep, bdp->cbd_bufaddr)) + dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, + bdp->cbd_datlen, DMA_TO_DEVICE); + bdp->cbd_bufaddr = 0; + if (!skb) { + bdp = fec_enet_get_nextdesc(bdp, fep); + continue; + } + + /* Check for errors. */ + if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC | + BD_ENET_TX_RL | BD_ENET_TX_UN | + BD_ENET_TX_CSL)) { + ndev->stats.tx_errors++; + if (status & BD_ENET_TX_HB) /* No heartbeat */ + ndev->stats.tx_heartbeat_errors++; + if (status & BD_ENET_TX_LC) /* Late collision */ + ndev->stats.tx_window_errors++; + if (status & BD_ENET_TX_RL) /* Retrans limit */ + ndev->stats.tx_aborted_errors++; + if (status & BD_ENET_TX_UN) /* Underrun */ + ndev->stats.tx_fifo_errors++; + if (status & BD_ENET_TX_CSL) /* Carrier lost */ + ndev->stats.tx_carrier_errors++; + } else { + ndev->stats.tx_packets++; + ndev->stats.tx_bytes += skb->len; + } + + if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) && + fep->bufdesc_ex) { + struct skb_shared_hwtstamps shhwtstamps; + unsigned long flags; + struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; + + memset(&shhwtstamps, 0, sizeof(shhwtstamps)); + spin_lock_irqsave(&fep->tmreg_lock, flags); + shhwtstamps.hwtstamp = ns_to_ktime( + timecounter_cyc2time(&fep->tc, ebdp->ts)); + spin_unlock_irqrestore(&fep->tmreg_lock, flags); + skb_tstamp_tx(skb, &shhwtstamps); + } + + if (status & BD_ENET_TX_READY) + netdev_err(ndev, "HEY! Enet xmit interrupt and TX_READY\n"); + + /* Deferred means some collisions occurred during transmit, + * but we eventually sent the packet OK. 
+ */ + if (status & BD_ENET_TX_DEF) + ndev->stats.collisions++; + + /* Free the sk buffer associated with this last transmit */ + dev_kfree_skb_any(skb); + fep->tx_skbuff[index] = NULL; + + fep->dirty_tx = bdp; + + /* Update pointer to next buffer descriptor to be transmitted */ + bdp = fec_enet_get_nextdesc(bdp, fep); + + /* Since we have freed up a buffer, the ring is no longer full + */ + if (netif_queue_stopped(ndev)) { + entries_free = fec_enet_get_free_txdesc_num(fep); + if (entries_free >= fep->tx_wake_threshold) + netif_wake_queue(ndev); + } + } + return; +} + +/* During a receive, the cur_rx points to the current incoming buffer. + * When we update through the ring, if the next incoming buffer has + * not been given to the system, we just set the empty indicator, + * effectively tossing the packet. + */ +static int +fec_enet_rx(struct net_device *ndev, int budget) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + const struct platform_device_id *id_entry = + platform_get_device_id(fep->pdev); + struct bufdesc *bdp; + unsigned short status; + struct sk_buff *skb; + ushort pkt_len; + __u8 *data; + int pkt_received = 0; + struct bufdesc_ex *ebdp = NULL; + bool vlan_packet_rcvd = false; + u16 vlan_tag; + int index = 0; + +#ifdef CONFIG_M532x + flush_cache_all(); +#endif + + /* First, grab all of the stats for the incoming packet. + * These get messed up if we get called due to a busy condition. + */ + bdp = fep->cur_rx; + + while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) { + + if (pkt_received >= budget) + break; + pkt_received++; + + /* Since we have allocated space to hold a complete frame, + * the last indicator should be set. + */ + if ((status & BD_ENET_RX_LAST) == 0) + netdev_err(ndev, "rcv is not +last\n"); + + if (!fep->opened) + goto rx_processing_done; + + /* Check for errors. */ + if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO | + BD_ENET_RX_CR | BD_ENET_RX_OV)) { + ndev->stats.rx_errors++; + if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH)) { + /* Frame too long or too short. */ + ndev->stats.rx_length_errors++; + } + if (status & BD_ENET_RX_NO) /* Frame alignment */ + ndev->stats.rx_frame_errors++; + if (status & BD_ENET_RX_CR) /* CRC Error */ + ndev->stats.rx_crc_errors++; + if (status & BD_ENET_RX_OV) /* FIFO overrun */ + ndev->stats.rx_fifo_errors++; + } + + /* Report late collisions as a frame error. + * On this error, the BD is closed, but we don't know what we + * have in the buffer. So, just drop this frame on the floor. + */ + if (status & BD_ENET_RX_CL) { + ndev->stats.rx_errors++; + ndev->stats.rx_frame_errors++; + goto rx_processing_done; + } + + /* Process the incoming frame. 
*/ + ndev->stats.rx_packets++; + pkt_len = bdp->cbd_datlen; + ndev->stats.rx_bytes += pkt_len; + + index = fec_enet_get_bd_index(fep->rx_bd_base, bdp, fep); + data = fep->rx_skbuff[index]->data; + dma_sync_single_for_cpu(&fep->pdev->dev, bdp->cbd_bufaddr, + FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE); + + if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) + swap_buffer(data, pkt_len); + + /* Extract the enhanced buffer descriptor */ + ebdp = NULL; + if (fep->bufdesc_ex) + ebdp = (struct bufdesc_ex *)bdp; + + /* If this is a VLAN packet remove the VLAN Tag */ + vlan_packet_rcvd = false; + if ((ndev->features & NETIF_F_HW_VLAN_CTAG_RX) && + fep->bufdesc_ex && (ebdp->cbd_esc & BD_ENET_RX_VLAN)) { + /* Push and remove the vlan tag */ + struct vlan_hdr *vlan_header = + (struct vlan_hdr *) (data + ETH_HLEN); + vlan_tag = ntohs(vlan_header->h_vlan_TCI); + pkt_len -= VLAN_HLEN; + + vlan_packet_rcvd = true; + } + + /* This does 16 byte alignment, exactly what we need. + * The packet length includes FCS, but we don't want to + * include that when passing upstream as it messes up + * bridging applications. + */ + skb = netdev_alloc_skb(ndev, pkt_len - 4 + NET_IP_ALIGN); + + if (unlikely(!skb)) { + ndev->stats.rx_dropped++; + } else { + int payload_offset = (2 * ETH_ALEN); + skb_reserve(skb, NET_IP_ALIGN); + skb_put(skb, pkt_len - 4); /* Make room */ + + /* Extract the frame data without the VLAN header. */ + skb_copy_to_linear_data(skb, data, (2 * ETH_ALEN)); + if (vlan_packet_rcvd) + payload_offset = (2 * ETH_ALEN) + VLAN_HLEN; + skb_copy_to_linear_data_offset(skb, (2 * ETH_ALEN), + data + payload_offset, + pkt_len - 4 - (2 * ETH_ALEN)); + + skb->protocol = eth_type_trans(skb, ndev); + + /* Get receive timestamp from the skb */ + if (fep->hwts_rx_en && fep->bufdesc_ex) { + struct skb_shared_hwtstamps *shhwtstamps = + skb_hwtstamps(skb); + unsigned long flags; + + memset(shhwtstamps, 0, sizeof(*shhwtstamps)); + + spin_lock_irqsave(&fep->tmreg_lock, flags); + shhwtstamps->hwtstamp = ns_to_ktime( + timecounter_cyc2time(&fep->tc, ebdp->ts)); + spin_unlock_irqrestore(&fep->tmreg_lock, flags); + } + + if (fep->bufdesc_ex && + (fep->csum_flags & FLAG_RX_CSUM_ENABLED)) { + if (!(ebdp->cbd_esc & FLAG_RX_CSUM_ERROR)) { + /* don't check it */ + skb->ip_summed = CHECKSUM_UNNECESSARY; + } else { + skb_checksum_none_assert(skb); + } + } + + /* Handle received VLAN packets */ + if (vlan_packet_rcvd) + __vlan_hwaccel_put_tag(skb, + htons(ETH_P_8021Q), + vlan_tag); + + napi_gro_receive(&fep->napi, skb); + } + + dma_sync_single_for_device(&fep->pdev->dev, bdp->cbd_bufaddr, + FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE); +rx_processing_done: + /* Clear the status flags for this buffer */ + status &= ~BD_ENET_RX_STATS; + + /* Mark the buffer empty */ + status |= BD_ENET_RX_EMPTY; + bdp->cbd_sc = status; + + if (fep->bufdesc_ex) { + struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; + + ebdp->cbd_esc = BD_ENET_RX_INT; + ebdp->cbd_prot = 0; + ebdp->cbd_bdu = 0; + } + + /* Update BD pointer to next entry */ + bdp = fec_enet_get_nextdesc(bdp, fep); + + /* Doing this here will keep the FEC running while we process + * incoming frames. On a heavily loaded network, we should be + * able to keep up at the expense of system resources. 
+ */ + writel(0, fep->hwp + FEC_R_DES_ACTIVE); + } + fep->cur_rx = bdp; + + return pkt_received; +} + +static irqreturn_t +fec_enet_interrupt(int irq, void *dev_id) +{ + struct net_device *ndev = dev_id; + struct fec_enet_private *fep = netdev_priv(ndev); + uint int_events; + irqreturn_t ret = IRQ_NONE; + + do { + int_events = readl(fep->hwp + FEC_IEVENT); + writel(int_events, fep->hwp + FEC_IEVENT); + + if (int_events & (FEC_ENET_RXF | FEC_ENET_TXF)) { + ret = IRQ_HANDLED; + + /* Disable the RX interrupt */ + if (napi_schedule_prep(&fep->napi)) { + writel(FEC_RX_DISABLED_IMASK, + fep->hwp + FEC_IMASK); + __napi_schedule(&fep->napi); + } + } + + if (int_events & FEC_ENET_MII) { + ret = IRQ_HANDLED; + complete(&fep->mdio_done); + } + } while (int_events); + + return ret; +} + +static int fec_enet_rx_napi(struct napi_struct *napi, int budget) +{ + struct net_device *ndev = napi->dev; + int pkts = fec_enet_rx(ndev, budget); + struct fec_enet_private *fep = netdev_priv(ndev); + + fec_enet_tx(ndev); + + if (pkts < budget) { + napi_complete(napi); + writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); + } + return pkts; +} + +/* ------------------------------------------------------------------------- */ +static void fec_get_mac(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + struct fec_platform_data *pdata = dev_get_platdata(&fep->pdev->dev); + unsigned char *iap, tmpaddr[ETH_ALEN]; + + /* + * try to get mac address in following order: + * + * 1) module parameter via kernel command line in form + * fec.macaddr=0x00,0x04,0x9f,0x01,0x30,0xe0 + */ + iap = macaddr; + + /* + * 2) from device tree data + */ + if (!is_valid_ether_addr(iap)) { + struct device_node *np = fep->pdev->dev.of_node; + if (np) { + const char *mac = of_get_mac_address(np); + if (mac) + iap = (unsigned char *) mac; + } + } + + /* + * 3) from flash or fuse (via platform data) + */ + if (!is_valid_ether_addr(iap)) { +#ifdef CONFIG_M5272 + if (FEC_FLASHMAC) + iap = (unsigned char *)FEC_FLASHMAC; +#else + if (pdata) + iap = (unsigned char *)&pdata->mac; +#endif + } + + /* + * 4) FEC mac registers set by bootloader + */ + if (!is_valid_ether_addr(iap)) { + *((__be32 *) &tmpaddr[0]) = + cpu_to_be32(readl(fep->hwp + FEC_ADDR_LOW)); + *((__be16 *) &tmpaddr[4]) = + cpu_to_be16(readl(fep->hwp + FEC_ADDR_HIGH) >> 16); + iap = &tmpaddr[0]; + } + + /* + * 5) random mac address + */ + if (!is_valid_ether_addr(iap)) { + /* Report it and use a random ethernet address instead */ + netdev_err(ndev, "Invalid MAC address: %pM\n", iap); + eth_hw_addr_random(ndev); + netdev_info(ndev, "Using random MAC address: %pM\n", + ndev->dev_addr); + return; + } + + memcpy(ndev->dev_addr, iap, ETH_ALEN); + + /* Adjust MAC if using macaddr */ + if (iap == macaddr) + ndev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->dev_id; +} + +/* ------------------------------------------------------------------------- */ + +/* + * Phy section + */ +static void fec_enet_adjust_link(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + struct phy_device *phy_dev = fep->phy_dev; + int status_change = 0; + + /* Prevent a state halted on mii error */ + if (fep->mii_timeout && phy_dev->state == PHY_HALTED) { + phy_dev->state = PHY_RESUMING; + return; + } + + if (phy_dev->link) { + if (!fep->link) { + fep->link = phy_dev->link; + status_change = 1; + } + + if (fep->full_duplex != phy_dev->duplex) + status_change = 1; + + if (phy_dev->speed != fep->speed) { + fep->speed = phy_dev->speed; + status_change = 1; + } + + /* 
if any of the above changed restart the FEC */ + if (status_change) + fec_restart(ndev, phy_dev->duplex); + } else { + if (fep->link) { + fec_stop(ndev); + fep->link = phy_dev->link; + status_change = 1; + } + } + + if (status_change) + phy_print_status(phy_dev); +} + +static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum) +{ + struct fec_enet_private *fep = bus->priv; + unsigned long time_left; + + fep->mii_timeout = 0; + init_completion(&fep->mdio_done); + + /* start a read op */ + writel(FEC_MMFR_ST | FEC_MMFR_OP_READ | + FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) | + FEC_MMFR_TA, fep->hwp + FEC_MII_DATA); + + /* wait for end of transfer */ + time_left = wait_for_completion_timeout(&fep->mdio_done, + usecs_to_jiffies(FEC_MII_TIMEOUT)); + if (time_left == 0) { + fep->mii_timeout = 1; + netdev_err(fep->netdev, "MDIO read timeout\n"); + return -ETIMEDOUT; + } + + /* return value */ + return FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA)); +} + +static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum, + u16 value) +{ + struct fec_enet_private *fep = bus->priv; + unsigned long time_left; + + fep->mii_timeout = 0; + init_completion(&fep->mdio_done); + + /* start a write op */ + writel(FEC_MMFR_ST | FEC_MMFR_OP_WRITE | + FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) | + FEC_MMFR_TA | FEC_MMFR_DATA(value), + fep->hwp + FEC_MII_DATA); + + /* wait for end of transfer */ + time_left = wait_for_completion_timeout(&fep->mdio_done, + usecs_to_jiffies(FEC_MII_TIMEOUT)); + if (time_left == 0) { + fep->mii_timeout = 1; + netdev_err(fep->netdev, "MDIO write timeout\n"); + return -ETIMEDOUT; + } + + return 0; +} + +static int fec_enet_clk_enable(struct net_device *ndev, bool enable) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + int ret; + + if (enable) { + ret = clk_prepare_enable(fep->clk_ahb); + if (ret) + return ret; + ret = clk_prepare_enable(fep->clk_ipg); + if (ret) + goto failed_clk_ipg; + if (fep->clk_enet_out) { + ret = clk_prepare_enable(fep->clk_enet_out); + if (ret) + goto failed_clk_enet_out; + } + if (fep->clk_ptp) { + ret = clk_prepare_enable(fep->clk_ptp); + if (ret) + goto failed_clk_ptp; + } + } else { + clk_disable_unprepare(fep->clk_ahb); + clk_disable_unprepare(fep->clk_ipg); + if (fep->clk_enet_out) + clk_disable_unprepare(fep->clk_enet_out); + if (fep->clk_ptp) + clk_disable_unprepare(fep->clk_ptp); + } + + return 0; +failed_clk_ptp: + if (fep->clk_enet_out) + clk_disable_unprepare(fep->clk_enet_out); +failed_clk_enet_out: + clk_disable_unprepare(fep->clk_ipg); +failed_clk_ipg: + clk_disable_unprepare(fep->clk_ahb); + + return ret; +} + +static int fec_enet_mii_probe(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + const struct platform_device_id *id_entry = + platform_get_device_id(fep->pdev); + struct phy_device *phy_dev = NULL; + char mdio_bus_id[MII_BUS_ID_SIZE]; + char phy_name[MII_BUS_ID_SIZE + 3]; + int phy_id; + int dev_id = fep->dev_id; + + fep->phy_dev = NULL; + + /* check for attached phy */ + for (phy_id = 0; (phy_id < PHY_MAX_ADDR); phy_id++) { + if ((fep->mii_bus->phy_mask & (1 << phy_id))) + continue; + if (fep->mii_bus->phy_map[phy_id] == NULL) + continue; + if (fep->mii_bus->phy_map[phy_id]->phy_id == 0) + continue; + if (dev_id--) + continue; + strncpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE); + break; + } + + if (phy_id >= PHY_MAX_ADDR) { + netdev_info(ndev, "no PHY, assuming direct connection to switch\n"); + strncpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE); + phy_id = 0; + } + + 
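+	/* Attach to the PHY found above (or to the fixed-PHY fallback),
+	 * named with the standard "<mdio-bus-id>:<phy-addr>" format.
+	 */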
snprintf(phy_name, sizeof(phy_name), PHY_ID_FMT, mdio_bus_id, phy_id); + phy_dev = phy_connect(ndev, phy_name, &fec_enet_adjust_link, + fep->phy_interface); + if (IS_ERR(phy_dev)) { + netdev_err(ndev, "could not attach to PHY\n"); + return PTR_ERR(phy_dev); + } + + /* mask with MAC supported features */ + if (id_entry->driver_data & FEC_QUIRK_HAS_GBIT) { + phy_dev->supported &= PHY_GBIT_FEATURES; +#if !defined(CONFIG_M5272) + phy_dev->supported |= SUPPORTED_Pause; +#endif + } + else + phy_dev->supported &= PHY_BASIC_FEATURES; + + phy_dev->advertising = phy_dev->supported; + + fep->phy_dev = phy_dev; + fep->link = 0; + fep->full_duplex = 0; + + netdev_info(ndev, "Freescale FEC PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n", + fep->phy_dev->drv->name, dev_name(&fep->phy_dev->dev), + fep->phy_dev->irq); + + return 0; +} + +static int fec_enet_mii_init(struct platform_device *pdev) +{ + static struct mii_bus *fec0_mii_bus; + struct net_device *ndev = platform_get_drvdata(pdev); + struct fec_enet_private *fep = netdev_priv(ndev); + const struct platform_device_id *id_entry = + platform_get_device_id(fep->pdev); + int err = -ENXIO, i; + + /* + * The dual fec interfaces are not equivalent with enet-mac. + * Here are the differences: + * + * - fec0 supports MII & RMII modes while fec1 only supports RMII + * - fec0 acts as the 1588 time master while fec1 is slave + * - external phys can only be configured by fec0 + * + * That is to say fec1 can not work independently. It only works + * when fec0 is working. The reason behind this design is that the + * second interface is added primarily for Switch mode. + * + * Because of the last point above, both phys are attached on fec0 + * mdio interface in board design, and need to be configured by + * fec0 mii_bus. + */ + if ((id_entry->driver_data & FEC_QUIRK_ENET_MAC) && fep->dev_id > 0) { + /* fec1 uses fec0 mii_bus */ + if (mii_cnt && fec0_mii_bus) { + fep->mii_bus = fec0_mii_bus; + mii_cnt++; + return 0; + } + return -ENOENT; + } + + fep->mii_timeout = 0; + + /* + * Set MII speed to 2.5 MHz (= clk_get_rate() / 2 * phy_speed) + * + * The formula for FEC MDC is 'ref_freq / (MII_SPEED x 2)' while + * for ENET-MAC is 'ref_freq / ((MII_SPEED + 1) x 2)'. The i.MX28 + * Reference Manual has an error on this, and gets fixed on i.MX6Q + * document. 
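+ *
+ * Worked example, assuming a 66 MHz ipg clock: DIV_ROUND_UP(66000000,
+ * 5000000) = 14; the ENET-MAC adjustment below gives 13, and the final
+ * left shift places that divider in the register's speed field, so
+ * MDC = 66 MHz / ((13 + 1) x 2), roughly 2.36 MHz — just under the
+ * 2.5 MHz target.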
+ */ + fep->phy_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 5000000); + if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) + fep->phy_speed--; + fep->phy_speed <<= 1; + writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); + + fep->mii_bus = mdiobus_alloc(); + if (fep->mii_bus == NULL) { + err = -ENOMEM; + goto err_out; + } + + fep->mii_bus->name = "fec_enet_mii_bus"; + fep->mii_bus->read = fec_enet_mdio_read; + fep->mii_bus->write = fec_enet_mdio_write; + snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", + pdev->name, fep->dev_id + 1); + fep->mii_bus->priv = fep; + fep->mii_bus->parent = &pdev->dev; + + fep->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); + if (!fep->mii_bus->irq) { + err = -ENOMEM; + goto err_out_free_mdiobus; + } + + for (i = 0; i < PHY_MAX_ADDR; i++) + fep->mii_bus->irq[i] = PHY_POLL; + + if (mdiobus_register(fep->mii_bus)) + goto err_out_free_mdio_irq; + + mii_cnt++; + + /* save fec0 mii_bus */ + if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) + fec0_mii_bus = fep->mii_bus; + + return 0; + +err_out_free_mdio_irq: + kfree(fep->mii_bus->irq); +err_out_free_mdiobus: + mdiobus_free(fep->mii_bus); +err_out: + return err; +} + +static void fec_enet_mii_remove(struct fec_enet_private *fep) +{ + if (--mii_cnt == 0) { + mdiobus_unregister(fep->mii_bus); + kfree(fep->mii_bus->irq); + mdiobus_free(fep->mii_bus); + } +} + +static int fec_enet_get_settings(struct net_device *ndev, + struct ethtool_cmd *cmd) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + struct phy_device *phydev = fep->phy_dev; + + if (!phydev) + return -ENODEV; + + return phy_ethtool_gset(phydev, cmd); +} + +static int fec_enet_set_settings(struct net_device *ndev, + struct ethtool_cmd *cmd) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + struct phy_device *phydev = fep->phy_dev; + + if (!phydev) + return -ENODEV; + + return phy_ethtool_sset(phydev, cmd); +} + +static void fec_enet_get_drvinfo(struct net_device *ndev, + struct ethtool_drvinfo *info) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + + strlcpy(info->driver, fep->pdev->dev.driver->name, + sizeof(info->driver)); + strlcpy(info->version, "Revision: 1.0", sizeof(info->version)); + strlcpy(info->bus_info, dev_name(&ndev->dev), sizeof(info->bus_info)); +} + +static int fec_enet_get_ts_info(struct net_device *ndev, + struct ethtool_ts_info *info) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + + if (fep->bufdesc_ex) { + + info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE | + SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + if (fep->ptp_clock) + info->phc_index = ptp_clock_index(fep->ptp_clock); + else + info->phc_index = -1; + + info->tx_types = (1 << HWTSTAMP_TX_OFF) | + (1 << HWTSTAMP_TX_ON); + + info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | + (1 << HWTSTAMP_FILTER_ALL); + return 0; + } else { + return ethtool_op_get_ts_info(ndev, info); + } +} + +#if !defined(CONFIG_M5272) + +static void fec_enet_get_pauseparam(struct net_device *ndev, + struct ethtool_pauseparam *pause) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + + pause->autoneg = (fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) != 0; + pause->tx_pause = (fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) != 0; + pause->rx_pause = pause->tx_pause; +} + +static int fec_enet_set_pauseparam(struct net_device *ndev, + struct ethtool_pauseparam *pause) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + + if (pause->tx_pause != 
pause->rx_pause) { + netdev_info(ndev, + "hardware only support enable/disable both tx and rx"); + return -EINVAL; + } + + fep->pause_flag = 0; + + /* tx pause must be same as rx pause */ + fep->pause_flag |= pause->rx_pause ? FEC_PAUSE_FLAG_ENABLE : 0; + fep->pause_flag |= pause->autoneg ? FEC_PAUSE_FLAG_AUTONEG : 0; + + if (pause->rx_pause || pause->autoneg) { + fep->phy_dev->supported |= ADVERTISED_Pause; + fep->phy_dev->advertising |= ADVERTISED_Pause; + } else { + fep->phy_dev->supported &= ~ADVERTISED_Pause; + fep->phy_dev->advertising &= ~ADVERTISED_Pause; + } + + if (pause->autoneg) { + if (netif_running(ndev)) + fec_stop(ndev); + phy_start_aneg(fep->phy_dev); + } + if (netif_running(ndev)) + fec_restart(ndev, 0); + + return 0; +} + +static const struct fec_stat { + char name[ETH_GSTRING_LEN]; + u16 offset; +} fec_stats[] = { + /* RMON TX */ + { "tx_dropped", RMON_T_DROP }, + { "tx_packets", RMON_T_PACKETS }, + { "tx_broadcast", RMON_T_BC_PKT }, + { "tx_multicast", RMON_T_MC_PKT }, + { "tx_crc_errors", RMON_T_CRC_ALIGN }, + { "tx_undersize", RMON_T_UNDERSIZE }, + { "tx_oversize", RMON_T_OVERSIZE }, + { "tx_fragment", RMON_T_FRAG }, + { "tx_jabber", RMON_T_JAB }, + { "tx_collision", RMON_T_COL }, + { "tx_64byte", RMON_T_P64 }, + { "tx_65to127byte", RMON_T_P65TO127 }, + { "tx_128to255byte", RMON_T_P128TO255 }, + { "tx_256to511byte", RMON_T_P256TO511 }, + { "tx_512to1023byte", RMON_T_P512TO1023 }, + { "tx_1024to2047byte", RMON_T_P1024TO2047 }, + { "tx_GTE2048byte", RMON_T_P_GTE2048 }, + { "tx_octets", RMON_T_OCTETS }, + + /* IEEE TX */ + { "IEEE_tx_drop", IEEE_T_DROP }, + { "IEEE_tx_frame_ok", IEEE_T_FRAME_OK }, + { "IEEE_tx_1col", IEEE_T_1COL }, + { "IEEE_tx_mcol", IEEE_T_MCOL }, + { "IEEE_tx_def", IEEE_T_DEF }, + { "IEEE_tx_lcol", IEEE_T_LCOL }, + { "IEEE_tx_excol", IEEE_T_EXCOL }, + { "IEEE_tx_macerr", IEEE_T_MACERR }, + { "IEEE_tx_cserr", IEEE_T_CSERR }, + { "IEEE_tx_sqe", IEEE_T_SQE }, + { "IEEE_tx_fdxfc", IEEE_T_FDXFC }, + { "IEEE_tx_octets_ok", IEEE_T_OCTETS_OK }, + + /* RMON RX */ + { "rx_packets", RMON_R_PACKETS }, + { "rx_broadcast", RMON_R_BC_PKT }, + { "rx_multicast", RMON_R_MC_PKT }, + { "rx_crc_errors", RMON_R_CRC_ALIGN }, + { "rx_undersize", RMON_R_UNDERSIZE }, + { "rx_oversize", RMON_R_OVERSIZE }, + { "rx_fragment", RMON_R_FRAG }, + { "rx_jabber", RMON_R_JAB }, + { "rx_64byte", RMON_R_P64 }, + { "rx_65to127byte", RMON_R_P65TO127 }, + { "rx_128to255byte", RMON_R_P128TO255 }, + { "rx_256to511byte", RMON_R_P256TO511 }, + { "rx_512to1023byte", RMON_R_P512TO1023 }, + { "rx_1024to2047byte", RMON_R_P1024TO2047 }, + { "rx_GTE2048byte", RMON_R_P_GTE2048 }, + { "rx_octets", RMON_R_OCTETS }, + + /* IEEE RX */ + { "IEEE_rx_drop", IEEE_R_DROP }, + { "IEEE_rx_frame_ok", IEEE_R_FRAME_OK }, + { "IEEE_rx_crc", IEEE_R_CRC }, + { "IEEE_rx_align", IEEE_R_ALIGN }, + { "IEEE_rx_macerr", IEEE_R_MACERR }, + { "IEEE_rx_fdxfc", IEEE_R_FDXFC }, + { "IEEE_rx_octets_ok", IEEE_R_OCTETS_OK }, +}; + +static void fec_enet_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *data) +{ + struct fec_enet_private *fep = netdev_priv(dev); + int i; + + for (i = 0; i < ARRAY_SIZE(fec_stats); i++) + data[i] = readl(fep->hwp + fec_stats[i].offset); +} + +static void fec_enet_get_strings(struct net_device *netdev, + u32 stringset, u8 *data) +{ + int i; + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < ARRAY_SIZE(fec_stats); i++) + memcpy(data + i * ETH_GSTRING_LEN, + fec_stats[i].name, ETH_GSTRING_LEN); + break; + } +} + +static int fec_enet_get_sset_count(struct net_device 
*dev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(fec_stats); + default: + return -EOPNOTSUPP; + } +} +#endif /* !defined(CONFIG_M5272) */ + +static int fec_enet_nway_reset(struct net_device *dev) +{ + struct fec_enet_private *fep = netdev_priv(dev); + struct phy_device *phydev = fep->phy_dev; + + if (!phydev) + return -ENODEV; + + return genphy_restart_aneg(phydev); +} + +static const struct ethtool_ops fec_enet_ethtool_ops = { +#if !defined(CONFIG_M5272) + .get_pauseparam = fec_enet_get_pauseparam, + .set_pauseparam = fec_enet_set_pauseparam, +#endif + .get_settings = fec_enet_get_settings, + .set_settings = fec_enet_set_settings, + .get_drvinfo = fec_enet_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_ts_info = fec_enet_get_ts_info, + .nway_reset = fec_enet_nway_reset, +#ifndef CONFIG_M5272 + .get_ethtool_stats = fec_enet_get_ethtool_stats, + .get_strings = fec_enet_get_strings, + .get_sset_count = fec_enet_get_sset_count, +#endif +}; + +static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + struct phy_device *phydev = fep->phy_dev; + + if (!netif_running(ndev)) + return -EINVAL; + + if (!phydev) + return -ENODEV; + + if (fep->bufdesc_ex) { + if (cmd == SIOCSHWTSTAMP) + return fec_ptp_set(ndev, rq); + if (cmd == SIOCGHWTSTAMP) + return fec_ptp_get(ndev, rq); + } + + return phy_mii_ioctl(phydev, rq, cmd); +} + +static void fec_enet_free_buffers(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + unsigned int i; + struct sk_buff *skb; + struct bufdesc *bdp; + + bdp = fep->rx_bd_base; + for (i = 0; i < fep->rx_ring_size; i++) { + skb = fep->rx_skbuff[i]; + + if (bdp->cbd_bufaddr) + dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, + FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE); + if (skb) + dev_kfree_skb(skb); + bdp = fec_enet_get_nextdesc(bdp, fep); + } + + bdp = fep->tx_bd_base; + for (i = 0; i < fep->tx_ring_size; i++) + kfree(fep->tx_bounce[i]); +} + +static int fec_enet_alloc_buffers(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + unsigned int i; + struct sk_buff *skb; + struct bufdesc *bdp; + + bdp = fep->rx_bd_base; + for (i = 0; i < fep->rx_ring_size; i++) { + skb = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE); + if (!skb) { + fec_enet_free_buffers(ndev); + return -ENOMEM; + } + fep->rx_skbuff[i] = skb; + + bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, skb->data, + FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE); + if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) { + fec_enet_free_buffers(ndev); + if (net_ratelimit()) + netdev_err(ndev, "Rx DMA memory map failed\n"); + return -ENOMEM; + } + bdp->cbd_sc = BD_ENET_RX_EMPTY; + + if (fep->bufdesc_ex) { + struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; + ebdp->cbd_esc = BD_ENET_RX_INT; + } + + bdp = fec_enet_get_nextdesc(bdp, fep); + } + + /* Set the last buffer to wrap. */ + bdp = fec_enet_get_prevdesc(bdp, fep); + bdp->cbd_sc |= BD_SC_WRAP; + + bdp = fep->tx_bd_base; + for (i = 0; i < fep->tx_ring_size; i++) { + fep->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL); + + bdp->cbd_sc = 0; + bdp->cbd_bufaddr = 0; + + if (fep->bufdesc_ex) { + struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; + ebdp->cbd_esc = BD_ENET_TX_INT; + } + + bdp = fec_enet_get_nextdesc(bdp, fep); + } + + /* Set the last buffer to wrap. 
*/ + bdp = fec_enet_get_prevdesc(bdp, fep); + bdp->cbd_sc |= BD_SC_WRAP; + + return 0; +} + +static int +fec_enet_open(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + int ret; + + pinctrl_pm_select_default_state(&fep->pdev->dev); + ret = fec_enet_clk_enable(ndev, true); + if (ret) + return ret; + + /* I should reset the ring buffers here, but I don't yet know + * a simple way to do that. + */ + + ret = fec_enet_alloc_buffers(ndev); + if (ret) + return ret; + + /* Probe and connect to PHY when opening the interface */ + ret = fec_enet_mii_probe(ndev); + if (ret) { + fec_enet_free_buffers(ndev); + return ret; + } + + napi_enable(&fep->napi); + phy_start(fep->phy_dev); + netif_start_queue(ndev); + fep->opened = 1; + return 0; +} + +static int +fec_enet_close(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + + /* Don't know what to do yet. */ + napi_disable(&fep->napi); + fep->opened = 0; + netif_stop_queue(ndev); + fec_stop(ndev); + + if (fep->phy_dev) { + phy_stop(fep->phy_dev); + phy_disconnect(fep->phy_dev); + } + + fec_enet_clk_enable(ndev, false); + pinctrl_pm_select_sleep_state(&fep->pdev->dev); + fec_enet_free_buffers(ndev); + + return 0; +} + +/* Set or clear the multicast filter for this adaptor. + * Skeleton taken from sunlance driver. + * The CPM Ethernet implementation allows Multicast as well as individual + * MAC address filtering. Some of the drivers check to make sure it is + * a group multicast address, and discard those that are not. I guess I + * will do the same for now, but just remove the test if you want + * individual filtering as well (do the upper net layers want or support + * this kind of feature?). + */ + +#define HASH_BITS 6 /* #bits in hash */ +#define CRC32_POLY 0xEDB88320 + +static void set_multicast_list(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + struct netdev_hw_addr *ha; + unsigned int i, bit, data, crc, tmp; + unsigned char hash; + + if (ndev->flags & IFF_PROMISC) { + tmp = readl(fep->hwp + FEC_R_CNTRL); + tmp |= 0x8; + writel(tmp, fep->hwp + FEC_R_CNTRL); + return; + } + + tmp = readl(fep->hwp + FEC_R_CNTRL); + tmp &= ~0x8; + writel(tmp, fep->hwp + FEC_R_CNTRL); + + if (ndev->flags & IFF_ALLMULTI) { + /* Catch all multicast addresses, so set the + * filter to all 1's + */ + writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_HIGH); + writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_LOW); + + return; + } + + /* Clear filter and add the addresses in the hash register + */ + writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH); + writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW); + + netdev_for_each_mc_addr(ha, ndev) { + /* calculate crc32 value of mac address */ + crc = 0xffffffff; + + for (i = 0; i < ndev->addr_len; i++) { + data = ha->addr[i]; + for (bit = 0; bit < 8; bit++, data >>= 1) { + crc = (crc >> 1) ^ + (((crc ^ data) & 1) ? CRC32_POLY : 0); + } + } + + /* only upper 6 bits (HASH_BITS) are used + * which point to a specific bit in the hash registers + */ + hash = (crc >> (32 - HASH_BITS)) & 0x3f; + + if (hash > 31) { + tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_HIGH); + tmp |= 1 << (hash - 32); + writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_HIGH); + } else { + tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_LOW); + tmp |= 1 << hash; + writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_LOW); + } + } +} +
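The hash computed in set_multicast_list() above is the standard reflected CRC-32 of the station address, of which only the top HASH_BITS (6) bits are kept to select one of the 64 filter bits spread across the two group-hash registers. A minimal user-space sketch of the same arithmetic; fec_hash_addr() is a hypothetical helper name used here for illustration, not a driver symbol:

#include <stdint.h>
#include <stdio.h>

/* same reflected CRC-32 polynomial and hash width as the driver */
#define CRC32_POLY 0xEDB88320
#define HASH_BITS 6

static unsigned int fec_hash_addr(const uint8_t *addr, int len)
{
	uint32_t crc = 0xffffffff;
	int i, bit;

	for (i = 0; i < len; i++) {
		uint32_t data = addr[i];

		for (bit = 0; bit < 8; bit++, data >>= 1)
			crc = (crc >> 1) ^ (((crc ^ data) & 1) ? CRC32_POLY : 0);
	}
	/* keep only the upper HASH_BITS bits, exactly as the driver does */
	return (crc >> (32 - HASH_BITS)) & 0x3f;
}

int main(void)
{
	const uint8_t mcast[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	unsigned int hash = fec_hash_addr(mcast, 6);

	/* hash values above 31 land in GRP_HASH_TABLE_HIGH, the rest in _LOW */
	printf("hash %u -> %s bit %u\n", hash,
	       hash > 31 ? "HIGH" : "LOW", hash > 31 ? hash - 32 : hash);
	return 0;
}

+/* Set a MAC change in hardware.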
*/ +static int +fec_set_mac_address(struct net_device *ndev, void *p) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + struct sockaddr *addr = p; + + if (addr) { + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len); + } + + writel(ndev->dev_addr[3] | (ndev->dev_addr[2] << 8) | + (ndev->dev_addr[1] << 16) | (ndev->dev_addr[0] << 24), + fep->hwp + FEC_ADDR_LOW); + writel((ndev->dev_addr[5] << 16) | (ndev->dev_addr[4] << 24), + fep->hwp + FEC_ADDR_HIGH); + return 0; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +/** + * fec_poll_controller - FEC Poll controller function + * @dev: The FEC network adapter + * + * Polled functionality used by netconsole and others in non interrupt mode + * + */ +static void fec_poll_controller(struct net_device *dev) +{ + int i; + struct fec_enet_private *fep = netdev_priv(dev); + + for (i = 0; i < FEC_IRQ_NUM; i++) { + if (fep->irq[i] > 0) { + disable_irq(fep->irq[i]); + fec_enet_interrupt(fep->irq[i], dev); + enable_irq(fep->irq[i]); + } + } +} +#endif + +static int fec_set_features(struct net_device *netdev, + netdev_features_t features) +{ + struct fec_enet_private *fep = netdev_priv(netdev); + netdev_features_t changed = features ^ netdev->features; + + netdev->features = features; + + /* Receive checksum has been changed */ + if (changed & NETIF_F_RXCSUM) { + if (features & NETIF_F_RXCSUM) + fep->csum_flags |= FLAG_RX_CSUM_ENABLED; + else + fep->csum_flags &= ~FLAG_RX_CSUM_ENABLED; + + if (netif_running(netdev)) { + fec_stop(netdev); + fec_restart(netdev, fep->phy_dev->duplex); + netif_wake_queue(netdev); + } else { + fec_restart(netdev, fep->phy_dev->duplex); + } + } + + return 0; +} + +static const struct net_device_ops fec_netdev_ops = { + .ndo_open = fec_enet_open, + .ndo_stop = fec_enet_close, + .ndo_start_xmit = fec_enet_start_xmit, + .ndo_set_rx_mode = set_multicast_list, + .ndo_change_mtu = eth_change_mtu, + .ndo_validate_addr = eth_validate_addr, + .ndo_tx_timeout = fec_timeout, + .ndo_set_mac_address = fec_set_mac_address, + .ndo_do_ioctl = fec_enet_ioctl, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = fec_poll_controller, +#endif + .ndo_set_features = fec_set_features, +}; + + /* + * XXX: We need to clean up on failure exits here. + * + */ +static int fec_enet_init(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + const struct platform_device_id *id_entry = + platform_get_device_id(fep->pdev); + struct bufdesc *cbd_base; + int bd_size; + + /* init the tx & rx ring size */ + fep->tx_ring_size = TX_RING_SIZE; + fep->rx_ring_size = RX_RING_SIZE; + + fep->tx_stop_threshold = FEC_MAX_SKB_DESCS; + fep->tx_wake_threshold = (fep->tx_ring_size - fep->tx_stop_threshold) / 2; + + if (fep->bufdesc_ex) + fep->bufdesc_size = sizeof(struct bufdesc_ex); + else + fep->bufdesc_size = sizeof(struct bufdesc); + bd_size = (fep->tx_ring_size + fep->rx_ring_size) * + fep->bufdesc_size; + + /* Allocate memory for buffer descriptors. 
*/ + cbd_base = dma_alloc_coherent(NULL, bd_size, &fep->bd_dma, + GFP_KERNEL); + if (!cbd_base) + return -ENOMEM; + + fep->tso_hdrs = dma_alloc_coherent(NULL, fep->tx_ring_size * TSO_HEADER_SIZE, + &fep->tso_hdrs_dma, GFP_KERNEL); + if (!fep->tso_hdrs) { + dma_free_coherent(NULL, bd_size, cbd_base, fep->bd_dma); + return -ENOMEM; + } + + memset(cbd_base, 0, PAGE_SIZE); + + fep->netdev = ndev; + + /* Get the Ethernet address */ + fec_get_mac(ndev); + /* make sure MAC we just acquired is programmed into the hw */ + fec_set_mac_address(ndev, NULL); + + /* Set receive and transmit descriptor base. */ + fep->rx_bd_base = cbd_base; + if (fep->bufdesc_ex) + fep->tx_bd_base = (struct bufdesc *) + (((struct bufdesc_ex *)cbd_base) + fep->rx_ring_size); + else + fep->tx_bd_base = cbd_base + fep->rx_ring_size; + + /* The FEC Ethernet specific entries in the device structure */ + ndev->watchdog_timeo = TX_TIMEOUT; + ndev->netdev_ops = &fec_netdev_ops; + ndev->ethtool_ops = &fec_enet_ethtool_ops; + + writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK); + netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, NAPI_POLL_WEIGHT); + + if (id_entry->driver_data & FEC_QUIRK_HAS_VLAN) + /* enable hw VLAN support */ + ndev->features |= NETIF_F_HW_VLAN_CTAG_RX; + + if (id_entry->driver_data & FEC_QUIRK_HAS_CSUM) { + ndev->gso_max_segs = FEC_MAX_TSO_SEGS; + + /* enable hw accelerator */ + ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM + | NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_TSO); + fep->csum_flags |= FLAG_RX_CSUM_ENABLED; + } + + ndev->hw_features = ndev->features; + + fec_restart(ndev, 0); + + return 0; +} + +#ifdef CONFIG_OF +static void fec_reset_phy(struct platform_device *pdev) +{ + int err, phy_reset; + int msec = 1; + struct device_node *np = pdev->dev.of_node; + + if (!np) + return; + + of_property_read_u32(np, "phy-reset-duration", &msec); + /* A sane reset duration should not be longer than 1s */ + if (msec > 1000) + msec = 1; + + phy_reset = of_get_named_gpio(np, "phy-reset-gpios", 0); + if (!gpio_is_valid(phy_reset)) + return; + + err = devm_gpio_request_one(&pdev->dev, phy_reset, + GPIOF_OUT_INIT_LOW, "phy-reset"); + if (err) { + dev_err(&pdev->dev, "failed to get phy-reset-gpios: %d\n", err); + return; + } + msleep(msec); + gpio_set_value(phy_reset, 1); +} +#else /* CONFIG_OF */ +static void fec_reset_phy(struct platform_device *pdev) +{ + /* + * In case of platform probe, the reset has been done + * by machine code. 
+ */ +} +#endif /* CONFIG_OF */ + +static int +fec_probe(struct platform_device *pdev) +{ + struct fec_enet_private *fep; + struct fec_platform_data *pdata; + struct net_device *ndev; + int i, irq, ret = 0; + struct resource *r; + const struct of_device_id *of_id; + static int dev_id; + + of_id = of_match_device(fec_dt_ids, &pdev->dev); + if (of_id) + pdev->id_entry = of_id->data; + + /* Init network device */ + ndev = alloc_etherdev(sizeof(struct fec_enet_private)); + if (!ndev) + return -ENOMEM; + + SET_NETDEV_DEV(ndev, &pdev->dev); + + /* setup board info structure */ + fep = netdev_priv(ndev); + +#if !defined(CONFIG_M5272) + /* default enable pause frame auto negotiation */ + if (pdev->id_entry && + (pdev->id_entry->driver_data & FEC_QUIRK_HAS_GBIT)) + fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG; +#endif + + /* Select default pin state */ + pinctrl_pm_select_default_state(&pdev->dev); + + r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + fep->hwp = devm_ioremap_resource(&pdev->dev, r); + if (IS_ERR(fep->hwp)) { + ret = PTR_ERR(fep->hwp); + goto failed_ioremap; + } + + fep->pdev = pdev; + fep->dev_id = dev_id++; + + fep->bufdesc_ex = 0; + + platform_set_drvdata(pdev, ndev); + + ret = of_get_phy_mode(pdev->dev.of_node); + if (ret < 0) { + pdata = dev_get_platdata(&pdev->dev); + if (pdata) + fep->phy_interface = pdata->phy; + else + fep->phy_interface = PHY_INTERFACE_MODE_MII; + } else { + fep->phy_interface = ret; + } + + fep->clk_ipg = devm_clk_get(&pdev->dev, "ipg"); + if (IS_ERR(fep->clk_ipg)) { + ret = PTR_ERR(fep->clk_ipg); + goto failed_clk; + } + + fep->clk_ahb = devm_clk_get(&pdev->dev, "ahb"); + if (IS_ERR(fep->clk_ahb)) { + ret = PTR_ERR(fep->clk_ahb); + goto failed_clk; + } + + /* enet_out is optional, depends on board */ + fep->clk_enet_out = devm_clk_get(&pdev->dev, "enet_out"); + if (IS_ERR(fep->clk_enet_out)) + fep->clk_enet_out = NULL; + + fep->clk_ptp = devm_clk_get(&pdev->dev, "ptp"); + fep->bufdesc_ex = + pdev->id_entry->driver_data & FEC_QUIRK_HAS_BUFDESC_EX; + if (IS_ERR(fep->clk_ptp)) { + fep->clk_ptp = NULL; + fep->bufdesc_ex = 0; + } + + ret = fec_enet_clk_enable(ndev, true); + if (ret) + goto failed_clk; + + fep->reg_phy = devm_regulator_get(&pdev->dev, "phy"); + if (!IS_ERR(fep->reg_phy)) { + ret = regulator_enable(fep->reg_phy); + if (ret) { + dev_err(&pdev->dev, + "Failed to enable phy regulator: %d\n", ret); + goto failed_regulator; + } + } else { + fep->reg_phy = NULL; + } + + fec_reset_phy(pdev); + + if (fep->bufdesc_ex) + fec_ptp_init(pdev); + + ret = fec_enet_init(ndev); + if (ret) + goto failed_init; + + for (i = 0; i < FEC_IRQ_NUM; i++) { + irq = platform_get_irq(pdev, i); + if (irq < 0) { + if (i) + break; + ret = irq; + goto failed_irq; + } + ret = devm_request_irq(&pdev->dev, irq, fec_enet_interrupt, + 0, pdev->name, ndev); + if (ret) + goto failed_irq; + } + + ret = fec_enet_mii_init(pdev); + if (ret) + goto failed_mii_init; + + /* Carrier starts down, phylib will bring it up */ + netif_carrier_off(ndev); + fec_enet_clk_enable(ndev, false); + pinctrl_pm_select_sleep_state(&pdev->dev); + + ret = register_netdev(ndev); + if (ret) + goto failed_register; + + if (fep->bufdesc_ex && fep->ptp_clock) + netdev_info(ndev, "registered PHC device %d\n", fep->dev_id); + + INIT_DELAYED_WORK(&(fep->delay_work.delay_work), fec_enet_work); + return 0; + +failed_register: + fec_enet_mii_remove(fep); +failed_mii_init: +failed_irq: +failed_init: + if (fep->reg_phy) + regulator_disable(fep->reg_phy); +failed_regulator: + fec_enet_clk_enable(ndev, false); +failed_clk: 
+failed_ioremap: + free_netdev(ndev); + + return ret; +} + +static int +fec_drv_remove(struct platform_device *pdev) +{ + struct net_device *ndev = platform_get_drvdata(pdev); + struct fec_enet_private *fep = netdev_priv(ndev); + + cancel_delayed_work_sync(&(fep->delay_work.delay_work)); + unregister_netdev(ndev); + fec_enet_mii_remove(fep); + del_timer_sync(&fep->time_keep); + if (fep->reg_phy) + regulator_disable(fep->reg_phy); + if (fep->ptp_clock) + ptp_clock_unregister(fep->ptp_clock); + fec_enet_clk_enable(ndev, false); + free_netdev(ndev); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int +fec_suspend(struct device *dev) +{ + struct net_device *ndev = dev_get_drvdata(dev); + struct fec_enet_private *fep = netdev_priv(ndev); + + if (netif_running(ndev)) { + fec_stop(ndev); + netif_device_detach(ndev); + } + fec_enet_clk_enable(ndev, false); + pinctrl_pm_select_sleep_state(&fep->pdev->dev); + + if (fep->reg_phy) + regulator_disable(fep->reg_phy); + + return 0; +} + +static int +fec_resume(struct device *dev) +{ + struct net_device *ndev = dev_get_drvdata(dev); + struct fec_enet_private *fep = netdev_priv(ndev); + int ret; + + if (fep->reg_phy) { + ret = regulator_enable(fep->reg_phy); + if (ret) + return ret; + } + + pinctrl_pm_select_default_state(&fep->pdev->dev); + ret = fec_enet_clk_enable(ndev, true); + if (ret) + goto failed_clk; + + if (netif_running(ndev)) { + fec_restart(ndev, fep->full_duplex); + netif_device_attach(ndev); + } + + return 0; + +failed_clk: + if (fep->reg_phy) + regulator_disable(fep->reg_phy); + return ret; +} +#endif /* CONFIG_PM_SLEEP */ + +static SIMPLE_DEV_PM_OPS(fec_pm_ops, fec_suspend, fec_resume); + +static struct platform_driver fec_driver = { + .driver = { + .name = DRIVER_NAME, + .owner = THIS_MODULE, + .pm = &fec_pm_ops, + .of_match_table = fec_dt_ids, + }, + .id_table = fec_devtype, + .probe = fec_probe, + .remove = fec_drv_remove, +}; + +module_platform_driver(fec_driver); + +MODULE_ALIAS("platform:"DRIVER_NAME); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c index 30745b56fe5..9947765e90c 100644 --- a/drivers/net/ethernet/freescale/fec_mpc52xx.c +++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c @@ -14,6 +14,8 @@ * */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/dma-mapping.h> #include <linux/module.h> @@ -29,6 +31,7 @@ #include <linux/delay.h> #include <linux/of_device.h> #include <linux/of_mdio.h> +#include <linux/of_net.h> #include <linux/of_platform.h> #include <linux/netdevice.h> @@ -40,8 +43,8 @@ #include <asm/delay.h> #include <asm/mpc52xx.h> -#include <sysdev/bestcomm/bestcomm.h> -#include <sysdev/bestcomm/fec.h> +#include <linux/fsl/bestcomm/bestcomm.h> +#include <linux/fsl/bestcomm/fec.h> #include "fec_mpc52xx.h" @@ -76,10 +79,6 @@ static void mpc52xx_fec_stop(struct net_device *dev); static void mpc52xx_fec_start(struct net_device *dev); static void mpc52xx_fec_reset(struct net_device *dev); -static u8 mpc52xx_fec_mac_addr[6]; -module_param_array_named(mac, mpc52xx_fec_mac_addr, byte, NULL, 0); -MODULE_PARM_DESC(mac, "six hex digits, ie. 
0x1,0x2,0xc0,0x01,0xba,0xbe"); - #define MPC52xx_MESSAGES_DEFAULT ( NETIF_MSG_DRV | NETIF_MSG_PROBE | \ NETIF_MSG_LINK | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP) static int debug = -1; /* the above default */ @@ -110,15 +109,6 @@ static void mpc52xx_fec_set_paddr(struct net_device *dev, u8 *mac) out_be32(&fec->paddr2, (*(u16 *)(&mac[4]) << 16) | FEC_PADDR2_TYPE); } -static void mpc52xx_fec_get_paddr(struct net_device *dev, u8 *mac) -{ - struct mpc52xx_fec_priv *priv = netdev_priv(dev); - struct mpc52xx_fec __iomem *fec = priv->fec; - - *(u32 *)(&mac[0]) = in_be32(&fec->paddr1); - *(u16 *)(&mac[4]) = in_be32(&fec->paddr2) >> 16; -} - static int mpc52xx_fec_set_mac_address(struct net_device *dev, void *addr) { struct sockaddr *sock = addr; @@ -160,7 +150,7 @@ static int mpc52xx_fec_alloc_rx_buffers(struct net_device *dev, struct bcom_task struct sk_buff *skb; while (!bcom_queue_full(rxtsk)) { - skb = dev_alloc_skb(FEC_RX_BUFFER_SIZE); + skb = netdev_alloc_skb(dev, FEC_RX_BUFFER_SIZE); if (!skb) return -EAGAIN; @@ -416,7 +406,7 @@ static irqreturn_t mpc52xx_fec_rx_interrupt(int irq, void *dev_id) /* skbs are allocated on open, so now we allocate a new one, * and remove the old (with the packet) */ - skb = dev_alloc_skb(FEC_RX_BUFFER_SIZE); + skb = netdev_alloc_skb(dev, FEC_RX_BUFFER_SIZE); if (!skb) { /* Can't get a new one : reuse the same & drop pkt */ dev_notice(&dev->dev, "Low memory - dropped packet.\n"); @@ -437,7 +427,7 @@ static irqreturn_t mpc52xx_fec_rx_interrupt(int irq, void *dev_id) length = status & BCOM_FEC_RX_BD_LEN_MASK; skb_put(rskb, length - 4); /* length without CRC32 */ rskb->protocol = eth_type_trans(rskb, dev); - if (!skb_defer_rx_timestamp(skb)) + if (!skb_defer_rx_timestamp(rskb)) netif_rx(rskb); spin_lock(&priv->lock); @@ -811,6 +801,7 @@ static const struct ethtool_ops mpc52xx_fec_ethtool_ops = { .get_link = ethtool_op_get_link, .get_msglevel = mpc52xx_fec_get_msglevel, .set_msglevel = mpc52xx_fec_set_msglevel, + .get_ts_info = ethtool_op_get_ts_info, }; @@ -844,7 +835,7 @@ static const struct net_device_ops mpc52xx_fec_netdev_ops = { /* OF Driver */ /* ======================================================================== */ -static int __devinit mpc52xx_fec_probe(struct platform_device *op) +static int mpc52xx_fec_probe(struct platform_device *op) { int rv; struct net_device *ndev; @@ -852,6 +843,8 @@ static int __devinit mpc52xx_fec_probe(struct platform_device *op) struct resource mem; const u32 *prop; int prop_size; + struct device_node *np = op->dev.of_node; + const char *mac_addr; phys_addr_t rx_fifo; phys_addr_t tx_fifo; @@ -865,15 +858,13 @@ static int __devinit mpc52xx_fec_probe(struct platform_device *op) priv->ndev = ndev; /* Reserve FEC control zone */ - rv = of_address_to_resource(op->dev.of_node, 0, &mem); + rv = of_address_to_resource(np, 0, &mem); if (rv) { - printk(KERN_ERR DRIVER_NAME ": " - "Error while parsing device node resource\n" ); + pr_err("Error while parsing device node resource\n"); goto err_netdev; } if (resource_size(&mem) < sizeof(struct mpc52xx_fec)) { - printk(KERN_ERR DRIVER_NAME - " - invalid resource size (%lx < %x), check mpc52xx_devices.c\n", + pr_err("invalid resource size (%lx < %x), check mpc52xx_devices.c\n", (unsigned long)resource_size(&mem), sizeof(struct mpc52xx_fec)); rv = -EINVAL; @@ -911,14 +902,14 @@ static int __devinit mpc52xx_fec_probe(struct platform_device *op) priv->tx_dmatsk = bcom_fec_tx_init(FEC_TX_NUM_BD, tx_fifo); if (!priv->rx_dmatsk || !priv->tx_dmatsk) { - printk(KERN_ERR DRIVER_NAME ": Can not init SDMA 
tasks\n" ); + pr_err("Can not init SDMA tasks\n"); rv = -ENOMEM; goto err_rx_tx_dmatsk; } /* Get the IRQ we need one by one */ /* Control */ - ndev->irq = irq_of_parse_and_map(op->dev.of_node, 0); + ndev->irq = irq_of_parse_and_map(np, 0); /* RX */ priv->r_irq = bcom_get_task_irq(priv->rx_dmatsk); @@ -926,11 +917,33 @@ static int __devinit mpc52xx_fec_probe(struct platform_device *op) /* TX */ priv->t_irq = bcom_get_task_irq(priv->tx_dmatsk); - /* MAC address init */ - if (!is_zero_ether_addr(mpc52xx_fec_mac_addr)) - memcpy(ndev->dev_addr, mpc52xx_fec_mac_addr, 6); - else - mpc52xx_fec_get_paddr(ndev, ndev->dev_addr); + /* + * MAC address init: + * + * First try to read MAC address from DT + */ + mac_addr = of_get_mac_address(np); + if (mac_addr) { + memcpy(ndev->dev_addr, mac_addr, ETH_ALEN); + } else { + struct mpc52xx_fec __iomem *fec = priv->fec; + + /* + * If the MAC addresse is not provided via DT then read + * it back from the controller regs + */ + *(u32 *)(&ndev->dev_addr[0]) = in_be32(&fec->paddr1); + *(u16 *)(&ndev->dev_addr[4]) = in_be32(&fec->paddr2) >> 16; + } + + /* + * Check if the MAC address is valid, if not get a random one + */ + if (!is_valid_ether_addr(ndev->dev_addr)) { + eth_hw_addr_random(ndev); + dev_warn(&ndev->dev, "using random MAC address %pM\n", + ndev->dev_addr); + } priv->msg_enable = netif_msg_init(debug, MPC52xx_MESSAGES_DEFAULT); @@ -941,20 +954,20 @@ static int __devinit mpc52xx_fec_probe(struct platform_device *op) /* Start with safe defaults for link connection */ priv->speed = 100; priv->duplex = DUPLEX_HALF; - priv->mdio_speed = ((mpc5xxx_get_bus_frequency(op->dev.of_node) >> 20) / 5) << 1; + priv->mdio_speed = ((mpc5xxx_get_bus_frequency(np) >> 20) / 5) << 1; /* The current speed preconfigures the speed of the MII link */ - prop = of_get_property(op->dev.of_node, "current-speed", &prop_size); + prop = of_get_property(np, "current-speed", &prop_size); if (prop && (prop_size >= sizeof(u32) * 2)) { priv->speed = prop[0]; priv->duplex = prop[1] ? DUPLEX_FULL : DUPLEX_HALF; } /* If there is a phy handle, then get the PHY node */ - priv->phy_node = of_parse_phandle(op->dev.of_node, "phy-handle", 0); + priv->phy_node = of_parse_phandle(np, "phy-handle", 0); /* the 7-wire property means don't use MII mode */ - if (of_find_property(op->dev.of_node, "fsl,7-wire-mode", NULL)) { + if (of_find_property(np, "fsl,7-wire-mode", NULL)) { priv->seven_wire_mode = 1; dev_info(&ndev->dev, "using 7-wire PHY mode\n"); } @@ -968,7 +981,9 @@ static int __devinit mpc52xx_fec_probe(struct platform_device *op) goto err_node; /* We're done ! 
*/ - dev_set_drvdata(&op->dev, ndev); + platform_set_drvdata(op, ndev); + netdev_info(ndev, "%s MAC %pM\n", + op->dev.of_node->full_name, ndev->dev_addr); return 0; @@ -995,7 +1010,7 @@ mpc52xx_fec_remove(struct platform_device *op) struct net_device *ndev; struct mpc52xx_fec_priv *priv; - ndev = dev_get_drvdata(&op->dev); + ndev = platform_get_drvdata(op); priv = netdev_priv(ndev); unregister_netdev(ndev); @@ -1015,14 +1030,13 @@ mpc52xx_fec_remove(struct platform_device *op) free_netdev(ndev); - dev_set_drvdata(&op->dev, NULL); return 0; } #ifdef CONFIG_PM static int mpc52xx_fec_of_suspend(struct platform_device *op, pm_message_t state) { - struct net_device *dev = dev_get_drvdata(&op->dev); + struct net_device *dev = platform_get_drvdata(op); if (netif_running(dev)) mpc52xx_fec_close(dev); @@ -1032,7 +1046,7 @@ static int mpc52xx_fec_of_suspend(struct platform_device *op, pm_message_t state static int mpc52xx_fec_of_resume(struct platform_device *op) { - struct net_device *dev = dev_get_drvdata(&op->dev); + struct net_device *dev = platform_get_drvdata(op); mpc52xx_fec_hw_init(dev); mpc52xx_fec_reset_stats(dev); @@ -1079,7 +1093,7 @@ mpc52xx_fec_init(void) int ret; ret = platform_driver_register(&mpc52xx_fec_mdio_driver); if (ret) { - printk(KERN_ERR DRIVER_NAME ": failed to register mdio driver\n"); + pr_err("failed to register mdio driver\n"); return ret; } #endif diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.h b/drivers/net/ethernet/freescale/fec_mpc52xx.h index 41d2dffde55..10afa54dd06 100644 --- a/drivers/net/ethernet/freescale/fec_mpc52xx.h +++ b/drivers/net/ethernet/freescale/fec_mpc52xx.h @@ -1,5 +1,5 @@ /* - * drivers/drivers/net/fec_mpc52xx/fec.h + * drivers/net/ethernet/freescale/fec_mpc52xx.h * * Driver for the MPC5200 Fast Ethernet Controller * diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c b/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c index 360a578c2bb..e0528900db0 100644 --- a/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c +++ b/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c @@ -123,12 +123,10 @@ static int mpc52xx_fec_mdio_probe(struct platform_device *of) static int mpc52xx_fec_mdio_remove(struct platform_device *of) { - struct device *dev = &of->dev; - struct mii_bus *bus = dev_get_drvdata(dev); + struct mii_bus *bus = platform_get_drvdata(of); struct mpc52xx_fec_mdio_priv *priv = bus->priv; mdiobus_unregister(bus); - dev_set_drvdata(dev, NULL); iounmap(priv->regs); kfree(priv); mdiobus_free(bus); diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c new file mode 100644 index 00000000000..82386b29914 --- /dev/null +++ b/drivers/net/ethernet/freescale/fec_ptp.c @@ -0,0 +1,400 @@ +/* + * Fast Ethernet Controller (ENET) PTP driver for MX6x. + * + * Copyright (C) 2012 Freescale Semiconductor, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 
+ */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/string.h> +#include <linux/ptrace.h> +#include <linux/errno.h> +#include <linux/ioport.h> +#include <linux/slab.h> +#include <linux/interrupt.h> +#include <linux/pci.h> +#include <linux/delay.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/skbuff.h> +#include <linux/spinlock.h> +#include <linux/workqueue.h> +#include <linux/bitops.h> +#include <linux/io.h> +#include <linux/irq.h> +#include <linux/clk.h> +#include <linux/platform_device.h> +#include <linux/phy.h> +#include <linux/fec.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/of_gpio.h> +#include <linux/of_net.h> + +#include "fec.h" + +/* FEC 1588 register bits */ +#define FEC_T_CTRL_SLAVE 0x00002000 +#define FEC_T_CTRL_CAPTURE 0x00000800 +#define FEC_T_CTRL_RESTART 0x00000200 +#define FEC_T_CTRL_PERIOD_RST 0x00000030 +#define FEC_T_CTRL_PERIOD_EN 0x00000010 +#define FEC_T_CTRL_ENABLE 0x00000001 + +#define FEC_T_INC_MASK 0x0000007f +#define FEC_T_INC_OFFSET 0 +#define FEC_T_INC_CORR_MASK 0x00007f00 +#define FEC_T_INC_CORR_OFFSET 8 + +#define FEC_ATIME_CTRL 0x400 +#define FEC_ATIME 0x404 +#define FEC_ATIME_EVT_OFFSET 0x408 +#define FEC_ATIME_EVT_PERIOD 0x40c +#define FEC_ATIME_CORR 0x410 +#define FEC_ATIME_INC 0x414 +#define FEC_TS_TIMESTAMP 0x418 + +#define FEC_CC_MULT (1 << 31) +/** + * fec_ptp_read - read raw cycle counter (to be used by time counter) + * @cc: the cyclecounter structure + * + * this function reads the cyclecounter registers and is called by the + * cyclecounter structure used to construct a ns counter from the + * arbitrary fixed point registers + */ +static cycle_t fec_ptp_read(const struct cyclecounter *cc) +{ + struct fec_enet_private *fep = + container_of(cc, struct fec_enet_private, cc); + u32 tempval; + + tempval = readl(fep->hwp + FEC_ATIME_CTRL); + tempval |= FEC_T_CTRL_CAPTURE; + writel(tempval, fep->hwp + FEC_ATIME_CTRL); + + return readl(fep->hwp + FEC_ATIME); +} + +/** + * fec_ptp_start_cyclecounter - create the cycle counter from hw + * @ndev: network device + * + * this function initializes the timecounter and cyclecounter + * structures for use in generating a ns counter from the arbitrary + * fixed point cycle registers in the hardware. + */ +void fec_ptp_start_cyclecounter(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + unsigned long flags; + int inc; + + inc = 1000000000 / fep->cycle_speed; + + /* grab the ptp lock */ + spin_lock_irqsave(&fep->tmreg_lock, flags); + + /* 1ns counter */ + writel(inc << FEC_T_INC_OFFSET, fep->hwp + FEC_ATIME_INC); + + /* use free running count */ + writel(0, fep->hwp + FEC_ATIME_EVT_PERIOD); + + writel(FEC_T_CTRL_ENABLE, fep->hwp + FEC_ATIME_CTRL); + + memset(&fep->cc, 0, sizeof(fep->cc)); + fep->cc.read = fec_ptp_read; + fep->cc.mask = CLOCKSOURCE_MASK(32); + fep->cc.shift = 31; + fep->cc.mult = FEC_CC_MULT; + + /* reset the ns time counter */ + timecounter_init(&fep->tc, &fep->cc, ktime_to_ns(ktime_get_real())); + + spin_unlock_irqrestore(&fep->tmreg_lock, flags); +} + +/** + * fec_ptp_adjfreq - adjust ptp cycle frequency + * @ptp: the ptp clock structure + * @ppb: parts per billion adjustment from base + * + * Adjust the frequency of the ptp cycle counter by the + * indicated ppb from the base frequency. + * + * Because the ENET hardware frequency adjustment is complex, + * a software method is used instead. + */
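The adjfreq callback below leaves the hardware 1 ns increment alone and instead rescales the software cyclecounter multiplier by the requested parts-per-billion offset. A self-contained sketch of that scaling, assuming the same FEC_CC_MULT base as above; adjusted_mult() is an illustrative helper, not part of the driver:

#include <stdint.h>
#include <stdio.h>

#define FEC_CC_MULT (1u << 31)

static uint32_t adjusted_mult(int32_t ppb)
{
	int neg = ppb < 0;
	uint64_t diff;

	if (neg)
		ppb = -ppb;
	/* diff = mult * ppb / 1e9, the same scaling div_u64() performs below */
	diff = (uint64_t)FEC_CC_MULT * (uint32_t)ppb;
	diff /= 1000000000ULL;

	return neg ? FEC_CC_MULT - (uint32_t)diff : FEC_CC_MULT + (uint32_t)diff;
}

int main(void)
{
	/* +100 ppb speeds the reconstructed clock up by 1 part in 10^7 */
	printf("mult(+100 ppb) = %u\n", adjusted_mult(100));
	printf("mult(-100 ppb) = %u\n", adjusted_mult(-100));
	return 0;
}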
+static int fec_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) +{ + u64 diff; + unsigned long flags; + int neg_adj = 0; + u32 mult = FEC_CC_MULT; + + struct fec_enet_private *fep = + container_of(ptp, struct fec_enet_private, ptp_caps); + + if (ppb < 0) { + ppb = -ppb; + neg_adj = 1; + } + + diff = mult; + diff *= ppb; + diff = div_u64(diff, 1000000000ULL); + + spin_lock_irqsave(&fep->tmreg_lock, flags); + /* + * dummy read to set cycle_last in tc to now. + * The adjusted mult will then be used from the next + * call to timecounter_read. + */ + timecounter_read(&fep->tc); + + fep->cc.mult = neg_adj ? mult - diff : mult + diff; + + spin_unlock_irqrestore(&fep->tmreg_lock, flags); + + return 0; +} + +/** + * fec_ptp_adjtime + * @ptp: the ptp clock structure + * @delta: offset to adjust the cycle counter by + * + * adjust the timer by resetting the timecounter structure. + */ +static int fec_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) +{ + struct fec_enet_private *fep = + container_of(ptp, struct fec_enet_private, ptp_caps); + unsigned long flags; + u64 now; + + spin_lock_irqsave(&fep->tmreg_lock, flags); + + now = timecounter_read(&fep->tc); + now += delta; + + /* reset the timecounter */ + timecounter_init(&fep->tc, &fep->cc, now); + + spin_unlock_irqrestore(&fep->tmreg_lock, flags); + + return 0; +} + +/** + * fec_ptp_gettime + * @ptp: the ptp clock structure + * @ts: timespec structure to hold the current time value + * + * read the timecounter and return the correct value in ns, + * after converting it into a struct timespec. + */ +static int fec_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts) +{ + struct fec_enet_private *adapter = + container_of(ptp, struct fec_enet_private, ptp_caps); + u64 ns; + u32 remainder; + unsigned long flags; + + spin_lock_irqsave(&adapter->tmreg_lock, flags); + ns = timecounter_read(&adapter->tc); + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + + ts->tv_sec = div_u64_rem(ns, 1000000000ULL, &remainder); + ts->tv_nsec = remainder; + + return 0; +} + +/** + * fec_ptp_settime + * @ptp: the ptp clock structure + * @ts: the timespec containing the new time for the cycle counter + * + * reset the timecounter to use a new base value instead of the kernel + * wall timer value. + */
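The adjtime/gettime callbacks above and the settime callback below all reduce to the timecounter's fixed-point conversion, ns = (delta_cycles * mult) >> shift, seeded with mult = 1 << 31 and shift = 31 in fec_ptp_start_cyclecounter(), so one tick of the 1 ns hardware counter maps to exactly one nanosecond until adjfreq nudges mult. A standalone illustration; cyc2ns() is a simplified stand-in for the kernel's timecounter math, not the actual implementation:

#include <stdint.h>
#include <stdio.h>

static uint64_t cyc2ns(uint32_t delta_cycles, uint32_t mult, uint32_t shift)
{
	return ((uint64_t)delta_cycles * mult) >> shift;
}

int main(void)
{
	uint32_t mult = 1u << 31, shift = 31;

	/* nominal rate: 1000 cycles -> 1000 ns */
	printf("%llu ns\n", (unsigned long long)cyc2ns(1000, mult, shift));
	/* with mult bumped as adjfreq(+100 ppb) would: one second of cycles
	 * reads back roughly 100 ns fast, i.e. about 100 ppb */
	printf("%llu ns\n",
	       (unsigned long long)cyc2ns(1000000000u, mult + 214, shift));
	return 0;
}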
+static int fec_ptp_settime(struct ptp_clock_info *ptp, + const struct timespec *ts) +{ + struct fec_enet_private *fep = + container_of(ptp, struct fec_enet_private, ptp_caps); + + u64 ns; + unsigned long flags; + + ns = ts->tv_sec * 1000000000ULL; + ns += ts->tv_nsec; + + spin_lock_irqsave(&fep->tmreg_lock, flags); + timecounter_init(&fep->tc, &fep->cc, ns); + spin_unlock_irqrestore(&fep->tmreg_lock, flags); + return 0; +} + +/** + * fec_ptp_enable + * @ptp: the ptp clock structure + * @rq: the requested feature to change + * @on: whether to enable or disable the feature + * + */ +static int fec_ptp_enable(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, int on) +{ + return -EOPNOTSUPP; +} + +/** + * fec_ptp_set - control hardware time stamping + * @ndev: pointer to net_device + * @ifr: ioctl data + */ +int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + + struct hwtstamp_config config; + + if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) + return -EFAULT; + + /* reserved for future extensions */ + if (config.flags) + return -EINVAL; + + switch (config.tx_type) { + case HWTSTAMP_TX_OFF: + fep->hwts_tx_en = 0; + break; + case HWTSTAMP_TX_ON: + fep->hwts_tx_en = 1; + break; + default: + return -ERANGE; + } + + switch (config.rx_filter) { + case HWTSTAMP_FILTER_NONE: + if (fep->hwts_rx_en) + fep->hwts_rx_en = 0; + config.rx_filter = HWTSTAMP_FILTER_NONE; + break; + + default: + /* + * The hardware cannot filter by PTP message type, so + * time stamp every received packet and report the + * filter back as HWTSTAMP_FILTER_ALL. + */ + fep->hwts_rx_en = 1; + config.rx_filter = HWTSTAMP_FILTER_ALL; + break; + } + + return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? + -EFAULT : 0; +} + +int fec_ptp_get(struct net_device *ndev, struct ifreq *ifr) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + struct hwtstamp_config config; + + config.flags = 0; + config.tx_type = fep->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF; + config.rx_filter = (fep->hwts_rx_en ? + HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE); + + return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? + -EFAULT : 0; +} + +/** + * fec_time_keep - call timecounter_read every second to avoid timer overrun, + * because the ENET hardware has only a 32-bit counter that wraps in about 4s + */ +static void fec_time_keep(unsigned long _data) +{ + struct fec_enet_private *fep = (struct fec_enet_private *)_data; + u64 ns; + unsigned long flags; + + spin_lock_irqsave(&fep->tmreg_lock, flags); + ns = timecounter_read(&fep->tc); + spin_unlock_irqrestore(&fep->tmreg_lock, flags); + + mod_timer(&fep->time_keep, jiffies + HZ); +} + +/** + * fec_ptp_init + * @pdev: the platform device of the FEC network adapter + * + * This function performs the required steps for enabling ptp + * support. If ptp support has already been loaded it simply calls the + * cyclecounter init routine and exits.
+ */ + +void fec_ptp_init(struct platform_device *pdev) +{ + struct net_device *ndev = platform_get_drvdata(pdev); + struct fec_enet_private *fep = netdev_priv(ndev); + + fep->ptp_caps.owner = THIS_MODULE; + snprintf(fep->ptp_caps.name, 16, "fec ptp"); + + fep->ptp_caps.max_adj = 250000000; + fep->ptp_caps.n_alarm = 0; + fep->ptp_caps.n_ext_ts = 0; + fep->ptp_caps.n_per_out = 0; + fep->ptp_caps.n_pins = 0; + fep->ptp_caps.pps = 0; + fep->ptp_caps.adjfreq = fec_ptp_adjfreq; + fep->ptp_caps.adjtime = fec_ptp_adjtime; + fep->ptp_caps.gettime = fec_ptp_gettime; + fep->ptp_caps.settime = fec_ptp_settime; + fep->ptp_caps.enable = fec_ptp_enable; + + fep->cycle_speed = clk_get_rate(fep->clk_ptp); + + spin_lock_init(&fep->tmreg_lock); + + fec_ptp_start_cyclecounter(ndev); + + init_timer(&fep->time_keep); + fep->time_keep.data = (unsigned long)fep; + fep->time_keep.function = fec_time_keep; + fep->time_keep.expires = jiffies + HZ; + add_timer(&fep->time_keep); + + fep->ptp_clock = ptp_clock_register(&fep->ptp_caps, &pdev->dev); + if (IS_ERR(fep->ptp_clock)) { + fep->ptp_clock = NULL; + pr_err("ptp_clock_register failed\n"); + } +} diff --git a/drivers/net/ethernet/freescale/fs_enet/Kconfig b/drivers/net/ethernet/freescale/fs_enet/Kconfig index 268414d9f2c..be92229f2c2 100644 --- a/drivers/net/ethernet/freescale/fs_enet/Kconfig +++ b/drivers/net/ethernet/freescale/fs_enet/Kconfig @@ -1,7 +1,6 @@ config FS_ENET tristate "Freescale Ethernet Driver" depends on NET_VENDOR_FREESCALE && (CPM1 || CPM2 || PPC_MPC512x) - select NET_CORE select MII select PHYLIB diff --git a/drivers/net/ethernet/freescale/fs_enet/fec.h b/drivers/net/ethernet/freescale/fs_enet/fec.h index e980527e2b9..b9fe5bde432 100644 --- a/drivers/net/ethernet/freescale/fs_enet/fec.h +++ b/drivers/net/ethernet/freescale/fs_enet/fec.h @@ -23,6 +23,10 @@ #define FEC_ECNTRL_ETHER_EN 0x00000002 #define FEC_ECNTRL_RESET 0x00000001 +/* RMII mode enabled only when MII_MODE bit is set too. */ +#define FEC_RCNTRL_RMII_MODE (0x00000100 | \ + FEC_RCNTRL_MII_MODE | FEC_RCNTRL_FCE) +#define FEC_RCNTRL_FCE 0x00000020 #define FEC_RCNTRL_BC_REJ 0x00000010 #define FEC_RCNTRL_PROM 0x00000008 #define FEC_RCNTRL_MII_MODE 0x00000004 @@ -33,8 +37,6 @@ #define FEC_TCNTRL_HBC 0x00000002 #define FEC_TCNTRL_GTS 0x00000001 - - /* * Delay to wait for FEC reset command to complete (in us) */ diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c index 910a8e18a9a..cfaf17b70f3 100644 --- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c +++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c @@ -24,7 +24,6 @@ #include <linux/ioport.h> #include <linux/slab.h> #include <linux/interrupt.h> -#include <linux/init.h> #include <linux/delay.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> @@ -92,6 +91,9 @@ static int fs_enet_rx_napi(struct napi_struct *napi, int budget) u16 pkt_len, sc; int curidx; + if (budget <= 0) + return received; + /* * First, grab all of the stats for the incoming packet. * These get messed up if we get called due to a busy condition. 
@@ -154,7 +156,7 @@ static int fs_enet_rx_napi(struct napi_struct *napi, int budget) if (pkt_len <= fpi->rx_copybreak) { /* +2 to make IP header L1 cache aligned */ - skbn = dev_alloc_skb(pkt_len + 2); + skbn = netdev_alloc_skb(dev, pkt_len + 2); if (skbn != NULL) { skb_reserve(skbn, 2); /* align IP header */ skb_copy_from_linear_data(skb, @@ -165,7 +167,7 @@ static int fs_enet_rx_napi(struct napi_struct *napi, int budget) skbn = skbt; } } else { - skbn = dev_alloc_skb(ENET_RX_FRSIZE); + skbn = netdev_alloc_skb(dev, ENET_RX_FRSIZE); if (skbn) skb_align(skbn, ENET_RX_ALIGN); @@ -177,8 +179,6 @@ static int fs_enet_rx_napi(struct napi_struct *napi, int budget) received++; netif_receive_skb(skb); } else { - dev_warn(fep->dev, - "Memory squeeze, dropping packet.\n"); fep->stats.rx_dropped++; skbn = skb; } @@ -286,7 +286,7 @@ static int fs_enet_rx_non_napi(struct net_device *dev) if (pkt_len <= fpi->rx_copybreak) { /* +2 to make IP header L1 cache aligned */ - skbn = dev_alloc_skb(pkt_len + 2); + skbn = netdev_alloc_skb(dev, pkt_len + 2); if (skbn != NULL) { skb_reserve(skbn, 2); /* align IP header */ skb_copy_from_linear_data(skb, @@ -297,7 +297,7 @@ static int fs_enet_rx_non_napi(struct net_device *dev) skbn = skbt; } } else { - skbn = dev_alloc_skb(ENET_RX_FRSIZE); + skbn = netdev_alloc_skb(dev, ENET_RX_FRSIZE); if (skbn) skb_align(skbn, ENET_RX_ALIGN); @@ -309,8 +309,6 @@ static int fs_enet_rx_non_napi(struct net_device *dev) received++; netif_rx(skb); } else { - dev_warn(fep->dev, - "Memory squeeze, dropping packet.\n"); fep->stats.rx_dropped++; skbn = skb; } @@ -504,12 +502,10 @@ void fs_init_bds(struct net_device *dev) * Initialize the receive buffer descriptors. */ for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) { - skb = dev_alloc_skb(ENET_RX_FRSIZE); - if (skb == NULL) { - dev_warn(fep->dev, - "Memory squeeze, unable to allocate skb\n"); + skb = netdev_alloc_skb(dev, ENET_RX_FRSIZE); + if (skb == NULL) break; - } + skb_align(skb, ENET_RX_ALIGN); fep->rx_skbuff[i] = skb; CBDW_BUFADDR(bdp, @@ -589,17 +585,11 @@ static struct sk_buff *tx_skb_align_workaround(struct net_device *dev, struct sk_buff *skb) { struct sk_buff *new_skb; - struct fs_enet_private *fep = netdev_priv(dev); /* Alloc new skb */ - new_skb = dev_alloc_skb(skb->len + 4); - if (!new_skb) { - if (net_ratelimit()) { - dev_warn(fep->dev, - "Memory squeeze, dropping tx packet.\n"); - } + new_skb = netdev_alloc_skb(dev, skb->len + 4); + if (!new_skb) return NULL; - } /* Make sure new skb is properly aligned */ skb_align(new_skb, 4); @@ -790,17 +780,17 @@ static int fs_init_phy(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); struct phy_device *phydev; + phy_interface_t iface; fep->oldlink = 0; fep->oldspeed = 0; fep->oldduplex = -1; + iface = fep->fpi->use_rmii ? 
+ PHY_INTERFACE_MODE_RMII : PHY_INTERFACE_MODE_MII; + phydev = of_phy_connect(dev, fep->fpi->phy_node, &fs_adjust_link, 0, - PHY_INTERFACE_MODE_MII); - if (!phydev) { - phydev = of_phy_connect_fixed_link(dev, &fs_adjust_link, - PHY_INTERFACE_MODE_MII); - } + iface); if (!phydev) { dev_err(&dev->dev, "Could not attach to PHY\n"); return -ENODEV; @@ -884,8 +874,8 @@ static struct net_device_stats *fs_enet_get_stats(struct net_device *dev) static void fs_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { - strcpy(info->driver, DRV_MODULE_NAME); - strcpy(info->version, DRV_MODULE_VERSION); + strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version)); } static int fs_get_regs_len(struct net_device *dev) @@ -959,6 +949,7 @@ static const struct ethtool_ops fs_ethtool_ops = { .get_msglevel = fs_get_msglevel, .set_msglevel = fs_set_msglevel, .get_regs = fs_get_regs, + .get_ts_info = ethtool_op_get_ts_info, }; static int fs_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) @@ -999,14 +990,17 @@ static const struct net_device_ops fs_enet_netdev_ops = { }; static struct of_device_id fs_enet_match[]; -static int __devinit fs_enet_probe(struct platform_device *ofdev) +static int fs_enet_probe(struct platform_device *ofdev) { const struct of_device_id *match; struct net_device *ndev; struct fs_enet_private *fep; struct fs_platform_info *fpi; const u32 *data; + struct clk *clk; + int err; const u8 *mac_addr; + const char *phy_connection_type; int privsize, len, ret = -ENODEV; match = of_match_device(fs_enet_match, &ofdev->dev); @@ -1031,9 +1025,37 @@ static int __devinit fs_enet_probe(struct platform_device *ofdev) fpi->use_napi = 1; fpi->napi_weight = 17; fpi->phy_node = of_parse_phandle(ofdev->dev.of_node, "phy-handle", 0); - if ((!fpi->phy_node) && (!of_get_property(ofdev->dev.of_node, "fixed-link", - NULL))) - goto out_free_fpi; + if (!fpi->phy_node && of_phy_is_fixed_link(ofdev->dev.of_node)) { + err = of_phy_register_fixed_link(ofdev->dev.of_node); + if (err) + goto out_free_fpi; + + /* In the case of a fixed PHY, the DT node associated + * to the PHY is the Ethernet MAC DT node. 
+ */ + fpi->phy_node = ofdev->dev.of_node; + } + + if (of_device_is_compatible(ofdev->dev.of_node, "fsl,mpc5125-fec")) { + phy_connection_type = of_get_property(ofdev->dev.of_node, + "phy-connection-type", NULL); + if (phy_connection_type && !strcmp("rmii", phy_connection_type)) + fpi->use_rmii = 1; + } + + /* make clock lookup non-fatal (the driver is shared among platforms), + * but require enable to succeed when a clock was specified/found, + * keep a reference to the clock upon successful acquisition + */ + clk = devm_clk_get(&ofdev->dev, "per"); + if (!IS_ERR(clk)) { + err = clk_prepare_enable(clk); + if (err) { + ret = err; + goto out_free_fpi; + } + fpi->clk_per = clk; + } privsize = sizeof(*fep) + sizeof(struct sk_buff **) * @@ -1046,7 +1068,7 @@ static int __devinit fs_enet_probe(struct platform_device *ofdev) } SET_NETDEV_DEV(ndev, &ofdev->dev); - dev_set_drvdata(&ofdev->dev, ndev); + platform_set_drvdata(ofdev, ndev); fep = netdev_priv(ndev); fep->dev = &ofdev->dev; @@ -1066,7 +1088,7 @@ static int __devinit fs_enet_probe(struct platform_device *ofdev) mac_addr = of_get_mac_address(ofdev->dev.of_node); if (mac_addr) - memcpy(ndev->dev_addr, mac_addr, 6); + memcpy(ndev->dev_addr, mac_addr, ETH_ALEN); ret = fep->ops->allocate_bd(ndev); if (ret) @@ -1104,9 +1126,10 @@ out_cleanup_data: fep->ops->cleanup_data(ndev); out_free_dev: free_netdev(ndev); - dev_set_drvdata(&ofdev->dev, NULL); out_put: of_node_put(fpi->phy_node); + if (fpi->clk_per) + clk_disable_unprepare(fpi->clk_per); out_free_fpi: kfree(fpi); return ret; @@ -1114,7 +1137,7 @@ out_free_fpi: static int fs_enet_remove(struct platform_device *ofdev) { - struct net_device *ndev = dev_get_drvdata(&ofdev->dev); + struct net_device *ndev = platform_get_drvdata(ofdev); struct fs_enet_private *fep = netdev_priv(ndev); unregister_netdev(ndev); @@ -1123,6 +1146,8 @@ static int fs_enet_remove(struct platform_device *ofdev) fep->ops->cleanup_data(ndev); dev_set_drvdata(fep->dev, NULL); of_node_put(fep->fpi->phy_node); + if (fep->fpi->clk_per) + clk_disable_unprepare(fep->fpi->clk_per); free_netdev(ndev); return 0; } @@ -1150,6 +1175,10 @@ static struct of_device_id fs_enet_match[] = { .compatible = "fsl,mpc5121-fec", .data = (void *)&fs_fec_ops, }, + { + .compatible = "fsl,mpc5125-fec", + .data = (void *)&fs_fec_ops, + }, #else { .compatible = "fsl,pq1-fec-enet", diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c index 7583a9572bc..f5383abbf39 100644 --- a/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c +++ b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c @@ -20,7 +20,6 @@ #include <linux/errno.h> #include <linux/ioport.h> #include <linux/interrupt.h> -#include <linux/init.h> #include <linux/delay.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> @@ -32,7 +31,9 @@ #include <linux/fs.h> #include <linux/platform_device.h> #include <linux/phy.h> +#include <linux/of_address.h> #include <linux/of_device.h> +#include <linux/of_irq.h> #include <linux/gfp.h> #include <asm/immap_cpm2.h> @@ -88,7 +89,7 @@ static int do_pd_setup(struct fs_enet_private *fep) struct fs_platform_info *fpi = fep->fpi; int ret = -EINVAL; - fep->interrupt = of_irq_to_resource(ofdev->dev.of_node, 0, NULL); + fep->interrupt = irq_of_parse_and_map(ofdev->dev.of_node, 0); if (fep->interrupt == NO_IRQ) goto out; diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c index b9fbc83d64a..fc541348849 100644 --- 
a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c +++ b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c @@ -20,7 +20,6 @@ #include <linux/errno.h> #include <linux/ioport.h> #include <linux/interrupt.h> -#include <linux/init.h> #include <linux/delay.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> @@ -31,7 +30,9 @@ #include <linux/bitops.h> #include <linux/fs.h> #include <linux/platform_device.h> +#include <linux/of_address.h> #include <linux/of_device.h> +#include <linux/of_irq.h> #include <linux/gfp.h> #include <asm/irq.h> @@ -98,7 +99,7 @@ static int do_pd_setup(struct fs_enet_private *fep) { struct platform_device *ofdev = to_platform_device(fep->dev); - fep->interrupt = of_irq_to_resource(ofdev->dev.of_node, 0, NULL); + fep->interrupt = irq_of_parse_and_map(ofdev->dev.of_node, 0); if (fep->interrupt == NO_IRQ) return -EINVAL; @@ -322,10 +323,11 @@ static void restart(struct net_device *dev) FW(fecp, r_cntrl, FEC_RCNTRL_MII_MODE); /* MII enable */ #else /* - * Only set MII mode - do not touch maximum frame length + * Only set MII/RMII mode - do not touch maximum frame length * configured before. */ - FS(fecp, r_cntrl, FEC_RCNTRL_MII_MODE); + FS(fecp, r_cntrl, fpi->use_rmii ? + FEC_RCNTRL_RMII_MODE : FEC_RCNTRL_MII_MODE); #endif /* * adjust to duplex mode @@ -381,7 +383,9 @@ static void stop(struct net_device *dev) /* shut down FEC1? that's where the mii bus is */ if (fpi->has_phy) { - FS(fecp, r_cntrl, FEC_RCNTRL_MII_MODE); /* MII enable */ + FS(fecp, r_cntrl, fpi->use_rmii ? + FEC_RCNTRL_RMII_MODE : + FEC_RCNTRL_MII_MODE); /* MII/RMII enable */ FS(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN); FW(fecp, ievent, FEC_ENET_MII); FW(fecp, mii_speed, feci->mii_speed); diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-scc.c b/drivers/net/ethernet/freescale/fs_enet/mac-scc.c index 22a02a76706..b4bf02f57d4 100644 --- a/drivers/net/ethernet/freescale/fs_enet/mac-scc.c +++ b/drivers/net/ethernet/freescale/fs_enet/mac-scc.c @@ -20,7 +20,6 @@ #include <linux/errno.h> #include <linux/ioport.h> #include <linux/interrupt.h> -#include <linux/init.h> #include <linux/delay.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> @@ -31,6 +30,8 @@ #include <linux/bitops.h> #include <linux/fs.h> #include <linux/platform_device.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> #include <linux/of_platform.h> #include <asm/irq.h> @@ -98,7 +99,7 @@ static int do_pd_setup(struct fs_enet_private *fep) { struct platform_device *ofdev = to_platform_device(fep->dev); - fep->interrupt = of_irq_to_resource(ofdev->dev.of_node, 0, NULL); + fep->interrupt = irq_of_parse_and_map(ofdev->dev.of_node, 0); if (fep->interrupt == NO_IRQ) return -EINVAL; diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c index 0f2d1a71090..3d3fde66c2c 100644 --- a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c +++ b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c @@ -15,13 +15,13 @@ #include <linux/module.h> #include <linux/ioport.h> #include <linux/slab.h> -#include <linux/init.h> #include <linux/interrupt.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/mii.h> #include <linux/platform_device.h> #include <linux/mdio-bitbang.h> +#include <linux/of_address.h> #include <linux/of_mdio.h> #include <linux/of_platform.h> @@ -108,8 +108,7 @@ static struct mdiobb_ops bb_ops = { .get_mdio_data = mdio_read, }; -static int __devinit fs_mii_bitbang_init(struct mii_bus *bus, - struct device_node *np) 
+static int fs_mii_bitbang_init(struct mii_bus *bus, struct device_node *np) { struct resource res; const u32 *data; @@ -150,7 +149,7 @@ static int __devinit fs_mii_bitbang_init(struct mii_bus *bus, return 0; } -static int __devinit fs_enet_mdio_probe(struct platform_device *ofdev) +static int fs_enet_mdio_probe(struct platform_device *ofdev) { struct mii_bus *new_bus; struct bb_info *bitbang; @@ -174,11 +173,13 @@ static int __devinit fs_enet_mdio_probe(struct platform_device *ofdev) new_bus->phy_mask = ~0; new_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); - if (!new_bus->irq) + if (!new_bus->irq) { + ret = -ENOMEM; goto out_unmap_regs; + } new_bus->parent = &ofdev->dev; - dev_set_drvdata(&ofdev->dev, new_bus); + platform_set_drvdata(ofdev, new_bus); ret = of_mdiobus_register(new_bus, ofdev->dev.of_node); if (ret) @@ -187,7 +188,6 @@ static int __devinit fs_enet_mdio_probe(struct platform_device *ofdev) return 0; out_free_irqs: - dev_set_drvdata(&ofdev->dev, NULL); kfree(new_bus->irq); out_unmap_regs: iounmap(bitbang->dir); @@ -201,11 +201,10 @@ out: static int fs_enet_mdio_remove(struct platform_device *ofdev) { - struct mii_bus *bus = dev_get_drvdata(&ofdev->dev); + struct mii_bus *bus = platform_get_drvdata(ofdev); struct bb_info *bitbang = bus->priv; mdiobus_unregister(bus); - dev_set_drvdata(&ofdev->dev, NULL); kfree(bus->irq); free_mdio_bitbang(bus); iounmap(bitbang->dir); diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c index 55bb867258e..ebf5d6429a8 100644 --- a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c +++ b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c @@ -21,7 +21,6 @@ #include <linux/ioport.h> #include <linux/slab.h> #include <linux/interrupt.h> -#include <linux/init.h> #include <linux/delay.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> @@ -31,6 +30,7 @@ #include <linux/ethtool.h> #include <linux/bitops.h> #include <linux/platform_device.h> +#include <linux/of_address.h> #include <linux/of_platform.h> #include <asm/pgtable.h> @@ -95,14 +95,8 @@ static int fs_enet_fec_mii_write(struct mii_bus *bus, int phy_id, int location, } -static int fs_enet_fec_mii_reset(struct mii_bus *bus) -{ - /* nothing here - for now */ - return 0; -} - static struct of_device_id fs_enet_mdio_fec_match[]; -static int __devinit fs_enet_mdio_probe(struct platform_device *ofdev) +static int fs_enet_mdio_probe(struct platform_device *ofdev) { const struct of_device_id *match; struct resource res; @@ -128,7 +122,6 @@ static int __devinit fs_enet_mdio_probe(struct platform_device *ofdev) new_bus->name = "FEC MII Bus"; new_bus->read = &fs_enet_fec_mii_read; new_bus->write = &fs_enet_fec_mii_write; - new_bus->reset = &fs_enet_fec_mii_reset; ret = of_address_to_resource(ofdev->dev.of_node, 0, &res); if (ret) @@ -137,8 +130,10 @@ static int __devinit fs_enet_mdio_probe(struct platform_device *ofdev) snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", res.start); fec->fecp = ioremap(res.start, resource_size(&res)); - if (!fec->fecp) + if (!fec->fecp) { + ret = -ENOMEM; goto out_fec; + } if (get_bus_freq) { clock = get_bus_freq(ofdev->dev.of_node); @@ -172,11 +167,13 @@ static int __devinit fs_enet_mdio_probe(struct platform_device *ofdev) new_bus->phy_mask = ~0; new_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); - if (!new_bus->irq) + if (!new_bus->irq) { + ret = -ENOMEM; goto out_unmap_regs; + } new_bus->parent = &ofdev->dev; - dev_set_drvdata(&ofdev->dev, new_bus); + platform_set_drvdata(ofdev, 
new_bus); ret = of_mdiobus_register(new_bus, ofdev->dev.of_node); if (ret) @@ -185,7 +182,6 @@ static int __devinit fs_enet_mdio_probe(struct platform_device *ofdev) return 0; out_free_irqs: - dev_set_drvdata(&ofdev->dev, NULL); kfree(new_bus->irq); out_unmap_regs: iounmap(fec->fecp); @@ -200,11 +196,10 @@ out: static int fs_enet_mdio_remove(struct platform_device *ofdev) { - struct mii_bus *bus = dev_get_drvdata(&ofdev->dev); + struct mii_bus *bus = platform_get_drvdata(ofdev); struct fec_info *fec = bus->priv; mdiobus_unregister(bus); - dev_set_drvdata(&ofdev->dev, NULL); kfree(bus->irq); iounmap(fec->fecp); kfree(fec); diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c index 9eb815941df..583e71ab7f5 100644 --- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c +++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c @@ -19,51 +19,91 @@ #include <linux/kernel.h> #include <linux/string.h> #include <linux/errno.h> -#include <linux/unistd.h> #include <linux/slab.h> -#include <linux/interrupt.h> -#include <linux/init.h> #include <linux/delay.h> -#include <linux/netdevice.h> -#include <linux/etherdevice.h> -#include <linux/skbuff.h> -#include <linux/spinlock.h> -#include <linux/mm.h> #include <linux/module.h> -#include <linux/platform_device.h> -#include <linux/crc32.h> #include <linux/mii.h> -#include <linux/phy.h> -#include <linux/of.h> #include <linux/of_address.h> #include <linux/of_mdio.h> -#include <linux/of_platform.h> +#include <linux/of_device.h> #include <asm/io.h> -#include <asm/irq.h> -#include <asm/uaccess.h> -#include <asm/ucc.h> +#include <asm/ucc.h> /* for ucc_set_qe_mux_mii_mng() */ #include "gianfar.h" -#include "fsl_pq_mdio.h" + +#define MIIMIND_BUSY 0x00000001 +#define MIIMIND_NOTVALID 0x00000004 +#define MIIMCFG_INIT_VALUE 0x00000007 +#define MIIMCFG_RESET 0x80000000 + +#define MII_READ_COMMAND 0x00000001 + +struct fsl_pq_mii { + u32 miimcfg; /* MII management configuration reg */ + u32 miimcom; /* MII management command reg */ + u32 miimadd; /* MII management address reg */ + u32 miimcon; /* MII management control reg */ + u32 miimstat; /* MII management status reg */ + u32 miimind; /* MII management indication reg */ +}; + +struct fsl_pq_mdio { + u8 res1[16]; + u32 ieventm; /* MDIO Interrupt event register (for etsec2)*/ + u32 imaskm; /* MDIO Interrupt mask register (for etsec2)*/ + u8 res2[4]; + u32 emapm; /* MDIO Event mapping register (for etsec2)*/ + u8 res3[1280]; + struct fsl_pq_mii mii; + u8 res4[28]; + u32 utbipar; /* TBI phy address reg (only on UCC) */ + u8 res5[2728]; +} __packed; + +/* Number of microseconds to wait for an MII register to respond */ +#define MII_TIMEOUT 1000 struct fsl_pq_mdio_priv { void __iomem *map; - struct fsl_pq_mdio __iomem *regs; + struct fsl_pq_mii __iomem *regs; + int irqs[PHY_MAX_ADDR]; +}; + +/* + * Per-device-type data. Each type of device tree node that we support gets + * one of these. + * + * @mii_offset: the offset of the MII registers within the memory map of the + * node. Some nodes define only the MII registers, and some define the whole + * MAC (which includes the MII registers). 
+ * + * @get_tbipa: determines the address of the TBIPA register + * + * @ucc_configure: a special function for extra QE configuration + */ +struct fsl_pq_mdio_data { + unsigned int mii_offset; /* offset of the MII registers */ + uint32_t __iomem * (*get_tbipa)(void __iomem *p); + void (*ucc_configure)(phys_addr_t start, phys_addr_t end); }; /* - * Write value to the PHY at mii_id at register regnum, - * on the bus attached to the local interface, which may be different from the - * generic mdio bus (tied to a single interface), waiting until the write is - * done before returning. This is helpful in programming interfaces like - * the TBI which control interfaces like onchip SERDES and are always tied to - * the local mdio pins, which may not be the same as system mdio bus, used for + * Write value to the PHY at mii_id at register regnum, on the bus attached + * to the local interface, which may be different from the generic mdio bus + * (tied to a single interface), waiting until the write is done before + * returning. This is helpful in programming interfaces like the TBI which + * control interfaces like onchip SERDES and are always tied to the local + * mdio pins, which may not be the same as system mdio bus, used for * controlling the external PHYs, for example. */ -int fsl_pq_local_mdio_write(struct fsl_pq_mdio __iomem *regs, int mii_id, - int regnum, u16 value) +static int fsl_pq_mdio_write(struct mii_bus *bus, int mii_id, int regnum, + u16 value) { + struct fsl_pq_mdio_priv *priv = bus->priv; + struct fsl_pq_mii __iomem *regs = priv->regs; + u32 status; + /* Set the PHY address and the register address we want to write */ out_be32(®s->miimadd, (mii_id << 8) | regnum); @@ -71,25 +111,27 @@ int fsl_pq_local_mdio_write(struct fsl_pq_mdio __iomem *regs, int mii_id, out_be32(®s->miimcon, value); /* Wait for the transaction to finish */ - while (in_be32(®s->miimind) & MIIMIND_BUSY) - cpu_relax(); + status = spin_event_timeout(!(in_be32(®s->miimind) & MIIMIND_BUSY), + MII_TIMEOUT, 0); - return 0; + return status ? 0 : -ETIMEDOUT; } /* - * Read the bus for PHY at addr mii_id, register regnum, and - * return the value. Clears miimcom first. All PHY operation - * done on the bus attached to the local interface, - * which may be different from the generic mdio bus - * This is helpful in programming interfaces like - * the TBI which, in turn, control interfaces like onchip SERDES - * and are always tied to the local mdio pins, which may not be the + * Read the bus for PHY at addr mii_id, register regnum, and return the value. + * Clears miimcom first. + * + * All PHY operation done on the bus attached to the local interface, which + * may be different from the generic mdio bus. This is helpful in programming + * interfaces like the TBI which, in turn, control interfaces like on-chip + * SERDES and are always tied to the local mdio pins, which may not be the * same as system mdio bus, used for controlling the external PHYs, for eg. 
*/ -int fsl_pq_local_mdio_read(struct fsl_pq_mdio __iomem *regs, - int mii_id, int regnum) +static int fsl_pq_mdio_read(struct mii_bus *bus, int mii_id, int regnum) { + struct fsl_pq_mdio_priv *priv = bus->priv; + struct fsl_pq_mii __iomem *regs = priv->regs; + u32 status; u16 value; /* Set the PHY address and the register address we want to read */ @@ -99,52 +141,26 @@ int fsl_pq_local_mdio_read(struct fsl_pq_mdio __iomem *regs, out_be32(®s->miimcom, 0); out_be32(®s->miimcom, MII_READ_COMMAND); - /* Wait for the transaction to finish */ - while (in_be32(®s->miimind) & (MIIMIND_NOTVALID | MIIMIND_BUSY)) - cpu_relax(); + /* Wait for the transaction to finish, normally less than 100us */ + status = spin_event_timeout(!(in_be32(®s->miimind) & + (MIIMIND_NOTVALID | MIIMIND_BUSY)), + MII_TIMEOUT, 0); + if (!status) + return -ETIMEDOUT; /* Grab the value of the register from miimstat */ value = in_be32(®s->miimstat); + dev_dbg(&bus->dev, "read %04x from address %x/%x\n", value, mii_id, regnum); return value; } -static struct fsl_pq_mdio __iomem *fsl_pq_mdio_get_regs(struct mii_bus *bus) -{ - struct fsl_pq_mdio_priv *priv = bus->priv; - - return priv->regs; -} - -/* - * Write value to the PHY at mii_id at register regnum, - * on the bus, waiting until the write is done before returning. - */ -int fsl_pq_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value) -{ - struct fsl_pq_mdio __iomem *regs = fsl_pq_mdio_get_regs(bus); - - /* Write to the local MII regs */ - return fsl_pq_local_mdio_write(regs, mii_id, regnum, value); -} - -/* - * Read the bus for PHY at addr mii_id, register regnum, and - * return the value. Clears miimcom first. - */ -int fsl_pq_mdio_read(struct mii_bus *bus, int mii_id, int regnum) -{ - struct fsl_pq_mdio __iomem *regs = fsl_pq_mdio_get_regs(bus); - - /* Read the local MII regs */ - return fsl_pq_local_mdio_read(regs, mii_id, regnum); -} - /* Reset the MIIM registers, and wait for the bus to free */ static int fsl_pq_mdio_reset(struct mii_bus *bus) { - struct fsl_pq_mdio __iomem *regs = fsl_pq_mdio_get_regs(bus); - int timeout = PHY_INIT_TIMEOUT; + struct fsl_pq_mdio_priv *priv = bus->priv; + struct fsl_pq_mii __iomem *regs = priv->regs; + u32 status; mutex_lock(&bus->mdio_lock); @@ -155,283 +171,308 @@ static int fsl_pq_mdio_reset(struct mii_bus *bus) out_be32(®s->miimcfg, MIIMCFG_INIT_VALUE); /* Wait until the bus is free */ - while ((in_be32(®s->miimind) & MIIMIND_BUSY) && timeout--) - cpu_relax(); + status = spin_event_timeout(!(in_be32(®s->miimind) & MIIMIND_BUSY), + MII_TIMEOUT, 0); mutex_unlock(&bus->mdio_lock); - if (timeout < 0) { - printk(KERN_ERR "%s: The MII Bus is stuck!\n", - bus->name); + if (!status) { + dev_err(&bus->dev, "timeout waiting for MII bus\n"); return -EBUSY; } return 0; } -void fsl_pq_mdio_bus_name(char *name, struct device_node *np) +#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE) +/* + * This is mildly evil, but so is our hardware for doing this. + * Also, we have to cast back to struct gfar because of + * definition weirdness done in gianfar.h. 
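+ *
+ * (Editor's annotation, not part of the original patch: for these
+ * gianfar-style nodes the mapped region is the full MAC register
+ * block, so the TBIPA register is found by casting the mapping to
+ * struct gfar and returning &enet_regs->tbipa, as done just below.)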
+ */
+static uint32_t __iomem *get_gfar_tbipa(void __iomem *p)
 {
- const u32 *addr;
- u64 taddr = OF_BAD_ADDR;
-
- addr = of_get_address(np, 0, NULL, NULL);
- if (addr)
- taddr = of_translate_address(np, addr);
+ struct gfar __iomem *enet_regs = p;
- snprintf(name, MII_BUS_ID_SIZE, "%s@%llx", np->name,
- (unsigned long long)taddr);
+ return &enet_regs->tbipa;
 }
-EXPORT_SYMBOL_GPL(fsl_pq_mdio_bus_name);
+/*
+ * Return the TBIPAR address for an eTSEC2 node
+ */
+static uint32_t __iomem *get_etsec_tbipa(void __iomem *p)
+{
+ return p;
+}
+#endif
-static u32 __iomem *get_gfar_tbipa(struct fsl_pq_mdio __iomem *regs, struct device_node *np)
+#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
+/*
+ * Return the TBIPAR address for a QE MDIO node
+ */
+static uint32_t __iomem *get_ucc_tbipa(void __iomem *p)
 {
-#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
- struct gfar __iomem *enet_regs;
+ struct fsl_pq_mdio __iomem *mdio = p;
- /*
- * This is mildly evil, but so is our hardware for doing this.
- * Also, we have to cast back to struct gfar because of
- * definition weirdness done in gianfar.h.
- */
- if(of_device_is_compatible(np, "fsl,gianfar-mdio") ||
- of_device_is_compatible(np, "fsl,gianfar-tbi") ||
- of_device_is_compatible(np, "gianfar")) {
- enet_regs = (struct gfar __iomem *)regs;
- return &enet_regs->tbipa;
- } else if (of_device_is_compatible(np, "fsl,etsec2-mdio") ||
- of_device_is_compatible(np, "fsl,etsec2-tbi")) {
- return of_iomap(np, 1);
- }
-#endif
- return NULL;
+ return &mdio->utbipar;
 }
-
-static int get_ucc_id_for_range(u64 start, u64 end, u32 *ucc_id)
+/*
+ * Find the UCC node that controls the given MDIO node
+ *
+ * For some reason, the QE MDIO nodes are not children of the UCC devices
+ * that control them. Therefore, we need to scan all UCC nodes looking for
+ * the one that encompasses the given MDIO node. We do this by comparing
+ * physical addresses. The 'start' and 'end' addresses of the MDIO node are
+ * passed, and the correct UCC node will cover the entire address range.
+ *
+ * This assumes that there is only one QE MDIO node in the entire device tree.
+ */
+static void ucc_configure(phys_addr_t start, phys_addr_t end)
 {
-#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
+ static bool found_mii_master;
 struct device_node *np = NULL;
- int err = 0;
- for_each_compatible_node(np, NULL, "ucc_geth") {
- struct resource tempres;
+ if (found_mii_master)
+ return;
- err = of_address_to_resource(np, 0, &tempres);
- if (err)
+ for_each_compatible_node(np, NULL, "ucc_geth") {
+ struct resource res;
+ const uint32_t *iprop;
+ uint32_t id;
+ int ret;
+
+ ret = of_address_to_resource(np, 0, &res);
+ if (ret < 0) {
+ pr_debug("fsl-pq-mdio: no address range in node %s\n",
+ np->full_name);
 continue;
+ }
 /* if our mdio regs fall within this UCC regs range */
- if ((start >= tempres.start) && (end <= tempres.end)) {
- /* Find the id of the UCC */
- const u32 *id;
-
- id = of_get_property(np, "cell-index", NULL);
- if (!id) {
- id = of_get_property(np, "device-id", NULL);
- if (!id)
- continue;
+ if ((start < res.start) || (end > res.end))
+ continue;
+
+ iprop = of_get_property(np, "cell-index", NULL);
+ if (!iprop) {
+ iprop = of_get_property(np, "device-id", NULL);
+ if (!iprop) {
+ pr_debug("fsl-pq-mdio: no UCC ID in node %s\n",
+ np->full_name);
+ continue;
 }
+ }
- *ucc_id = *id;
+ id = be32_to_cpup(iprop);
- return 0;
+ /*
+ * cell-index and device-id for QE nodes are
+ * numbered from 1, not 0.
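+ *
+ * (Editor's annotation, not part of the original patch: because
+ * ucc_set_qe_mux_mii_mng() expects a zero-based UCC index, a node
+ * with cell-index = <2>, i.e. UCC2, is selected with
+ * ucc_set_qe_mux_mii_mng(1); hence the "id - 1" just below.)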
+ */ + if (ucc_set_qe_mux_mii_mng(id - 1) < 0) { + pr_debug("fsl-pq-mdio: invalid UCC ID in node %s\n", + np->full_name); + continue; } + + pr_debug("fsl-pq-mdio: setting node UCC%u to MII master\n", id); + found_mii_master = true; } +} - if (err) - return err; - else - return -EINVAL; -#else - return -ENODEV; #endif -} -static int fsl_pq_mdio_probe(struct platform_device *ofdev) +static struct of_device_id fsl_pq_mdio_match[] = { +#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE) + { + .compatible = "fsl,gianfar-tbi", + .data = &(struct fsl_pq_mdio_data) { + .mii_offset = 0, + .get_tbipa = get_gfar_tbipa, + }, + }, + { + .compatible = "fsl,gianfar-mdio", + .data = &(struct fsl_pq_mdio_data) { + .mii_offset = 0, + .get_tbipa = get_gfar_tbipa, + }, + }, + { + .type = "mdio", + .compatible = "gianfar", + .data = &(struct fsl_pq_mdio_data) { + .mii_offset = offsetof(struct fsl_pq_mdio, mii), + .get_tbipa = get_gfar_tbipa, + }, + }, + { + .compatible = "fsl,etsec2-tbi", + .data = &(struct fsl_pq_mdio_data) { + .mii_offset = offsetof(struct fsl_pq_mdio, mii), + .get_tbipa = get_etsec_tbipa, + }, + }, + { + .compatible = "fsl,etsec2-mdio", + .data = &(struct fsl_pq_mdio_data) { + .mii_offset = offsetof(struct fsl_pq_mdio, mii), + .get_tbipa = get_etsec_tbipa, + }, + }, +#endif +#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE) + { + .compatible = "fsl,ucc-mdio", + .data = &(struct fsl_pq_mdio_data) { + .mii_offset = 0, + .get_tbipa = get_ucc_tbipa, + .ucc_configure = ucc_configure, + }, + }, + { + /* Legacy UCC MDIO node */ + .type = "mdio", + .compatible = "ucc_geth_phy", + .data = &(struct fsl_pq_mdio_data) { + .mii_offset = 0, + .get_tbipa = get_ucc_tbipa, + .ucc_configure = ucc_configure, + }, + }, +#endif + /* No Kconfig option for Fman support yet */ + { + .compatible = "fsl,fman-mdio", + .data = &(struct fsl_pq_mdio_data) { + .mii_offset = 0, + /* Fman TBI operations are handled elsewhere */ + }, + }, + + {}, +}; +MODULE_DEVICE_TABLE(of, fsl_pq_mdio_match); + +static int fsl_pq_mdio_probe(struct platform_device *pdev) { - struct device_node *np = ofdev->dev.of_node; + const struct of_device_id *id = + of_match_device(fsl_pq_mdio_match, &pdev->dev); + const struct fsl_pq_mdio_data *data = id->data; + struct device_node *np = pdev->dev.of_node; + struct resource res; struct device_node *tbi; struct fsl_pq_mdio_priv *priv; - struct fsl_pq_mdio __iomem *regs = NULL; - void __iomem *map; - u32 __iomem *tbipa; struct mii_bus *new_bus; - int tbiaddr = -1; - const u32 *addrp; - u64 addr = 0, size = 0; int err; - priv = kzalloc(sizeof(*priv), GFP_KERNEL); - if (!priv) - return -ENOMEM; + dev_dbg(&pdev->dev, "found %s compatible node\n", id->compatible); - new_bus = mdiobus_alloc(); - if (!new_bus) { - err = -ENOMEM; - goto err_free_priv; - } + new_bus = mdiobus_alloc_size(sizeof(*priv)); + if (!new_bus) + return -ENOMEM; + priv = new_bus->priv; new_bus->name = "Freescale PowerQUICC MII Bus", - new_bus->read = &fsl_pq_mdio_read, - new_bus->write = &fsl_pq_mdio_write, - new_bus->reset = &fsl_pq_mdio_reset, - new_bus->priv = priv; - fsl_pq_mdio_bus_name(new_bus->id, np); - - addrp = of_get_address(np, 0, &size, NULL); - if (!addrp) { - err = -EINVAL; - goto err_free_bus; + new_bus->read = &fsl_pq_mdio_read; + new_bus->write = &fsl_pq_mdio_write; + new_bus->reset = &fsl_pq_mdio_reset; + new_bus->irq = priv->irqs; + + err = of_address_to_resource(np, 0, &res); + if (err < 0) { + dev_err(&pdev->dev, "could not obtain address information\n"); + goto error; } - /* Set the PHY 
base address */ - addr = of_translate_address(np, addrp); - if (addr == OF_BAD_ADDR) { - err = -EINVAL; - goto err_free_bus; - } + snprintf(new_bus->id, MII_BUS_ID_SIZE, "%s@%llx", np->name, + (unsigned long long)res.start); - map = ioremap(addr, size); - if (!map) { + priv->map = of_iomap(np, 0); + if (!priv->map) { err = -ENOMEM; - goto err_free_bus; + goto error; } - priv->map = map; - - if (of_device_is_compatible(np, "fsl,gianfar-mdio") || - of_device_is_compatible(np, "fsl,gianfar-tbi") || - of_device_is_compatible(np, "fsl,ucc-mdio") || - of_device_is_compatible(np, "ucc_geth_phy")) - map -= offsetof(struct fsl_pq_mdio, miimcfg); - regs = map; - priv->regs = regs; - new_bus->irq = kcalloc(PHY_MAX_ADDR, sizeof(int), GFP_KERNEL); - - if (NULL == new_bus->irq) { - err = -ENOMEM; - goto err_unmap_regs; + /* + * Some device tree nodes represent only the MII registers, and + * others represent the MAC and MII registers. The 'mii_offset' field + * contains the offset of the MII registers inside the mapped register + * space. + */ + if (data->mii_offset > resource_size(&res)) { + dev_err(&pdev->dev, "invalid register map\n"); + err = -EINVAL; + goto error; } + priv->regs = priv->map + data->mii_offset; - new_bus->parent = &ofdev->dev; - dev_set_drvdata(&ofdev->dev, new_bus); - - if (of_device_is_compatible(np, "fsl,gianfar-mdio") || - of_device_is_compatible(np, "fsl,gianfar-tbi") || - of_device_is_compatible(np, "fsl,etsec2-mdio") || - of_device_is_compatible(np, "fsl,etsec2-tbi") || - of_device_is_compatible(np, "gianfar")) { - tbipa = get_gfar_tbipa(regs, np); - if (!tbipa) { - err = -EINVAL; - goto err_free_irqs; - } - } else if (of_device_is_compatible(np, "fsl,ucc-mdio") || - of_device_is_compatible(np, "ucc_geth_phy")) { - u32 id; - static u32 mii_mng_master; - - tbipa = ®s->utbipar; + new_bus->parent = &pdev->dev; + platform_set_drvdata(pdev, new_bus); - if ((err = get_ucc_id_for_range(addr, addr + size, &id))) - goto err_free_irqs; - - if (!mii_mng_master) { - mii_mng_master = id; - ucc_set_qe_mux_mii_mng(id - 1); + if (data->get_tbipa) { + for_each_child_of_node(np, tbi) { + if (strcmp(tbi->type, "tbi-phy") == 0) { + dev_dbg(&pdev->dev, "found TBI PHY node %s\n", + strrchr(tbi->full_name, '/') + 1); + break; + } } - } else { - err = -ENODEV; - goto err_free_irqs; - } - for_each_child_of_node(np, tbi) { - if (!strncmp(tbi->type, "tbi-phy", 8)) - break; - } + if (tbi) { + const u32 *prop = of_get_property(tbi, "reg", NULL); + uint32_t __iomem *tbipa; + + if (!prop) { + dev_err(&pdev->dev, + "missing 'reg' property in node %s\n", + tbi->full_name); + err = -EBUSY; + goto error; + } - if (tbi) { - const u32 *prop = of_get_property(tbi, "reg", NULL); + tbipa = data->get_tbipa(priv->map); - if (prop) - tbiaddr = *prop; + out_be32(tbipa, be32_to_cpup(prop)); + } } - if (tbiaddr == -1) { - err = -EBUSY; - goto err_free_irqs; - } else { - out_be32(tbipa, tbiaddr); - } + if (data->ucc_configure) + data->ucc_configure(res.start, res.end); err = of_mdiobus_register(new_bus, np); if (err) { - printk (KERN_ERR "%s: Cannot register as MDIO bus\n", - new_bus->name); - goto err_free_irqs; + dev_err(&pdev->dev, "cannot register %s as MDIO bus\n", + new_bus->name); + goto error; } return 0; -err_free_irqs: - kfree(new_bus->irq); -err_unmap_regs: - iounmap(priv->map); -err_free_bus: +error: + if (priv->map) + iounmap(priv->map); + kfree(new_bus); -err_free_priv: - kfree(priv); + return err; } -static int fsl_pq_mdio_remove(struct platform_device *ofdev) +static int fsl_pq_mdio_remove(struct 
platform_device *pdev) { - struct device *device = &ofdev->dev; + struct device *device = &pdev->dev; struct mii_bus *bus = dev_get_drvdata(device); struct fsl_pq_mdio_priv *priv = bus->priv; mdiobus_unregister(bus); - dev_set_drvdata(device, NULL); - iounmap(priv->map); - bus->priv = NULL; mdiobus_free(bus); - kfree(priv); return 0; } -static struct of_device_id fsl_pq_mdio_match[] = { - { - .type = "mdio", - .compatible = "ucc_geth_phy", - }, - { - .type = "mdio", - .compatible = "gianfar", - }, - { - .compatible = "fsl,ucc-mdio", - }, - { - .compatible = "fsl,gianfar-tbi", - }, - { - .compatible = "fsl,gianfar-mdio", - }, - { - .compatible = "fsl,etsec2-tbi", - }, - { - .compatible = "fsl,etsec2-mdio", - }, - {}, -}; -MODULE_DEVICE_TABLE(of, fsl_pq_mdio_match); - static struct platform_driver fsl_pq_mdio_driver = { .driver = { .name = "fsl-pq_mdio", diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.h b/drivers/net/ethernet/freescale/fsl_pq_mdio.h deleted file mode 100644 index bd17a2a0139..00000000000 --- a/drivers/net/ethernet/freescale/fsl_pq_mdio.h +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Freescale PowerQUICC MDIO Driver -- MII Management Bus Implementation - * Driver for the MDIO bus controller on Freescale PowerQUICC processors - * - * Author: Andy Fleming - * Modifier: Sandeep Gopalpet - * - * Copyright 2002-2004, 2008-2009 Freescale Semiconductor, Inc. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - * - */ -#ifndef __FSL_PQ_MDIO_H -#define __FSL_PQ_MDIO_H - -#define MIIMIND_BUSY 0x00000001 -#define MIIMIND_NOTVALID 0x00000004 -#define MIIMCFG_INIT_VALUE 0x00000007 -#define MIIMCFG_RESET 0x80000000 - -#define MII_READ_COMMAND 0x00000001 - -struct fsl_pq_mdio { - u8 res1[16]; - u32 ieventm; /* MDIO Interrupt event register (for etsec2)*/ - u32 imaskm; /* MDIO Interrupt mask register (for etsec2)*/ - u8 res2[4]; - u32 emapm; /* MDIO Event mapping register (for etsec2)*/ - u8 res3[1280]; - u32 miimcfg; /* MII management configuration reg */ - u32 miimcom; /* MII management command reg */ - u32 miimadd; /* MII management address reg */ - u32 miimcon; /* MII management control reg */ - u32 miimstat; /* MII management status reg */ - u32 miimind; /* MII management indication reg */ - u8 reserved[28]; /* Space holder */ - u32 utbipar; /* TBI phy address reg (only on UCC) */ - u8 res4[2728]; -} __packed; - -int fsl_pq_mdio_read(struct mii_bus *bus, int mii_id, int regnum); -int fsl_pq_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value); -int fsl_pq_local_mdio_write(struct fsl_pq_mdio __iomem *regs, int mii_id, - int regnum, u16 value); -int fsl_pq_local_mdio_read(struct fsl_pq_mdio __iomem *regs, int mii_id, int regnum); -int __init fsl_pq_mdio_init(void); -void fsl_pq_mdio_exit(void); -void fsl_pq_mdio_bus_name(char *name, struct device_node *np); -#endif /* FSL_PQ_MDIO_H */ diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index 39d160d353a..a6cf40e62f3 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -1,5 +1,4 @@ -/* - * drivers/net/gianfar.c +/* drivers/net/ethernet/freescale/gianfar.c * * Gianfar Ethernet Driver * This driver is designed for the non-CPM ethernet controllers @@ -10,7 +9,7 @@ * Maintainer: Kumar Gala * Modifier: Sandeep Gopalpet 
<sandeep.kumar@freescale.com> * - * Copyright 2002-2009, 2011 Freescale Semiconductor, Inc. + * Copyright 2002-2009, 2011-2013 Freescale Semiconductor, Inc. * Copyright 2007 MontaVista Software, Inc. * * This program is free software; you can redistribute it and/or modify it @@ -71,7 +70,6 @@ #include <linux/unistd.h> #include <linux/slab.h> #include <linux/interrupt.h> -#include <linux/init.h> #include <linux/delay.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> @@ -79,6 +77,8 @@ #include <linux/if_vlan.h> #include <linux/spinlock.h> #include <linux/mm.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> #include <linux/of_mdio.h> #include <linux/of_platform.h> #include <linux/ip.h> @@ -89,6 +89,7 @@ #include <asm/io.h> #include <asm/reg.h> +#include <asm/mpc85xx.h> #include <asm/irq.h> #include <asm/uaccess.h> #include <linux/module.h> @@ -101,13 +102,9 @@ #include <linux/of_net.h> #include "gianfar.h" -#include "fsl_pq_mdio.h" #define TX_TIMEOUT (1*HZ) -#undef BRIEF_GFAR_ERRORS -#undef VERBOSE_GFAR_ERRORS -const char gfar_driver_name[] = "Gianfar Ethernet"; const char gfar_driver_version[] = "1.3"; static int gfar_enet_open(struct net_device *dev); @@ -117,14 +114,14 @@ static void gfar_timeout(struct net_device *dev); static int gfar_close(struct net_device *dev); struct sk_buff *gfar_new_skb(struct net_device *dev); static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp, - struct sk_buff *skb); + struct sk_buff *skb); static int gfar_set_mac_address(struct net_device *dev); static int gfar_change_mtu(struct net_device *dev, int new_mtu); static irqreturn_t gfar_error(int irq, void *dev_id); static irqreturn_t gfar_transmit(int irq, void *dev_id); static irqreturn_t gfar_interrupt(int irq, void *dev_id); static void adjust_link(struct net_device *dev); -static void init_registers(struct net_device *dev); +static noinline void gfar_update_link_state(struct gfar_private *priv); static int init_phy(struct net_device *dev); static int gfar_probe(struct platform_device *ofdev); static int gfar_remove(struct platform_device *ofdev); @@ -132,17 +129,18 @@ static void free_skb_resources(struct gfar_private *priv); static void gfar_set_multi(struct net_device *dev); static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr); static void gfar_configure_serdes(struct net_device *dev); -static int gfar_poll(struct napi_struct *napi, int budget); +static int gfar_poll_rx(struct napi_struct *napi, int budget); +static int gfar_poll_tx(struct napi_struct *napi, int budget); +static int gfar_poll_rx_sq(struct napi_struct *napi, int budget); +static int gfar_poll_tx_sq(struct napi_struct *napi, int budget); #ifdef CONFIG_NET_POLL_CONTROLLER static void gfar_netpoll(struct net_device *dev); #endif int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit); -static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue); -static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, - int amount_pull); -void gfar_halt(struct net_device *dev); -static void gfar_halt_nodisable(struct net_device *dev); -void gfar_start(struct net_device *dev); +static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue); +static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb, + int amount_pull, struct napi_struct *napi); +static void gfar_halt_nodisable(struct gfar_private *priv); static void gfar_clear_exact_match(struct net_device *dev); static void gfar_set_mac_for_addr(struct net_device *dev, int num, const u8 
*addr); @@ -215,7 +213,7 @@ static int gfar_init_bds(struct net_device *ndev) skb = gfar_new_skb(ndev); if (!skb) { netdev_err(ndev, "Can't allocate RX buffers\n"); - goto err_rxalloc_fail; + return -ENOMEM; } rx_queue->rx_skbuff[j] = skb; @@ -228,10 +226,6 @@ static int gfar_init_bds(struct net_device *ndev) } return 0; - -err_rxalloc_fail: - free_skb_resources(priv); - return -ENOMEM; } static int gfar_alloc_skb_resources(struct net_device *ndev) @@ -240,7 +234,7 @@ static int gfar_alloc_skb_resources(struct net_device *ndev) dma_addr_t addr; int i, j, k; struct gfar_private *priv = netdev_priv(ndev); - struct device *dev = &priv->ofdev->dev; + struct device *dev = priv->dev; struct gfar_priv_tx_q *tx_queue = NULL; struct gfar_priv_rx_q *rx_queue = NULL; @@ -254,14 +248,13 @@ static int gfar_alloc_skb_resources(struct net_device *ndev) /* Allocate memory for the buffer descriptors */ vaddr = dma_alloc_coherent(dev, - sizeof(struct txbd8) * priv->total_tx_ring_size + - sizeof(struct rxbd8) * priv->total_rx_ring_size, - &addr, GFP_KERNEL); - if (!vaddr) { - netif_err(priv, ifup, ndev, - "Could not allocate buffer descriptors!\n"); + (priv->total_tx_ring_size * + sizeof(struct txbd8)) + + (priv->total_rx_ring_size * + sizeof(struct rxbd8)), + &addr, GFP_KERNEL); + if (!vaddr) return -ENOMEM; - } for (i = 0; i < priv->num_tx_queues; i++) { tx_queue = priv->tx_queue[i]; @@ -269,8 +262,8 @@ static int gfar_alloc_skb_resources(struct net_device *ndev) tx_queue->tx_bd_dma_base = addr; tx_queue->dev = ndev; /* enet DMA only understands physical addresses */ - addr += sizeof(struct txbd8) *tx_queue->tx_ring_size; - vaddr += sizeof(struct txbd8) *tx_queue->tx_ring_size; + addr += sizeof(struct txbd8) * tx_queue->tx_ring_size; + vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size; } /* Start the rx descriptor ring where the tx ring leaves off */ @@ -279,20 +272,19 @@ static int gfar_alloc_skb_resources(struct net_device *ndev) rx_queue->rx_bd_base = vaddr; rx_queue->rx_bd_dma_base = addr; rx_queue->dev = ndev; - addr += sizeof (struct rxbd8) * rx_queue->rx_ring_size; - vaddr += sizeof (struct rxbd8) * rx_queue->rx_ring_size; + addr += sizeof(struct rxbd8) * rx_queue->rx_ring_size; + vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size; } /* Setup the skbuff rings */ for (i = 0; i < priv->num_tx_queues; i++) { tx_queue = priv->tx_queue[i]; - tx_queue->tx_skbuff = kmalloc(sizeof(*tx_queue->tx_skbuff) * - tx_queue->tx_ring_size, GFP_KERNEL); - if (!tx_queue->tx_skbuff) { - netif_err(priv, ifup, ndev, - "Could not allocate tx_skbuff\n"); + tx_queue->tx_skbuff = + kmalloc_array(tx_queue->tx_ring_size, + sizeof(*tx_queue->tx_skbuff), + GFP_KERNEL); + if (!tx_queue->tx_skbuff) goto cleanup; - } for (k = 0; k < tx_queue->tx_ring_size; k++) tx_queue->tx_skbuff[k] = NULL; @@ -300,14 +292,12 @@ static int gfar_alloc_skb_resources(struct net_device *ndev) for (i = 0; i < priv->num_rx_queues; i++) { rx_queue = priv->rx_queue[i]; - rx_queue->rx_skbuff = kmalloc(sizeof(*rx_queue->rx_skbuff) * - rx_queue->rx_ring_size, GFP_KERNEL); - - if (!rx_queue->rx_skbuff) { - netif_err(priv, ifup, ndev, - "Could not allocate rx_skbuff\n"); + rx_queue->rx_skbuff = + kmalloc_array(rx_queue->rx_ring_size, + sizeof(*rx_queue->rx_skbuff), + GFP_KERNEL); + if (!rx_queue->rx_skbuff) goto cleanup; - } for (j = 0; j < rx_queue->rx_ring_size; j++) rx_queue->rx_skbuff[j] = NULL; @@ -330,98 +320,143 @@ static void gfar_init_tx_rx_base(struct gfar_private *priv) int i; baddr = ®s->tbase0; - for(i = 0; i < priv->num_tx_queues; i++) { 
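/* (Editor's annotation, not part of the original patch: baddr is a
 * u32 __iomem pointer, so the "baddr += 2" in the loops below advances
 * it by eight bytes per queue, matching the 8-byte stride at which the
 * per-queue TBASEn/RBASEn base-address registers are laid out.)
 */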
+ for (i = 0; i < priv->num_tx_queues; i++) { gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base); - baddr += 2; + baddr += 2; } baddr = ®s->rbase0; - for(i = 0; i < priv->num_rx_queues; i++) { + for (i = 0; i < priv->num_rx_queues; i++) { gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base); - baddr += 2; + baddr += 2; } } -static void gfar_init_mac(struct net_device *ndev) +static void gfar_rx_buff_size_config(struct gfar_private *priv) { - struct gfar_private *priv = netdev_priv(ndev); - struct gfar __iomem *regs = priv->gfargrp[0].regs; - u32 rctrl = 0; - u32 tctrl = 0; - u32 attrs = 0; + int frame_size = priv->ndev->mtu + ETH_HLEN; - /* write the tx/rx base registers */ - gfar_init_tx_rx_base(priv); + /* set this when rx hw offload (TOE) functions are being used */ + priv->uses_rxfcb = 0; - /* Configure the coalescing support */ - gfar_configure_coalescing(priv, 0xFF, 0xFF); + if (priv->ndev->features & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX)) + priv->uses_rxfcb = 1; + + if (priv->hwts_rx_en) + priv->uses_rxfcb = 1; + + if (priv->uses_rxfcb) + frame_size += GMAC_FCB_LEN; + + frame_size += priv->padding; + + frame_size = (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) + + INCREMENTAL_BUFFER_SIZE; + + priv->rx_buffer_size = frame_size; +} + +static void gfar_mac_rx_config(struct gfar_private *priv) +{ + struct gfar __iomem *regs = priv->gfargrp[0].regs; + u32 rctrl = 0; if (priv->rx_filer_enable) { rctrl |= RCTRL_FILREN; /* Program the RIR0 reg with the required distribution */ - gfar_write(®s->rir0, DEFAULT_RIR0); + if (priv->poll_mode == GFAR_SQ_POLLING) + gfar_write(®s->rir0, DEFAULT_2RXQ_RIR0); + else /* GFAR_MQ_POLLING */ + gfar_write(®s->rir0, DEFAULT_8RXQ_RIR0); } - if (ndev->features & NETIF_F_RXCSUM) - rctrl |= RCTRL_CHECKSUMMING; + /* Restore PROMISC mode */ + if (priv->ndev->flags & IFF_PROMISC) + rctrl |= RCTRL_PROM; - if (priv->extended_hash) { - rctrl |= RCTRL_EXTHASH; + if (priv->ndev->features & NETIF_F_RXCSUM) + rctrl |= RCTRL_CHECKSUMMING; - gfar_clear_exact_match(ndev); - rctrl |= RCTRL_EMEN; - } + if (priv->extended_hash) + rctrl |= RCTRL_EXTHASH | RCTRL_EMEN; if (priv->padding) { rctrl &= ~RCTRL_PAL_MASK; rctrl |= RCTRL_PADDING(priv->padding); } - /* Insert receive time stamps into padding alignment bytes */ - if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) { - rctrl &= ~RCTRL_PAL_MASK; - rctrl |= RCTRL_PADDING(8); - priv->padding = 8; - } - /* Enable HW time stamping if requested from user space */ if (priv->hwts_rx_en) rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE; - if (ndev->features & NETIF_F_HW_VLAN_RX) + if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_RX) rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT; /* Init rctrl based on our settings */ gfar_write(®s->rctrl, rctrl); +} - if (ndev->features & NETIF_F_IP_CSUM) +static void gfar_mac_tx_config(struct gfar_private *priv) +{ + struct gfar __iomem *regs = priv->gfargrp[0].regs; + u32 tctrl = 0; + + if (priv->ndev->features & NETIF_F_IP_CSUM) tctrl |= TCTRL_INIT_CSUM; - tctrl |= TCTRL_TXSCHED_PRIO; + if (priv->prio_sched_en) + tctrl |= TCTRL_TXSCHED_PRIO; + else { + tctrl |= TCTRL_TXSCHED_WRRS; + gfar_write(®s->tr03wt, DEFAULT_WRRS_WEIGHT); + gfar_write(®s->tr47wt, DEFAULT_WRRS_WEIGHT); + } - gfar_write(®s->tctrl, tctrl); + if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_TX) + tctrl |= TCTRL_VLINS; - /* Set the extraction length and index */ - attrs = ATTRELI_EL(priv->rx_stash_size) | - ATTRELI_EI(priv->rx_stash_index); + gfar_write(®s->tctrl, tctrl); +} - gfar_write(®s->attreli, attrs); +static void 
gfar_configure_coalescing(struct gfar_private *priv, + unsigned long tx_mask, unsigned long rx_mask) +{ + struct gfar __iomem *regs = priv->gfargrp[0].regs; + u32 __iomem *baddr; - /* Start with defaults, and add stashing or locking - * depending on the approprate variables */ - attrs = ATTR_INIT_SETTINGS; + if (priv->mode == MQ_MG_MODE) { + int i = 0; - if (priv->bd_stash_en) - attrs |= ATTR_BDSTASH; + baddr = ®s->txic0; + for_each_set_bit(i, &tx_mask, priv->num_tx_queues) { + gfar_write(baddr + i, 0); + if (likely(priv->tx_queue[i]->txcoalescing)) + gfar_write(baddr + i, priv->tx_queue[i]->txic); + } - if (priv->rx_stash_size != 0) - attrs |= ATTR_BUFSTASH; + baddr = ®s->rxic0; + for_each_set_bit(i, &rx_mask, priv->num_rx_queues) { + gfar_write(baddr + i, 0); + if (likely(priv->rx_queue[i]->rxcoalescing)) + gfar_write(baddr + i, priv->rx_queue[i]->rxic); + } + } else { + /* Backward compatible case -- even if we enable + * multiple queues, there's only single reg to program + */ + gfar_write(®s->txic, 0); + if (likely(priv->tx_queue[0]->txcoalescing)) + gfar_write(®s->txic, priv->tx_queue[0]->txic); - gfar_write(®s->attr, attrs); + gfar_write(®s->rxic, 0); + if (unlikely(priv->rx_queue[0]->rxcoalescing)) + gfar_write(®s->rxic, priv->rx_queue[0]->rxic); + } +} - gfar_write(®s->fifo_tx_thr, priv->fifo_threshold); - gfar_write(®s->fifo_tx_starve, priv->fifo_starve); - gfar_write(®s->fifo_tx_starve_shutoff, priv->fifo_starve_off); +void gfar_configure_coalescing_all(struct gfar_private *priv) +{ + gfar_configure_coalescing(priv, 0xFF, 0xFF); } static struct net_device_stats *gfar_get_stats(struct net_device *dev) @@ -429,16 +464,16 @@ static struct net_device_stats *gfar_get_stats(struct net_device *dev) struct gfar_private *priv = netdev_priv(dev); unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0; unsigned long tx_packets = 0, tx_bytes = 0; - int i = 0; + int i; for (i = 0; i < priv->num_rx_queues; i++) { rx_packets += priv->rx_queue[i]->stats.rx_packets; - rx_bytes += priv->rx_queue[i]->stats.rx_bytes; + rx_bytes += priv->rx_queue[i]->stats.rx_bytes; rx_dropped += priv->rx_queue[i]->stats.rx_dropped; } dev->stats.rx_packets = rx_packets; - dev->stats.rx_bytes = rx_bytes; + dev->stats.rx_bytes = rx_bytes; dev->stats.rx_dropped = rx_dropped; for (i = 0; i < priv->num_tx_queues; i++) { @@ -446,7 +481,7 @@ static struct net_device_stats *gfar_get_stats(struct net_device *dev) tx_packets += priv->tx_queue[i]->stats.tx_packets; } - dev->stats.tx_bytes = tx_bytes; + dev->stats.tx_bytes = tx_bytes; dev->stats.tx_packets = tx_packets; return &dev->stats; @@ -469,63 +504,91 @@ static const struct net_device_ops gfar_netdev_ops = { #endif }; -void lock_rx_qs(struct gfar_private *priv) +static void gfar_ints_disable(struct gfar_private *priv) { - int i = 0x0; + int i; + for (i = 0; i < priv->num_grps; i++) { + struct gfar __iomem *regs = priv->gfargrp[i].regs; + /* Clear IEVENT */ + gfar_write(®s->ievent, IEVENT_INIT_CLEAR); - for (i = 0; i < priv->num_rx_queues; i++) - spin_lock(&priv->rx_queue[i]->rxlock); + /* Initialize IMASK */ + gfar_write(®s->imask, IMASK_INIT_CLEAR); + } } -void lock_tx_qs(struct gfar_private *priv) +static void gfar_ints_enable(struct gfar_private *priv) { - int i = 0x0; - - for (i = 0; i < priv->num_tx_queues; i++) - spin_lock(&priv->tx_queue[i]->txlock); + int i; + for (i = 0; i < priv->num_grps; i++) { + struct gfar __iomem *regs = priv->gfargrp[i].regs; + /* Unmask the interrupts we look for */ + gfar_write(®s->imask, IMASK_DEFAULT); + } } -void 
unlock_rx_qs(struct gfar_private *priv) +void lock_tx_qs(struct gfar_private *priv) { - int i = 0x0; + int i; - for (i = 0; i < priv->num_rx_queues; i++) - spin_unlock(&priv->rx_queue[i]->rxlock); + for (i = 0; i < priv->num_tx_queues; i++) + spin_lock(&priv->tx_queue[i]->txlock); } void unlock_tx_qs(struct gfar_private *priv) { - int i = 0x0; + int i; for (i = 0; i < priv->num_tx_queues; i++) spin_unlock(&priv->tx_queue[i]->txlock); } -static bool gfar_is_vlan_on(struct gfar_private *priv) +static int gfar_alloc_tx_queues(struct gfar_private *priv) { - return (priv->ndev->features & NETIF_F_HW_VLAN_RX) || - (priv->ndev->features & NETIF_F_HW_VLAN_TX); + int i; + + for (i = 0; i < priv->num_tx_queues; i++) { + priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q), + GFP_KERNEL); + if (!priv->tx_queue[i]) + return -ENOMEM; + + priv->tx_queue[i]->tx_skbuff = NULL; + priv->tx_queue[i]->qindex = i; + priv->tx_queue[i]->dev = priv->ndev; + spin_lock_init(&(priv->tx_queue[i]->txlock)); + } + return 0; } -/* Returns 1 if incoming frames use an FCB */ -static inline int gfar_uses_fcb(struct gfar_private *priv) +static int gfar_alloc_rx_queues(struct gfar_private *priv) { - return gfar_is_vlan_on(priv) || - (priv->ndev->features & NETIF_F_RXCSUM) || - (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER); + int i; + + for (i = 0; i < priv->num_rx_queues; i++) { + priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q), + GFP_KERNEL); + if (!priv->rx_queue[i]) + return -ENOMEM; + + priv->rx_queue[i]->rx_skbuff = NULL; + priv->rx_queue[i]->qindex = i; + priv->rx_queue[i]->dev = priv->ndev; + } + return 0; } -static void free_tx_pointers(struct gfar_private *priv) +static void gfar_free_tx_queues(struct gfar_private *priv) { - int i = 0; + int i; for (i = 0; i < priv->num_tx_queues; i++) kfree(priv->tx_queue[i]); } -static void free_rx_pointers(struct gfar_private *priv) +static void gfar_free_rx_queues(struct gfar_private *priv) { - int i = 0; + int i; for (i = 0; i < priv->num_rx_queues; i++) kfree(priv->rx_queue[i]); @@ -533,69 +596,124 @@ static void free_rx_pointers(struct gfar_private *priv) static void unmap_group_regs(struct gfar_private *priv) { - int i = 0; + int i; for (i = 0; i < MAXGROUPS; i++) if (priv->gfargrp[i].regs) iounmap(priv->gfargrp[i].regs); } -static void disable_napi(struct gfar_private *priv) +static void free_gfar_dev(struct gfar_private *priv) { - int i = 0; + int i, j; for (i = 0; i < priv->num_grps; i++) - napi_disable(&priv->gfargrp[i].napi); + for (j = 0; j < GFAR_NUM_IRQS; j++) { + kfree(priv->gfargrp[i].irqinfo[j]); + priv->gfargrp[i].irqinfo[j] = NULL; + } + + free_netdev(priv->ndev); +} + +static void disable_napi(struct gfar_private *priv) +{ + int i; + + for (i = 0; i < priv->num_grps; i++) { + napi_disable(&priv->gfargrp[i].napi_rx); + napi_disable(&priv->gfargrp[i].napi_tx); + } } static void enable_napi(struct gfar_private *priv) { - int i = 0; + int i; - for (i = 0; i < priv->num_grps; i++) - napi_enable(&priv->gfargrp[i].napi); + for (i = 0; i < priv->num_grps; i++) { + napi_enable(&priv->gfargrp[i].napi_rx); + napi_enable(&priv->gfargrp[i].napi_tx); + } } static int gfar_parse_group(struct device_node *np, - struct gfar_private *priv, const char *model) + struct gfar_private *priv, const char *model) { - u32 *queue_mask; + struct gfar_priv_grp *grp = &priv->gfargrp[priv->num_grps]; + int i; + + for (i = 0; i < GFAR_NUM_IRQS; i++) { + grp->irqinfo[i] = kzalloc(sizeof(struct gfar_irqinfo), + GFP_KERNEL); + if (!grp->irqinfo[i]) + return -ENOMEM; + } - 
priv->gfargrp[priv->num_grps].regs = of_iomap(np, 0); - if (!priv->gfargrp[priv->num_grps].regs) + grp->regs = of_iomap(np, 0); + if (!grp->regs) return -ENOMEM; - priv->gfargrp[priv->num_grps].interruptTransmit = - irq_of_parse_and_map(np, 0); + gfar_irq(grp, TX)->irq = irq_of_parse_and_map(np, 0); /* If we aren't the FEC we have multiple interrupts */ if (model && strcasecmp(model, "FEC")) { - priv->gfargrp[priv->num_grps].interruptReceive = - irq_of_parse_and_map(np, 1); - priv->gfargrp[priv->num_grps].interruptError = - irq_of_parse_and_map(np,2); - if (priv->gfargrp[priv->num_grps].interruptTransmit == NO_IRQ || - priv->gfargrp[priv->num_grps].interruptReceive == NO_IRQ || - priv->gfargrp[priv->num_grps].interruptError == NO_IRQ) + gfar_irq(grp, RX)->irq = irq_of_parse_and_map(np, 1); + gfar_irq(grp, ER)->irq = irq_of_parse_and_map(np, 2); + if (gfar_irq(grp, TX)->irq == NO_IRQ || + gfar_irq(grp, RX)->irq == NO_IRQ || + gfar_irq(grp, ER)->irq == NO_IRQ) return -EINVAL; } - priv->gfargrp[priv->num_grps].grp_id = priv->num_grps; - priv->gfargrp[priv->num_grps].priv = priv; - spin_lock_init(&priv->gfargrp[priv->num_grps].grplock); - if(priv->mode == MQ_MG_MODE) { - queue_mask = (u32 *)of_get_property(np, - "fsl,rx-bit-map", NULL); - priv->gfargrp[priv->num_grps].rx_bit_map = - queue_mask ? *queue_mask :(DEFAULT_MAPPING >> priv->num_grps); - queue_mask = (u32 *)of_get_property(np, - "fsl,tx-bit-map", NULL); - priv->gfargrp[priv->num_grps].tx_bit_map = - queue_mask ? *queue_mask : (DEFAULT_MAPPING >> priv->num_grps); + grp->priv = priv; + spin_lock_init(&grp->grplock); + if (priv->mode == MQ_MG_MODE) { + u32 *rxq_mask, *txq_mask; + rxq_mask = (u32 *)of_get_property(np, "fsl,rx-bit-map", NULL); + txq_mask = (u32 *)of_get_property(np, "fsl,tx-bit-map", NULL); + + if (priv->poll_mode == GFAR_SQ_POLLING) { + /* One Q per interrupt group: Q0 to G0, Q1 to G1 */ + grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps); + grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps); + } else { /* GFAR_MQ_POLLING */ + grp->rx_bit_map = rxq_mask ? + *rxq_mask : (DEFAULT_MAPPING >> priv->num_grps); + grp->tx_bit_map = txq_mask ? 
+ *txq_mask : (DEFAULT_MAPPING >> priv->num_grps); + } } else { - priv->gfargrp[priv->num_grps].rx_bit_map = 0xFF; - priv->gfargrp[priv->num_grps].tx_bit_map = 0xFF; + grp->rx_bit_map = 0xFF; + grp->tx_bit_map = 0xFF; } + + /* bit_map's MSB is q0 (from q0 to q7) but, for_each_set_bit parses + * right to left, so we need to revert the 8 bits to get the q index + */ + grp->rx_bit_map = bitrev8(grp->rx_bit_map); + grp->tx_bit_map = bitrev8(grp->tx_bit_map); + + /* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values, + * also assign queues to groups + */ + for_each_set_bit(i, &grp->rx_bit_map, priv->num_rx_queues) { + if (!grp->rx_queue) + grp->rx_queue = priv->rx_queue[i]; + grp->num_rx_queues++; + grp->rstat |= (RSTAT_CLEAR_RHALT >> i); + priv->rqueue |= ((RQUEUE_EN0 | RQUEUE_EX0) >> i); + priv->rx_queue[i]->grp = grp; + } + + for_each_set_bit(i, &grp->tx_bit_map, priv->num_tx_queues) { + if (!grp->tx_queue) + grp->tx_queue = priv->tx_queue[i]; + grp->num_tx_queues++; + grp->tstat |= (TSTAT_CLEAR_THALT >> i); + priv->tqueue |= (TQUEUE_EN0 >> i); + priv->tx_queue[i]->grp = grp; + } + priv->num_grps++; return 0; @@ -616,13 +734,45 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) const u32 *stash_idx; unsigned int num_tx_qs, num_rx_qs; u32 *tx_queues, *rx_queues; + unsigned short mode, poll_mode; if (!np || !of_device_is_available(np)) return -ENODEV; - /* parse the num of tx and rx queues */ + if (of_device_is_compatible(np, "fsl,etsec2")) { + mode = MQ_MG_MODE; + poll_mode = GFAR_SQ_POLLING; + } else { + mode = SQ_SG_MODE; + poll_mode = GFAR_SQ_POLLING; + } + + /* parse the num of HW tx and rx queues */ tx_queues = (u32 *)of_get_property(np, "fsl,num_tx_queues", NULL); - num_tx_qs = tx_queues ? *tx_queues : 1; + rx_queues = (u32 *)of_get_property(np, "fsl,num_rx_queues", NULL); + + if (mode == SQ_SG_MODE) { + num_tx_qs = 1; + num_rx_qs = 1; + } else { /* MQ_MG_MODE */ + /* get the actual number of supported groups */ + unsigned int num_grps = of_get_available_child_count(np); + + if (num_grps == 0 || num_grps > MAXGROUPS) { + dev_err(&ofdev->dev, "Invalid # of int groups(%d)\n", + num_grps); + pr_err("Cannot do alloc_etherdev, aborting\n"); + return -EINVAL; + } + + if (poll_mode == GFAR_SQ_POLLING) { + num_tx_qs = num_grps; /* one txq per int group */ + num_rx_qs = num_grps; /* one rxq per int group */ + } else { /* GFAR_MQ_POLLING */ + num_tx_qs = tx_queues ? *tx_queues : 1; + num_rx_qs = rx_queues ? *rx_queues : 1; + } + } if (num_tx_qs > MAX_TX_QS) { pr_err("num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n", @@ -631,9 +781,6 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) return -EINVAL; } - rx_queues = (u32 *)of_get_property(np, "fsl,num_rx_queues", NULL); - num_rx_qs = rx_queues ? 
*rx_queues : 1; - if (num_rx_qs > MAX_RX_QS) { pr_err("num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n", num_rx_qs, MAX_RX_QS); @@ -647,15 +794,24 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) return -ENOMEM; priv = netdev_priv(dev); - priv->node = ofdev->dev.of_node; priv->ndev = dev; + priv->mode = mode; + priv->poll_mode = poll_mode; + priv->num_tx_queues = num_tx_qs; netif_set_real_num_rx_queues(dev, num_rx_qs); priv->num_rx_queues = num_rx_qs; - priv->num_grps = 0x0; - /* Init Rx queue filer rule set linked list*/ + err = gfar_alloc_tx_queues(priv); + if (err) + goto tx_alloc_failed; + + err = gfar_alloc_rx_queues(priv); + if (err) + goto rx_alloc_failed; + + /* Init Rx queue filer rule set linked list */ INIT_LIST_HEAD(&priv->rx_list.list); priv->rx_list.count = 0; mutex_init(&priv->rx_queue_access); @@ -666,52 +822,18 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) priv->gfargrp[i].regs = NULL; /* Parse and initialize group specific information */ - if (of_device_is_compatible(np, "fsl,etsec2")) { - priv->mode = MQ_MG_MODE; + if (priv->mode == MQ_MG_MODE) { for_each_child_of_node(np, child) { err = gfar_parse_group(child, priv, model); if (err) goto err_grp_init; } - } else { - priv->mode = SQ_SG_MODE; + } else { /* SQ_SG_MODE */ err = gfar_parse_group(np, priv, model); - if(err) + if (err) goto err_grp_init; } - for (i = 0; i < priv->num_tx_queues; i++) - priv->tx_queue[i] = NULL; - for (i = 0; i < priv->num_rx_queues; i++) - priv->rx_queue[i] = NULL; - - for (i = 0; i < priv->num_tx_queues; i++) { - priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q), - GFP_KERNEL); - if (!priv->tx_queue[i]) { - err = -ENOMEM; - goto tx_alloc_failed; - } - priv->tx_queue[i]->tx_skbuff = NULL; - priv->tx_queue[i]->qindex = i; - priv->tx_queue[i]->dev = dev; - spin_lock_init(&(priv->tx_queue[i]->txlock)); - } - - for (i = 0; i < priv->num_rx_queues; i++) { - priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q), - GFP_KERNEL); - if (!priv->rx_queue[i]) { - err = -ENOMEM; - goto rx_alloc_failed; - } - priv->rx_queue[i]->rx_skbuff = NULL; - priv->rx_queue[i]->qindex = i; - priv->rx_queue[i]->dev = dev; - spin_lock_init(&(priv->rx_queue[i]->rxlock)); - } - - stash = of_get_property(np, "bd-stash", NULL); if (stash) { @@ -733,27 +855,26 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING; mac_addr = of_get_mac_address(np); + if (mac_addr) memcpy(dev->dev_addr, mac_addr, ETH_ALEN); if (model && !strcasecmp(model, "TSEC")) - priv->device_flags = - FSL_GIANFAR_DEV_HAS_GIGABIT | - FSL_GIANFAR_DEV_HAS_COALESCE | - FSL_GIANFAR_DEV_HAS_RMON | - FSL_GIANFAR_DEV_HAS_MULTI_INTR; + priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT | + FSL_GIANFAR_DEV_HAS_COALESCE | + FSL_GIANFAR_DEV_HAS_RMON | + FSL_GIANFAR_DEV_HAS_MULTI_INTR; + if (model && !strcasecmp(model, "eTSEC")) - priv->device_flags = - FSL_GIANFAR_DEV_HAS_GIGABIT | - FSL_GIANFAR_DEV_HAS_COALESCE | - FSL_GIANFAR_DEV_HAS_RMON | - FSL_GIANFAR_DEV_HAS_MULTI_INTR | - FSL_GIANFAR_DEV_HAS_PADDING | - FSL_GIANFAR_DEV_HAS_CSUM | - FSL_GIANFAR_DEV_HAS_VLAN | - FSL_GIANFAR_DEV_HAS_MAGIC_PACKET | - FSL_GIANFAR_DEV_HAS_EXTENDED_HASH | - FSL_GIANFAR_DEV_HAS_TIMER; + priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT | + FSL_GIANFAR_DEV_HAS_COALESCE | + FSL_GIANFAR_DEV_HAS_RMON | + FSL_GIANFAR_DEV_HAS_MULTI_INTR | + FSL_GIANFAR_DEV_HAS_CSUM | + FSL_GIANFAR_DEV_HAS_VLAN | + FSL_GIANFAR_DEV_HAS_MAGIC_PACKET 
| + FSL_GIANFAR_DEV_HAS_EXTENDED_HASH | + FSL_GIANFAR_DEV_HAS_TIMER; ctype = of_get_property(np, "phy-connection-type", NULL); @@ -768,23 +889,33 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) priv->phy_node = of_parse_phandle(np, "phy-handle", 0); + /* In the case of a fixed PHY, the DT node associated + * to the PHY is the Ethernet MAC DT node. + */ + if (of_phy_is_fixed_link(np)) { + err = of_phy_register_fixed_link(np); + if (err) + goto err_grp_init; + + priv->phy_node = np; + } + /* Find the TBI PHY. If it's not there, we don't support SGMII */ priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0); return 0; -rx_alloc_failed: - free_rx_pointers(priv); -tx_alloc_failed: - free_tx_pointers(priv); err_grp_init: unmap_group_regs(priv); - free_netdev(dev); +rx_alloc_failed: + gfar_free_rx_queues(priv); +tx_alloc_failed: + gfar_free_tx_queues(priv); + free_gfar_dev(priv); return err; } -static int gfar_hwtstamp_ioctl(struct net_device *netdev, - struct ifreq *ifr, int cmd) +static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr) { struct hwtstamp_config config; struct gfar_private *priv = netdev_priv(netdev); @@ -812,18 +943,16 @@ static int gfar_hwtstamp_ioctl(struct net_device *netdev, switch (config.rx_filter) { case HWTSTAMP_FILTER_NONE: if (priv->hwts_rx_en) { - stop_gfar(netdev); priv->hwts_rx_en = 0; - startup_gfar(netdev); + reset_gfar(netdev); } break; default: if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)) return -ERANGE; if (!priv->hwts_rx_en) { - stop_gfar(netdev); priv->hwts_rx_en = 1; - startup_gfar(netdev); + reset_gfar(netdev); } config.rx_filter = HWTSTAMP_FILTER_ALL; break; @@ -833,7 +962,20 @@ static int gfar_hwtstamp_ioctl(struct net_device *netdev, -EFAULT : 0; } -/* Ioctl MII Interface */ +static int gfar_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr) +{ + struct hwtstamp_config config; + struct gfar_private *priv = netdev_priv(netdev); + + config.flags = 0; + config.tx_type = priv->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF; + config.rx_filter = (priv->hwts_rx_en ? + HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE); + + return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? 
+ -EFAULT : 0; +} + static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { struct gfar_private *priv = netdev_priv(dev); @@ -842,7 +984,9 @@ static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) return -EINVAL; if (cmd == SIOCSHWTSTAMP) - return gfar_hwtstamp_ioctl(dev, rq, cmd); + return gfar_hwtstamp_set(dev, rq); + if (cmd == SIOCGHWTSTAMP) + return gfar_hwtstamp_get(dev, rq); if (!priv->phydev) return -ENODEV; @@ -850,18 +994,6 @@ static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) return phy_mii_ioctl(priv->phydev, rq, cmd); } -static unsigned int reverse_bitmap(unsigned int bit_map, unsigned int max_qs) -{ - unsigned int new_bit_map = 0x0; - int mask = 0x1 << (max_qs - 1), i; - for (i = 0; i < max_qs; i++) { - if (bit_map & mask) - new_bit_map = new_bit_map + (1 << i); - mask = mask >> 0x1; - } - return new_bit_map; -} - static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar, u32 class) { @@ -929,9 +1061,8 @@ static void gfar_init_filer_table(struct gfar_private *priv) } } -static void gfar_detect_errata(struct gfar_private *priv) +static void __gfar_detect_errata_83xx(struct gfar_private *priv) { - struct device *dev = &priv->ofdev->dev; unsigned int pvr = mfspr(SPRN_PVR); unsigned int svr = mfspr(SPRN_SVR); unsigned int mod = (svr >> 16) & 0xfff6; /* w/o E suffix */ @@ -939,110 +1070,181 @@ static void gfar_detect_errata(struct gfar_private *priv) /* MPC8313 Rev 2.0 and higher; All MPC837x */ if ((pvr == 0x80850010 && mod == 0x80b0 && rev >= 0x0020) || - (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0)) + (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0)) priv->errata |= GFAR_ERRATA_74; /* MPC8313 and MPC837x all rev */ if ((pvr == 0x80850010 && mod == 0x80b0) || - (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0)) + (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0)) priv->errata |= GFAR_ERRATA_76; - /* MPC8313 and MPC837x all rev */ - if ((pvr == 0x80850010 && mod == 0x80b0) || - (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0)) - priv->errata |= GFAR_ERRATA_A002; + /* MPC8313 Rev < 2.0 */ + if (pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020) + priv->errata |= GFAR_ERRATA_12; +} - /* MPC8313 Rev < 2.0, MPC8548 rev 2.0 */ - if ((pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020) || - (pvr == 0x80210020 && mod == 0x8030 && rev == 0x0020)) +static void __gfar_detect_errata_85xx(struct gfar_private *priv) +{ + unsigned int svr = mfspr(SPRN_SVR); + + if ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) == 0x20)) priv->errata |= GFAR_ERRATA_12; + if (((SVR_SOC_VER(svr) == SVR_P2020) && (SVR_REV(svr) < 0x20)) || + ((SVR_SOC_VER(svr) == SVR_P2010) && (SVR_REV(svr) < 0x20))) + priv->errata |= GFAR_ERRATA_76; /* aka eTSEC 20 */ +} + +static void gfar_detect_errata(struct gfar_private *priv) +{ + struct device *dev = &priv->ofdev->dev; + + /* no plans to fix */ + priv->errata |= GFAR_ERRATA_A002; + + if (pvr_version_is(PVR_VER_E500V1) || pvr_version_is(PVR_VER_E500V2)) + __gfar_detect_errata_85xx(priv); + else /* non-mpc85xx parts, i.e. 
e300 core based */ + __gfar_detect_errata_83xx(priv); if (priv->errata) dev_info(dev, "enabled errata workarounds, flags: 0x%x\n", priv->errata); } -/* Set up the ethernet device structure, private data, - * and anything else we need before we start */ -static int gfar_probe(struct platform_device *ofdev) +void gfar_mac_reset(struct gfar_private *priv) { + struct gfar __iomem *regs = priv->gfargrp[0].regs; u32 tempval; - struct net_device *dev = NULL; - struct gfar_private *priv = NULL; - struct gfar __iomem *regs = NULL; - int err = 0, i, grp_idx = 0; - int len_devname; - u32 rstat = 0, tstat = 0, rqueue = 0, tqueue = 0; - u32 isrg = 0; - u32 __iomem *baddr; - - err = gfar_of_init(ofdev, &dev); - if (err) - return err; - - priv = netdev_priv(dev); - priv->ndev = dev; - priv->ofdev = ofdev; - priv->node = ofdev->dev.of_node; - SET_NETDEV_DEV(dev, &ofdev->dev); - - spin_lock_init(&priv->bflock); - INIT_WORK(&priv->reset_task, gfar_reset_task); + /* Reset MAC layer */ + gfar_write(®s->maccfg1, MACCFG1_SOFT_RESET); - dev_set_drvdata(&ofdev->dev, priv); - regs = priv->gfargrp[0].regs; + /* We need to delay at least 3 TX clocks */ + udelay(3); - gfar_detect_errata(priv); + /* the soft reset bit is not self-resetting, so we need to + * clear it before resuming normal operation + */ + gfar_write(®s->maccfg1, 0); - /* Stop the DMA engine now, in case it was running before */ - /* (The firmware could have used it, and left it running). */ - gfar_halt(dev); + udelay(3); - /* Reset MAC layer */ - gfar_write(®s->maccfg1, MACCFG1_SOFT_RESET); + /* Compute rx_buff_size based on config flags */ + gfar_rx_buff_size_config(priv); - /* We need to delay at least 3 TX clocks */ - udelay(2); + /* Initialize the max receive frame/buffer lengths */ + gfar_write(®s->maxfrm, priv->rx_buffer_size); + gfar_write(®s->mrblr, priv->rx_buffer_size); - tempval = (MACCFG1_TX_FLOW | MACCFG1_RX_FLOW); - gfar_write(®s->maccfg1, tempval); + /* Initialize the Minimum Frame Length Register */ + gfar_write(®s->minflr, MINFLR_INIT_SETTINGS); /* Initialize MACCFG2. 
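 * (Editor's annotation, not part of the original patch: with the
 * default 1536-byte DEFAULT_RX_BUFFER_SIZE the extra bits below stay
 * clear; only a jumbo-frame rx_buffer_size, or the GFAR_ERRATA_74
 * workaround, sets MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK.)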
*/ tempval = MACCFG2_INIT_SETTINGS; - if (gfar_has_errata(priv, GFAR_ERRATA_74)) + + /* If the mtu is larger than the max size for standard + * ethernet frames (ie, a jumbo frame), then set maccfg2 + * to allow huge frames, and to check the length + */ + if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE || + gfar_has_errata(priv, GFAR_ERRATA_74)) tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK; + gfar_write(®s->maccfg2, tempval); + /* Clear mac addr hash registers */ + gfar_write(®s->igaddr0, 0); + gfar_write(®s->igaddr1, 0); + gfar_write(®s->igaddr2, 0); + gfar_write(®s->igaddr3, 0); + gfar_write(®s->igaddr4, 0); + gfar_write(®s->igaddr5, 0); + gfar_write(®s->igaddr6, 0); + gfar_write(®s->igaddr7, 0); + + gfar_write(®s->gaddr0, 0); + gfar_write(®s->gaddr1, 0); + gfar_write(®s->gaddr2, 0); + gfar_write(®s->gaddr3, 0); + gfar_write(®s->gaddr4, 0); + gfar_write(®s->gaddr5, 0); + gfar_write(®s->gaddr6, 0); + gfar_write(®s->gaddr7, 0); + + if (priv->extended_hash) + gfar_clear_exact_match(priv->ndev); + + gfar_mac_rx_config(priv); + + gfar_mac_tx_config(priv); + + gfar_set_mac_address(priv->ndev); + + gfar_set_multi(priv->ndev); + + /* clear ievent and imask before configuring coalescing */ + gfar_ints_disable(priv); + + /* Configure the coalescing support */ + gfar_configure_coalescing_all(priv); +} + +static void gfar_hw_init(struct gfar_private *priv) +{ + struct gfar __iomem *regs = priv->gfargrp[0].regs; + u32 attrs; + + /* Stop the DMA engine now, in case it was running before + * (The firmware could have used it, and left it running). + */ + gfar_halt(priv); + + gfar_mac_reset(priv); + + /* Zero out the rmon mib registers if it has them */ + if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) { + memset_io(&(regs->rmon), 0, sizeof(struct rmon_mib)); + + /* Mask off the CAM interrupts */ + gfar_write(®s->rmon.cam1, 0xffffffff); + gfar_write(®s->rmon.cam2, 0xffffffff); + } + /* Initialize ECNTRL */ gfar_write(®s->ecntrl, ECNTRL_INIT_SETTINGS); - /* Set the dev->base_addr to the gfar reg region */ - dev->base_addr = (unsigned long) regs; + /* Set the extraction length and index */ + attrs = ATTRELI_EL(priv->rx_stash_size) | + ATTRELI_EI(priv->rx_stash_index); - SET_NETDEV_DEV(dev, &ofdev->dev); + gfar_write(®s->attreli, attrs); - /* Fill in the dev structure */ - dev->watchdog_timeo = TX_TIMEOUT; - dev->mtu = 1500; - dev->netdev_ops = &gfar_netdev_ops; - dev->ethtool_ops = &gfar_ethtool_ops; + /* Start with defaults, and add stashing + * depending on driver parameters + */ + attrs = ATTR_INIT_SETTINGS; - /* Register for napi ...We are registering NAPI for each grp */ - for (i = 0; i < priv->num_grps; i++) - netif_napi_add(dev, &priv->gfargrp[i].napi, gfar_poll, GFAR_DEV_WEIGHT); + if (priv->bd_stash_en) + attrs |= ATTR_BDSTASH; - if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) { - dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | - NETIF_F_RXCSUM; - dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | - NETIF_F_RXCSUM | NETIF_F_HIGHDMA; - } + if (priv->rx_stash_size != 0) + attrs |= ATTR_BUFSTASH; - if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) { - dev->hw_features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; - dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; - } + gfar_write(®s->attr, attrs); + + /* FIFO configs */ + gfar_write(®s->fifo_tx_thr, DEFAULT_FIFO_TX_THR); + gfar_write(®s->fifo_tx_starve, DEFAULT_FIFO_TX_STARVE); + gfar_write(®s->fifo_tx_starve_shutoff, DEFAULT_FIFO_TX_STARVE_OFF); + + /* Program the interrupt steering regs, only for MG devices */ + if 
(priv->num_grps > 1) + gfar_write_isrg(priv); +} + +static void gfar_init_addr_hash_table(struct gfar_private *priv) +{ + struct gfar __iomem *regs = priv->gfargrp[0].regs; if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) { priv->extended_hash = 1; @@ -1078,64 +1280,81 @@ static int gfar_probe(struct platform_device *ofdev) priv->hash_regs[6] = ®s->gaddr6; priv->hash_regs[7] = ®s->gaddr7; } +} - if (priv->device_flags & FSL_GIANFAR_DEV_HAS_PADDING) - priv->padding = DEFAULT_PADDING; - else - priv->padding = 0; +/* Set up the ethernet device structure, private data, + * and anything else we need before we start + */ +static int gfar_probe(struct platform_device *ofdev) +{ + struct net_device *dev = NULL; + struct gfar_private *priv = NULL; + int err = 0, i; - if (dev->features & NETIF_F_IP_CSUM || - priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) - dev->hard_header_len += GMAC_FCB_LEN; + err = gfar_of_init(ofdev, &dev); - /* Program the isrg regs only if number of grps > 1 */ - if (priv->num_grps > 1) { - baddr = ®s->isrg0; - for (i = 0; i < priv->num_grps; i++) { - isrg |= (priv->gfargrp[i].rx_bit_map << ISRG_SHIFT_RX); - isrg |= (priv->gfargrp[i].tx_bit_map << ISRG_SHIFT_TX); - gfar_write(baddr, isrg); - baddr++; - isrg = 0x0; + if (err) + return err; + + priv = netdev_priv(dev); + priv->ndev = dev; + priv->ofdev = ofdev; + priv->dev = &ofdev->dev; + SET_NETDEV_DEV(dev, &ofdev->dev); + + spin_lock_init(&priv->bflock); + INIT_WORK(&priv->reset_task, gfar_reset_task); + + platform_set_drvdata(ofdev, priv); + + gfar_detect_errata(priv); + + /* Set the dev->base_addr to the gfar reg region */ + dev->base_addr = (unsigned long) priv->gfargrp[0].regs; + + /* Fill in the dev structure */ + dev->watchdog_timeo = TX_TIMEOUT; + dev->mtu = 1500; + dev->netdev_ops = &gfar_netdev_ops; + dev->ethtool_ops = &gfar_ethtool_ops; + + /* Register for napi ...We are registering NAPI for each grp */ + for (i = 0; i < priv->num_grps; i++) { + if (priv->poll_mode == GFAR_SQ_POLLING) { + netif_napi_add(dev, &priv->gfargrp[i].napi_rx, + gfar_poll_rx_sq, GFAR_DEV_WEIGHT); + netif_napi_add(dev, &priv->gfargrp[i].napi_tx, + gfar_poll_tx_sq, 2); + } else { + netif_napi_add(dev, &priv->gfargrp[i].napi_rx, + gfar_poll_rx, GFAR_DEV_WEIGHT); + netif_napi_add(dev, &priv->gfargrp[i].napi_tx, + gfar_poll_tx, 2); } } - /* Need to reverse the bit maps as bit_map's MSB is q0 - * but, for_each_set_bit parses from right to left, which - * basically reverses the queue numbers */ - for (i = 0; i< priv->num_grps; i++) { - priv->gfargrp[i].tx_bit_map = reverse_bitmap( - priv->gfargrp[i].tx_bit_map, MAX_TX_QS); - priv->gfargrp[i].rx_bit_map = reverse_bitmap( - priv->gfargrp[i].rx_bit_map, MAX_RX_QS); + if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) { + dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | + NETIF_F_RXCSUM; + dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | + NETIF_F_RXCSUM | NETIF_F_HIGHDMA; } - /* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values, - * also assign queues to groups */ - for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) { - priv->gfargrp[grp_idx].num_rx_queues = 0x0; - for_each_set_bit(i, &priv->gfargrp[grp_idx].rx_bit_map, - priv->num_rx_queues) { - priv->gfargrp[grp_idx].num_rx_queues++; - priv->rx_queue[i]->grp = &priv->gfargrp[grp_idx]; - rstat = rstat | (RSTAT_CLEAR_RHALT >> i); - rqueue = rqueue | ((RQUEUE_EN0 | RQUEUE_EX0) >> i); - } - priv->gfargrp[grp_idx].num_tx_queues = 0x0; - for_each_set_bit(i, &priv->gfargrp[grp_idx].tx_bit_map, - priv->num_tx_queues) { - 
priv->gfargrp[grp_idx].num_tx_queues++; - priv->tx_queue[i]->grp = &priv->gfargrp[grp_idx]; - tstat = tstat | (TSTAT_CLEAR_THALT >> i); - tqueue = tqueue | (TQUEUE_EN0 >> i); - } - priv->gfargrp[grp_idx].rstat = rstat; - priv->gfargrp[grp_idx].tstat = tstat; - rstat = tstat =0; + if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) { + dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX; + dev->features |= NETIF_F_HW_VLAN_CTAG_RX; } - gfar_write(&regs->rqueue, rqueue); - gfar_write(&regs->tqueue, tqueue); + gfar_init_addr_hash_table(priv); + + /* Insert receive time stamps into padding alignment bytes */ + if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) + priv->padding = 8; + + if (dev->features & NETIF_F_IP_CSUM || + priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) + dev->needed_headroom = GMAC_FCB_LEN; priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE; @@ -1153,10 +1372,17 @@ static int gfar_probe(struct platform_device *ofdev) priv->rx_queue[i]->rxic = DEFAULT_RXIC; } - /* always enable rx filer*/ + /* always enable rx filer */ priv->rx_filer_enable = 1; /* Enable most messages by default */ priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1; + /* use pritority h/w tx queue scheduling for single queue devices */ + if (priv->num_tx_queues == 1) + priv->prio_sched_en = 1; + + set_bit(GFAR_DOWN, &priv->state); + + gfar_hw_init(priv); /* Carrier starts down, phylib will bring it up */ netif_carrier_off(dev); @@ -1169,61 +1395,37 @@ static int gfar_probe(struct platform_device *ofdev) } device_init_wakeup(&dev->dev, - priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); + priv->device_flags & + FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); /* fill out IRQ number and name fields */ - len_devname = strlen(dev->name); for (i = 0; i < priv->num_grps; i++) { - strncpy(&priv->gfargrp[i].int_name_tx[0], dev->name, - len_devname); + struct gfar_priv_grp *grp = &priv->gfargrp[i]; if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { - strncpy(&priv->gfargrp[i].int_name_tx[len_devname], - "_g", sizeof("_g")); - priv->gfargrp[i].int_name_tx[ - strlen(priv->gfargrp[i].int_name_tx)] = i+48; - strncpy(&priv->gfargrp[i].int_name_tx[strlen( - priv->gfargrp[i].int_name_tx)], - "_tx", sizeof("_tx") + 1); - - strncpy(&priv->gfargrp[i].int_name_rx[0], dev->name, - len_devname); - strncpy(&priv->gfargrp[i].int_name_rx[len_devname], - "_g", sizeof("_g")); - priv->gfargrp[i].int_name_rx[ - strlen(priv->gfargrp[i].int_name_rx)] = i+48; - strncpy(&priv->gfargrp[i].int_name_rx[strlen( - priv->gfargrp[i].int_name_rx)], - "_rx", sizeof("_rx") + 1); - - strncpy(&priv->gfargrp[i].int_name_er[0], dev->name, - len_devname); - strncpy(&priv->gfargrp[i].int_name_er[len_devname], - "_g", sizeof("_g")); - priv->gfargrp[i].int_name_er[strlen( - priv->gfargrp[i].int_name_er)] = i+48; - strncpy(&priv->gfargrp[i].int_name_er[strlen(\ - priv->gfargrp[i].int_name_er)], - "_er", sizeof("_er") + 1); + sprintf(gfar_irq(grp, TX)->name, "%s%s%c%s", + dev->name, "_g", '0' + i, "_tx"); + sprintf(gfar_irq(grp, RX)->name, "%s%s%c%s", + dev->name, "_g", '0' + i, "_rx"); + sprintf(gfar_irq(grp, ER)->name, "%s%s%c%s", + dev->name, "_g", '0' + i, "_er"); } else - priv->gfargrp[i].int_name_tx[len_devname] = '\0'; + strcpy(gfar_irq(grp, TX)->name, dev->name); } /* Initialize the filer table */ gfar_init_filer_table(priv); - /* Create all the sysfs files */ - gfar_init_sysfs(dev); - /* Print out the device info */ netdev_info(dev, "mac: %pM\n", dev->dev_addr); - /* Even more device info helps when determining which kernel */ - /* provided which
set of benchmarks. */ + /* Even more device info helps when determining which kernel + * provided which set of benchmarks. + */ netdev_info(dev, "Running with NAPI enabled\n"); for (i = 0; i < priv->num_rx_queues; i++) netdev_info(dev, "RX BD ring size for Q[%d]: %d\n", i, priv->rx_queue[i]->rx_ring_size); - for(i = 0; i < priv->num_tx_queues; i++) + for (i = 0; i < priv->num_tx_queues; i++) netdev_info(dev, "TX BD ring size for Q[%d]: %d\n", i, priv->tx_queue[i]->tx_ring_size); @@ -1231,30 +1433,30 @@ static int gfar_probe(struct platform_device *ofdev) register_fail: unmap_group_regs(priv); - free_tx_pointers(priv); - free_rx_pointers(priv); + gfar_free_rx_queues(priv); + gfar_free_tx_queues(priv); if (priv->phy_node) of_node_put(priv->phy_node); if (priv->tbi_node) of_node_put(priv->tbi_node); - free_netdev(dev); + free_gfar_dev(priv); return err; } static int gfar_remove(struct platform_device *ofdev) { - struct gfar_private *priv = dev_get_drvdata(&ofdev->dev); + struct gfar_private *priv = platform_get_drvdata(ofdev); if (priv->phy_node) of_node_put(priv->phy_node); if (priv->tbi_node) of_node_put(priv->tbi_node); - dev_set_drvdata(&ofdev->dev, NULL); - unregister_netdev(priv->ndev); unmap_group_regs(priv); - free_netdev(priv->ndev); + gfar_free_rx_queues(priv); + gfar_free_tx_queues(priv); + free_gfar_dev(priv); return 0; } @@ -1270,7 +1472,8 @@ static int gfar_suspend(struct device *dev) u32 tempval; int magic_packet = priv->wol_en && - (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); + (priv->device_flags & + FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); netif_device_detach(ndev); @@ -1278,9 +1481,8 @@ static int gfar_suspend(struct device *dev) local_irq_save(flags); lock_tx_qs(priv); - lock_rx_qs(priv); - gfar_halt_nodisable(ndev); + gfar_halt_nodisable(priv); /* Disable Tx, and Rx if wake-on-LAN is disabled. 
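/* Editorial sketch (not part of the patch): the suspend path above stops
 * TX unconditionally but leaves RX running when magic-packet wake-up is
 * in use, so the MAC can still see the wake frame. The elided MACCFG1
 * update presumably has this shape (bit names as used in this driver):
 */
static u32 wol_maccfg1(u32 maccfg1, int magic_packet)
{
	maccfg1 &= ~MACCFG1_TX_EN;		/* no transmits while suspended */
	if (!magic_packet)
		maccfg1 &= ~MACCFG1_RX_EN;	/* no wake-on-LAN: stop RX too */
	return maccfg1;
}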
*/ tempval = gfar_read(&regs->maccfg1); @@ -1292,7 +1494,6 @@ static int gfar_suspend(struct device *dev) gfar_write(&regs->maccfg1, tempval); - unlock_rx_qs(priv); unlock_tx_qs(priv); local_irq_restore(flags); @@ -1322,7 +1523,8 @@ static int gfar_resume(struct device *dev) unsigned long flags; u32 tempval; int magic_packet = priv->wol_en && - (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); + (priv->device_flags & + FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); if (!netif_running(ndev)) { netif_device_attach(ndev); @@ -1337,15 +1539,13 @@ */ local_irq_save(flags); lock_tx_qs(priv); - lock_rx_qs(priv); tempval = gfar_read(&regs->maccfg2); tempval &= ~MACCFG2_MPEN; gfar_write(&regs->maccfg2, tempval); - gfar_start(ndev); + gfar_start(priv); - unlock_rx_qs(priv); unlock_tx_qs(priv); local_irq_restore(flags); @@ -1361,14 +1561,22 @@ static int gfar_restore(struct device *dev) struct gfar_private *priv = dev_get_drvdata(dev); struct net_device *ndev = priv->ndev; - if (!netif_running(ndev)) + if (!netif_running(ndev)) { + netif_device_attach(ndev); + return 0; + } + + if (gfar_init_bds(ndev)) { + free_skb_resources(priv); + return -ENOMEM; + } - gfar_init_bds(ndev); - init_registers(ndev); - gfar_set_mac_address(ndev); - gfar_init_mac(ndev); - gfar_start(ndev); + gfar_mac_reset(priv); + + gfar_init_tx_rx_base(priv); + + gfar_start(priv); priv->oldlink = 0; priv->oldspeed = 0; @@ -1421,13 +1629,13 @@ static phy_interface_t gfar_get_interface(struct net_device *dev) } if (ecntrl & ECNTRL_REDUCED_MODE) { - if (ecntrl & ECNTRL_REDUCED_MII_MODE) + if (ecntrl & ECNTRL_REDUCED_MII_MODE) { return PHY_INTERFACE_MODE_RMII; + } else { phy_interface_t interface = priv->interface; - /* - * This isn't autodetected right now, so it must + /* This isn't autodetected right now, so it must * be set by the device tree or platform code. */ if (interface == PHY_INTERFACE_MODE_RGMII_ID) @@ -1452,7 +1660,7 @@ static int init_phy(struct net_device *dev) struct gfar_private *priv = netdev_priv(dev); uint gigabit_support = priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ? - SUPPORTED_1000baseT_Full : 0; + GFAR_SUPPORTED_GBIT : 0; phy_interface_t interface; priv->oldlink = 0; @@ -1463,9 +1671,6 @@ priv->phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0, interface); - if (!priv->phydev) - priv->phydev = of_phy_connect_fixed_link(dev, &adjust_link, - interface); if (!priv->phydev) { dev_err(&dev->dev, "could not attach to PHY\n"); return -ENODEV; @@ -1481,8 +1686,7 @@ return 0; } -/* - * Initialize TBI PHY interface for communicating with the +/* Initialize TBI PHY interface for communicating with the * SERDES lynx PHY on the chip. We communicate with this PHY * through the MDIO bus on each controller, treating it as a * "normal" PHY at the address found in the TBIPA register. We assume @@ -1507,8 +1711,7 @@ static void gfar_configure_serdes(struct net_device *dev) return; } - /* - * If the link is already up, we must already be ok, and don't need to + /* If the link is already up, we must already be ok, and don't need to * configure and reset the TBI<->SerDes link. Maybe U-Boot configured * everything for us? Resetting it takes the link down and requires * several seconds for it to come back.
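/* Editorial sketch (not part of the patch): the guard that follows this
 * comment in the source is elided by the hunk boundary; it presumably
 * checks the TBI PHY's link status and bails out early, roughly:
 *
 *	if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS)
 *		return;		// link already up, e.g. set up by U-Boot
 */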
@@ -1520,77 +1723,25 @@ static void gfar_configure_serdes(struct net_device *dev) phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT); phy_write(tbiphy, MII_ADVERTISE, - ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE | - ADVERTISE_1000XPSE_ASYM); - - phy_write(tbiphy, MII_BMCR, BMCR_ANENABLE | - BMCR_ANRESTART | BMCR_FULLDPLX | BMCR_SPEED1000); -} - -static void init_registers(struct net_device *dev) -{ - struct gfar_private *priv = netdev_priv(dev); - struct gfar __iomem *regs = NULL; - int i = 0; - - for (i = 0; i < priv->num_grps; i++) { - regs = priv->gfargrp[i].regs; - /* Clear IEVENT */ - gfar_write(&regs->ievent, IEVENT_INIT_CLEAR); - - /* Initialize IMASK */ - gfar_write(&regs->imask, IMASK_INIT_CLEAR); - } - - regs = priv->gfargrp[0].regs; - /* Init hash registers to zero */ - gfar_write(&regs->igaddr0, 0); - gfar_write(&regs->igaddr1, 0); - gfar_write(&regs->igaddr2, 0); - gfar_write(&regs->igaddr3, 0); - gfar_write(&regs->igaddr4, 0); - gfar_write(&regs->igaddr5, 0); - gfar_write(&regs->igaddr6, 0); - gfar_write(&regs->igaddr7, 0); + ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE | + ADVERTISE_1000XPSE_ASYM); - gfar_write(&regs->gaddr0, 0); - gfar_write(&regs->gaddr1, 0); - gfar_write(&regs->gaddr2, 0); - gfar_write(&regs->gaddr3, 0); - gfar_write(&regs->gaddr4, 0); - gfar_write(&regs->gaddr5, 0); - gfar_write(&regs->gaddr6, 0); - gfar_write(&regs->gaddr7, 0); - - /* Zero out the rmon mib registers if it has them */ - if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) { - memset_io(&(regs->rmon), 0, sizeof (struct rmon_mib)); - - /* Mask off the CAM interrupts */ - gfar_write(&regs->rmon.cam1, 0xffffffff); - gfar_write(&regs->rmon.cam2, 0xffffffff); - } - - /* Initialize the max receive buffer length */ - gfar_write(&regs->mrblr, priv->rx_buffer_size); - - /* Initialize the Minimum Frame Length Register */ - gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS); + phy_write(tbiphy, MII_BMCR, + BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX | + BMCR_SPEED1000); } static int __gfar_is_rx_idle(struct gfar_private *priv) { u32 res; - /* - * Normaly TSEC should not hang on GRS commands, so we should + /* Normaly TSEC should not hang on GRS commands, so we should * actually wait for IEVENT_GRSC flag. */ - if (likely(!gfar_has_errata(priv, GFAR_ERRATA_A002))) + if (!gfar_has_errata(priv, GFAR_ERRATA_A002)) return 0; - /* - * Read the eTSEC register at offset 0xD1C. If bits 7-14 are + /* Read the eTSEC register at offset 0xD1C. If bits 7-14 are * the same as bits 23-30, the eTSEC Rx is assumed to be idle * and the Rx can be safely reset.
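/* Editorial sketch (not part of the patch): the idle test described in
 * the comment above can be written as a small pure function. 0x7f80
 * selects bits 7-14 of the low half-word and 0x7f800000 the matching
 * bits 23-30 of the high half-word; shifting right by 16 lines the two
 * fields up for a direct compare:
 */
static int etsec_rx_idle(unsigned int reg_d1c)
{
	reg_d1c &= 0x7f807f80;		/* keep bits 7-14 and 23-30 */
	return (reg_d1c & 0xffff) == (reg_d1c >> 16);
}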
*/ @@ -1603,27 +1754,17 @@ static int __gfar_is_rx_idle(struct gfar_private *priv) } /* Halt the receive and transmit queues */ -static void gfar_halt_nodisable(struct net_device *dev) +static void gfar_halt_nodisable(struct gfar_private *priv) { - struct gfar_private *priv = netdev_priv(dev); - struct gfar __iomem *regs = NULL; + struct gfar __iomem *regs = priv->gfargrp[0].regs; u32 tempval; - int i = 0; - for (i = 0; i < priv->num_grps; i++) { - regs = priv->gfargrp[i].regs; - /* Mask all interrupts */ - gfar_write(&regs->imask, IMASK_INIT_CLEAR); - - /* Clear all interrupts */ - gfar_write(&regs->ievent, IEVENT_INIT_CLEAR); - } + gfar_ints_disable(priv); - regs = priv->gfargrp[0].regs; /* Stop the DMA, and wait for it to stop */ tempval = gfar_read(&regs->dmactrl); - if ((tempval & (DMACTRL_GRS | DMACTRL_GTS)) - != (DMACTRL_GRS | DMACTRL_GTS)) { + if ((tempval & (DMACTRL_GRS | DMACTRL_GTS)) != + (DMACTRL_GRS | DMACTRL_GTS)) { int ret; tempval |= (DMACTRL_GRS | DMACTRL_GTS); @@ -1640,56 +1781,41 @@ static void gfar_halt_nodisable(struct net_device *dev) } /* Halt the receive and transmit queues */ -void gfar_halt(struct net_device *dev) +void gfar_halt(struct gfar_private *priv) { - struct gfar_private *priv = netdev_priv(dev); struct gfar __iomem *regs = priv->gfargrp[0].regs; u32 tempval; - gfar_halt_nodisable(dev); + /* Dissable the Rx/Tx hw queues */ + gfar_write(&regs->rqueue, 0); + gfar_write(&regs->tqueue, 0); + + mdelay(10); + + gfar_halt_nodisable(priv); - /* Disable Rx and Tx */ + /* Disable Rx/Tx DMA */ tempval = gfar_read(&regs->maccfg1); tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN); gfar_write(&regs->maccfg1, tempval); } -static void free_grp_irqs(struct gfar_priv_grp *grp) -{ - free_irq(grp->interruptError, grp); - free_irq(grp->interruptTransmit, grp); - free_irq(grp->interruptReceive, grp); -} - void stop_gfar(struct net_device *dev) { struct gfar_private *priv = netdev_priv(dev); - unsigned long flags; - int i; - - phy_stop(priv->phydev); + netif_tx_stop_all_queues(dev); - /* Lock it down */ - local_irq_save(flags); - lock_tx_qs(priv); - lock_rx_qs(priv); + smp_mb__before_atomic(); + set_bit(GFAR_DOWN, &priv->state); + smp_mb__after_atomic(); - gfar_halt(dev); + disable_napi(priv); - unlock_rx_qs(priv); - unlock_tx_qs(priv); - local_irq_restore(flags); + /* disable ints and gracefully shut down Rx/Tx DMA */ + gfar_halt(priv); - /* Free the IRQs */ - if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { - for (i = 0; i < priv->num_grps; i++) - free_grp_irqs(&priv->gfargrp[i]); - } else { - for (i = 0; i < priv->num_grps; i++) - free_irq(priv->gfargrp[i].interruptTransmit, - &priv->gfargrp[i]); - } + phy_stop(priv->phydev); free_skb_resources(priv); } @@ -1706,20 +1832,21 @@ static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue) if (!tx_queue->tx_skbuff[i]) continue; - dma_unmap_single(&priv->ofdev->dev, txbdp->bufPtr, - txbdp->length, DMA_TO_DEVICE); + dma_unmap_single(priv->dev, txbdp->bufPtr, + txbdp->length, DMA_TO_DEVICE); txbdp->lstatus = 0; for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags; - j++) { + j++) { txbdp++; - dma_unmap_page(&priv->ofdev->dev, txbdp->bufPtr, - txbdp->length, DMA_TO_DEVICE); + dma_unmap_page(priv->dev, txbdp->bufPtr, + txbdp->length, DMA_TO_DEVICE); } txbdp++; dev_kfree_skb_any(tx_queue->tx_skbuff[i]); tx_queue->tx_skbuff[i] = NULL; } kfree(tx_queue->tx_skbuff); + tx_queue->tx_skbuff = NULL; } static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue) @@ -1732,9 +1859,9 @@ static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
for (i = 0; i < rx_queue->rx_ring_size; i++) { if (rx_queue->rx_skbuff[i]) { - dma_unmap_single(&priv->ofdev->dev, - rxbdp->bufPtr, priv->rx_buffer_size, - DMA_FROM_DEVICE); + dma_unmap_single(priv->dev, rxbdp->bufPtr, + priv->rx_buffer_size, + DMA_FROM_DEVICE); dev_kfree_skb_any(rx_queue->rx_skbuff[i]); rx_queue->rx_skbuff[i] = NULL; } @@ -1743,10 +1870,12 @@ static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue) rxbdp++; } kfree(rx_queue->rx_skbuff); + rx_queue->rx_skbuff = NULL; } /* If there are any tx skbs or rx skbs still around, free them. - * Then free tx_skbuff and rx_skbuff */ + * Then free tx_skbuff and rx_skbuff + */ static void free_skb_resources(struct gfar_private *priv) { struct gfar_priv_tx_q *tx_queue = NULL; @@ -1755,36 +1884,37 @@ static void free_skb_resources(struct gfar_private *priv) /* Go through all the buffer descriptors and free their data buffers */ for (i = 0; i < priv->num_tx_queues; i++) { + struct netdev_queue *txq; + tx_queue = priv->tx_queue[i]; - if(tx_queue->tx_skbuff) + txq = netdev_get_tx_queue(tx_queue->dev, tx_queue->qindex); + if (tx_queue->tx_skbuff) free_skb_tx_queue(tx_queue); + netdev_tx_reset_queue(txq); } for (i = 0; i < priv->num_rx_queues; i++) { rx_queue = priv->rx_queue[i]; - if(rx_queue->rx_skbuff) + if (rx_queue->rx_skbuff) free_skb_rx_queue(rx_queue); } - dma_free_coherent(&priv->ofdev->dev, - sizeof(struct txbd8) * priv->total_tx_ring_size + - sizeof(struct rxbd8) * priv->total_rx_ring_size, - priv->tx_queue[0]->tx_bd_base, - priv->tx_queue[0]->tx_bd_dma_base); - skb_queue_purge(&priv->rx_recycle); + dma_free_coherent(priv->dev, + sizeof(struct txbd8) * priv->total_tx_ring_size + + sizeof(struct rxbd8) * priv->total_rx_ring_size, + priv->tx_queue[0]->tx_bd_base, + priv->tx_queue[0]->tx_bd_dma_base); } -void gfar_start(struct net_device *dev) +void gfar_start(struct gfar_private *priv) { - struct gfar_private *priv = netdev_priv(dev); struct gfar __iomem *regs = priv->gfargrp[0].regs; u32 tempval; int i = 0; - /* Enable Rx and Tx in MACCFG1 */ - tempval = gfar_read(&regs->maccfg1); - tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN); - gfar_write(&regs->maccfg1, tempval); + /* Enable Rx/Tx hw queues */ + gfar_write(&regs->rqueue, priv->rqueue); + gfar_write(&regs->tqueue, priv->tqueue); /* Initialize DMACTRL to have WWR and WOP */ tempval = gfar_read(&regs->dmactrl); @@ -1801,48 +1931,23 @@ void gfar_start(struct net_device *dev) /* Clear THLT/RHLT, so that the DMA starts polling now */ gfar_write(&regs->tstat, priv->gfargrp[i].tstat); gfar_write(&regs->rstat, priv->gfargrp[i].rstat); - /* Unmask the interrupts we look for */ - gfar_write(&regs->imask, IMASK_DEFAULT); } - dev->trans_start = jiffies; /* prevent tx timeout */ -} - -void gfar_configure_coalescing(struct gfar_private *priv, - unsigned long tx_mask, unsigned long rx_mask) -{ - struct gfar __iomem *regs = priv->gfargrp[0].regs; - u32 __iomem *baddr; - int i = 0; - - /* Backward compatible case ---- even if we enable - * multiple queues, there's only single reg to program - */ - gfar_write(&regs->txic, 0); - if(likely(priv->tx_queue[0]->txcoalescing)) - gfar_write(&regs->txic, priv->tx_queue[0]->txic); + /* Enable Rx/Tx DMA */ + tempval = gfar_read(&regs->maccfg1); + tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN); + gfar_write(&regs->maccfg1, tempval); - gfar_write(&regs->rxic, 0); - if(unlikely(priv->rx_queue[0]->rxcoalescing)) - gfar_write(&regs->rxic, priv->rx_queue[0]->rxic); + gfar_ints_enable(priv); - if (priv->mode == MQ_MG_MODE) { - baddr = &regs->txic0; - for_each_set_bit(i, &tx_mask, priv->num_tx_queues) { - if
(likely(priv->tx_queue[i]->txcoalescing)) { - gfar_write(baddr + i, 0); - gfar_write(baddr + i, priv->tx_queue[i]->txic); - } - } + priv->ndev->trans_start = jiffies; /* prevent tx timeout */ +} - baddr = &regs->rxic0; - for_each_set_bit(i, &rx_mask, priv->num_rx_queues) { - if (likely(priv->rx_queue[i]->rxcoalescing)) { - gfar_write(baddr + i, 0); - gfar_write(baddr + i, priv->rx_queue[i]->rxic); - } - } - } +static void free_grp_irqs(struct gfar_priv_grp *grp) +{ + free_irq(gfar_irq(grp, TX)->irq, grp); + free_irq(gfar_irq(grp, RX)->irq, grp); + free_irq(gfar_irq(grp, ER)->irq, grp); } static int register_grp_irqs(struct gfar_priv_grp *grp) @@ -1852,36 +1957,40 @@ static int register_grp_irqs(struct gfar_priv_grp *grp) int err; /* If the device has multiple interrupts, register for - * them. Otherwise, only register for the one */ + * them. Otherwise, only register for the one + */ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { /* Install our interrupt handlers for Error, - * Transmit, and Receive */ - if ((err = request_irq(grp->interruptError, gfar_error, 0, - grp->int_name_er,grp)) < 0) { + * Transmit, and Receive + */ + err = request_irq(gfar_irq(grp, ER)->irq, gfar_error, 0, + gfar_irq(grp, ER)->name, grp); + if (err < 0) { netif_err(priv, intr, dev, "Can't get IRQ %d\n", - grp->interruptError); + gfar_irq(grp, ER)->irq); goto err_irq_fail; } - - if ((err = request_irq(grp->interruptTransmit, gfar_transmit, - 0, grp->int_name_tx, grp)) < 0) { + err = request_irq(gfar_irq(grp, TX)->irq, gfar_transmit, 0, + gfar_irq(grp, TX)->name, grp); + if (err < 0) { netif_err(priv, intr, dev, "Can't get IRQ %d\n", - grp->interruptTransmit); + gfar_irq(grp, TX)->irq); goto tx_irq_fail; } - - if ((err = request_irq(grp->interruptReceive, gfar_receive, 0, - grp->int_name_rx, grp)) < 0) { + err = request_irq(gfar_irq(grp, RX)->irq, gfar_receive, 0, + gfar_irq(grp, RX)->name, grp); + if (err < 0) { netif_err(priv, intr, dev, "Can't get IRQ %d\n", - grp->interruptReceive); + gfar_irq(grp, RX)->irq); goto rx_irq_fail; } } else { - if ((err = request_irq(grp->interruptTransmit, gfar_interrupt, 0, - grp->int_name_tx, grp)) < 0) { + err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, 0, + gfar_irq(grp, TX)->name, grp); + if (err < 0) { netif_err(priv, intr, dev, "Can't get IRQ %d\n", - grp->interruptTransmit); + gfar_irq(grp, TX)->irq); goto err_irq_fail; } } @@ -1889,86 +1998,94 @@ static int register_grp_irqs(struct gfar_priv_grp *grp) return 0; rx_irq_fail: - free_irq(grp->interruptTransmit, grp); + free_irq(gfar_irq(grp, TX)->irq, grp); tx_irq_fail: - free_irq(grp->interruptError, grp); + free_irq(gfar_irq(grp, ER)->irq, grp); err_irq_fail: return err; } -/* Bring the controller up and running */ -int startup_gfar(struct net_device *ndev) +static void gfar_free_irq(struct gfar_private *priv) { - struct gfar_private *priv = netdev_priv(ndev); - struct gfar __iomem *regs = NULL; - int err, i, j; + int i; - for (i = 0; i < priv->num_grps; i++) { - regs= priv->gfargrp[i].regs; - gfar_write(&regs->imask, IMASK_INIT_CLEAR); + /* Free the IRQs */ + if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { + for (i = 0; i < priv->num_grps; i++) + free_grp_irqs(&priv->gfargrp[i]); + } else { + for (i = 0; i < priv->num_grps; i++) + free_irq(gfar_irq(&priv->gfargrp[i], TX)->irq, + &priv->gfargrp[i]); } +} - regs= priv->gfargrp[0].regs; - err = gfar_alloc_skb_resources(ndev); - if (err) - return err; - - gfar_init_mac(ndev); +static int gfar_request_irq(struct gfar_private *priv) +{ + int err, i, j;
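/* Note (editorial, not from the patch): the loop below follows the usual
 * register-and-unwind pattern - if group i fails to get its IRQs, groups
 * 0..i-1 are torn down again so the caller never sees a half-initialized
 * state. In outline:
 *
 *	for (i = 0; i < n; i++)
 *		if (register(i) fails) {
 *			for (j = 0; j < i; j++)
 *				unregister(j);
 *			return err;
 *		}
 */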
for (i = 0; i < priv->num_grps; i++) { err = register_grp_irqs(&priv->gfargrp[i]); if (err) { for (j = 0; j < i; j++) free_grp_irqs(&priv->gfargrp[j]); - goto irq_fail; + return err; } } - /* Start the controller */ - gfar_start(ndev); + return 0; +} + +/* Bring the controller up and running */ +int startup_gfar(struct net_device *ndev) +{ + struct gfar_private *priv = netdev_priv(ndev); + int err; + + gfar_mac_reset(priv); + + err = gfar_alloc_skb_resources(ndev); + if (err) + return err; + + gfar_init_tx_rx_base(priv); + + smp_mb__before_atomic(); + clear_bit(GFAR_DOWN, &priv->state); + smp_mb__after_atomic(); + + /* Start Rx/Tx DMA and enable the interrupts */ + gfar_start(priv); phy_start(priv->phydev); - gfar_configure_coalescing(priv, 0xFF, 0xFF); + enable_napi(priv); - return 0; + netif_tx_wake_all_queues(ndev); -irq_fail: - free_skb_resources(priv); - return err; + return 0; } -/* Called when something needs to use the ethernet device */ -/* Returns 0 for success. */ +/* Called when something needs to use the ethernet device + * Returns 0 for success. + */ static int gfar_enet_open(struct net_device *dev) { struct gfar_private *priv = netdev_priv(dev); int err; - enable_napi(priv); - - skb_queue_head_init(&priv->rx_recycle); - - /* Initialize a bunch of registers */ - init_registers(dev); - - gfar_set_mac_address(dev); - err = init_phy(dev); + if (err) + return err; - if (err) { - disable_napi(priv); + err = gfar_request_irq(priv); + if (err) return err; - } err = startup_gfar(dev); - if (err) { - disable_napi(priv); + if (err) return err; - } - - netif_tx_start_all_queues(dev); device_set_wakeup_enable(&dev->dev, priv->wol_en); @@ -1985,18 +2102,17 @@ static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb) } static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb, - int fcb_length) + int fcb_length) { - u8 flags = 0; - /* If we're here, it's a IP packet with a TCP or UDP * payload. We set it to checksum, using a pseudo-header * we provide */ - flags = TXFCB_DEFAULT; + u8 flags = TXFCB_DEFAULT; - /* Tell the controller what the protocol is */ - /* And provide the already calculated phcs */ + /* Tell the controller what the protocol is + * And provide the already calculated phcs + */ if (ip_hdr(skb)->protocol == IPPROTO_UDP) { flags |= TXFCB_UDP; fcb->phcs = udp_hdr(skb)->check; @@ -2006,7 +2122,8 @@ static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb, /* l3os is the distance between the start of the * frame (skb->data) and the start of the IP hdr. * l4os is the distance between the start of the - * l3 hdr and the l4 hdr */ + * l3 hdr and the l4 hdr + */ fcb->l3os = (u16)(skb_network_offset(skb) - fcb_length); fcb->l4os = skb_network_header_len(skb); @@ -2020,7 +2137,7 @@ void inline gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb) } static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride, - struct txbd8 *base, int ring_size) + struct txbd8 *base, int ring_size) { struct txbd8 *new_bd = bdp + stride; @@ -2028,13 +2145,32 @@ static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride, } static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base, - int ring_size) + int ring_size) { return skip_txbd(bdp, 1, base, ring_size); } -/* This is called by the kernel when a frame is ready for transmission. 
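/* Worked example (editorial, not from the patch): for an untagged IPv4
 * packet the L3 header starts ETH_HLEN = 14 bytes into the frame.
 * Assuming gfar_add_fcb() pushes an 8-byte FCB in front of the Ethernet
 * header, skb_network_offset() becomes 8 + 14 = 22, so the offsets that
 * gfar_tx_checksum() computes above are
 *
 *	fcb->l3os = 22 - fcb_length = 14   (with fcb_length = 8)
 *	fcb->l4os = 20                     (IPv4 header, no options)
 *
 * i.e. both are measured from the start of the real frame, not from the
 * FCB itself.
 */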
*/ -/* It is pointed to by the dev->hard_start_xmit function pointer */ +/* eTSEC12: csum generation not supported for some fcb offsets */ +static inline bool gfar_csum_errata_12(struct gfar_private *priv, + unsigned long fcb_addr) +{ + return (gfar_has_errata(priv, GFAR_ERRATA_12) && + (fcb_addr % 0x20) > 0x18); +} + +/* eTSEC76: csum generation for frames larger than 2500 may + * cause excess delays before start of transmission + */ +static inline bool gfar_csum_errata_76(struct gfar_private *priv, + unsigned int len) +{ + return (gfar_has_errata(priv, GFAR_ERRATA_76) && + (len > 2500)); +} + +/* This is called by the kernel when a frame is ready for transmission. + * It is pointed to by the dev->hard_start_xmit function pointer + */ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct gfar_private *priv = netdev_priv(dev); @@ -2044,24 +2180,11 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) struct txfcb *fcb = NULL; struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL; u32 lstatus; - int i, rq = 0, do_tstamp = 0; + int i, rq = 0; + int do_tstamp, do_csum, do_vlan; u32 bufaddr; unsigned long flags; - unsigned int nr_frags, nr_txbds, length, fcb_length = GMAC_FCB_LEN; - - /* - * TOE=1 frames larger than 2500 bytes may see excess delays - * before start of transmission. - */ - if (unlikely(gfar_has_errata(priv, GFAR_ERRATA_76) && - skb->ip_summed == CHECKSUM_PARTIAL && - skb->len > 2500)) { - int ret; - - ret = skb_checksum_help(skb); - if (ret) - return ret; - } + unsigned int nr_frags, nr_txbds, bytes_sent, fcb_len = 0; rq = skb->queue_mapping; tx_queue = priv->tx_queue[rq]; @@ -2069,31 +2192,32 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) base = tx_queue->tx_bd_base; regs = tx_queue->grp->regs; + do_csum = (CHECKSUM_PARTIAL == skb->ip_summed); + do_vlan = vlan_tx_tag_present(skb); + do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && + priv->hwts_tx_en; + + if (do_csum || do_vlan) + fcb_len = GMAC_FCB_LEN; + /* check if time stamp should be generated */ - if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP && - priv->hwts_tx_en)) { - do_tstamp = 1; - fcb_length = GMAC_FCB_LEN + GMAC_TXPAL_LEN; - } + if (unlikely(do_tstamp)) + fcb_len = GMAC_FCB_LEN + GMAC_TXPAL_LEN; /* make space for additional header when fcb is needed */ - if (((skb->ip_summed == CHECKSUM_PARTIAL) || - vlan_tx_tag_present(skb) || - unlikely(do_tstamp)) && - (skb_headroom(skb) < fcb_length)) { + if (fcb_len && unlikely(skb_headroom(skb) < fcb_len)) { struct sk_buff *skb_new; - skb_new = skb_realloc_headroom(skb, fcb_length); + skb_new = skb_realloc_headroom(skb, fcb_len); if (!skb_new) { dev->stats.tx_errors++; - kfree_skb(skb); + dev_kfree_skb_any(skb); return NETDEV_TX_OK; } - /* Steal sock reference for processing TX time stamps */ - swap(skb_new->sk, skb->sk); - swap(skb_new->destructor, skb->destructor); - kfree_skb(skb); + if (skb->sk) + skb_set_owner_w(skb_new, skb->sk); + dev_consume_skb_any(skb); skb = skb_new; } @@ -2115,7 +2239,10 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) } /* Update transmit stats */ - tx_queue->stats.tx_bytes += skb->len; + bytes_sent = skb->len; + tx_queue->stats.tx_bytes += bytes_sent; + /* keep Tx bytes on wire for BQL accounting */ + GFAR_CB(skb)->bytes_sent = bytes_sent; tx_queue->stats.tx_packets++; txbdp = txbdp_start = tx_queue->cur_tx; @@ -2124,33 +2251,34 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) /* Time 
stamp insertion requires one additional TxBD */ if (unlikely(do_tstamp)) txbdp_tstamp = txbdp = next_txbd(txbdp, base, - tx_queue->tx_ring_size); + tx_queue->tx_ring_size); if (nr_frags == 0) { if (unlikely(do_tstamp)) txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_LAST | - TXBD_INTERRUPT); + TXBD_INTERRUPT); else lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT); } else { /* Place the fragment addresses and lengths into the TxBDs */ for (i = 0; i < nr_frags; i++) { + unsigned int frag_len; /* Point at the next BD, wrapping as needed */ txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size); - length = skb_shinfo(skb)->frags[i].size; + frag_len = skb_shinfo(skb)->frags[i].size; - lstatus = txbdp->lstatus | length | - BD_LFLAG(TXBD_READY); + lstatus = txbdp->lstatus | frag_len | + BD_LFLAG(TXBD_READY); /* Handle the last BD specially */ if (i == nr_frags - 1) lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT); - bufaddr = skb_frag_dma_map(&priv->ofdev->dev, + bufaddr = skb_frag_dma_map(priv->dev, &skb_shinfo(skb)->frags[i], 0, - length, + frag_len, DMA_TO_DEVICE); /* set the TxBD length and buffer pointer */ @@ -2167,58 +2295,60 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) memset(skb->data, 0, GMAC_TXPAL_LEN); } - /* Set up checksumming */ - if (CHECKSUM_PARTIAL == skb->ip_summed) { + /* Add TxFCB if required */ + if (fcb_len) { fcb = gfar_add_fcb(skb); - /* as specified by errata */ - if (unlikely(gfar_has_errata(priv, GFAR_ERRATA_12) - && ((unsigned long)fcb % 0x20) > 0x18)) { + lstatus |= BD_LFLAG(TXBD_TOE); + } + + /* Set up checksumming */ + if (do_csum) { + gfar_tx_checksum(skb, fcb, fcb_len); + + if (unlikely(gfar_csum_errata_12(priv, (unsigned long)fcb)) || + unlikely(gfar_csum_errata_76(priv, skb->len))) { __skb_pull(skb, GMAC_FCB_LEN); skb_checksum_help(skb); - } else { - lstatus |= BD_LFLAG(TXBD_TOE); - gfar_tx_checksum(skb, fcb, fcb_length); + if (do_vlan || do_tstamp) { + /* put back a new fcb for vlan/tstamp TOE */ + fcb = gfar_add_fcb(skb); + } else { + /* Tx TOE not used */ + lstatus &= ~(BD_LFLAG(TXBD_TOE)); + fcb = NULL; + } } } - if (vlan_tx_tag_present(skb)) { - if (unlikely(NULL == fcb)) { - fcb = gfar_add_fcb(skb); - lstatus |= BD_LFLAG(TXBD_TOE); - } - + if (do_vlan) gfar_tx_vlan(skb, fcb); - } /* Setup tx hardware time stamping if requested */ if (unlikely(do_tstamp)) { skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; - if (fcb == NULL) - fcb = gfar_add_fcb(skb); fcb->ptp = 1; - lstatus |= BD_LFLAG(TXBD_TOE); } - txbdp_start->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data, - skb_headlen(skb), DMA_TO_DEVICE); + txbdp_start->bufPtr = dma_map_single(priv->dev, skb->data, + skb_headlen(skb), DMA_TO_DEVICE); - /* - * If time stamping is requested one additional TxBD must be set up. The + /* If time stamping is requested one additional TxBD must be set up. The * first TxBD points to the FCB and must have a data length of * GMAC_FCB_LEN. The second TxBD points to the actual frame data with * the full frame length. 
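/* Editorial sketch of the two-BD layout used for TX timestamping, derived
 * from the code below (buffer contents assumed):
 *
 *	txbdp_start:  bufPtr -> | TxPAL | FCB | ...   length = GMAC_FCB_LEN
 *	txbdp_tstamp: bufPtr = start + fcb_len        length = headlen - fcb_len
 *
 * The hardware appears to write the transmission timestamp back into the
 * TxPAL area of the first buffer, which gfar_clean_tx_ring() later reads.
 */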
*/ if (unlikely(do_tstamp)) { - txbdp_tstamp->bufPtr = txbdp_start->bufPtr + fcb_length; + txbdp_tstamp->bufPtr = txbdp_start->bufPtr + fcb_len; txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_READY) | - (skb_headlen(skb) - fcb_length); + (skb_headlen(skb) - fcb_len); lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN; } else { lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb); } - /* - * We can work in parallel with gfar_clean_tx_ring(), except + netdev_tx_sent_queue(txq, bytes_sent); + + /* We can work in parallel with gfar_clean_tx_ring(), except * when modifying num_txbdfree. Note that we didn't grab the lock * when we were reading the num_txbdfree and checking for available * space, that's because outside of this function it can only grow, @@ -2231,8 +2361,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) */ spin_lock_irqsave(&tx_queue->txlock, flags); - /* - * The powerpc-specific eieio() is used, as wmb() has too strong + /* The powerpc-specific eieio() is used, as wmb() has too strong * semantics (it requires synchronization between cacheable and * uncacheable mappings, which eieio doesn't provide and which we * don't need), thus requiring a more expensive sync instruction. At @@ -2248,9 +2377,10 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb; /* Update the current skb pointer to the next entry we will use - * (wrapping if necessary) */ + * (wrapping if necessary) + */ tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) & - TX_RING_MOD_MASK(tx_queue->tx_ring_size); + TX_RING_MOD_MASK(tx_queue->tx_ring_size); tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size); @@ -2258,7 +2388,8 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) tx_queue->num_txbdfree -= (nr_txbds); /* If the next BD still needs to be cleaned up, then the bds - are full. We need to tell the kernel to stop sending us stuff. */ + * are full. We need to tell the kernel to stop sending us stuff. 
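/* Editorial sketch (not from the patch): the ordering requirement the
 * eieio() comment describes is the classic descriptor-ring handoff -
 * every field of the BD must be visible to the device before the READY
 * (ownership) bit is:
 *
 *	bdp->bufPtr  = addr;		// step 1: fill in the descriptor
 *	bdp->length  = len;
 *	eieio();			// step 2: order the stores
 *	bdp->lstatus = lstatus | BD_LFLAG(TXBD_READY);	// step 3: hand off
 *
 * A full wmb() would also be correct, but as the comment above notes it
 * compiles to a costlier sync instruction on powerpc.
 */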
+ */ if (!tx_queue->num_txbdfree) { netif_tx_stop_queue(txq); @@ -2279,8 +2410,6 @@ static int gfar_close(struct net_device *dev) { struct gfar_private *priv = netdev_priv(dev); - disable_napi(priv); - cancel_work_sync(&priv->reset_task); stop_gfar(dev); @@ -2288,7 +2417,7 @@ static int gfar_close(struct net_device *dev) phy_disconnect(priv->phydev); priv->phydev = NULL; - netif_tx_stop_all_queues(dev); + gfar_free_irq(priv); return 0; } @@ -2301,121 +2430,43 @@ static int gfar_set_mac_address(struct net_device *dev) return 0; } -/* Check if rx parser should be activated */ -void gfar_check_rx_parser_mode(struct gfar_private *priv) -{ - struct gfar __iomem *regs; - u32 tempval; - - regs = priv->gfargrp[0].regs; - - tempval = gfar_read(&regs->rctrl); - /* If parse is no longer required, then disable parser */ - if (tempval & RCTRL_REQ_PARSER) - tempval |= RCTRL_PRSDEP_INIT; - else - tempval &= ~RCTRL_PRSDEP_INIT; - gfar_write(&regs->rctrl, tempval); -} - -/* Enables and disables VLAN insertion/extraction */ -void gfar_vlan_mode(struct net_device *dev, netdev_features_t features) -{ - struct gfar_private *priv = netdev_priv(dev); - struct gfar __iomem *regs = NULL; - unsigned long flags; - u32 tempval; - - regs = priv->gfargrp[0].regs; - local_irq_save(flags); - lock_rx_qs(priv); - - if (features & NETIF_F_HW_VLAN_TX) { - /* Enable VLAN tag insertion */ - tempval = gfar_read(&regs->tctrl); - tempval |= TCTRL_VLINS; - gfar_write(&regs->tctrl, tempval); - } else { - /* Disable VLAN tag insertion */ - tempval = gfar_read(&regs->tctrl); - tempval &= ~TCTRL_VLINS; - gfar_write(&regs->tctrl, tempval); - } - - if (features & NETIF_F_HW_VLAN_RX) { - /* Enable VLAN tag extraction */ - tempval = gfar_read(&regs->rctrl); - tempval |= (RCTRL_VLEX | RCTRL_PRSDEP_INIT); - gfar_write(&regs->rctrl, tempval); - } else { - /* Disable VLAN tag extraction */ - tempval = gfar_read(&regs->rctrl); - tempval &= ~RCTRL_VLEX; - gfar_write(&regs->rctrl, tempval); - - gfar_check_rx_parser_mode(priv); - } - - gfar_change_mtu(dev, dev->mtu); - - unlock_rx_qs(priv); - local_irq_restore(flags); -} - static int gfar_change_mtu(struct net_device *dev, int new_mtu) { - int tempsize, tempval; struct gfar_private *priv = netdev_priv(dev); - struct gfar __iomem *regs = priv->gfargrp[0].regs; - int oldsize = priv->rx_buffer_size; int frame_size = new_mtu + ETH_HLEN; - if (gfar_is_vlan_on(priv)) - frame_size += VLAN_HLEN; - if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) { netif_err(priv, drv, dev, "Invalid MTU setting\n"); return -EINVAL; } - if (gfar_uses_fcb(priv)) - frame_size += GMAC_FCB_LEN; - - frame_size += priv->padding; - - tempsize = - (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) + - INCREMENTAL_BUFFER_SIZE; + while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state)) + cpu_relax(); - /* Only stop and start the controller if it isn't already - * stopped, and we changed something */ - if ((oldsize != tempsize) && (dev->flags & IFF_UP)) + if (dev->flags & IFF_UP) stop_gfar(dev); - priv->rx_buffer_size = tempsize; - dev->mtu = new_mtu; - gfar_write(&regs->mrblr, priv->rx_buffer_size); - gfar_write(&regs->maxfrm, priv->rx_buffer_size); + if (dev->flags & IFF_UP) + startup_gfar(dev); - /* If the mtu is larger than the max size for standard - * ethernet frames (ie, a jumbo frame), then set maccfg2 - * to allow huge frames, and to check the length */ - tempval = gfar_read(&regs->maccfg2); + clear_bit_unlock(GFAR_RESETTING, &priv->state); - if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE || - gfar_has_errata(priv, GFAR_ERRATA_74)) - tempval |=
(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK); - else - tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK); + return 0; +} - gfar_write(&regs->maccfg2, tempval); +void reset_gfar(struct net_device *ndev) +{ + struct gfar_private *priv = netdev_priv(ndev); - if ((oldsize != tempsize) && (dev->flags & IFF_UP)) - startup_gfar(dev); + while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state)) + cpu_relax(); - return 0; + stop_gfar(ndev); + startup_gfar(ndev); + + clear_bit_unlock(GFAR_RESETTING, &priv->state); } /* gfar_reset_task gets scheduled when a packet has not been @@ -2426,17 +2477,8 @@ static int gfar_change_mtu(struct net_device *dev, int new_mtu) static void gfar_reset_task(struct work_struct *work) { struct gfar_private *priv = container_of(work, struct gfar_private, - reset_task); - struct net_device *dev = priv->ndev; - - if (dev->flags & IFF_UP) { - netif_tx_stop_all_queues(dev); - stop_gfar(dev); - startup_gfar(dev); - netif_tx_start_all_queues(dev); - } - - netif_tx_schedule_all(dev); + reset_task); + reset_gfar(priv->ndev); } static void gfar_timeout(struct net_device *dev) @@ -2453,15 +2495,15 @@ static void gfar_align_skb(struct sk_buff *skb) * as many bytes as needed to align the data properly */ skb_reserve(skb, RXBUF_ALIGNMENT - - (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1))); + (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1))); } /* Interrupt Handler for Transmit complete */ -static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) +static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) { struct net_device *dev = tx_queue->dev; + struct netdev_queue *txq; struct gfar_private *priv = netdev_priv(dev); - struct gfar_priv_rx_q *rx_queue = NULL; struct txbd8 *bdp, *next = NULL; struct txbd8 *lbdp = NULL; struct txbd8 *base = tx_queue->tx_bd_base; @@ -2471,10 +2513,12 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) int frags = 0, nr_txbds = 0; int i; int howmany = 0; + int tqi = tx_queue->qindex; + unsigned int bytes_sent = 0; u32 lstatus; size_t buflen; - rx_queue = priv->rx_queue[tx_queue->qindex]; + txq = netdev_get_tx_queue(dev, tqi); bdp = tx_queue->dirty_tx; skb_dirtytx = tx_queue->skb_dirtytx; @@ -2483,8 +2527,7 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) frags = skb_shinfo(skb)->nr_frags; - /* - * When time stamping, one additional TxBD must be freed. + /* When time stamping, one additional TxBD must be freed. * Also, we need to dma_unmap_single() the TxPAL.
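/* Editorial note: the timestamp fetch in the cleanup path below computes
 * the nanoseconds location as
 *
 *	u64 *ns = (u64 *)(((u32)skb->data + 0x10) & ~0x7);
 *
 * i.e. 16 bytes past the buffer start, aligned down to an 8-byte
 * boundary, which lands inside the TxPAL area reserved in front of the
 * FCB. A stand-alone version of the address math:
 */
static unsigned long gfar_tstamp_addr(unsigned long buf)
{
	return (buf + 0x10) & ~0x7UL;	/* +16, aligned down to 8 bytes */
}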
*/ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) @@ -2498,7 +2541,7 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) /* Only clean completed frames */ if ((lstatus & BD_LFLAG(TXBD_READY)) && - (lstatus & BD_LENGTH_MASK)) + (lstatus & BD_LENGTH_MASK)) break; if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) { @@ -2507,12 +2550,13 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) } else buflen = bdp->length; - dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr, - buflen, DMA_TO_DEVICE); + dma_unmap_single(priv->dev, bdp->bufPtr, + buflen, DMA_TO_DEVICE); if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) { struct skb_shared_hwtstamps shhwtstamps; u64 *ns = (u64*) (((u32)skb->data + 0x10) & ~0x7); + memset(&shhwtstamps, 0, sizeof(shhwtstamps)); shhwtstamps.hwtstamp = ns_to_ktime(*ns); skb_pull(skb, GMAC_FCB_LEN + GMAC_TXPAL_LEN); @@ -2525,30 +2569,20 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) bdp = next_txbd(bdp, base, tx_ring_size); for (i = 0; i < frags; i++) { - dma_unmap_page(&priv->ofdev->dev, - bdp->bufPtr, - bdp->length, - DMA_TO_DEVICE); + dma_unmap_page(priv->dev, bdp->bufPtr, + bdp->length, DMA_TO_DEVICE); bdp->lstatus &= BD_LFLAG(TXBD_WRAP); bdp = next_txbd(bdp, base, tx_ring_size); } - /* - * If there's room in the queue (limit it to rx_buffer_size) - * we add this skb back into the pool, if it's the right size - */ - if (skb_queue_len(&priv->rx_recycle) < rx_queue->rx_ring_size && - skb_recycle_check(skb, priv->rx_buffer_size + - RXBUF_ALIGNMENT)) { - gfar_align_skb(skb); - skb_queue_head(&priv->rx_recycle, skb); - } else - dev_kfree_skb_any(skb); + bytes_sent += GFAR_CB(skb)->bytes_sent; + + dev_kfree_skb_any(skb); tx_queue->tx_skbuff[skb_dirtytx] = NULL; skb_dirtytx = (skb_dirtytx + 1) & - TX_RING_MOD_MASK(tx_ring_size); + TX_RING_MOD_MASK(tx_ring_size); howmany++; spin_lock_irqsave(&tx_queue->txlock, flags); @@ -2557,58 +2591,34 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) } /* If we freed a buffer, we can restart transmission, if necessary */ - if (__netif_subqueue_stopped(dev, tx_queue->qindex) && tx_queue->num_txbdfree) - netif_wake_subqueue(dev, tx_queue->qindex); + if (tx_queue->num_txbdfree && + netif_tx_queue_stopped(txq) && + !(test_bit(GFAR_DOWN, &priv->state))) + netif_wake_subqueue(priv->ndev, tqi); /* Update dirty indicators */ tx_queue->skb_dirtytx = skb_dirtytx; tx_queue->dirty_tx = bdp; - return howmany; -} - -static void gfar_schedule_cleanup(struct gfar_priv_grp *gfargrp) -{ - unsigned long flags; - - spin_lock_irqsave(&gfargrp->grplock, flags); - if (napi_schedule_prep(&gfargrp->napi)) { - gfar_write(&gfargrp->regs->imask, IMASK_RTX_DISABLED); - __napi_schedule(&gfargrp->napi); - } else { - /* - * Clear IEVENT, so interrupts aren't called again - * because of the packets that have already arrived. 
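/* Editorial sketch: the netdev_tx_*_queue() calls added by this patch
 * wire the driver into Byte Queue Limits, and the two halves must
 * balance:
 *
 *	netdev_tx_sent_queue(txq, bytes_sent);		// gfar_start_xmit()
 *	netdev_tx_completed_queue(txq, howmany, bytes_sent);	// gfar_clean_tx_ring()
 *	netdev_tx_reset_queue(txq);			// ring teardown
 *
 * which is why free_skb_resources() above also resets the queue, and why
 * the on-wire byte count is stashed in GFAR_CB(skb)->bytes_sent at
 * transmit time.
 */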
- */ - gfar_write(&gfargrp->regs->ievent, IEVENT_RTX_MASK); - } - spin_unlock_irqrestore(&gfargrp->grplock, flags); - -} - -/* Interrupt Handler for Transmit complete */ -static irqreturn_t gfar_transmit(int irq, void *grp_id) -{ - gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id); - return IRQ_HANDLED; + netdev_tx_completed_queue(txq, howmany, bytes_sent); } static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp, - struct sk_buff *skb) + struct sk_buff *skb) { struct net_device *dev = rx_queue->dev; struct gfar_private *priv = netdev_priv(dev); dma_addr_t buf; - buf = dma_map_single(&priv->ofdev->dev, skb->data, + buf = dma_map_single(priv->dev, skb->data, priv->rx_buffer_size, DMA_FROM_DEVICE); gfar_init_rxbdp(rx_queue, bdp, buf); } -static struct sk_buff * gfar_alloc_skb(struct net_device *dev) +static struct sk_buff *gfar_alloc_skb(struct net_device *dev) { struct gfar_private *priv = netdev_priv(dev); - struct sk_buff *skb = NULL; + struct sk_buff *skb; skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT); if (!skb) @@ -2619,16 +2629,9 @@ static struct sk_buff * gfar_alloc_skb(struct net_device *dev) return skb; } -struct sk_buff * gfar_new_skb(struct net_device *dev) +struct sk_buff *gfar_new_skb(struct net_device *dev) { - struct gfar_private *priv = netdev_priv(dev); - struct sk_buff *skb = NULL; - - skb = skb_dequeue(&priv->rx_recycle); - if (!skb) - skb = gfar_alloc_skb(dev); - - return skb; + return gfar_alloc_skb(dev); } static inline void count_errors(unsigned short status, struct net_device *dev) @@ -2637,12 +2640,11 @@ static inline void count_errors(unsigned short status, struct net_device *dev) struct net_device_stats *stats = &dev->stats; struct gfar_extra_stats *estats = &priv->extra_stats; - /* If the packet was truncated, none of the other errors - * matter */ + /* If the packet was truncated, none of the other errors matter */ if (status & RXBD_TRUNCATED) { stats->rx_length_errors++; - estats->rx_trunc++; + atomic64_inc(&estats->rx_trunc); return; } @@ -2651,27 +2653,68 @@ static inline void count_errors(unsigned short status, struct net_device *dev) stats->rx_length_errors++; if (status & RXBD_LARGE) - estats->rx_large++; + atomic64_inc(&estats->rx_large); else - estats->rx_short++; + atomic64_inc(&estats->rx_short); } if (status & RXBD_NONOCTET) { stats->rx_frame_errors++; - estats->rx_nonoctet++; + atomic64_inc(&estats->rx_nonoctet); } if (status & RXBD_CRCERR) { - estats->rx_crcerr++; + atomic64_inc(&estats->rx_crcerr); stats->rx_crc_errors++; } if (status & RXBD_OVERRUN) { - estats->rx_overrun++; + atomic64_inc(&estats->rx_overrun); stats->rx_crc_errors++; } } irqreturn_t gfar_receive(int irq, void *grp_id) { - gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id); + struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id; + unsigned long flags; + u32 imask; + + if (likely(napi_schedule_prep(&grp->napi_rx))) { + spin_lock_irqsave(&grp->grplock, flags); + imask = gfar_read(&grp->regs->imask); + imask &= IMASK_RX_DISABLED; + gfar_write(&grp->regs->imask, imask); + spin_unlock_irqrestore(&grp->grplock, flags); + __napi_schedule(&grp->napi_rx); + } else { + /* Clear IEVENT, so interrupts aren't called again + * because of the packets that have already arrived. 
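/* Editorial note: IMASK_RX_DISABLED is understood here as "imask with the
 * receive sources cleared", so the read-modify-write in gfar_receive()
 * above throttles only RX interrupts while TX and error sources stay
 * armed; the matching poll function re-adds IMASK_RX_DEFAULT when it
 * completes. The same pattern, with the TX masks, appears in
 * gfar_transmit() below.
 */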
+ */ + gfar_write(&grp->regs->ievent, IEVENT_RX_MASK); + } + + return IRQ_HANDLED; +} + +/* Interrupt Handler for Transmit complete */ +static irqreturn_t gfar_transmit(int irq, void *grp_id) +{ + struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id; + unsigned long flags; + u32 imask; + + if (likely(napi_schedule_prep(&grp->napi_tx))) { + spin_lock_irqsave(&grp->grplock, flags); + imask = gfar_read(&grp->regs->imask); + imask &= IMASK_TX_DISABLED; + gfar_write(&grp->regs->imask, imask); + spin_unlock_irqrestore(&grp->grplock, flags); + __napi_schedule(&grp->napi_tx); + } else { + /* Clear IEVENT, so interrupts aren't called again + * because of the packets that have already arrived. + */ + gfar_write(&grp->regs->ievent, IEVENT_TX_MASK); + } + return IRQ_HANDLED; } @@ -2679,7 +2722,8 @@ static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb) { /* If valid headers were found, and valid sums * were verified, then we tell the kernel that no - * checksumming is necessary. Otherwise, it is */ + * checksumming is necessary. Otherwise, it is [FIXME] + */ if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU)) skb->ip_summed = CHECKSUM_UNNECESSARY; else @@ -2687,21 +2731,19 @@ static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb) } -/* gfar_process_frame() -- handle one incoming packet if skb - * isn't NULL. */ -static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, - int amount_pull) +/* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. */ +static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb, + int amount_pull, struct napi_struct *napi) { struct gfar_private *priv = netdev_priv(dev); struct rxfcb *fcb = NULL; - int ret; - /* fcb is at the beginning if exists */ fcb = (struct rxfcb *)skb->data; - /* Remove the FCB from the skb */ - /* Remove the padded bytes, if there are any */ + /* Remove the FCB from the skb + * Remove the padded bytes, if there are any + */ if (amount_pull) { skb_record_rx_queue(skb, fcb->rq); skb_pull(skb, amount_pull); @@ -2711,6 +2753,7 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, if (priv->hwts_rx_en) { struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb); u64 *ns = (u64 *) skb->data; + memset(shhwtstamps, 0, sizeof(*shhwtstamps)); shhwtstamps->hwtstamp = ns_to_ktime(*ns); } @@ -2724,27 +2767,22 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, /* Tell the skb what kind of packet this is */ skb->protocol = eth_type_trans(skb, dev); - /* - * There's need to check for NETIF_F_HW_VLAN_RX here. + /* There's need to check for NETIF_F_HW_VLAN_CTAG_RX here. * Even if vlan rx accel is disabled, on some chips * RXFCB_VLN is pseudo randomly set. */ - if (dev->features & NETIF_F_HW_VLAN_RX && + if (dev->features & NETIF_F_HW_VLAN_CTAG_RX && fcb->flags & RXFCB_VLN) - __vlan_hwaccel_put_tag(skb, fcb->vlctl); + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), fcb->vlctl); /* Send the packet up the stack */ - ret = netif_receive_skb(skb); + napi_gro_receive(napi, skb); - if (NET_RX_DROP == ret) - priv->extra_stats.kernel_dropped++; - - return 0; } /* gfar_clean_rx_ring() -- Processes each frame in the rx ring - * until the budget/quota has been reached. Returns the number - * of frames handled + * until the budget/quota has been reached. 
Returns the number + * of frames handled */ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit) { @@ -2760,10 +2798,11 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit) bdp = rx_queue->cur_rx; base = rx_queue->rx_bd_base; - amount_pull = (gfar_uses_fcb(priv) ? GMAC_FCB_LEN : 0); + amount_pull = priv->uses_rxfcb ? GMAC_FCB_LEN : 0; while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) { struct sk_buff *newskb; + rmb(); /* Add another skb for the future */ @@ -2771,22 +2810,22 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit) skb = rx_queue->rx_skbuff[rx_queue->skb_currx]; - dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr, - priv->rx_buffer_size, DMA_FROM_DEVICE); + dma_unmap_single(priv->dev, bdp->bufPtr, + priv->rx_buffer_size, DMA_FROM_DEVICE); if (unlikely(!(bdp->status & RXBD_ERR) && - bdp->length > priv->rx_buffer_size)) + bdp->length > priv->rx_buffer_size)) bdp->status = RXBD_LARGE; /* We drop the frame if we failed to allocate a new buffer */ if (unlikely(!newskb || !(bdp->status & RXBD_LAST) || - bdp->status & RXBD_ERR)) { + bdp->status & RXBD_ERR)) { count_errors(bdp->status, dev); if (unlikely(!newskb)) newskb = skb; else if (skb) - skb_queue_head(&priv->rx_recycle, skb); + dev_kfree_skb(skb); } else { /* Increment the number of packets */ rx_queue->stats.rx_packets++; @@ -2798,12 +2837,13 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit) skb_put(skb, pkt_len); rx_queue->stats.rx_bytes += pkt_len; skb_record_rx_queue(skb, rx_queue->qindex); - gfar_process_frame(dev, skb, amount_pull); + gfar_process_frame(dev, skb, amount_pull, + &rx_queue->grp->napi_rx); } else { netif_warn(priv, rx_err, dev, "Missing skb!\n"); rx_queue->stats.rx_dropped++; - priv->extra_stats.rx_skbmissing++; + atomic64_inc(&priv->extra_stats.rx_skbmissing); } } @@ -2817,9 +2857,8 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit) bdp = next_bd(bdp, base, rx_queue->rx_ring_size); /* update to point at the next skb */ - rx_queue->skb_currx = - (rx_queue->skb_currx + 1) & - RX_RING_MOD_MASK(rx_queue->rx_ring_size); + rx_queue->skb_currx = (rx_queue->skb_currx + 1) & + RX_RING_MOD_MASK(rx_queue->rx_ring_size); } /* Update the current rxbd pointer to be the next one */ @@ -2828,99 +2867,196 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit) return howmany; } -static int gfar_poll(struct napi_struct *napi, int budget) +static int gfar_poll_rx_sq(struct napi_struct *napi, int budget) +{ + struct gfar_priv_grp *gfargrp = + container_of(napi, struct gfar_priv_grp, napi_rx); + struct gfar __iomem *regs = gfargrp->regs; + struct gfar_priv_rx_q *rx_queue = gfargrp->rx_queue; + int work_done = 0; + + /* Clear IEVENT, so interrupts aren't called again + * because of the packets that have already arrived + */ + gfar_write(®s->ievent, IEVENT_RX_MASK); + + work_done = gfar_clean_rx_ring(rx_queue, budget); + + if (work_done < budget) { + u32 imask; + napi_complete(napi); + /* Clear the halt bit in RSTAT */ + gfar_write(®s->rstat, gfargrp->rstat); + + spin_lock_irq(&gfargrp->grplock); + imask = gfar_read(®s->imask); + imask |= IMASK_RX_DEFAULT; + gfar_write(®s->imask, imask); + spin_unlock_irq(&gfargrp->grplock); + } + + return work_done; +} + +static int gfar_poll_tx_sq(struct napi_struct *napi, int budget) +{ + struct gfar_priv_grp *gfargrp = + container_of(napi, struct gfar_priv_grp, napi_tx); + struct gfar __iomem *regs = gfargrp->regs; + struct 
gfar_priv_tx_q *tx_queue = gfargrp->tx_queue; + u32 imask; + + /* Clear IEVENT, so interrupts aren't called again + * because of the packets that have already arrived + */ + gfar_write(&regs->ievent, IEVENT_TX_MASK); + + /* run Tx cleanup to completion */ + if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) + gfar_clean_tx_ring(tx_queue); + + napi_complete(napi); + + spin_lock_irq(&gfargrp->grplock); + imask = gfar_read(&regs->imask); + imask |= IMASK_TX_DEFAULT; + gfar_write(&regs->imask, imask); + spin_unlock_irq(&gfargrp->grplock); + + return 0; +} + +static int gfar_poll_rx(struct napi_struct *napi, int budget) { - struct gfar_priv_grp *gfargrp = container_of(napi, - struct gfar_priv_grp, napi); + struct gfar_priv_grp *gfargrp = + container_of(napi, struct gfar_priv_grp, napi_rx); struct gfar_private *priv = gfargrp->priv; struct gfar __iomem *regs = gfargrp->regs; - struct gfar_priv_tx_q *tx_queue = NULL; struct gfar_priv_rx_q *rx_queue = NULL; - int rx_cleaned = 0, budget_per_queue = 0, rx_cleaned_per_queue = 0; - int tx_cleaned = 0, i, left_over_budget = budget; - unsigned long serviced_queues = 0; - int num_queues = 0; - - num_queues = gfargrp->num_rx_queues; - budget_per_queue = budget/num_queues; + int work_done = 0, work_done_per_q = 0; + int i, budget_per_q = 0; + unsigned long rstat_rxf; + int num_act_queues; /* Clear IEVENT, so interrupts aren't called again - * because of the packets that have already arrived */ - gfar_write(&regs->ievent, IEVENT_RTX_MASK); - - while (num_queues && left_over_budget) { - - budget_per_queue = left_over_budget/num_queues; - left_over_budget = 0; - - for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) { - if (test_bit(i, &serviced_queues)) - continue; - rx_queue = priv->rx_queue[i]; - tx_queue = priv->tx_queue[rx_queue->qindex]; - - tx_cleaned += gfar_clean_tx_ring(tx_queue); - rx_cleaned_per_queue = gfar_clean_rx_ring(rx_queue, - budget_per_queue); - rx_cleaned += rx_cleaned_per_queue; - if(rx_cleaned_per_queue < budget_per_queue) { - left_over_budget = left_over_budget + - (budget_per_queue - rx_cleaned_per_queue); - set_bit(i, &serviced_queues); - num_queues--; - } + * because of the packets that have already arrived + */ + gfar_write(&regs->ievent, IEVENT_RX_MASK); + + rstat_rxf = gfar_read(&regs->rstat) & RSTAT_RXF_MASK; + + num_act_queues = bitmap_weight(&rstat_rxf, MAX_RX_QS); + if (num_act_queues) + budget_per_q = budget/num_act_queues; + + for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) { + /* skip queue if not active */ + if (!(rstat_rxf & (RSTAT_CLEAR_RXF0 >> i))) + continue; + + rx_queue = priv->rx_queue[i]; + work_done_per_q = + gfar_clean_rx_ring(rx_queue, budget_per_q); + work_done += work_done_per_q; + + /* finished processing this queue */ + if (work_done_per_q < budget_per_q) { + /* clear active queue hw indication */ + gfar_write(&regs->rstat, + RSTAT_CLEAR_RXF0 >> i); + num_act_queues--; + + if (!num_act_queues) + break; } } - if (tx_cleaned) - return budget; - - if (rx_cleaned < budget) { + if (!num_act_queues) { + u32 imask; napi_complete(napi); /* Clear the halt bit in RSTAT */ gfar_write(&regs->rstat, gfargrp->rstat); - gfar_write(&regs->imask, IMASK_DEFAULT); + spin_lock_irq(&gfargrp->grplock); + imask = gfar_read(&regs->imask); + imask |= IMASK_RX_DEFAULT; + gfar_write(&regs->imask, imask); + spin_unlock_irq(&gfargrp->grplock); + } + + return work_done; +} + +static int gfar_poll_tx(struct napi_struct *napi, int budget) +{ + struct gfar_priv_grp *gfargrp = + container_of(napi, struct gfar_priv_grp, napi_tx); + struct gfar_private
*priv = gfargrp->priv; + struct gfar __iomem *regs = gfargrp->regs; + struct gfar_priv_tx_q *tx_queue = NULL; + int has_tx_work = 0; + int i; + + /* Clear IEVENT, so interrupts aren't called again + * because of the packets that have already arrived + */ + gfar_write(&regs->ievent, IEVENT_TX_MASK); + + for_each_set_bit(i, &gfargrp->tx_bit_map, priv->num_tx_queues) { + tx_queue = priv->tx_queue[i]; + /* run Tx cleanup to completion */ + if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) { + gfar_clean_tx_ring(tx_queue); + has_tx_work = 1; + } + } + + if (!has_tx_work) { + u32 imask; + napi_complete(napi); - /* If we are coalescing interrupts, update the timer */ - /* Otherwise, clear it */ - gfar_configure_coalescing(priv, - gfargrp->rx_bit_map, gfargrp->tx_bit_map); + spin_lock_irq(&gfargrp->grplock); + imask = gfar_read(&regs->imask); + imask |= IMASK_TX_DEFAULT; + gfar_write(&regs->imask, imask); + spin_unlock_irq(&gfargrp->grplock); } - return rx_cleaned; + return 0; } + #ifdef CONFIG_NET_POLL_CONTROLLER -/* - * Polling 'interrupt' - used by things like netconsole to send skbs +/* Polling 'interrupt' - used by things like netconsole to send skbs * without having to re-enable interrupts. It's not called while * the interrupt routine is executing. */ static void gfar_netpoll(struct net_device *dev) { struct gfar_private *priv = netdev_priv(dev); - int i = 0; + int i; /* If the device has multiple interrupts, run tx/rx */ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { for (i = 0; i < priv->num_grps; i++) { - disable_irq(priv->gfargrp[i].interruptTransmit); - disable_irq(priv->gfargrp[i].interruptReceive); - disable_irq(priv->gfargrp[i].interruptError); - gfar_interrupt(priv->gfargrp[i].interruptTransmit, - &priv->gfargrp[i]); - enable_irq(priv->gfargrp[i].interruptError); - enable_irq(priv->gfargrp[i].interruptReceive); - enable_irq(priv->gfargrp[i].interruptTransmit); + struct gfar_priv_grp *grp = &priv->gfargrp[i]; + + disable_irq(gfar_irq(grp, TX)->irq); + disable_irq(gfar_irq(grp, RX)->irq); + disable_irq(gfar_irq(grp, ER)->irq); + gfar_interrupt(gfar_irq(grp, TX)->irq, grp); + enable_irq(gfar_irq(grp, ER)->irq); + enable_irq(gfar_irq(grp, RX)->irq); + enable_irq(gfar_irq(grp, TX)->irq); } } else { for (i = 0; i < priv->num_grps; i++) { - disable_irq(priv->gfargrp[i].interruptTransmit); - gfar_interrupt(priv->gfargrp[i].interruptTransmit, - &priv->gfargrp[i]); - enable_irq(priv->gfargrp[i].interruptTransmit); + struct gfar_priv_grp *grp = &priv->gfargrp[i]; + + disable_irq(gfar_irq(grp, TX)->irq); + gfar_interrupt(gfar_irq(grp, TX)->irq, grp); + enable_irq(gfar_irq(grp, TX)->irq); } } } @@ -2958,85 +3094,19 @@ static irqreturn_t gfar_interrupt(int irq, void *grp_id) static void adjust_link(struct net_device *dev) { struct gfar_private *priv = netdev_priv(dev); - struct gfar __iomem *regs = priv->gfargrp[0].regs; - unsigned long flags; struct phy_device *phydev = priv->phydev; - int new_state = 0; - - local_irq_save(flags); - lock_tx_qs(priv); - - if (phydev->link) { - u32 tempval = gfar_read(&regs->maccfg2); - u32 ecntrl = gfar_read(&regs->ecntrl); - - /* Now we make sure that we can be in full duplex mode. - * If not, we operate in half-duplex mode.
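/* Editorial sketch (not from the patch): the speed handling removed here
 * and re-added in gfar_update_link_state() below boils down to this
 * mapping (constants as used in this driver):
 *
 *	1000 Mb/s: MACCFG2_IF = MACCFG2_GMII, ECNTRL_R100 cleared
 *	 100 Mb/s: MACCFG2_IF = MACCFG2_MII,  ECNTRL_R100 set
 *	  10 Mb/s: MACCFG2_IF = MACCFG2_MII,  ECNTRL_R100 cleared
 *
 * ECNTRL_R100 only matters in the reduced-pin modes, where it
 * distinguishes 100 from 10 Mb/s.
 */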
*/ - if (phydev->duplex != priv->oldduplex) { - new_state = 1; - if (!(phydev->duplex)) - tempval &= ~(MACCFG2_FULL_DUPLEX); - else - tempval |= MACCFG2_FULL_DUPLEX; - - priv->oldduplex = phydev->duplex; - } - if (phydev->speed != priv->oldspeed) { - new_state = 1; - switch (phydev->speed) { - case 1000: - tempval = - ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII); - - ecntrl &= ~(ECNTRL_R100); - break; - case 100: - case 10: - tempval = - ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII); - - /* Reduced mode distinguishes - * between 10 and 100 */ - if (phydev->speed == SPEED_100) - ecntrl |= ECNTRL_R100; - else - ecntrl &= ~(ECNTRL_R100); - break; - default: - netif_warn(priv, link, dev, - "Ack! Speed (%d) is not 10/100/1000!\n", - phydev->speed); - break; - } - - priv->oldspeed = phydev->speed; - } - - gfar_write(®s->maccfg2, tempval); - gfar_write(®s->ecntrl, ecntrl); - - if (!priv->oldlink) { - new_state = 1; - priv->oldlink = 1; - } - } else if (priv->oldlink) { - new_state = 1; - priv->oldlink = 0; - priv->oldspeed = 0; - priv->oldduplex = -1; - } - - if (new_state && netif_msg_link(priv)) - phy_print_status(phydev); - unlock_tx_qs(priv); - local_irq_restore(flags); + if (unlikely(phydev->link != priv->oldlink || + phydev->duplex != priv->oldduplex || + phydev->speed != priv->oldspeed)) + gfar_update_link_state(priv); } /* Update the hash table based on the current list of multicast * addresses we subscribe to. Also, change the promiscuity of * the device based on the flags (this function is called - * whenever dev->flags is changed */ + * whenever dev->flags is changed + */ static void gfar_set_multi(struct net_device *dev) { struct netdev_hw_addr *ha; @@ -3098,7 +3168,8 @@ static void gfar_set_multi(struct net_device *dev) /* If we have extended hash tables, we need to * clear the exact match registers to prepare for - * setting them */ + * setting them + */ if (priv->extended_hash) { em_num = GFAR_EM_NUM + 1; gfar_clear_exact_match(dev); @@ -3124,13 +3195,14 @@ static void gfar_set_multi(struct net_device *dev) /* Clears each of the exact match registers to zero, so they - * don't interfere with normal reception */ + * don't interfere with normal reception + */ static void gfar_clear_exact_match(struct net_device *dev) { int idx; static const u8 zero_arr[ETH_ALEN] = {0, 0, 0, 0, 0, 0}; - for(idx = 1;idx < GFAR_EM_NUM + 1;idx++) + for (idx = 1; idx < GFAR_EM_NUM + 1; idx++) gfar_set_mac_for_addr(dev, idx, zero_arr); } @@ -3146,7 +3218,8 @@ static void gfar_clear_exact_match(struct net_device *dev) * hash index which gaddr register to use, and the 5 other bits * indicate which bit (assuming an IBM numbering scheme, which * for PowerPC (tm) is usually the case) in the register holds - * the entry. */ + * the entry. + */ static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr) { u32 tempval; @@ -3178,8 +3251,9 @@ static void gfar_set_mac_for_addr(struct net_device *dev, int num, macptr += num*2; - /* Now copy it into the mac registers backwards, cuz */ - /* little endian is silly */ + /* Now copy it into the mac registers backwards, cuz + * little endian is silly + */ for (idx = 0; idx < ETH_ALEN; idx++) tmpbuf[ETH_ALEN - 1 - idx] = addr[idx]; @@ -3211,7 +3285,8 @@ static irqreturn_t gfar_error(int irq, void *grp_id) /* Hmm... 
*/ if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv)) - netdev_dbg(dev, "error interrupt (ievent=0x%08x imask=0x%08x)\n", + netdev_dbg(dev, + "error interrupt (ievent=0x%08x imask=0x%08x)\n", events, gfar_read(®s->imask)); /* Update the error counters */ @@ -3228,7 +3303,7 @@ static irqreturn_t gfar_error(int irq, void *grp_id) netif_dbg(priv, tx_err, dev, "TX FIFO underrun, packet dropped\n"); dev->stats.tx_dropped++; - priv->extra_stats.tx_underrun++; + atomic64_inc(&priv->extra_stats.tx_underrun); local_irq_save(flags); lock_tx_qs(priv); @@ -3243,7 +3318,7 @@ static irqreturn_t gfar_error(int irq, void *grp_id) } if (events & IEVENT_BSY) { dev->stats.rx_errors++; - priv->extra_stats.rx_bsy++; + atomic64_inc(&priv->extra_stats.rx_bsy); gfar_receive(irq, grp_id); @@ -3252,24 +3327,132 @@ static irqreturn_t gfar_error(int irq, void *grp_id) } if (events & IEVENT_BABR) { dev->stats.rx_errors++; - priv->extra_stats.rx_babr++; + atomic64_inc(&priv->extra_stats.rx_babr); netif_dbg(priv, rx_err, dev, "babbling RX error\n"); } if (events & IEVENT_EBERR) { - priv->extra_stats.eberr++; + atomic64_inc(&priv->extra_stats.eberr); netif_dbg(priv, rx_err, dev, "bus error\n"); } if (events & IEVENT_RXC) netif_dbg(priv, rx_status, dev, "control frame\n"); if (events & IEVENT_BABT) { - priv->extra_stats.tx_babt++; + atomic64_inc(&priv->extra_stats.tx_babt); netif_dbg(priv, tx_err, dev, "babbling TX error\n"); } return IRQ_HANDLED; } +static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv) +{ + struct phy_device *phydev = priv->phydev; + u32 val = 0; + + if (!phydev->duplex) + return val; + + if (!priv->pause_aneg_en) { + if (priv->tx_pause_en) + val |= MACCFG1_TX_FLOW; + if (priv->rx_pause_en) + val |= MACCFG1_RX_FLOW; + } else { + u16 lcl_adv, rmt_adv; + u8 flowctrl; + /* get link partner capabilities */ + rmt_adv = 0; + if (phydev->pause) + rmt_adv = LPA_PAUSE_CAP; + if (phydev->asym_pause) + rmt_adv |= LPA_PAUSE_ASYM; + + lcl_adv = mii_advertise_flowctrl(phydev->advertising); + + flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv); + if (flowctrl & FLOW_CTRL_TX) + val |= MACCFG1_TX_FLOW; + if (flowctrl & FLOW_CTRL_RX) + val |= MACCFG1_RX_FLOW; + } + + return val; +} + +static noinline void gfar_update_link_state(struct gfar_private *priv) +{ + struct gfar __iomem *regs = priv->gfargrp[0].regs; + struct phy_device *phydev = priv->phydev; + + if (unlikely(test_bit(GFAR_RESETTING, &priv->state))) + return; + + if (phydev->link) { + u32 tempval1 = gfar_read(®s->maccfg1); + u32 tempval = gfar_read(®s->maccfg2); + u32 ecntrl = gfar_read(®s->ecntrl); + + if (phydev->duplex != priv->oldduplex) { + if (!(phydev->duplex)) + tempval &= ~(MACCFG2_FULL_DUPLEX); + else + tempval |= MACCFG2_FULL_DUPLEX; + + priv->oldduplex = phydev->duplex; + } + + if (phydev->speed != priv->oldspeed) { + switch (phydev->speed) { + case 1000: + tempval = + ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII); + + ecntrl &= ~(ECNTRL_R100); + break; + case 100: + case 10: + tempval = + ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII); + + /* Reduced mode distinguishes + * between 10 and 100 + */ + if (phydev->speed == SPEED_100) + ecntrl |= ECNTRL_R100; + else + ecntrl &= ~(ECNTRL_R100); + break; + default: + netif_warn(priv, link, priv->ndev, + "Ack! 
Speed (%d) is not 10/100/1000!\n", + phydev->speed); + break; + } + + priv->oldspeed = phydev->speed; + } + + tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW); + tempval1 |= gfar_get_flowctrl_cfg(priv); + + gfar_write(®s->maccfg1, tempval1); + gfar_write(®s->maccfg2, tempval); + gfar_write(®s->ecntrl, ecntrl); + + if (!priv->oldlink) + priv->oldlink = 1; + + } else if (priv->oldlink) { + priv->oldlink = 0; + priv->oldspeed = 0; + priv->oldduplex = -1; + } + + if (netif_msg_link(priv)) + phy_print_status(phydev); +} + static struct of_device_id gfar_match[] = { { diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h index 40c33a7554c..84632c569f2 100644 --- a/drivers/net/ethernet/freescale/gianfar.h +++ b/drivers/net/ethernet/freescale/gianfar.h @@ -1,5 +1,5 @@ /* - * drivers/net/gianfar.h + * drivers/net/ethernet/freescale/gianfar.h * * Gianfar Ethernet Driver * Driver for FEC on MPC8540 and TSEC on MPC8540/MPC8560 @@ -9,7 +9,7 @@ * Maintainer: Kumar Gala * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com> * - * Copyright 2002-2009, 2011 Freescale Semiconductor, Inc. + * Copyright 2002-2009, 2011-2013 Freescale Semiconductor, Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the @@ -29,7 +29,6 @@ #include <linux/errno.h> #include <linux/slab.h> #include <linux/interrupt.h> -#include <linux/init.h> #include <linux/delay.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> @@ -78,11 +77,8 @@ struct ethtool_rx_list { #define INCREMENTAL_BUFFER_SIZE 512 #define PHY_INIT_TIMEOUT 100000 -#define GFAR_PHY_CHANGE_TIME 2 -#define DEVICE_NAME "%s: Gianfar Ethernet Controller Version 1.2, " #define DRV_NAME "gfar-enet" -extern const char gfar_driver_name[]; extern const char gfar_driver_version[]; /* MAXIMUM NUMBER OF QUEUES SUPPORTED */ @@ -149,6 +145,10 @@ extern const char gfar_driver_version[]; | SUPPORTED_Autoneg \ | SUPPORTED_MII) +#define GFAR_SUPPORTED_GBIT (SUPPORTED_1000baseT_Full \ + | SUPPORTED_Pause \ + | SUPPORTED_Asym_Pause) + /* TBI register addresses */ #define MII_TBICON 0x11 @@ -294,7 +294,9 @@ extern const char gfar_driver_version[]; #define RCTRL_PADDING(x) ((x << 16) & RCTRL_PAL_MASK) -#define RSTAT_CLEAR_RHALT 0x00800000 +#define RSTAT_CLEAR_RHALT 0x00800000 +#define RSTAT_CLEAR_RXF0 0x00000080 +#define RSTAT_RXF_MASK 0x000000ff #define TCTRL_IPCSEN 0x00004000 #define TCTRL_TUCSEN 0x00002000 @@ -304,8 +306,16 @@ extern const char gfar_driver_version[]; #define TCTRL_TFCPAUSE 0x00000008 #define TCTRL_TXSCHED_MASK 0x00000006 #define TCTRL_TXSCHED_INIT 0x00000000 +/* priority scheduling */ #define TCTRL_TXSCHED_PRIO 0x00000002 +/* weighted round-robin scheduling (WRRS) */ #define TCTRL_TXSCHED_WRRS 0x00000004 +/* default WRRS weight and policy setting, + * tailored to the tr03wt and tr47wt registers: + * equal weight for all Tx Qs, measured in 64byte units + */ +#define DEFAULT_WRRS_WEIGHT 0x18181818 + #define TCTRL_INIT_CSUM (TCTRL_TUCSEN | TCTRL_IPCSEN) #define IEVENT_INIT_CLEAR 0xffffffff @@ -367,8 +377,11 @@ extern const char gfar_driver_version[]; IMASK_RXFEN0 | IMASK_BSY | IMASK_EBERR | IMASK_BABR | \ IMASK_XFUN | IMASK_RXC | IMASK_BABT | IMASK_DPE \ | IMASK_PERR) -#define IMASK_RTX_DISABLED ((~(IMASK_RXFEN0 | IMASK_TXFEN | IMASK_BSY)) \ - & IMASK_DEFAULT) +#define IMASK_RX_DEFAULT (IMASK_RXFEN0 | IMASK_BSY) +#define IMASK_TX_DEFAULT (IMASK_TXFEN | IMASK_TXBEN) + +#define IMASK_RX_DISABLED ((~(IMASK_RX_DEFAULT)) & 
IMASK_DEFAULT) +#define IMASK_TX_DISABLED ((~(IMASK_TX_DEFAULT)) & IMASK_DEFAULT) /* Fifo management */ #define FIFO_TX_THR_MASK 0x01ff @@ -399,7 +412,9 @@ extern const char gfar_driver_version[]; /* This default RIR value directly corresponds * to the 3-bit hash value generated */ -#define DEFAULT_RIR0 0x05397700 +#define DEFAULT_8RXQ_RIR0 0x05397700 +/* Map even hash values to Q0, and odd ones to Q1 */ +#define DEFAULT_2RXQ_RIR0 0x04104100 /* RQFCR register bits */ #define RQFCR_GPI 0x80000000 @@ -520,7 +535,7 @@ extern const char gfar_driver_version[]; #define RXFCB_PERR_MASK 0x000c #define RXFCB_PERR_BADL3 0x0008 -#define GFAR_INT_NAME_MAX IFNAMSIZ + 4 +#define GFAR_INT_NAME_MAX (IFNAMSIZ + 6) /* '_g#_xx' */ struct txbd8 { @@ -564,7 +579,7 @@ struct rxfcb { }; struct gianfar_skb_cb { - int alignamount; + unsigned int bytes_sent; /* bytes-on-wire (i.e. no FCB) */ }; #define GFAR_CB(skb) ((struct gianfar_skb_cb *)((skb)->cb)) @@ -622,36 +637,28 @@ struct rmon_mib }; struct gfar_extra_stats { - u64 kernel_dropped; - u64 rx_large; - u64 rx_short; - u64 rx_nonoctet; - u64 rx_crcerr; - u64 rx_overrun; - u64 rx_bsy; - u64 rx_babr; - u64 rx_trunc; - u64 eberr; - u64 tx_babt; - u64 tx_underrun; - u64 rx_skbmissing; - u64 tx_timeout; + atomic64_t rx_large; + atomic64_t rx_short; + atomic64_t rx_nonoctet; + atomic64_t rx_crcerr; + atomic64_t rx_overrun; + atomic64_t rx_bsy; + atomic64_t rx_babr; + atomic64_t rx_trunc; + atomic64_t eberr; + atomic64_t tx_babt; + atomic64_t tx_underrun; + atomic64_t rx_skbmissing; + atomic64_t tx_timeout; }; #define GFAR_RMON_LEN ((sizeof(struct rmon_mib) - 16)/sizeof(u32)) -#define GFAR_EXTRA_STATS_LEN (sizeof(struct gfar_extra_stats)/sizeof(u64)) +#define GFAR_EXTRA_STATS_LEN \ + (sizeof(struct gfar_extra_stats)/sizeof(atomic64_t)) -/* Number of stats in the stats structure (ignore car and cam regs)*/ +/* Number of stats exported via ethtool */ #define GFAR_STATS_LEN (GFAR_RMON_LEN + GFAR_EXTRA_STATS_LEN) -#define GFAR_INFOSTR_LEN 32 - -struct gfar_stats { - u64 extra[GFAR_EXTRA_STATS_LEN]; - u64 rmon[GFAR_RMON_LEN]; -}; - - struct gfar { u32 tsec_id; /* 0x.000 - Controller ID register */ u32 tsec_id2; /* 0x.004 - Controller ID2 register */ @@ -878,7 +885,6 @@ struct gfar { #define FSL_GIANFAR_DEV_HAS_CSUM 0x00000010 #define FSL_GIANFAR_DEV_HAS_VLAN 0x00000020 #define FSL_GIANFAR_DEV_HAS_EXTENDED_HASH 0x00000040 -#define FSL_GIANFAR_DEV_HAS_PADDING 0x00000080 #define FSL_GIANFAR_DEV_HAS_MAGIC_PACKET 0x00000100 #define FSL_GIANFAR_DEV_HAS_BD_STASHING 0x00000200 #define FSL_GIANFAR_DEV_HAS_BUF_STASHING 0x00000400 @@ -890,8 +896,8 @@ struct gfar { #define DEFAULT_MAPPING 0xFF #endif -#define ISRG_SHIFT_TX 0x10 -#define ISRG_SHIFT_RX 0x18 +#define ISRG_RR0 0x80000000 +#define ISRG_TR0 0x00800000 /* The same driver can operate in two modes */ /* SQ_SG_MODE: Single Queue Single Group Mode @@ -903,6 +909,22 @@ enum { MQ_MG_MODE }; +/* GFAR_SQ_POLLING: Single Queue NAPI polling mode + * The driver supports a single pair of RX/Tx queues + * per interrupt group (Rx/Tx int line). MQ_MG mode + * devices have 2 interrupt groups, so the device will + * have a total of 2 Tx and 2 Rx queues in this case. + * GFAR_MQ_POLLING: Multi Queue NAPI polling mode + * The driver supports all the 8 Rx and Tx HW queues + * each queue mapped by the Device Tree to one of + * the 2 interrupt groups. This mode implies significant + * processing overhead (CPU and controller level). 
+ */ +enum gfar_poll_mode { + GFAR_SQ_POLLING = 0, + GFAR_MQ_POLLING +}; + /* * Per TX queue stats */ @@ -932,26 +954,25 @@ struct tx_q_stats { * @txtime: coalescing value if based on time */ struct gfar_priv_tx_q { + /* cacheline 1 */ spinlock_t txlock __attribute__ ((aligned (SMP_CACHE_BYTES))); - struct sk_buff ** tx_skbuff; - /* Buffer descriptor pointers */ - dma_addr_t tx_bd_dma_base; struct txbd8 *tx_bd_base; struct txbd8 *cur_tx; - struct txbd8 *dirty_tx; + unsigned int num_txbdfree; + unsigned short skb_curtx; + unsigned short tx_ring_size; struct tx_q_stats stats; - struct net_device *dev; struct gfar_priv_grp *grp; - u16 skb_curtx; - u16 skb_dirtytx; - u16 qindex; - unsigned int tx_ring_size; - unsigned int num_txbdfree; + /* cacheline 2 */ + struct net_device *dev; + struct sk_buff **tx_skbuff; + struct txbd8 *dirty_tx; + unsigned short skb_dirtytx; + unsigned short qindex; /* Configuration info for the coalescing features */ - unsigned char txcoalescing; + unsigned int txcoalescing; unsigned long txic; - unsigned short txcount; - unsigned short txtime; + dma_addr_t tx_bd_dma_base; }; /* @@ -965,7 +986,6 @@ struct rx_q_stats { /** * struct gfar_priv_rx_q - per rx queue structure - * @rxlock: per queue rx spin lock * @rx_skbuff: skb pointers * @skb_currx: currently use skb pointer * @rx_bd_base: First rx buffer descriptor @@ -978,8 +998,7 @@ struct rx_q_stats { */ struct gfar_priv_rx_q { - spinlock_t rxlock __attribute__ ((aligned (SMP_CACHE_BYTES))); - struct sk_buff ** rx_skbuff; + struct sk_buff **rx_skbuff __aligned(SMP_CACHE_BYTES); dma_addr_t rx_bd_dma_base; struct rxbd8 *rx_bd_base; struct rxbd8 *cur_rx; @@ -994,43 +1013,48 @@ struct gfar_priv_rx_q { unsigned long rxic; }; +enum gfar_irqinfo_id { + GFAR_TX = 0, + GFAR_RX = 1, + GFAR_ER = 2, + GFAR_NUM_IRQS = 3 +}; + +struct gfar_irqinfo { + unsigned int irq; + char name[GFAR_INT_NAME_MAX]; +}; + /** * struct gfar_priv_grp - per group structure * @napi: the napi poll function * @priv: back pointer to the priv structure * @regs: the ioremapped register space for this group - * @grp_id: group id for this group - * @interruptTransmit: The TX interrupt number for this group - * @interruptReceive: The RX interrupt number for this group - * @interruptError: The ERROR interrupt number for this group - * @int_name_tx: tx interrupt name for this group - * @int_name_rx: rx interrupt name for this group - * @int_name_er: er interrupt name for this group + * @irqinfo: TX/RX/ER irq data for this group */ struct gfar_priv_grp { - spinlock_t grplock __attribute__ ((aligned (SMP_CACHE_BYTES))); - struct napi_struct napi; - struct gfar_private *priv; + spinlock_t grplock __aligned(SMP_CACHE_BYTES); + struct napi_struct napi_rx; + struct napi_struct napi_tx; struct gfar __iomem *regs; - unsigned int grp_id; - unsigned long rx_bit_map; - unsigned long tx_bit_map; + struct gfar_priv_tx_q *tx_queue; + struct gfar_priv_rx_q *rx_queue; + unsigned int tstat; + unsigned int rstat; + + struct gfar_private *priv; unsigned long num_tx_queues; + unsigned long tx_bit_map; unsigned long num_rx_queues; - unsigned int rstat; - unsigned int tstat; - unsigned int imask; - unsigned int ievent; - unsigned int interruptTransmit; - unsigned int interruptReceive; - unsigned int interruptError; - - char int_name_tx[GFAR_INT_NAME_MAX]; - char int_name_rx[GFAR_INT_NAME_MAX]; - char int_name_er[GFAR_INT_NAME_MAX]; + unsigned long rx_bit_map; + + struct gfar_irqinfo *irqinfo[GFAR_NUM_IRQS]; }; +#define gfar_irq(grp, ID) \ + ((grp)->irqinfo[GFAR_##ID]) + enum 
gfar_errata { GFAR_ERRATA_74 = 0x01, GFAR_ERRATA_76 = 0x02, @@ -1038,6 +1062,11 @@ enum gfar_errata { GFAR_ERRATA_12 = 0x08, /* a.k.a errata eTSEC49 */ }; +enum gfar_dev_state { + GFAR_DOWN = 1, + GFAR_RESETTING +}; + /* Struct stolen almost completely (and shamelessly) from the FCC enet source * (Ok, that's not so true anymore, but there is a family resemblance) * The GFAR buffer descriptors track the ring buffers. The rx_bd_base @@ -1048,79 +1077,85 @@ enum gfar_errata { * the buffer descriptor determines the actual condition. */ struct gfar_private { - - /* Indicates how many tx, rx queues are enabled */ - unsigned int num_tx_queues; - unsigned int num_rx_queues; - unsigned int num_grps; - unsigned int mode; - - /* The total tx and rx ring size for the enabled queues */ - unsigned int total_tx_ring_size; - unsigned int total_rx_ring_size; - - struct device_node *node; + struct device *dev; struct net_device *ndev; - struct platform_device *ofdev; enum gfar_errata errata; - - struct gfar_priv_grp gfargrp[MAXGROUPS]; - struct gfar_priv_tx_q *tx_queue[MAX_TX_QS]; - struct gfar_priv_rx_q *rx_queue[MAX_RX_QS]; - - /* RX per device parameters */ unsigned int rx_buffer_size; - unsigned int rx_stash_size; - unsigned int rx_stash_index; - u32 cur_filer_idx; + u16 uses_rxfcb; + u16 padding; + u32 device_flags; - struct sk_buff_head rx_recycle; + /* HW time stamping enabled flag */ + int hwts_rx_en; + int hwts_tx_en; - /* RX queue filer rule set*/ - struct ethtool_rx_list rx_list; - struct mutex rx_queue_access; + struct gfar_priv_tx_q *tx_queue[MAX_TX_QS]; + struct gfar_priv_rx_q *rx_queue[MAX_RX_QS]; + struct gfar_priv_grp gfargrp[MAXGROUPS]; - /* Hash registers and their width */ - u32 __iomem *hash_regs[16]; - int hash_width; + unsigned long state; - /* global parameters */ - unsigned int fifo_threshold; - unsigned int fifo_starve; - unsigned int fifo_starve_off; + unsigned short mode; + unsigned short poll_mode; + unsigned int num_tx_queues; + unsigned int num_rx_queues; + unsigned int num_grps; - /* Bitfield update lock */ - spinlock_t bflock; + /* Network Statistics */ + struct gfar_extra_stats extra_stats; + /* PHY stuff */ phy_interface_t interface; struct device_node *phy_node; struct device_node *tbi_node; - u32 device_flags; - unsigned char - extended_hash:1, - bd_stash_en:1, - rx_filer_enable:1, - wol_en:1; /* Wake-on-LAN enabled */ - unsigned short padding; - - /* PHY stuff */ struct phy_device *phydev; struct mii_bus *mii_bus; int oldspeed; int oldduplex; int oldlink; + /* Bitfield update lock */ + spinlock_t bflock; + uint32_t msg_enable; struct work_struct reset_task; - /* Network Statistics */ - struct gfar_extra_stats extra_stats; + struct platform_device *ofdev; + unsigned char + extended_hash:1, + bd_stash_en:1, + rx_filer_enable:1, + /* Wake-on-LAN enabled */ + wol_en:1, + /* Enable priorty based Tx scheduling in Hw */ + prio_sched_en:1, + /* Flow control flags */ + pause_aneg_en:1, + tx_pause_en:1, + rx_pause_en:1; - /* HW time stamping enabled flag */ - int hwts_rx_en; - int hwts_tx_en; + /* The total tx and rx ring size for the enabled queues */ + unsigned int total_tx_ring_size; + unsigned int total_rx_ring_size; + + u32 rqueue; + u32 tqueue; + + /* RX per device parameters */ + unsigned int rx_stash_size; + unsigned int rx_stash_index; + + u32 cur_filer_idx; + + /* RX queue filer rule set*/ + struct ethtool_rx_list rx_list; + struct mutex rx_queue_access; + + /* Hash registers and their width */ + u32 __iomem *hash_regs[16]; + int hash_width; /*Filer table*/ unsigned int 
ftp_rqfpr[MAX_FILER_IDX + 1]; @@ -1134,16 +1169,16 @@ static inline int gfar_has_errata(struct gfar_private *priv, return priv->errata & err; } -static inline u32 gfar_read(volatile unsigned __iomem *addr) +static inline u32 gfar_read(unsigned __iomem *addr) { u32 val; - val = in_be32(addr); + val = ioread32be(addr); return val; } -static inline void gfar_write(volatile unsigned __iomem *addr, u32 val) +static inline void gfar_write(unsigned __iomem *addr, u32 val) { - out_be32(addr, val); + iowrite32be(val, addr); } static inline void gfar_write_filer(struct gfar_private *priv, @@ -1166,22 +1201,42 @@ static inline void gfar_read_filer(struct gfar_private *priv, *fpr = gfar_read(®s->rqfpr); } -extern void lock_rx_qs(struct gfar_private *priv); -extern void lock_tx_qs(struct gfar_private *priv); -extern void unlock_rx_qs(struct gfar_private *priv); -extern void unlock_tx_qs(struct gfar_private *priv); -extern irqreturn_t gfar_receive(int irq, void *dev_id); -extern int startup_gfar(struct net_device *dev); -extern void stop_gfar(struct net_device *dev); -extern void gfar_halt(struct net_device *dev); -extern void gfar_phy_test(struct mii_bus *bus, struct phy_device *phydev, - int enable, u32 regnum, u32 read); -extern void gfar_configure_coalescing(struct gfar_private *priv, - unsigned long tx_mask, unsigned long rx_mask); -void gfar_init_sysfs(struct net_device *dev); +static inline void gfar_write_isrg(struct gfar_private *priv) +{ + struct gfar __iomem *regs = priv->gfargrp[0].regs; + u32 __iomem *baddr = ®s->isrg0; + u32 isrg = 0; + int grp_idx, i; + + for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) { + struct gfar_priv_grp *grp = &priv->gfargrp[grp_idx]; + + for_each_set_bit(i, &grp->rx_bit_map, priv->num_rx_queues) { + isrg |= (ISRG_RR0 >> i); + } + + for_each_set_bit(i, &grp->tx_bit_map, priv->num_tx_queues) { + isrg |= (ISRG_TR0 >> i); + } + + gfar_write(baddr, isrg); + + baddr++; + isrg = 0; + } +} + +irqreturn_t gfar_receive(int irq, void *dev_id); +int startup_gfar(struct net_device *dev); +void stop_gfar(struct net_device *dev); +void reset_gfar(struct net_device *dev); +void gfar_mac_reset(struct gfar_private *priv); +void gfar_halt(struct gfar_private *priv); +void gfar_start(struct gfar_private *priv); +void gfar_phy_test(struct mii_bus *bus, struct phy_device *phydev, int enable, + u32 regnum, u32 read); +void gfar_configure_coalescing_all(struct gfar_private *priv); int gfar_set_features(struct net_device *dev, netdev_features_t features); -extern void gfar_check_rx_parser_mode(struct gfar_private *priv); -extern void gfar_vlan_mode(struct net_device *dev, netdev_features_t features); extern const struct ethtool_ops gfar_ethtool_ops; @@ -1213,4 +1268,7 @@ struct filer_table { struct gfar_filer_entry fe[MAX_FILER_CACHE_IDX + 20]; }; +/* The gianfar_ptp module will set this variable */ +extern int gfar_phc_index; + #endif /* __GIANFAR_H */ diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c index 5a3b2e5b288..76d70708f86 100644 --- a/drivers/net/ethernet/freescale/gianfar_ethtool.c +++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c @@ -1,5 +1,5 @@ /* - * drivers/net/gianfar_ethtool.c + * drivers/net/ethernet/freescale/gianfar_ethtool.c * * Gianfar Ethernet Driver * Ethtool support for Gianfar Enet @@ -22,10 +22,10 @@ #include <linux/string.h> #include <linux/errno.h> #include <linux/interrupt.h> -#include <linux/init.h> #include <linux/delay.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> 
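/* Editor's note — illustrative sketch, not part of the patch: a tiny
 * userspace model of the queue-to-group interrupt steering computed by
 * the new gfar_write_isrg() helper in the gianfar.h hunk above. Bit
 * (ISRG_RR0 >> i) routes Rx queue i, and (ISRG_TR0 >> i) routes Tx
 * queue i, to the group whose ISRGn register carries the bit. The two
 * constants are copied from the hunk above; the queue bitmaps passed in
 * main() are made-up example values.
 */
#include <stdint.h>
#include <stdio.h>

#define ISRG_RR0 0x80000000u
#define ISRG_TR0 0x00800000u

static uint32_t isrg_for_group(unsigned long rx_bit_map,
			       unsigned long tx_bit_map)
{
	uint32_t isrg = 0;
	int i;

	for (i = 0; i < 8; i++) {	/* up to 8 HW queues (MAX_RX_QS) */
		if (rx_bit_map & (1UL << i))
			isrg |= ISRG_RR0 >> i;
		if (tx_bit_map & (1UL << i))
			isrg |= ISRG_TR0 >> i;
	}
	return isrg;
}

int main(void)
{
	/* SQ_POLLING-style split: queue 0 -> group 0, queue 1 -> group 1 */
	printf("ISRG0 = 0x%08x\n", isrg_for_group(0x1, 0x1)); /* 0x80800000 */
	printf("ISRG1 = 0x%08x\n", isrg_for_group(0x2, 0x2)); /* 0x40400000 */
	return 0;
}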
+#include <linux/net_tstamp.h> #include <linux/skbuff.h> #include <linux/spinlock.h> #include <linux/mm.h> @@ -44,22 +44,23 @@ #include "gianfar.h" -extern void gfar_start(struct net_device *dev); -extern int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit); - #define GFAR_MAX_COAL_USECS 0xffff #define GFAR_MAX_COAL_FRAMES 0xff static void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy, - u64 * buf); + u64 *buf); static void gfar_gstrings(struct net_device *dev, u32 stringset, u8 * buf); -static int gfar_gcoalesce(struct net_device *dev, struct ethtool_coalesce *cvals); -static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals); -static void gfar_gringparam(struct net_device *dev, struct ethtool_ringparam *rvals); -static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rvals); -static void gfar_gdrvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo); - -static char stat_gstrings[][ETH_GSTRING_LEN] = { - "rx-dropped-by-kernel", +static int gfar_gcoalesce(struct net_device *dev, + struct ethtool_coalesce *cvals); +static int gfar_scoalesce(struct net_device *dev, + struct ethtool_coalesce *cvals); +static void gfar_gringparam(struct net_device *dev, + struct ethtool_ringparam *rvals); +static int gfar_sringparam(struct net_device *dev, + struct ethtool_ringparam *rvals); +static void gfar_gdrvinfo(struct net_device *dev, + struct ethtool_drvinfo *drvinfo); + +static const char stat_gstrings[][ETH_GSTRING_LEN] = { "rx-large-frame-errors", "rx-short-frame-errors", "rx-non-octet-errors", @@ -129,32 +130,30 @@ static void gfar_gstrings(struct net_device *dev, u32 stringset, u8 * buf) memcpy(buf, stat_gstrings, GFAR_STATS_LEN * ETH_GSTRING_LEN); else memcpy(buf, stat_gstrings, - GFAR_EXTRA_STATS_LEN * ETH_GSTRING_LEN); + GFAR_EXTRA_STATS_LEN * ETH_GSTRING_LEN); } /* Fill in an array of 64-bit statistics from various sources. 
* This array will be appended to the end of the ethtool_stats * structure, and returned to user space */ -static void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy, u64 * buf) +static void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy, + u64 *buf) { int i; struct gfar_private *priv = netdev_priv(dev); struct gfar __iomem *regs = priv->gfargrp[0].regs; - u64 *extra = (u64 *) & priv->extra_stats; + atomic64_t *extra = (atomic64_t *)&priv->extra_stats; + + for (i = 0; i < GFAR_EXTRA_STATS_LEN; i++) + buf[i] = atomic64_read(&extra[i]); if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) { u32 __iomem *rmon = (u32 __iomem *) ®s->rmon; - struct gfar_stats *stats = (struct gfar_stats *) buf; - for (i = 0; i < GFAR_RMON_LEN; i++) - stats->rmon[i] = (u64) gfar_read(&rmon[i]); - - for (i = 0; i < GFAR_EXTRA_STATS_LEN; i++) - stats->extra[i] = extra[i]; - } else - for (i = 0; i < GFAR_EXTRA_STATS_LEN; i++) - buf[i] = extra[i]; + for (; i < GFAR_STATS_LEN; i++, rmon++) + buf[i] = (u64) gfar_read(rmon); + } } static int gfar_sset_count(struct net_device *dev, int sset) @@ -173,13 +172,14 @@ static int gfar_sset_count(struct net_device *dev, int sset) } /* Fills in the drvinfo structure with some basic info */ -static void gfar_gdrvinfo(struct net_device *dev, struct - ethtool_drvinfo *drvinfo) +static void gfar_gdrvinfo(struct net_device *dev, + struct ethtool_drvinfo *drvinfo) { - strncpy(drvinfo->driver, DRV_NAME, GFAR_INFOSTR_LEN); - strncpy(drvinfo->version, gfar_driver_version, GFAR_INFOSTR_LEN); - strncpy(drvinfo->fw_version, "N/A", GFAR_INFOSTR_LEN); - strncpy(drvinfo->bus_info, "N/A", GFAR_INFOSTR_LEN); + strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, gfar_driver_version, + sizeof(drvinfo->version)); + strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version)); + strlcpy(drvinfo->bus_info, "N/A", sizeof(drvinfo->bus_info)); drvinfo->regdump_len = 0; drvinfo->eedump_len = 0; } @@ -225,7 +225,8 @@ static int gfar_reglen(struct net_device *dev) } /* Return a dump of the GFAR register space */ -static void gfar_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *regbuf) +static void gfar_get_regs(struct net_device *dev, struct ethtool_regs *regs, + void *regbuf) { int i; struct gfar_private *priv = netdev_priv(dev); @@ -238,7 +239,8 @@ static void gfar_get_regs(struct net_device *dev, struct ethtool_regs *regs, voi /* Convert microseconds to ethernet clock ticks, which changes * depending on what speed the controller is running at */ -static unsigned int gfar_usecs2ticks(struct gfar_private *priv, unsigned int usecs) +static unsigned int gfar_usecs2ticks(struct gfar_private *priv, + unsigned int usecs) { unsigned int count; @@ -262,7 +264,8 @@ static unsigned int gfar_usecs2ticks(struct gfar_private *priv, unsigned int use } /* Convert ethernet clock ticks to microseconds */ -static unsigned int gfar_ticks2usecs(struct gfar_private *priv, unsigned int ticks) +static unsigned int gfar_ticks2usecs(struct gfar_private *priv, + unsigned int ticks) { unsigned int count; @@ -287,7 +290,8 @@ static unsigned int gfar_ticks2usecs(struct gfar_private *priv, unsigned int tic /* Get the coalescing parameters, and put them in the cvals * structure. 
*/ -static int gfar_gcoalesce(struct net_device *dev, struct ethtool_coalesce *cvals) +static int gfar_gcoalesce(struct net_device *dev, + struct ethtool_coalesce *cvals) { struct gfar_private *priv = netdev_priv(dev); struct gfar_priv_rx_q *rx_queue = NULL; @@ -352,43 +356,57 @@ static int gfar_gcoalesce(struct net_device *dev, struct ethtool_coalesce *cvals * Both cvals->*_usecs and cvals->*_frames have to be > 0 * in order for coalescing to be active */ -static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals) +static int gfar_scoalesce(struct net_device *dev, + struct ethtool_coalesce *cvals) { struct gfar_private *priv = netdev_priv(dev); - int i = 0; + int i, err = 0; if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE)) return -EOPNOTSUPP; - /* Set up rx coalescing */ - /* As of now, we will enable/disable coalescing for all - * queues together in case of eTSEC2, this will be modified - * along with the ethtool interface */ - if ((cvals->rx_coalesce_usecs == 0) || - (cvals->rx_max_coalesced_frames == 0)) { - for (i = 0; i < priv->num_rx_queues; i++) - priv->rx_queue[i]->rxcoalescing = 0; - } else { - for (i = 0; i < priv->num_rx_queues; i++) - priv->rx_queue[i]->rxcoalescing = 1; - } - if (NULL == priv->phydev) return -ENODEV; /* Check the bounds of the values */ if (cvals->rx_coalesce_usecs > GFAR_MAX_COAL_USECS) { - pr_info("Coalescing is limited to %d microseconds\n", - GFAR_MAX_COAL_USECS); + netdev_info(dev, "Coalescing is limited to %d microseconds\n", + GFAR_MAX_COAL_USECS); return -EINVAL; } if (cvals->rx_max_coalesced_frames > GFAR_MAX_COAL_FRAMES) { - pr_info("Coalescing is limited to %d frames\n", - GFAR_MAX_COAL_FRAMES); + netdev_info(dev, "Coalescing is limited to %d frames\n", + GFAR_MAX_COAL_FRAMES); + return -EINVAL; + } + + /* Check the bounds of the values */ + if (cvals->tx_coalesce_usecs > GFAR_MAX_COAL_USECS) { + netdev_info(dev, "Coalescing is limited to %d microseconds\n", + GFAR_MAX_COAL_USECS); return -EINVAL; } + if (cvals->tx_max_coalesced_frames > GFAR_MAX_COAL_FRAMES) { + netdev_info(dev, "Coalescing is limited to %d frames\n", + GFAR_MAX_COAL_FRAMES); + return -EINVAL; + } + + while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state)) + cpu_relax(); + + /* Set up rx coalescing */ + if ((cvals->rx_coalesce_usecs == 0) || + (cvals->rx_max_coalesced_frames == 0)) { + for (i = 0; i < priv->num_rx_queues; i++) + priv->rx_queue[i]->rxcoalescing = 0; + } else { + for (i = 0; i < priv->num_rx_queues; i++) + priv->rx_queue[i]->rxcoalescing = 1; + } + for (i = 0; i < priv->num_rx_queues; i++) { priv->rx_queue[i]->rxic = mk_ic_value( cvals->rx_max_coalesced_frames, @@ -405,34 +423,29 @@ static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals priv->tx_queue[i]->txcoalescing = 1; } - /* Check the bounds of the values */ - if (cvals->tx_coalesce_usecs > GFAR_MAX_COAL_USECS) { - pr_info("Coalescing is limited to %d microseconds\n", - GFAR_MAX_COAL_USECS); - return -EINVAL; - } - - if (cvals->tx_max_coalesced_frames > GFAR_MAX_COAL_FRAMES) { - pr_info("Coalescing is limited to %d frames\n", - GFAR_MAX_COAL_FRAMES); - return -EINVAL; - } - for (i = 0; i < priv->num_tx_queues; i++) { priv->tx_queue[i]->txic = mk_ic_value( cvals->tx_max_coalesced_frames, gfar_usecs2ticks(priv, cvals->tx_coalesce_usecs)); } - gfar_configure_coalescing(priv, 0xFF, 0xFF); + if (dev->flags & IFF_UP) { + stop_gfar(dev); + err = startup_gfar(dev); + } else { + gfar_mac_reset(priv); + } - return 0; + clear_bit_unlock(GFAR_RESETTING, 
&priv->state); + + return err; } /* Fills in rvals with the current ring parameters. Currently, * rx, rx_mini, and rx_jumbo rings are the same size, as mini and * jumbo are ignored by the driver */ -static void gfar_gringparam(struct net_device *dev, struct ethtool_ringparam *rvals) +static void gfar_gringparam(struct net_device *dev, + struct ethtool_ringparam *rvals) { struct gfar_private *priv = netdev_priv(dev); struct gfar_priv_tx_q *tx_queue = NULL; @@ -456,13 +469,13 @@ static void gfar_gringparam(struct net_device *dev, struct ethtool_ringparam *rv } /* Change the current ring parameters, stopping the controller if - * necessary so that we don't mess things up while we're in - * motion. We wait for the ring to be clean before reallocating - * the rings. */ -static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rvals) + * necessary so that we don't mess things up while we're in motion. + */ +static int gfar_sringparam(struct net_device *dev, + struct ethtool_ringparam *rvals) { struct gfar_private *priv = netdev_priv(dev); - int err = 0, i = 0; + int err = 0, i; if (rvals->rx_pending > GFAR_RX_MAX_RING_SIZE) return -EINVAL; @@ -480,95 +493,142 @@ static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rva return -EINVAL; } + while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state)) + cpu_relax(); - if (dev->flags & IFF_UP) { - unsigned long flags; - - /* Halt TX and RX, and process the frames which - * have already been received */ - local_irq_save(flags); - lock_tx_qs(priv); - lock_rx_qs(priv); - - gfar_halt(dev); - - unlock_rx_qs(priv); - unlock_tx_qs(priv); - local_irq_restore(flags); - - for (i = 0; i < priv->num_rx_queues; i++) - gfar_clean_rx_ring(priv->rx_queue[i], - priv->rx_queue[i]->rx_ring_size); - - /* Now we take down the rings to rebuild them */ + if (dev->flags & IFF_UP) stop_gfar(dev); - } - /* Change the size */ - for (i = 0; i < priv->num_rx_queues; i++) { + /* Change the sizes */ + for (i = 0; i < priv->num_rx_queues; i++) priv->rx_queue[i]->rx_ring_size = rvals->rx_pending; + + for (i = 0; i < priv->num_tx_queues; i++) priv->tx_queue[i]->tx_ring_size = rvals->tx_pending; - priv->tx_queue[i]->num_txbdfree = priv->tx_queue[i]->tx_ring_size; - } /* Rebuild the rings with the new size */ - if (dev->flags & IFF_UP) { + if (dev->flags & IFF_UP) err = startup_gfar(dev); - netif_tx_wake_all_queues(dev); - } + + clear_bit_unlock(GFAR_RESETTING, &priv->state); + return err; } -int gfar_set_features(struct net_device *dev, netdev_features_t features) +static void gfar_gpauseparam(struct net_device *dev, + struct ethtool_pauseparam *epause) { struct gfar_private *priv = netdev_priv(dev); - unsigned long flags; - int err = 0, i = 0; - netdev_features_t changed = dev->features ^ features; - if (changed & (NETIF_F_HW_VLAN_TX|NETIF_F_HW_VLAN_RX)) - gfar_vlan_mode(dev, features); + epause->autoneg = !!priv->pause_aneg_en; + epause->rx_pause = !!priv->rx_pause_en; + epause->tx_pause = !!priv->tx_pause_en; +} - if (!(changed & NETIF_F_RXCSUM)) - return 0; +static int gfar_spauseparam(struct net_device *dev, + struct ethtool_pauseparam *epause) +{ + struct gfar_private *priv = netdev_priv(dev); + struct phy_device *phydev = priv->phydev; + struct gfar __iomem *regs = priv->gfargrp[0].regs; + u32 oldadv, newadv; - if (dev->flags & IFF_UP) { - /* Halt TX and RX, and process the frames which - * have already been received */ - local_irq_save(flags); - lock_tx_qs(priv); - lock_rx_qs(priv); + if (!phydev) + return -ENODEV; - gfar_halt(dev); + 
if (!(phydev->supported & SUPPORTED_Pause) || + (!(phydev->supported & SUPPORTED_Asym_Pause) && + (epause->rx_pause != epause->tx_pause))) + return -EINVAL; - unlock_tx_qs(priv); - unlock_rx_qs(priv); - local_irq_restore(flags); + priv->rx_pause_en = priv->tx_pause_en = 0; + if (epause->rx_pause) { + priv->rx_pause_en = 1; + + if (epause->tx_pause) { + priv->tx_pause_en = 1; + /* FLOW_CTRL_RX & TX */ + newadv = ADVERTISED_Pause; + } else /* FLOW_CTLR_RX */ + newadv = ADVERTISED_Pause | ADVERTISED_Asym_Pause; + } else if (epause->tx_pause) { + priv->tx_pause_en = 1; + /* FLOW_CTLR_TX */ + newadv = ADVERTISED_Asym_Pause; + } else + newadv = 0; - for (i = 0; i < priv->num_rx_queues; i++) - gfar_clean_rx_ring(priv->rx_queue[i], - priv->rx_queue[i]->rx_ring_size); + if (epause->autoneg) + priv->pause_aneg_en = 1; + else + priv->pause_aneg_en = 0; + + oldadv = phydev->advertising & + (ADVERTISED_Pause | ADVERTISED_Asym_Pause); + if (oldadv != newadv) { + phydev->advertising &= + ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause); + phydev->advertising |= newadv; + if (phydev->autoneg) + /* inform link partner of our + * new flow ctrl settings + */ + return phy_start_aneg(phydev); + + if (!epause->autoneg) { + u32 tempval; + tempval = gfar_read(®s->maccfg1); + tempval &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW); + if (priv->tx_pause_en) + tempval |= MACCFG1_TX_FLOW; + if (priv->rx_pause_en) + tempval |= MACCFG1_RX_FLOW; + gfar_write(®s->maccfg1, tempval); + } + } - /* Now we take down the rings to rebuild them */ - stop_gfar(dev); + return 0; +} - dev->features = features; +int gfar_set_features(struct net_device *dev, netdev_features_t features) +{ + netdev_features_t changed = dev->features ^ features; + struct gfar_private *priv = netdev_priv(dev); + int err = 0; + + if (!(changed & (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_RXCSUM))) + return 0; + + while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state)) + cpu_relax(); + + dev->features = features; + if (dev->flags & IFF_UP) { + /* Now we take down the rings to rebuild them */ + stop_gfar(dev); err = startup_gfar(dev); - netif_tx_wake_all_queues(dev); + } else { + gfar_mac_reset(priv); } + + clear_bit_unlock(GFAR_RESETTING, &priv->state); + return err; } static uint32_t gfar_get_msglevel(struct net_device *dev) { struct gfar_private *priv = netdev_priv(dev); + return priv->msg_enable; } static void gfar_set_msglevel(struct net_device *dev, uint32_t data) { struct gfar_private *priv = netdev_priv(dev); + priv->msg_enable = data; } @@ -613,14 +673,14 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow) if (ethflow & RXH_L2DA) { fcr = RQFCR_PID_DAH |RQFCR_CMP_NOMATCH | - RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0; + RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0; priv->ftp_rqfpr[priv->cur_filer_idx] = fpr; priv->ftp_rqfcr[priv->cur_filer_idx] = fcr; gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr); priv->cur_filer_idx = priv->cur_filer_idx - 1; fcr = RQFCR_PID_DAL | RQFCR_AND | RQFCR_CMP_NOMATCH | - RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0; + RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0; priv->ftp_rqfpr[priv->cur_filer_idx] = fpr; priv->ftp_rqfcr[priv->cur_filer_idx] = fcr; gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr); @@ -629,7 +689,7 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow) if (ethflow & RXH_VLAN) { fcr = RQFCR_PID_VID | RQFCR_CMP_NOMATCH | RQFCR_HASH | - RQFCR_AND | RQFCR_HASHTBL_0; + RQFCR_AND | RQFCR_HASHTBL_0; gfar_write_filer(priv, priv->cur_filer_idx, fcr, 
fpr); priv->ftp_rqfpr[priv->cur_filer_idx] = fpr; priv->ftp_rqfcr[priv->cur_filer_idx] = fcr; @@ -638,7 +698,7 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow) if (ethflow & RXH_IP_SRC) { fcr = RQFCR_PID_SIA | RQFCR_CMP_NOMATCH | RQFCR_HASH | - RQFCR_AND | RQFCR_HASHTBL_0; + RQFCR_AND | RQFCR_HASHTBL_0; priv->ftp_rqfpr[priv->cur_filer_idx] = fpr; priv->ftp_rqfcr[priv->cur_filer_idx] = fcr; gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr); @@ -647,7 +707,7 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow) if (ethflow & (RXH_IP_DST)) { fcr = RQFCR_PID_DIA | RQFCR_CMP_NOMATCH | RQFCR_HASH | - RQFCR_AND | RQFCR_HASHTBL_0; + RQFCR_AND | RQFCR_HASHTBL_0; priv->ftp_rqfpr[priv->cur_filer_idx] = fpr; priv->ftp_rqfcr[priv->cur_filer_idx] = fcr; gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr); @@ -656,7 +716,7 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow) if (ethflow & RXH_L3_PROTO) { fcr = RQFCR_PID_L4P | RQFCR_CMP_NOMATCH | RQFCR_HASH | - RQFCR_AND | RQFCR_HASHTBL_0; + RQFCR_AND | RQFCR_HASHTBL_0; priv->ftp_rqfpr[priv->cur_filer_idx] = fpr; priv->ftp_rqfcr[priv->cur_filer_idx] = fcr; gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr); @@ -665,7 +725,7 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow) if (ethflow & RXH_L4_B_0_1) { fcr = RQFCR_PID_SPT | RQFCR_CMP_NOMATCH | RQFCR_HASH | - RQFCR_AND | RQFCR_HASHTBL_0; + RQFCR_AND | RQFCR_HASHTBL_0; priv->ftp_rqfpr[priv->cur_filer_idx] = fpr; priv->ftp_rqfcr[priv->cur_filer_idx] = fcr; gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr); @@ -674,7 +734,7 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow) if (ethflow & RXH_L4_B_2_3) { fcr = RQFCR_PID_DPT | RQFCR_CMP_NOMATCH | RQFCR_HASH | - RQFCR_AND | RQFCR_HASHTBL_0; + RQFCR_AND | RQFCR_HASHTBL_0; priv->ftp_rqfpr[priv->cur_filer_idx] = fpr; priv->ftp_rqfcr[priv->cur_filer_idx] = fcr; gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr); @@ -682,7 +742,8 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow) } } -static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u64 class) +static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, + u64 class) { unsigned int last_rule_idx = priv->cur_filer_idx; unsigned int cmp_rqfpr; @@ -692,12 +753,11 @@ static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u int j = MAX_FILER_IDX, l = 0x0; int ret = 1; - local_rqfpr = kmalloc(sizeof(unsigned int) * (MAX_FILER_IDX + 1), - GFP_KERNEL); - local_rqfcr = kmalloc(sizeof(unsigned int) * (MAX_FILER_IDX + 1), - GFP_KERNEL); + local_rqfpr = kmalloc_array(MAX_FILER_IDX + 1, sizeof(unsigned int), + GFP_KERNEL); + local_rqfcr = kmalloc_array(MAX_FILER_IDX + 1, sizeof(unsigned int), + GFP_KERNEL); if (!local_rqfpr || !local_rqfcr) { - pr_err("Out of memory\n"); ret = 0; goto err; } @@ -716,7 +776,8 @@ static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u cmp_rqfpr = RQFPR_IPV6 |RQFPR_UDP; break; default: - pr_err("Right now this class is not supported\n"); + netdev_err(priv->ndev, + "Right now this class is not supported\n"); ret = 0; goto err; } @@ -725,14 +786,15 @@ static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u local_rqfpr[j] = priv->ftp_rqfpr[i]; local_rqfcr[j] = priv->ftp_rqfcr[i]; j--; - if ((priv->ftp_rqfcr[i] == (RQFCR_PID_PARSE | - RQFCR_CLE |RQFCR_AND)) && - (priv->ftp_rqfpr[i] == cmp_rqfpr)) + if 
((priv->ftp_rqfcr[i] == + (RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND)) && + (priv->ftp_rqfpr[i] == cmp_rqfpr)) break; } if (i == MAX_FILER_IDX + 1) { - pr_err("No parse rule found, can't create hash rules\n"); + netdev_err(priv->ndev, + "No parse rule found, can't create hash rules\n"); ret = 0; goto err; } @@ -742,12 +804,12 @@ static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u */ for (l = i+1; l < MAX_FILER_IDX; l++) { if ((priv->ftp_rqfcr[l] & RQFCR_CLE) && - !(priv->ftp_rqfcr[l] & RQFCR_AND)) { + !(priv->ftp_rqfcr[l] & RQFCR_AND)) { priv->ftp_rqfcr[l] = RQFCR_CLE | RQFCR_CMP_EXACT | - RQFCR_HASHTBL_0 | RQFCR_PID_MASK; + RQFCR_HASHTBL_0 | RQFCR_PID_MASK; priv->ftp_rqfpr[l] = FPR_FILER_MASK; gfar_write_filer(priv, l, priv->ftp_rqfcr[l], - priv->ftp_rqfpr[l]); + priv->ftp_rqfpr[l]); break; } @@ -772,7 +834,7 @@ static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u priv->ftp_rqfpr[priv->cur_filer_idx] = local_rqfpr[k]; priv->ftp_rqfcr[priv->cur_filer_idx] = local_rqfcr[k]; gfar_write_filer(priv, priv->cur_filer_idx, - local_rqfcr[k], local_rqfpr[k]); + local_rqfcr[k], local_rqfpr[k]); if (!priv->cur_filer_idx) break; priv->cur_filer_idx = priv->cur_filer_idx - 1; @@ -784,7 +846,8 @@ err: return ret; } -static int gfar_set_hash_opts(struct gfar_private *priv, struct ethtool_rxnfc *cmd) +static int gfar_set_hash_opts(struct gfar_private *priv, + struct ethtool_rxnfc *cmd) { /* write the filer rules here */ if (!gfar_ethflow_to_filer_table(priv, cmd->data, cmd->flow_type)) @@ -795,11 +858,9 @@ static int gfar_set_hash_opts(struct gfar_private *priv, struct ethtool_rxnfc *c static int gfar_check_filer_hardware(struct gfar_private *priv) { - struct gfar __iomem *regs = NULL; + struct gfar __iomem *regs = priv->gfargrp[0].regs; u32 i; - regs = priv->gfargrp[0].regs; - /* Check if we are in FIFO mode */ i = gfar_read(®s->ecntrl); i &= ECNTRL_FIFM; @@ -809,10 +870,10 @@ static int gfar_check_filer_hardware(struct gfar_private *priv) i &= RCTRL_PRSDEP_MASK | RCTRL_PRSFM; if (i == (RCTRL_PRSDEP_MASK | RCTRL_PRSFM)) { netdev_info(priv->ndev, - "Receive Queue Filtering enabled\n"); + "Receive Queue Filtering enabled\n"); } else { netdev_warn(priv->ndev, - "Receive Queue Filtering disabled\n"); + "Receive Queue Filtering disabled\n"); return -EOPNOTSUPP; } } @@ -822,17 +883,18 @@ static int gfar_check_filer_hardware(struct gfar_private *priv) i &= RCTRL_PRSDEP_MASK; if (i == RCTRL_PRSDEP_MASK) { netdev_info(priv->ndev, - "Receive Queue Filtering enabled\n"); + "Receive Queue Filtering enabled\n"); } else { netdev_warn(priv->ndev, - "Receive Queue Filtering disabled\n"); + "Receive Queue Filtering disabled\n"); return -EOPNOTSUPP; } } /* Sets the properties for arbitrary filer rule - * to the first 4 Layer 4 Bytes */ - regs->rbifx = 0xC0C1C2C3; + * to the first 4 Layer 4 Bytes + */ + gfar_write(®s->rbifx, 0xC0C1C2C3); return 0; } @@ -869,14 +931,14 @@ static void gfar_set_mask(u32 mask, struct filer_table *tab) static void gfar_set_parse_bits(u32 value, u32 mask, struct filer_table *tab) { gfar_set_mask(mask, tab); - tab->fe[tab->index].ctrl = RQFCR_CMP_EXACT | RQFCR_PID_PARSE - | RQFCR_AND; + tab->fe[tab->index].ctrl = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | + RQFCR_AND; tab->fe[tab->index].prop = value; tab->index++; } static void gfar_set_general_attribute(u32 value, u32 mask, u32 flag, - struct filer_table *tab) + struct filer_table *tab) { gfar_set_mask(mask, tab); tab->fe[tab->index].ctrl = RQFCR_CMP_EXACT | RQFCR_AND | flag; @@ -884,8 +946,7 @@ 
static void gfar_set_general_attribute(u32 value, u32 mask, u32 flag, tab->index++; } -/* - * For setting a tuple of value and mask of type flag +/* For setting a tuple of value and mask of type flag * Example: * IP-Src = 10.0.0.0/255.0.0.0 * value: 0x0A000000 mask: FF000000 flag: RQFPR_IPV4 @@ -900,7 +961,7 @@ static void gfar_set_general_attribute(u32 value, u32 mask, u32 flag, * Further the all masks are one-padded for better hardware efficiency. */ static void gfar_set_attribute(u32 value, u32 mask, u32 flag, - struct filer_table *tab) + struct filer_table *tab) { switch (flag) { /* 3bit */ @@ -958,130 +1019,175 @@ static void gfar_set_attribute(u32 value, u32 mask, u32 flag, /* Translates value and mask for UDP, TCP or SCTP */ static void gfar_set_basic_ip(struct ethtool_tcpip4_spec *value, - struct ethtool_tcpip4_spec *mask, struct filer_table *tab) + struct ethtool_tcpip4_spec *mask, + struct filer_table *tab) { - gfar_set_attribute(value->ip4src, mask->ip4src, RQFCR_PID_SIA, tab); - gfar_set_attribute(value->ip4dst, mask->ip4dst, RQFCR_PID_DIA, tab); - gfar_set_attribute(value->pdst, mask->pdst, RQFCR_PID_DPT, tab); - gfar_set_attribute(value->psrc, mask->psrc, RQFCR_PID_SPT, tab); + gfar_set_attribute(be32_to_cpu(value->ip4src), + be32_to_cpu(mask->ip4src), + RQFCR_PID_SIA, tab); + gfar_set_attribute(be32_to_cpu(value->ip4dst), + be32_to_cpu(mask->ip4dst), + RQFCR_PID_DIA, tab); + gfar_set_attribute(be16_to_cpu(value->pdst), + be16_to_cpu(mask->pdst), + RQFCR_PID_DPT, tab); + gfar_set_attribute(be16_to_cpu(value->psrc), + be16_to_cpu(mask->psrc), + RQFCR_PID_SPT, tab); gfar_set_attribute(value->tos, mask->tos, RQFCR_PID_TOS, tab); } /* Translates value and mask for RAW-IP4 */ static void gfar_set_user_ip(struct ethtool_usrip4_spec *value, - struct ethtool_usrip4_spec *mask, struct filer_table *tab) + struct ethtool_usrip4_spec *mask, + struct filer_table *tab) { - gfar_set_attribute(value->ip4src, mask->ip4src, RQFCR_PID_SIA, tab); - gfar_set_attribute(value->ip4dst, mask->ip4dst, RQFCR_PID_DIA, tab); + gfar_set_attribute(be32_to_cpu(value->ip4src), + be32_to_cpu(mask->ip4src), + RQFCR_PID_SIA, tab); + gfar_set_attribute(be32_to_cpu(value->ip4dst), + be32_to_cpu(mask->ip4dst), + RQFCR_PID_DIA, tab); gfar_set_attribute(value->tos, mask->tos, RQFCR_PID_TOS, tab); gfar_set_attribute(value->proto, mask->proto, RQFCR_PID_L4P, tab); - gfar_set_attribute(value->l4_4_bytes, mask->l4_4_bytes, RQFCR_PID_ARB, - tab); + gfar_set_attribute(be32_to_cpu(value->l4_4_bytes), + be32_to_cpu(mask->l4_4_bytes), + RQFCR_PID_ARB, tab); } /* Translates value and mask for ETHER spec */ static void gfar_set_ether(struct ethhdr *value, struct ethhdr *mask, - struct filer_table *tab) + struct filer_table *tab) { u32 upper_temp_mask = 0; u32 lower_temp_mask = 0; + /* Source address */ if (!is_broadcast_ether_addr(mask->h_source)) { - if (is_zero_ether_addr(mask->h_source)) { upper_temp_mask = 0xFFFFFFFF; lower_temp_mask = 0xFFFFFFFF; } else { - upper_temp_mask = mask->h_source[0] << 16 - | mask->h_source[1] << 8 - | mask->h_source[2]; - lower_temp_mask = mask->h_source[3] << 16 - | mask->h_source[4] << 8 - | mask->h_source[5]; + upper_temp_mask = mask->h_source[0] << 16 | + mask->h_source[1] << 8 | + mask->h_source[2]; + lower_temp_mask = mask->h_source[3] << 16 | + mask->h_source[4] << 8 | + mask->h_source[5]; } /* Upper 24bit */ - gfar_set_attribute( - value->h_source[0] << 16 | value->h_source[1] - << 8 | value->h_source[2], - upper_temp_mask, RQFCR_PID_SAH, tab); + gfar_set_attribute(value->h_source[0] << 
16 | + value->h_source[1] << 8 | + value->h_source[2], + upper_temp_mask, RQFCR_PID_SAH, tab); /* And the same for the lower part */ - gfar_set_attribute( - value->h_source[3] << 16 | value->h_source[4] - << 8 | value->h_source[5], - lower_temp_mask, RQFCR_PID_SAL, tab); + gfar_set_attribute(value->h_source[3] << 16 | + value->h_source[4] << 8 | + value->h_source[5], + lower_temp_mask, RQFCR_PID_SAL, tab); } /* Destination address */ if (!is_broadcast_ether_addr(mask->h_dest)) { - /* Special for destination is limited broadcast */ - if ((is_broadcast_ether_addr(value->h_dest) - && is_zero_ether_addr(mask->h_dest))) { + if ((is_broadcast_ether_addr(value->h_dest) && + is_zero_ether_addr(mask->h_dest))) { gfar_set_parse_bits(RQFPR_EBC, RQFPR_EBC, tab); } else { - if (is_zero_ether_addr(mask->h_dest)) { upper_temp_mask = 0xFFFFFFFF; lower_temp_mask = 0xFFFFFFFF; } else { - upper_temp_mask = mask->h_dest[0] << 16 - | mask->h_dest[1] << 8 - | mask->h_dest[2]; - lower_temp_mask = mask->h_dest[3] << 16 - | mask->h_dest[4] << 8 - | mask->h_dest[5]; + upper_temp_mask = mask->h_dest[0] << 16 | + mask->h_dest[1] << 8 | + mask->h_dest[2]; + lower_temp_mask = mask->h_dest[3] << 16 | + mask->h_dest[4] << 8 | + mask->h_dest[5]; } /* Upper 24bit */ - gfar_set_attribute( - value->h_dest[0] << 16 - | value->h_dest[1] << 8 - | value->h_dest[2], - upper_temp_mask, RQFCR_PID_DAH, tab); + gfar_set_attribute(value->h_dest[0] << 16 | + value->h_dest[1] << 8 | + value->h_dest[2], + upper_temp_mask, RQFCR_PID_DAH, tab); /* And the same for the lower part */ - gfar_set_attribute( - value->h_dest[3] << 16 - | value->h_dest[4] << 8 - | value->h_dest[5], - lower_temp_mask, RQFCR_PID_DAL, tab); + gfar_set_attribute(value->h_dest[3] << 16 | + value->h_dest[4] << 8 | + value->h_dest[5], + lower_temp_mask, RQFCR_PID_DAL, tab); } } - gfar_set_attribute(value->h_proto, mask->h_proto, RQFCR_PID_ETY, tab); + gfar_set_attribute(be16_to_cpu(value->h_proto), + be16_to_cpu(mask->h_proto), + RQFCR_PID_ETY, tab); +} + +static inline u32 vlan_tci_vid(struct ethtool_rx_flow_spec *rule) +{ + return be16_to_cpu(rule->h_ext.vlan_tci) & VLAN_VID_MASK; +} + +static inline u32 vlan_tci_vidm(struct ethtool_rx_flow_spec *rule) +{ + return be16_to_cpu(rule->m_ext.vlan_tci) & VLAN_VID_MASK; +} +static inline u32 vlan_tci_cfi(struct ethtool_rx_flow_spec *rule) +{ + return be16_to_cpu(rule->h_ext.vlan_tci) & VLAN_CFI_MASK; +} + +static inline u32 vlan_tci_cfim(struct ethtool_rx_flow_spec *rule) +{ + return be16_to_cpu(rule->m_ext.vlan_tci) & VLAN_CFI_MASK; +} + +static inline u32 vlan_tci_prio(struct ethtool_rx_flow_spec *rule) +{ + return (be16_to_cpu(rule->h_ext.vlan_tci) & VLAN_PRIO_MASK) >> + VLAN_PRIO_SHIFT; +} + +static inline u32 vlan_tci_priom(struct ethtool_rx_flow_spec *rule) +{ + return (be16_to_cpu(rule->m_ext.vlan_tci) & VLAN_PRIO_MASK) >> + VLAN_PRIO_SHIFT; } /* Convert a rule to binary filter format of gianfar */ static int gfar_convert_to_filer(struct ethtool_rx_flow_spec *rule, - struct filer_table *tab) + struct filer_table *tab) { u32 vlan = 0, vlan_mask = 0; u32 id = 0, id_mask = 0; u32 cfi = 0, cfi_mask = 0; u32 prio = 0, prio_mask = 0; - u32 old_index = tab->index; /* Check if vlan is wanted */ - if ((rule->flow_type & FLOW_EXT) && (rule->m_ext.vlan_tci != 0xFFFF)) { + if ((rule->flow_type & FLOW_EXT) && + (rule->m_ext.vlan_tci != cpu_to_be16(0xFFFF))) { if (!rule->m_ext.vlan_tci) - rule->m_ext.vlan_tci = 0xFFFF; + rule->m_ext.vlan_tci = cpu_to_be16(0xFFFF); vlan = RQFPR_VLN; vlan_mask = RQFPR_VLN; /* Separate the fields */ 
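/* Editor's note — standalone sketch, not part of the patch: how the new
 * vlan_tci_*() helpers above decompose a 16-bit VLAN TCI. The masks and
 * shift follow <linux/if_vlan.h> (VID in bits 0-11, CFI in bit 12, PCP
 * in bits 13-15). Unlike the helpers, which take big-endian fields
 * through be16_to_cpu(), this sketch uses a host-order value; the
 * sample TCI below is an arbitrary example.
 */
#include <stdint.h>
#include <stdio.h>

#define VLAN_VID_MASK	0x0fff
#define VLAN_CFI_MASK	0x1000
#define VLAN_PRIO_MASK	0xe000
#define VLAN_PRIO_SHIFT	13

int main(void)
{
	uint16_t tci = 0xb064;	/* prio 5, CFI set, VID 100 */

	printf("vid  = %u\n", tci & VLAN_VID_MASK);
	printf("cfi  = %u\n", !!(tci & VLAN_CFI_MASK));
	printf("prio = %u\n", (tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT);
	return 0;
}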
- id = rule->h_ext.vlan_tci & VLAN_VID_MASK; - id_mask = rule->m_ext.vlan_tci & VLAN_VID_MASK; - cfi = rule->h_ext.vlan_tci & VLAN_CFI_MASK; - cfi_mask = rule->m_ext.vlan_tci & VLAN_CFI_MASK; - prio = (rule->h_ext.vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; - prio_mask = (rule->m_ext.vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; + id = vlan_tci_vid(rule); + id_mask = vlan_tci_vidm(rule); + cfi = vlan_tci_cfi(rule); + cfi_mask = vlan_tci_cfim(rule); + prio = vlan_tci_prio(rule); + prio_mask = vlan_tci_priom(rule); if (cfi == VLAN_TAG_PRESENT && cfi_mask == VLAN_TAG_PRESENT) { vlan |= RQFPR_CFI; vlan_mask |= RQFPR_CFI; - } else if (cfi != VLAN_TAG_PRESENT && cfi_mask == VLAN_TAG_PRESENT) { + } else if (cfi != VLAN_TAG_PRESENT && + cfi_mask == VLAN_TAG_PRESENT) { vlan_mask |= RQFPR_CFI; } } @@ -1089,34 +1195,36 @@ static int gfar_convert_to_filer(struct ethtool_rx_flow_spec *rule, switch (rule->flow_type & ~FLOW_EXT) { case TCP_V4_FLOW: gfar_set_parse_bits(RQFPR_IPV4 | RQFPR_TCP | vlan, - RQFPR_IPV4 | RQFPR_TCP | vlan_mask, tab); + RQFPR_IPV4 | RQFPR_TCP | vlan_mask, tab); gfar_set_basic_ip(&rule->h_u.tcp_ip4_spec, - &rule->m_u.tcp_ip4_spec, tab); + &rule->m_u.tcp_ip4_spec, tab); break; case UDP_V4_FLOW: gfar_set_parse_bits(RQFPR_IPV4 | RQFPR_UDP | vlan, - RQFPR_IPV4 | RQFPR_UDP | vlan_mask, tab); + RQFPR_IPV4 | RQFPR_UDP | vlan_mask, tab); gfar_set_basic_ip(&rule->h_u.udp_ip4_spec, - &rule->m_u.udp_ip4_spec, tab); + &rule->m_u.udp_ip4_spec, tab); break; case SCTP_V4_FLOW: gfar_set_parse_bits(RQFPR_IPV4 | vlan, RQFPR_IPV4 | vlan_mask, - tab); + tab); gfar_set_attribute(132, 0, RQFCR_PID_L4P, tab); - gfar_set_basic_ip((struct ethtool_tcpip4_spec *) &rule->h_u, - (struct ethtool_tcpip4_spec *) &rule->m_u, tab); + gfar_set_basic_ip((struct ethtool_tcpip4_spec *)&rule->h_u, + (struct ethtool_tcpip4_spec *)&rule->m_u, + tab); break; case IP_USER_FLOW: gfar_set_parse_bits(RQFPR_IPV4 | vlan, RQFPR_IPV4 | vlan_mask, - tab); + tab); gfar_set_user_ip((struct ethtool_usrip4_spec *) &rule->h_u, - (struct ethtool_usrip4_spec *) &rule->m_u, tab); + (struct ethtool_usrip4_spec *) &rule->m_u, + tab); break; case ETHER_FLOW: if (vlan) gfar_set_parse_bits(vlan, vlan_mask, tab); gfar_set_ether((struct ethhdr *) &rule->h_u, - (struct ethhdr *) &rule->m_u, tab); + (struct ethhdr *) &rule->m_u, tab); break; default: return -1; @@ -1151,7 +1259,9 @@ static int gfar_convert_to_filer(struct ethtool_rx_flow_spec *rule, tab->fe[tab->index - 1].ctrl |= RQFCR_CLE; } - /* In rare cases the cache can be full while there is free space in hw */ + /* In rare cases the cache can be full while there is + * free space in hw + */ if (tab->index > MAX_FILER_CACHE_IDX - 1) return -EBUSY; @@ -1160,7 +1270,7 @@ static int gfar_convert_to_filer(struct ethtool_rx_flow_spec *rule, /* Copy size filer entries */ static void gfar_copy_filer_entries(struct gfar_filer_entry dst[0], - struct gfar_filer_entry src[0], s32 size) + struct gfar_filer_entry src[0], s32 size) { while (size > 0) { size--; @@ -1170,10 +1280,12 @@ static void gfar_copy_filer_entries(struct gfar_filer_entry dst[0], } /* Delete the contents of the filer-table between start and end - * and collapse them */ + * and collapse them + */ static int gfar_trim_filer_entries(u32 begin, u32 end, struct filer_table *tab) { int length; + if (end > MAX_FILER_CACHE_IDX || end < begin) return -EINVAL; @@ -1199,14 +1311,14 @@ static int gfar_trim_filer_entries(u32 begin, u32 end, struct filer_table *tab) /* Make space on the wanted location */ static int 
gfar_expand_filer_entries(u32 begin, u32 length, - struct filer_table *tab) + struct filer_table *tab) { - if (length == 0 || length + tab->index > MAX_FILER_CACHE_IDX || begin - > MAX_FILER_CACHE_IDX) + if (length == 0 || length + tab->index > MAX_FILER_CACHE_IDX || + begin > MAX_FILER_CACHE_IDX) return -EINVAL; gfar_copy_filer_entries(&(tab->fe[begin + length]), &(tab->fe[begin]), - tab->index - length + 1); + tab->index - length + 1); tab->index += length; return 0; @@ -1214,9 +1326,10 @@ static int gfar_expand_filer_entries(u32 begin, u32 length, static int gfar_get_next_cluster_start(int start, struct filer_table *tab) { - for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1); start++) { - if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE)) - == (RQFCR_AND | RQFCR_CLE)) + for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1); + start++) { + if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE)) == + (RQFCR_AND | RQFCR_CLE)) return start; } return -1; @@ -1224,16 +1337,16 @@ static int gfar_get_next_cluster_start(int start, struct filer_table *tab) static int gfar_get_next_cluster_end(int start, struct filer_table *tab) { - for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1); start++) { - if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE)) - == (RQFCR_CLE)) + for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1); + start++) { + if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE)) == + (RQFCR_CLE)) return start; } return -1; } -/* - * Uses hardwares clustering option to reduce +/* Uses hardware's clustering option to reduce * the number of filer table entries */ static void gfar_cluster_filer(struct filer_table *tab) @@ -1243,8 +1356,7 @@ static void gfar_cluster_filer(struct filer_table *tab) while ((i = gfar_get_next_cluster_start(++i, tab)) != -1) { j = i; while ((j = gfar_get_next_cluster_start(++j, tab)) != -1) { - /* - * The cluster entries self and the previous one + /* The cluster entry itself and the previous one * (a mask) must be identical! */ if (tab->fe[i].ctrl != tab->fe[j].ctrl) @@ -1259,21 +1371,21 @@ static void gfar_cluster_filer(struct filer_table *tab) jend = gfar_get_next_cluster_end(j, tab); if (jend == -1 || iend == -1) break; - /* - * First we make some free space, where our cluster + + /* First we make some free space, where our cluster * element should be. Then we copy it there and finally * delete it from its old location. 
*/ - - if (gfar_expand_filer_entries(iend, (jend - j), tab) - == -EINVAL) + if (gfar_expand_filer_entries(iend, (jend - j), tab) == + -EINVAL) break; gfar_copy_filer_entries(&(tab->fe[iend + 1]), - &(tab->fe[jend + 1]), jend - j); + &(tab->fe[jend + 1]), jend - j); if (gfar_trim_filer_entries(jend - 1, - jend + (jend - j), tab) == -EINVAL) + jend + (jend - j), + tab) == -EINVAL) return; /* Mask out cluster bit */ @@ -1284,8 +1396,9 @@ /* Swaps the masked bits of a1<>a2 and b1<>b2 */ static void gfar_swap_bits(struct gfar_filer_entry *a1, - struct gfar_filer_entry *a2, struct gfar_filer_entry *b1, - struct gfar_filer_entry *b2, u32 mask) + struct gfar_filer_entry *a2, + struct gfar_filer_entry *b1, + struct gfar_filer_entry *b2, u32 mask) { u32 temp[4]; temp[0] = a1->ctrl & mask; @@ -1304,13 +1417,12 @@ static void gfar_swap_bits(struct gfar_filer_entry *a1, b2->ctrl |= temp[2]; } -/* - * Generate a list consisting of masks values with their start and +/* Generate a list consisting of mask values with their start and * end of validity and block as indicator for parts belonging * together (glued by ANDs) in mask_table */ static u32 gfar_generate_mask_table(struct gfar_mask_entry *mask_table, - struct filer_table *tab) + struct filer_table *tab) { u32 i, and_index = 0, block_index = 1; @@ -1326,13 +1438,13 @@ static u32 gfar_generate_mask_table(struct gfar_mask_entry *mask_table, and_index++; } /* cluster starts and ends will be separated because they should - * hold their position */ + * hold their position + */ if (tab->fe[i].ctrl & RQFCR_CLE) block_index++; /* An unset AND indicates the end of a dependent block */ if (!(tab->fe[i].ctrl & RQFCR_AND)) block_index++; - } mask_table[and_index - 1].end = i - 1; @@ -1340,14 +1452,13 @@ static u32 gfar_generate_mask_table(struct gfar_mask_entry *mask_table, return and_index; } -/* - * Sorts the entries of mask_table by the values of the masks. +/* Sorts the entries of mask_table by the values of the masks. * Important: The 0xFF80 flags of the first and last entry of a * block must hold their position (which queue, CLusterEnable, ReJEct, * AND) */ static void gfar_sort_mask_table(struct gfar_mask_entry *mask_table, - struct filer_table *temp_table, u32 and_index) + struct filer_table *temp_table, u32 and_index) { /* Pointer to compare function (_asc or _desc) */ int (*gfar_comp)(const void *, const void *); @@ -1358,16 +1469,16 @@ static void gfar_sort_mask_table(struct gfar_mask_entry *mask_table, gfar_comp = &gfar_comp_desc; for (i = 0; i < and_index; i++) { - if (prev != mask_table[i].block) { old_first = mask_table[start].start + 1; old_last = mask_table[i - 1].end; sort(mask_table + start, size, - sizeof(struct gfar_mask_entry), - gfar_comp, &gfar_swap); + sizeof(struct gfar_mask_entry), + gfar_comp, &gfar_swap); /* Toggle order for every block. This makes the - * thing more efficient! */ + * thing more efficient! 
+ */ if (gfar_comp == gfar_comp_desc) gfar_comp = &gfar_comp_asc; else gfar_comp = &gfar_comp_desc; @@ -1377,12 +1488,11 @@ static void gfar_sort_mask_table(struct gfar_mask_entry *mask_table, new_last = mask_table[i - 1].end; gfar_swap_bits(&temp_table->fe[new_first], - &temp_table->fe[old_first], - &temp_table->fe[new_last], - &temp_table->fe[old_last], - RQFCR_QUEUE | RQFCR_CLE | - RQFCR_RJE | RQFCR_AND - ); + &temp_table->fe[old_first], + &temp_table->fe[new_last], + &temp_table->fe[old_last], + RQFCR_QUEUE | RQFCR_CLE | + RQFCR_RJE | RQFCR_AND); start = i; size = 0; @@ -1390,11 +1500,9 @@ static void gfar_sort_mask_table(struct gfar_mask_entry *mask_table, size++; prev = mask_table[i].block; } - } -/* - * Reduces the number of masks needed in the filer table to save entries +/* Reduces the number of masks needed in the filer table to save entries. * This is done by sorting the masks of a dependent block. A dependent block is * identified by gluing ANDs or CLE. The sorting order toggles after every * block. Of course entries in scope of a mask must change their location with @@ -1409,13 +1517,14 @@ static int gfar_optimize_filer_masks(struct filer_table *tab) s32 ret = 0; /* We need a copy of the filer table because - * we want to change its order */ + * we want to change its order + */ temp_table = kmemdup(tab, sizeof(*temp_table), GFP_KERNEL); if (temp_table == NULL) return -ENOMEM; mask_table = kcalloc(MAX_FILER_CACHE_IDX / 2 + 1, - sizeof(struct gfar_mask_entry), GFP_KERNEL); + sizeof(struct gfar_mask_entry), GFP_KERNEL); if (mask_table == NULL) { ret = -ENOMEM; @@ -1427,7 +1536,8 @@ static int gfar_optimize_filer_masks(struct filer_table *tab) gfar_sort_mask_table(mask_table, temp_table, and_index); /* Now we can copy the data from our duplicated filer table to - * the real one in the order the mask table says */ + * the real one in the order the mask table says + */ for (i = 0; i < and_index; i++) { size = mask_table[i].end - mask_table[i].start + 1; gfar_copy_filer_entries(&(tab->fe[j]), @@ -1436,7 +1546,8 @@ static int gfar_optimize_filer_masks(struct filer_table *tab) } /* And finally we just have to check for duplicated masks and drop the - * second ones */ + * second ones + */ for (i = 0; i < tab->index && i < MAX_FILER_CACHE_IDX; i++) { if (tab->fe[i].ctrl == 0x80) { previous_mask = i++; @@ -1447,7 +1558,8 @@ static int gfar_optimize_filer_masks(struct filer_table *tab) if (tab->fe[i].ctrl == 0x80) { if (tab->fe[i].prop == tab->fe[previous_mask].prop) { /* Two identical ones found! - * So drop the second one! */ + * So drop the second one! + */ gfar_trim_filer_entries(i, i, tab); } else /* Not identical! 
*/ @@ -1462,46 +1574,43 @@ end: kfree(temp_table); /* Write the bit-pattern from software's buffer to hardware registers */ static int gfar_write_filer_table(struct gfar_private *priv, - struct filer_table *tab) + struct filer_table *tab) { u32 i = 0; if (tab->index > MAX_FILER_IDX - 1) return -EBUSY; - /* Avoid inconsistent filer table to be processed */ - lock_rx_qs(priv); - /* Fill regular entries */ - for (; i < MAX_FILER_IDX - 1 && (tab->fe[i].ctrl | tab->fe[i].ctrl); i++) + for (; i < MAX_FILER_IDX - 1 && (tab->fe[i].ctrl | tab->fe[i].ctrl); + i++) gfar_write_filer(priv, i, tab->fe[i].ctrl, tab->fe[i].prop); /* Fill the rest with fall-throughs */ for (; i < MAX_FILER_IDX - 1; i++) gfar_write_filer(priv, i, 0x60, 0xFFFFFFFF); /* Last entry must be default accept - * because that's what people expect */ + * because that's what people expect + */ gfar_write_filer(priv, i, 0x20, 0x0); - unlock_rx_qs(priv); - return 0; } static int gfar_check_capability(struct ethtool_rx_flow_spec *flow, - struct gfar_private *priv) + struct gfar_private *priv) { if (flow->flow_type & FLOW_EXT) { if (~flow->m_ext.data[0] || ~flow->m_ext.data[1]) netdev_warn(priv->ndev, - "User-specific data not supported!\n"); + "User-specific data not supported!\n"); if (~flow->m_ext.vlan_etype) netdev_warn(priv->ndev, - "VLAN-etype not supported!\n"); + "VLAN-etype not supported!\n"); } if (flow->flow_type == IP_USER_FLOW) if (flow->h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4) netdev_warn(priv->ndev, - "IP-Version differing from IPv4 not supported!\n"); + "IP-Version differing from IPv4 not supported!\n"); return 0; } @@ -1519,15 +1628,18 @@ static int gfar_process_filer_changes(struct gfar_private *priv) return -ENOMEM; /* Now convert the existing filer data from flow_spec into - * filer tables binary format */ + * filer tables binary format + */ list_for_each_entry(j, &priv->rx_list.list, list) { ret = gfar_convert_to_filer(&j->fs, tab); if (ret == -EBUSY) { - netdev_err(priv->ndev, "Rule not added: No free space!\n"); + netdev_err(priv->ndev, + "Rule not added: No free space!\n"); goto end; } if (ret == -1) { - netdev_err(priv->ndev, "Rule not added: Unsupported Flow-type!\n"); + netdev_err(priv->ndev, + "Rule not added: Unsupported Flow-type!\n"); goto end; } } @@ -1538,10 +1650,10 @@ static int gfar_process_filer_changes(struct gfar_private *priv) gfar_cluster_filer(tab); gfar_optimize_filer_masks(tab); - pr_debug("\n\tSummary:\n" - "\tData on hardware: %d\n" - "\tCompression rate: %d%%\n", - tab->index, 100 - (100 * tab->index) / i); + pr_debug("\tSummary:\n" + "\tData on hardware: %d\n" + "\tCompression rate: %d%%\n", + tab->index, 100 - (100 * tab->index) / i); /* Write everything to hardware */ ret = gfar_write_filer_table(priv, tab); @@ -1550,7 +1662,8 @@ static int gfar_process_filer_changes(struct gfar_private *priv) goto end; } -end: kfree(tab); +end: + kfree(tab); return ret; } @@ -1561,14 +1674,14 @@ static void gfar_invert_masks(struct ethtool_rx_flow_spec *flow) for (i = 0; i < sizeof(flow->m_u); i++) flow->m_u.hdata[i] ^= 0xFF; - flow->m_ext.vlan_etype ^= 0xFFFF; - flow->m_ext.vlan_tci ^= 0xFFFF; - flow->m_ext.data[0] ^= ~0; - flow->m_ext.data[1] ^= ~0; + flow->m_ext.vlan_etype ^= cpu_to_be16(0xFFFF); + flow->m_ext.vlan_tci ^= cpu_to_be16(0xFFFF); + flow->m_ext.data[0] ^= cpu_to_be32(~0); + flow->m_ext.data[1] ^= cpu_to_be32(~0); } static int gfar_add_cls(struct gfar_private *priv, - struct ethtool_rx_flow_spec *flow) + struct ethtool_rx_flow_spec *flow) { struct ethtool_flow_spec_container *temp, *comp; 
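/* Side note (illustration only, not from the patch): gfar_invert_masks()
 * above flips every bit of the user-supplied masks, e.g. for a single
 * header byte:
 *
 *	m_u.hdata[i] == 0x0F	mask as passed in via ethtool
 *	m_u.hdata[i] ^= 0xFF	now 0xF0, the filer's convention
 *
 * Using cpu_to_be16(0xFFFF) and cpu_to_be32(~0) on the extension fields
 * changes no bits at runtime (an all-ones pattern is identical in either
 * byte order); it only makes the __be16/__be32 type annotations
 * consistent.
 */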
int ret = 0; @@ -1590,7 +1703,6 @@ static int gfar_add_cls(struct gfar_private *priv, list_add(&temp->list, &priv->rx_list.list); goto process; } else { - list_for_each_entry(comp, &priv->rx_list.list, list) { if (comp->fs.location > flow->location) { list_add_tail(&temp->list, &comp->list); @@ -1598,8 +1710,8 @@ static int gfar_add_cls(struct gfar_private *priv, } if (comp->fs.location == flow->location) { netdev_err(priv->ndev, - "Rule not added: ID %d not free!\n", - flow->location); + "Rule not added: ID %d not free!\n", + flow->location); ret = -EBUSY; goto clean_mem; } @@ -1641,7 +1753,6 @@ static int gfar_del_cls(struct gfar_private *priv, u32 loc) } return ret; - } static int gfar_get_cls(struct gfar_private *priv, struct ethtool_rxnfc *cmd) @@ -1662,7 +1773,7 @@ static int gfar_get_cls(struct gfar_private *priv, struct ethtool_rxnfc *cmd) } static int gfar_get_cls_all(struct gfar_private *priv, - struct ethtool_rxnfc *cmd, u32 *rule_locs) + struct ethtool_rxnfc *cmd, u32 *rule_locs) { struct ethtool_flow_spec_container *comp; u32 i = 0; @@ -1685,6 +1796,9 @@ static int gfar_set_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd) struct gfar_private *priv = netdev_priv(dev); int ret = 0; + if (test_bit(GFAR_RESETTING, &priv->state)) + return -EBUSY; + mutex_lock(&priv->rx_queue_access); switch (cmd->cmd) { @@ -1713,7 +1827,7 @@ static int gfar_set_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd) } static int gfar_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd, - u32 *rule_locs) + u32 *rule_locs) { struct gfar_private *priv = netdev_priv(dev); int ret = 0; @@ -1739,6 +1853,31 @@ static int gfar_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd, return ret; } +int gfar_phc_index = -1; +EXPORT_SYMBOL(gfar_phc_index); + +static int gfar_get_ts_info(struct net_device *dev, + struct ethtool_ts_info *info) +{ + struct gfar_private *priv = netdev_priv(dev); + + if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)) { + info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE; + info->phc_index = -1; + return 0; + } + info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + info->phc_index = gfar_phc_index; + info->tx_types = (1 << HWTSTAMP_TX_OFF) | + (1 << HWTSTAMP_TX_ON); + info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | + (1 << HWTSTAMP_FILTER_ALL); + return 0; +} + const struct ethtool_ops gfar_ethtool_ops = { .get_settings = gfar_gsettings, .set_settings = gfar_ssettings, @@ -1750,6 +1889,8 @@ const struct ethtool_ops gfar_ethtool_ops = { .set_coalesce = gfar_scoalesce, .get_ringparam = gfar_gringparam, .set_ringparam = gfar_sringparam, + .get_pauseparam = gfar_gpauseparam, + .set_pauseparam = gfar_spauseparam, .get_strings = gfar_gstrings, .get_sset_count = gfar_sset_count, .get_ethtool_stats = gfar_fill_stats, @@ -1761,4 +1902,5 @@ const struct ethtool_ops gfar_ethtool_ops = { #endif .set_rxnfc = gfar_set_nfc, .get_rxnfc = gfar_get_nfc, + .get_ts_info = gfar_get_ts_info, }; diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c index 83e0ed757e3..bb568006f37 100644 --- a/drivers/net/ethernet/freescale/gianfar_ptp.c +++ b/drivers/net/ethernet/freescale/gianfar_ptp.c @@ -17,9 +17,11 @@ * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/device.h> #include <linux/hrtimer.h> -#include <linux/init.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/module.h> @@ -127,12 +129,11 @@ struct gianfar_ptp_registers { #define DRIVER "gianfar_ptp" #define DEFAULT_CKSEL 1 -#define N_ALARM 1 /* first alarm is used internally to reset fipers */ #define N_EXT_TS 2 #define REG_SIZE sizeof(struct gianfar_ptp_registers) struct etsects { - struct gianfar_ptp_registers *regs; + struct gianfar_ptp_registers __iomem *regs; spinlock_t lock; /* protects regs */ struct ptp_clock *clock; struct ptp_clock_info caps; @@ -410,9 +411,10 @@ static struct ptp_clock_info ptp_gianfar_caps = { .owner = THIS_MODULE, .name = "gianfar clock", .max_adj = 512000, - .n_alarm = N_ALARM, + .n_alarm = 0, .n_ext_ts = N_EXT_TS, .n_per_out = 0, + .n_pins = 0, .pps = 1, .adjfreq = ptp_gianfar_adjfreq, .adjtime = ptp_gianfar_adjtime, @@ -450,7 +452,9 @@ static int gianfar_ptp_probe(struct platform_device *dev) err = -ENODEV; etsects->caps = ptp_gianfar_caps; - etsects->cksel = DEFAULT_CKSEL; + + if (get_of_u32(node, "fsl,cksel", &etsects->cksel)) + etsects->cksel = DEFAULT_CKSEL; if (get_of_u32(node, "fsl,tclk-period", &etsects->tclk_period) || get_of_u32(node, "fsl,tmr-prsc", &etsects->tmr_prsc) || @@ -478,7 +482,7 @@ static int gianfar_ptp_probe(struct platform_device *dev) pr_err("no resource\n"); goto no_resource; } - if (request_resource(&ioport_resource, etsects->rsrc)) { + if (request_resource(&iomem_resource, etsects->rsrc)) { pr_err("resource busy\n"); goto no_resource; } @@ -510,17 +514,19 @@ static int gianfar_ptp_probe(struct platform_device *dev) spin_unlock_irqrestore(&etsects->lock, flags); - etsects->clock = ptp_clock_register(&etsects->caps); + etsects->clock = ptp_clock_register(&etsects->caps, &dev->dev); if (IS_ERR(etsects->clock)) { err = PTR_ERR(etsects->clock); goto no_clock; } + gfar_phc_index = ptp_clock_index(etsects->clock); - dev_set_drvdata(&dev->dev, etsects); + platform_set_drvdata(dev, etsects); return 0; no_clock: + iounmap(etsects->regs); no_ioremap: release_resource(etsects->rsrc); no_resource: @@ -533,11 +539,12 @@ no_memory: static int gianfar_ptp_remove(struct platform_device *dev) { - struct etsects *etsects = dev_get_drvdata(&dev->dev); + struct etsects *etsects = platform_get_drvdata(dev); gfar_write(&etsects->regs->tmr_temask, 0); gfar_write(&etsects->regs->tmr_ctrl, 0); + gfar_phc_index = -1; ptp_clock_unregister(etsects->clock); iounmap(etsects->regs); release_resource(etsects->rsrc); @@ -564,6 +571,6 @@ static struct platform_driver gianfar_ptp_driver = { module_platform_driver(gianfar_ptp_driver); -MODULE_AUTHOR("Richard Cochran <richard.cochran@omicron.at>"); +MODULE_AUTHOR("Richard Cochran <richardcochran@gmail.com>"); MODULE_DESCRIPTION("PTP clock using the eTSEC"); MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/freescale/gianfar_sysfs.c b/drivers/net/ethernet/freescale/gianfar_sysfs.c deleted file mode 100644 index 64f4094ac7f..00000000000 --- a/drivers/net/ethernet/freescale/gianfar_sysfs.c +++ /dev/null @@ -1,341 +0,0 @@ -/* - * drivers/net/gianfar_sysfs.c - * - * Gianfar Ethernet Driver - * This driver is designed for the non-CPM ethernet controllers - * on the 85xx and 83xx family of integrated processors - * Based on 8260_io/fcc_enet.c - * - * Author: Andy Fleming - * Maintainer: Kumar Gala (galak@kernel.crashing.org) - * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com> - * - * Copyright 2002-2009 Freescale 
Semiconductor, Inc. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - * - * Sysfs file creation and management - */ - -#include <linux/kernel.h> -#include <linux/string.h> -#include <linux/errno.h> -#include <linux/unistd.h> -#include <linux/init.h> -#include <linux/delay.h> -#include <linux/etherdevice.h> -#include <linux/spinlock.h> -#include <linux/mm.h> -#include <linux/device.h> - -#include <asm/uaccess.h> -#include <linux/module.h> - -#include "gianfar.h" - -static ssize_t gfar_show_bd_stash(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct gfar_private *priv = netdev_priv(to_net_dev(dev)); - - return sprintf(buf, "%s\n", priv->bd_stash_en ? "on" : "off"); -} - -static ssize_t gfar_set_bd_stash(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) -{ - struct gfar_private *priv = netdev_priv(to_net_dev(dev)); - struct gfar __iomem *regs = priv->gfargrp[0].regs; - int new_setting = 0; - u32 temp; - unsigned long flags; - - if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_BD_STASHING)) - return count; - - - /* Find out the new setting */ - if (!strncmp("on", buf, count - 1) || !strncmp("1", buf, count - 1)) - new_setting = 1; - else if (!strncmp("off", buf, count - 1) || - !strncmp("0", buf, count - 1)) - new_setting = 0; - else - return count; - - - local_irq_save(flags); - lock_rx_qs(priv); - - /* Set the new stashing value */ - priv->bd_stash_en = new_setting; - - temp = gfar_read(®s->attr); - - if (new_setting) - temp |= ATTR_BDSTASH; - else - temp &= ~(ATTR_BDSTASH); - - gfar_write(®s->attr, temp); - - unlock_rx_qs(priv); - local_irq_restore(flags); - - return count; -} - -static DEVICE_ATTR(bd_stash, 0644, gfar_show_bd_stash, gfar_set_bd_stash); - -static ssize_t gfar_show_rx_stash_size(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct gfar_private *priv = netdev_priv(to_net_dev(dev)); - - return sprintf(buf, "%d\n", priv->rx_stash_size); -} - -static ssize_t gfar_set_rx_stash_size(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) -{ - struct gfar_private *priv = netdev_priv(to_net_dev(dev)); - struct gfar __iomem *regs = priv->gfargrp[0].regs; - unsigned int length = simple_strtoul(buf, NULL, 0); - u32 temp; - unsigned long flags; - - if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_BUF_STASHING)) - return count; - - local_irq_save(flags); - lock_rx_qs(priv); - - if (length > priv->rx_buffer_size) - goto out; - - if (length == priv->rx_stash_size) - goto out; - - priv->rx_stash_size = length; - - temp = gfar_read(®s->attreli); - temp &= ~ATTRELI_EL_MASK; - temp |= ATTRELI_EL(length); - gfar_write(®s->attreli, temp); - - /* Turn stashing on/off as appropriate */ - temp = gfar_read(®s->attr); - - if (length) - temp |= ATTR_BUFSTASH; - else - temp &= ~(ATTR_BUFSTASH); - - gfar_write(®s->attr, temp); - -out: - unlock_rx_qs(priv); - local_irq_restore(flags); - - return count; -} - -static DEVICE_ATTR(rx_stash_size, 0644, gfar_show_rx_stash_size, - gfar_set_rx_stash_size); - -/* Stashing will only be enabled when rx_stash_size != 0 */ -static ssize_t gfar_show_rx_stash_index(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct gfar_private *priv = netdev_priv(to_net_dev(dev)); - - return sprintf(buf, "%d\n", priv->rx_stash_index); -} - 
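/* For context, a minimal sketch (not part of the patch; the interface
 * name "eth0" is hypothetical): attributes created by this file appeared
 * under /sys/class/net/<if>/, so userspace could read a stashing knob as
 * below. After this commit the fopen() fails because the attribute is
 * gone.
 */
#include <stdio.h>

int main(void)
{
	char buf[16];
	FILE *f = fopen("/sys/class/net/eth0/rx_stash_index", "r");

	if (!f)
		return 1;	/* attribute removed together with this file */
	if (fgets(buf, sizeof(buf), f))
		printf("rx_stash_index: %s", buf);
	fclose(f);
	return 0;
}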
-static ssize_t gfar_set_rx_stash_index(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) -{ - struct gfar_private *priv = netdev_priv(to_net_dev(dev)); - struct gfar __iomem *regs = priv->gfargrp[0].regs; - unsigned short index = simple_strtoul(buf, NULL, 0); - u32 temp; - unsigned long flags; - - if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_BUF_STASHING)) - return count; - - local_irq_save(flags); - lock_rx_qs(priv); - - if (index > priv->rx_stash_size) - goto out; - - if (index == priv->rx_stash_index) - goto out; - - priv->rx_stash_index = index; - - temp = gfar_read(®s->attreli); - temp &= ~ATTRELI_EI_MASK; - temp |= ATTRELI_EI(index); - gfar_write(®s->attreli, temp); - -out: - unlock_rx_qs(priv); - local_irq_restore(flags); - - return count; -} - -static DEVICE_ATTR(rx_stash_index, 0644, gfar_show_rx_stash_index, - gfar_set_rx_stash_index); - -static ssize_t gfar_show_fifo_threshold(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct gfar_private *priv = netdev_priv(to_net_dev(dev)); - - return sprintf(buf, "%d\n", priv->fifo_threshold); -} - -static ssize_t gfar_set_fifo_threshold(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) -{ - struct gfar_private *priv = netdev_priv(to_net_dev(dev)); - struct gfar __iomem *regs = priv->gfargrp[0].regs; - unsigned int length = simple_strtoul(buf, NULL, 0); - u32 temp; - unsigned long flags; - - if (length > GFAR_MAX_FIFO_THRESHOLD) - return count; - - local_irq_save(flags); - lock_tx_qs(priv); - - priv->fifo_threshold = length; - - temp = gfar_read(®s->fifo_tx_thr); - temp &= ~FIFO_TX_THR_MASK; - temp |= length; - gfar_write(®s->fifo_tx_thr, temp); - - unlock_tx_qs(priv); - local_irq_restore(flags); - - return count; -} - -static DEVICE_ATTR(fifo_threshold, 0644, gfar_show_fifo_threshold, - gfar_set_fifo_threshold); - -static ssize_t gfar_show_fifo_starve(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct gfar_private *priv = netdev_priv(to_net_dev(dev)); - - return sprintf(buf, "%d\n", priv->fifo_starve); -} - -static ssize_t gfar_set_fifo_starve(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) -{ - struct gfar_private *priv = netdev_priv(to_net_dev(dev)); - struct gfar __iomem *regs = priv->gfargrp[0].regs; - unsigned int num = simple_strtoul(buf, NULL, 0); - u32 temp; - unsigned long flags; - - if (num > GFAR_MAX_FIFO_STARVE) - return count; - - local_irq_save(flags); - lock_tx_qs(priv); - - priv->fifo_starve = num; - - temp = gfar_read(®s->fifo_tx_starve); - temp &= ~FIFO_TX_STARVE_MASK; - temp |= num; - gfar_write(®s->fifo_tx_starve, temp); - - unlock_tx_qs(priv); - local_irq_restore(flags); - - return count; -} - -static DEVICE_ATTR(fifo_starve, 0644, gfar_show_fifo_starve, - gfar_set_fifo_starve); - -static ssize_t gfar_show_fifo_starve_off(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct gfar_private *priv = netdev_priv(to_net_dev(dev)); - - return sprintf(buf, "%d\n", priv->fifo_starve_off); -} - -static ssize_t gfar_set_fifo_starve_off(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) -{ - struct gfar_private *priv = netdev_priv(to_net_dev(dev)); - struct gfar __iomem *regs = priv->gfargrp[0].regs; - unsigned int num = simple_strtoul(buf, NULL, 0); - u32 temp; - unsigned long flags; - - if (num > GFAR_MAX_FIFO_STARVE_OFF) - return count; - - local_irq_save(flags); - lock_tx_qs(priv); - - priv->fifo_starve_off 
= num; - - temp = gfar_read(®s->fifo_tx_starve_shutoff); - temp &= ~FIFO_TX_STARVE_OFF_MASK; - temp |= num; - gfar_write(®s->fifo_tx_starve_shutoff, temp); - - unlock_tx_qs(priv); - local_irq_restore(flags); - - return count; -} - -static DEVICE_ATTR(fifo_starve_off, 0644, gfar_show_fifo_starve_off, - gfar_set_fifo_starve_off); - -void gfar_init_sysfs(struct net_device *dev) -{ - struct gfar_private *priv = netdev_priv(dev); - int rc; - - /* Initialize the default values */ - priv->fifo_threshold = DEFAULT_FIFO_TX_THR; - priv->fifo_starve = DEFAULT_FIFO_TX_STARVE; - priv->fifo_starve_off = DEFAULT_FIFO_TX_STARVE_OFF; - - /* Create our sysfs files */ - rc = device_create_file(&dev->dev, &dev_attr_bd_stash); - rc |= device_create_file(&dev->dev, &dev_attr_rx_stash_size); - rc |= device_create_file(&dev->dev, &dev_attr_rx_stash_index); - rc |= device_create_file(&dev->dev, &dev_attr_fifo_threshold); - rc |= device_create_file(&dev->dev, &dev_attr_fifo_starve); - rc |= device_create_file(&dev->dev, &dev_attr_fifo_starve_off); - if (rc) - dev_err(&dev->dev, "Error creating gianfar sysfs files.\n"); -} diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c index ba2dc083bfc..36fc429298e 100644 --- a/drivers/net/ethernet/freescale/ucc_geth.c +++ b/drivers/net/ethernet/freescale/ucc_geth.c @@ -12,6 +12,9 @@ * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/kernel.h> #include <linux/init.h> #include <linux/errno.h> @@ -28,6 +31,8 @@ #include <linux/mii.h> #include <linux/phy.h> #include <linux/workqueue.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> #include <linux/of_mdio.h> #include <linux/of_net.h> #include <linux/of_platform.h> @@ -42,7 +47,6 @@ #include <asm/machdep.h> #include "ucc_geth.h" -#include "fsl_pq_mdio.h" #undef DEBUG @@ -51,12 +55,6 @@ #define ugeth_dbg(format, arg...) \ ugeth_printk(KERN_DEBUG , format , ## arg) -#define ugeth_err(format, arg...) \ - ugeth_printk(KERN_ERR , format , ## arg) -#define ugeth_info(format, arg...) \ - ugeth_printk(KERN_INFO , format , ## arg) -#define ugeth_warn(format, arg...) \ - ugeth_printk(KERN_WARNING , format , ## arg) #ifdef UGETH_VERBOSE_DEBUG #define ugeth_vdbg ugeth_dbg @@ -116,10 +114,10 @@ static struct ucc_geth_info ugeth_primary_info = { .maxGroupAddrInHash = 4, .maxIndAddrInHash = 4, .prel = 7, - .maxFrameLength = 1518, + .maxFrameLength = 1518+16, /* Add extra bytes for VLANs etc. */ .minFrameLength = 64, - .maxD1Length = 1520, - .maxD2Length = 1520, + .maxD1Length = 1520+16, /* Add extra bytes for VLANs etc. */ + .maxD2Length = 1520+16, /* Add extra bytes for VLANs etc. 
*/ .vlantype = 0x8100, .ecamptr = ((uint32_t) NULL), .eventRegMask = UCCE_OTHER, @@ -185,7 +183,7 @@ static void mem_disp(u8 *addr, int size) for (; (u32) i < (u32) addr + size4Aling; i += 4) printk("%08x ", *((u32 *) (i))); for (; (u32) i < (u32) addr + size; i++) - printk("%02x", *((u8 *) (i))); + printk("%02x", *((i))); if (notAlign == 1) printk("\r\n"); } @@ -210,13 +208,12 @@ static struct list_head *dequeue(struct list_head *lh) static struct sk_buff *get_new_skb(struct ucc_geth_private *ugeth, u8 __iomem *bd) { - struct sk_buff *skb = NULL; + struct sk_buff *skb; - skb = __skb_dequeue(&ugeth->rx_recycle); + skb = netdev_alloc_skb(ugeth->ndev, + ugeth->ug_info->uf_info.max_rx_buf_length + + UCC_GETH_RX_DATA_BUF_ALIGNMENT); if (!skb) - skb = dev_alloc_skb(ugeth->ug_info->uf_info.max_rx_buf_length + - UCC_GETH_RX_DATA_BUF_ALIGNMENT); - if (skb == NULL) return NULL; /* We need the data buffer to be aligned properly. We will reserve @@ -227,8 +224,6 @@ static struct sk_buff *get_new_skb(struct ucc_geth_private *ugeth, (((unsigned)skb->data) & (UCC_GETH_RX_DATA_BUF_ALIGNMENT - 1))); - skb->dev = ugeth->ndev; - out_be32(&((struct qe_bd __iomem *)bd)->buf, dma_map_single(ugeth->dev, skb->data, @@ -285,7 +280,7 @@ static int fill_init_enet_entries(struct ucc_geth_private *ugeth, for (i = 0; i < num_entries; i++) { if ((snum = qe_get_snum()) < 0) { if (netif_msg_ifup(ugeth)) - ugeth_err("fill_init_enet_entries: Can not get SNUM."); + pr_err("Can not get SNUM\n"); return snum; } if ((i == 0) && skip_page_for_first_entry) @@ -296,7 +291,7 @@ static int fill_init_enet_entries(struct ucc_geth_private *ugeth, qe_muram_alloc(thread_size, thread_alignment); if (IS_ERR_VALUE(init_enet_offset)) { if (netif_msg_ifup(ugeth)) - ugeth_err("fill_init_enet_entries: Can not allocate DPRAM memory."); + pr_err("Can not allocate DPRAM memory\n"); qe_put_snum((u8) snum); return -ENOMEM; } @@ -369,10 +364,9 @@ static int dump_init_enet_entries(struct ucc_geth_private *ugeth, init_enet_offset = (in_be32(p_start) & ENET_INIT_PARAM_PTR_MASK); - ugeth_info("Init enet entry %d:", i); - ugeth_info("Base address: 0x%08x", - (u32) - qe_muram_addr(init_enet_offset)); + pr_info("Init enet entry %d:\n", i); + pr_info("Base address: 0x%08x\n", + (u32)qe_muram_addr(init_enet_offset)); mem_disp(qe_muram_addr(init_enet_offset), thread_size); } @@ -400,8 +394,8 @@ static int hw_clear_addr_in_paddr(struct ucc_geth_private *ugeth, u8 paddr_num) { struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt; - if (!(paddr_num < NUM_OF_PADDRS)) { - ugeth_warn("%s: Illagel paddr_num.", __func__); + if (paddr_num >= NUM_OF_PADDRS) { + pr_warn("%s: Invalid paddr_num: %u\n", __func__, paddr_num); return -EINVAL; } @@ -441,11 +435,6 @@ static void hw_add_addr_in_hash(struct ucc_geth_private *ugeth, QE_CR_PROTOCOL_ETHERNET, 0); } -static inline int compare_addr(u8 **addr1, u8 **addr2) -{ - return memcmp(addr1, addr2, ETH_ALEN); -} - #ifdef DEBUG static void get_statistics(struct ucc_geth_private *ugeth, struct ucc_geth_tx_firmware_statistics * @@ -577,7 +566,7 @@ static void dump_bds(struct ucc_geth_private *ugeth) length = (ugeth->ug_info->bdRingLenTx[i] * sizeof(struct qe_bd)); - ugeth_info("TX BDs[%d]", i); + pr_info("TX BDs[%d]\n", i); mem_disp(ugeth->p_tx_bd_ring[i], length); } } @@ -586,7 +575,7 @@ static void dump_bds(struct ucc_geth_private *ugeth) length = (ugeth->ug_info->bdRingLenRx[i] * sizeof(struct qe_bd)); - ugeth_info("RX BDs[%d]", i); + pr_info("RX BDs[%d]\n", i); mem_disp(ugeth->p_rx_bd_ring[i], length); } } @@ 
-596,93 +585,93 @@ static void dump_regs(struct ucc_geth_private *ugeth) { int i; - ugeth_info("UCC%d Geth registers:", ugeth->ug_info->uf_info.ucc_num + 1); - ugeth_info("Base address: 0x%08x", (u32) ugeth->ug_regs); - - ugeth_info("maccfg1 : addr - 0x%08x, val - 0x%08x", - (u32) & ugeth->ug_regs->maccfg1, - in_be32(&ugeth->ug_regs->maccfg1)); - ugeth_info("maccfg2 : addr - 0x%08x, val - 0x%08x", - (u32) & ugeth->ug_regs->maccfg2, - in_be32(&ugeth->ug_regs->maccfg2)); - ugeth_info("ipgifg : addr - 0x%08x, val - 0x%08x", - (u32) & ugeth->ug_regs->ipgifg, - in_be32(&ugeth->ug_regs->ipgifg)); - ugeth_info("hafdup : addr - 0x%08x, val - 0x%08x", - (u32) & ugeth->ug_regs->hafdup, - in_be32(&ugeth->ug_regs->hafdup)); - ugeth_info("ifctl : addr - 0x%08x, val - 0x%08x", - (u32) & ugeth->ug_regs->ifctl, - in_be32(&ugeth->ug_regs->ifctl)); - ugeth_info("ifstat : addr - 0x%08x, val - 0x%08x", - (u32) & ugeth->ug_regs->ifstat, - in_be32(&ugeth->ug_regs->ifstat)); - ugeth_info("macstnaddr1: addr - 0x%08x, val - 0x%08x", - (u32) & ugeth->ug_regs->macstnaddr1, - in_be32(&ugeth->ug_regs->macstnaddr1)); - ugeth_info("macstnaddr2: addr - 0x%08x, val - 0x%08x", - (u32) & ugeth->ug_regs->macstnaddr2, - in_be32(&ugeth->ug_regs->macstnaddr2)); - ugeth_info("uempr : addr - 0x%08x, val - 0x%08x", - (u32) & ugeth->ug_regs->uempr, - in_be32(&ugeth->ug_regs->uempr)); - ugeth_info("utbipar : addr - 0x%08x, val - 0x%08x", - (u32) & ugeth->ug_regs->utbipar, - in_be32(&ugeth->ug_regs->utbipar)); - ugeth_info("uescr : addr - 0x%08x, val - 0x%04x", - (u32) & ugeth->ug_regs->uescr, - in_be16(&ugeth->ug_regs->uescr)); - ugeth_info("tx64 : addr - 0x%08x, val - 0x%08x", - (u32) & ugeth->ug_regs->tx64, - in_be32(&ugeth->ug_regs->tx64)); - ugeth_info("tx127 : addr - 0x%08x, val - 0x%08x", - (u32) & ugeth->ug_regs->tx127, - in_be32(&ugeth->ug_regs->tx127)); - ugeth_info("tx255 : addr - 0x%08x, val - 0x%08x", - (u32) & ugeth->ug_regs->tx255, - in_be32(&ugeth->ug_regs->tx255)); - ugeth_info("rx64 : addr - 0x%08x, val - 0x%08x", - (u32) & ugeth->ug_regs->rx64, - in_be32(&ugeth->ug_regs->rx64)); - ugeth_info("rx127 : addr - 0x%08x, val - 0x%08x", - (u32) & ugeth->ug_regs->rx127, - in_be32(&ugeth->ug_regs->rx127)); - ugeth_info("rx255 : addr - 0x%08x, val - 0x%08x", - (u32) & ugeth->ug_regs->rx255, - in_be32(&ugeth->ug_regs->rx255)); - ugeth_info("txok : addr - 0x%08x, val - 0x%08x", - (u32) & ugeth->ug_regs->txok, - in_be32(&ugeth->ug_regs->txok)); - ugeth_info("txcf : addr - 0x%08x, val - 0x%04x", - (u32) & ugeth->ug_regs->txcf, - in_be16(&ugeth->ug_regs->txcf)); - ugeth_info("tmca : addr - 0x%08x, val - 0x%08x", - (u32) & ugeth->ug_regs->tmca, - in_be32(&ugeth->ug_regs->tmca)); - ugeth_info("tbca : addr - 0x%08x, val - 0x%08x", - (u32) & ugeth->ug_regs->tbca, - in_be32(&ugeth->ug_regs->tbca)); - ugeth_info("rxfok : addr - 0x%08x, val - 0x%08x", - (u32) & ugeth->ug_regs->rxfok, - in_be32(&ugeth->ug_regs->rxfok)); - ugeth_info("rxbok : addr - 0x%08x, val - 0x%08x", - (u32) & ugeth->ug_regs->rxbok, - in_be32(&ugeth->ug_regs->rxbok)); - ugeth_info("rbyt : addr - 0x%08x, val - 0x%08x", - (u32) & ugeth->ug_regs->rbyt, - in_be32(&ugeth->ug_regs->rbyt)); - ugeth_info("rmca : addr - 0x%08x, val - 0x%08x", - (u32) & ugeth->ug_regs->rmca, - in_be32(&ugeth->ug_regs->rmca)); - ugeth_info("rbca : addr - 0x%08x, val - 0x%08x", - (u32) & ugeth->ug_regs->rbca, - in_be32(&ugeth->ug_regs->rbca)); - ugeth_info("scar : addr - 0x%08x, val - 0x%08x", - (u32) & ugeth->ug_regs->scar, - in_be32(&ugeth->ug_regs->scar)); - ugeth_info("scam : addr - 0x%08x, 
val - 0x%08x", - (u32) & ugeth->ug_regs->scam, - in_be32(&ugeth->ug_regs->scam)); + pr_info("UCC%d Geth registers:\n", ugeth->ug_info->uf_info.ucc_num + 1); + pr_info("Base address: 0x%08x\n", (u32)ugeth->ug_regs); + + pr_info("maccfg1 : addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->ug_regs->maccfg1, + in_be32(&ugeth->ug_regs->maccfg1)); + pr_info("maccfg2 : addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->ug_regs->maccfg2, + in_be32(&ugeth->ug_regs->maccfg2)); + pr_info("ipgifg : addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->ug_regs->ipgifg, + in_be32(&ugeth->ug_regs->ipgifg)); + pr_info("hafdup : addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->ug_regs->hafdup, + in_be32(&ugeth->ug_regs->hafdup)); + pr_info("ifctl : addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->ug_regs->ifctl, + in_be32(&ugeth->ug_regs->ifctl)); + pr_info("ifstat : addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->ug_regs->ifstat, + in_be32(&ugeth->ug_regs->ifstat)); + pr_info("macstnaddr1: addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->ug_regs->macstnaddr1, + in_be32(&ugeth->ug_regs->macstnaddr1)); + pr_info("macstnaddr2: addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->ug_regs->macstnaddr2, + in_be32(&ugeth->ug_regs->macstnaddr2)); + pr_info("uempr : addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->ug_regs->uempr, + in_be32(&ugeth->ug_regs->uempr)); + pr_info("utbipar : addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->ug_regs->utbipar, + in_be32(&ugeth->ug_regs->utbipar)); + pr_info("uescr : addr - 0x%08x, val - 0x%04x\n", + (u32)&ugeth->ug_regs->uescr, + in_be16(&ugeth->ug_regs->uescr)); + pr_info("tx64 : addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->ug_regs->tx64, + in_be32(&ugeth->ug_regs->tx64)); + pr_info("tx127 : addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->ug_regs->tx127, + in_be32(&ugeth->ug_regs->tx127)); + pr_info("tx255 : addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->ug_regs->tx255, + in_be32(&ugeth->ug_regs->tx255)); + pr_info("rx64 : addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->ug_regs->rx64, + in_be32(&ugeth->ug_regs->rx64)); + pr_info("rx127 : addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->ug_regs->rx127, + in_be32(&ugeth->ug_regs->rx127)); + pr_info("rx255 : addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->ug_regs->rx255, + in_be32(&ugeth->ug_regs->rx255)); + pr_info("txok : addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->ug_regs->txok, + in_be32(&ugeth->ug_regs->txok)); + pr_info("txcf : addr - 0x%08x, val - 0x%04x\n", + (u32)&ugeth->ug_regs->txcf, + in_be16(&ugeth->ug_regs->txcf)); + pr_info("tmca : addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->ug_regs->tmca, + in_be32(&ugeth->ug_regs->tmca)); + pr_info("tbca : addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->ug_regs->tbca, + in_be32(&ugeth->ug_regs->tbca)); + pr_info("rxfok : addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->ug_regs->rxfok, + in_be32(&ugeth->ug_regs->rxfok)); + pr_info("rxbok : addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->ug_regs->rxbok, + in_be32(&ugeth->ug_regs->rxbok)); + pr_info("rbyt : addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->ug_regs->rbyt, + in_be32(&ugeth->ug_regs->rbyt)); + pr_info("rmca : addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->ug_regs->rmca, + in_be32(&ugeth->ug_regs->rmca)); + pr_info("rbca : addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->ug_regs->rbca, + in_be32(&ugeth->ug_regs->rbca)); + pr_info("scar : addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->ug_regs->scar, + in_be32(&ugeth->ug_regs->scar)); + pr_info("scam : addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->ug_regs->scam, + in_be32(&ugeth->ug_regs->scam)); if 
(ugeth->p_thread_data_tx) { int numThreadsTxNumerical; @@ -707,13 +696,13 @@ static void dump_regs(struct ucc_geth_private *ugeth) break; } - ugeth_info("Thread data TXs:"); - ugeth_info("Base address: 0x%08x", - (u32) ugeth->p_thread_data_tx); + pr_info("Thread data TXs:\n"); + pr_info("Base address: 0x%08x\n", + (u32)ugeth->p_thread_data_tx); for (i = 0; i < numThreadsTxNumerical; i++) { - ugeth_info("Thread data TX[%d]:", i); - ugeth_info("Base address: 0x%08x", - (u32) & ugeth->p_thread_data_tx[i]); + pr_info("Thread data TX[%d]:\n", i); + pr_info("Base address: 0x%08x\n", + (u32)&ugeth->p_thread_data_tx[i]); mem_disp((u8 *) & ugeth->p_thread_data_tx[i], sizeof(struct ucc_geth_thread_data_tx)); } @@ -741,270 +730,260 @@ static void dump_regs(struct ucc_geth_private *ugeth) break; } - ugeth_info("Thread data RX:"); - ugeth_info("Base address: 0x%08x", - (u32) ugeth->p_thread_data_rx); + pr_info("Thread data RX:\n"); + pr_info("Base address: 0x%08x\n", + (u32)ugeth->p_thread_data_rx); for (i = 0; i < numThreadsRxNumerical; i++) { - ugeth_info("Thread data RX[%d]:", i); - ugeth_info("Base address: 0x%08x", - (u32) & ugeth->p_thread_data_rx[i]); + pr_info("Thread data RX[%d]:\n", i); + pr_info("Base address: 0x%08x\n", + (u32)&ugeth->p_thread_data_rx[i]); mem_disp((u8 *) & ugeth->p_thread_data_rx[i], sizeof(struct ucc_geth_thread_data_rx)); } } if (ugeth->p_exf_glbl_param) { - ugeth_info("EXF global param:"); - ugeth_info("Base address: 0x%08x", - (u32) ugeth->p_exf_glbl_param); + pr_info("EXF global param:\n"); + pr_info("Base address: 0x%08x\n", + (u32)ugeth->p_exf_glbl_param); mem_disp((u8 *) ugeth->p_exf_glbl_param, sizeof(*ugeth->p_exf_glbl_param)); } if (ugeth->p_tx_glbl_pram) { - ugeth_info("TX global param:"); - ugeth_info("Base address: 0x%08x", (u32) ugeth->p_tx_glbl_pram); - ugeth_info("temoder : addr - 0x%08x, val - 0x%04x", - (u32) & ugeth->p_tx_glbl_pram->temoder, - in_be16(&ugeth->p_tx_glbl_pram->temoder)); - ugeth_info("sqptr : addr - 0x%08x, val - 0x%08x", - (u32) & ugeth->p_tx_glbl_pram->sqptr, - in_be32(&ugeth->p_tx_glbl_pram->sqptr)); - ugeth_info("schedulerbasepointer: addr - 0x%08x, val - 0x%08x", - (u32) & ugeth->p_tx_glbl_pram->schedulerbasepointer, - in_be32(&ugeth->p_tx_glbl_pram-> - schedulerbasepointer)); - ugeth_info("txrmonbaseptr: addr - 0x%08x, val - 0x%08x", - (u32) & ugeth->p_tx_glbl_pram->txrmonbaseptr, - in_be32(&ugeth->p_tx_glbl_pram->txrmonbaseptr)); - ugeth_info("tstate : addr - 0x%08x, val - 0x%08x", - (u32) & ugeth->p_tx_glbl_pram->tstate, - in_be32(&ugeth->p_tx_glbl_pram->tstate)); - ugeth_info("iphoffset[0] : addr - 0x%08x, val - 0x%02x", - (u32) & ugeth->p_tx_glbl_pram->iphoffset[0], - ugeth->p_tx_glbl_pram->iphoffset[0]); - ugeth_info("iphoffset[1] : addr - 0x%08x, val - 0x%02x", - (u32) & ugeth->p_tx_glbl_pram->iphoffset[1], - ugeth->p_tx_glbl_pram->iphoffset[1]); - ugeth_info("iphoffset[2] : addr - 0x%08x, val - 0x%02x", - (u32) & ugeth->p_tx_glbl_pram->iphoffset[2], - ugeth->p_tx_glbl_pram->iphoffset[2]); - ugeth_info("iphoffset[3] : addr - 0x%08x, val - 0x%02x", - (u32) & ugeth->p_tx_glbl_pram->iphoffset[3], - ugeth->p_tx_glbl_pram->iphoffset[3]); - ugeth_info("iphoffset[4] : addr - 0x%08x, val - 0x%02x", - (u32) & ugeth->p_tx_glbl_pram->iphoffset[4], - ugeth->p_tx_glbl_pram->iphoffset[4]); - ugeth_info("iphoffset[5] : addr - 0x%08x, val - 0x%02x", - (u32) & ugeth->p_tx_glbl_pram->iphoffset[5], - ugeth->p_tx_glbl_pram->iphoffset[5]); - ugeth_info("iphoffset[6] : addr - 0x%08x, val - 0x%02x", - (u32) & ugeth->p_tx_glbl_pram->iphoffset[6], - 
ugeth->p_tx_glbl_pram->iphoffset[6]); - ugeth_info("iphoffset[7] : addr - 0x%08x, val - 0x%02x", - (u32) & ugeth->p_tx_glbl_pram->iphoffset[7], - ugeth->p_tx_glbl_pram->iphoffset[7]); - ugeth_info("vtagtable[0] : addr - 0x%08x, val - 0x%08x", - (u32) & ugeth->p_tx_glbl_pram->vtagtable[0], - in_be32(&ugeth->p_tx_glbl_pram->vtagtable[0])); - ugeth_info("vtagtable[1] : addr - 0x%08x, val - 0x%08x", - (u32) & ugeth->p_tx_glbl_pram->vtagtable[1], - in_be32(&ugeth->p_tx_glbl_pram->vtagtable[1])); - ugeth_info("vtagtable[2] : addr - 0x%08x, val - 0x%08x", - (u32) & ugeth->p_tx_glbl_pram->vtagtable[2], - in_be32(&ugeth->p_tx_glbl_pram->vtagtable[2])); - ugeth_info("vtagtable[3] : addr - 0x%08x, val - 0x%08x", - (u32) & ugeth->p_tx_glbl_pram->vtagtable[3], - in_be32(&ugeth->p_tx_glbl_pram->vtagtable[3])); - ugeth_info("vtagtable[4] : addr - 0x%08x, val - 0x%08x", - (u32) & ugeth->p_tx_glbl_pram->vtagtable[4], - in_be32(&ugeth->p_tx_glbl_pram->vtagtable[4])); - ugeth_info("vtagtable[5] : addr - 0x%08x, val - 0x%08x", - (u32) & ugeth->p_tx_glbl_pram->vtagtable[5], - in_be32(&ugeth->p_tx_glbl_pram->vtagtable[5])); - ugeth_info("vtagtable[6] : addr - 0x%08x, val - 0x%08x", - (u32) & ugeth->p_tx_glbl_pram->vtagtable[6], - in_be32(&ugeth->p_tx_glbl_pram->vtagtable[6])); - ugeth_info("vtagtable[7] : addr - 0x%08x, val - 0x%08x", - (u32) & ugeth->p_tx_glbl_pram->vtagtable[7], - in_be32(&ugeth->p_tx_glbl_pram->vtagtable[7])); - ugeth_info("tqptr : addr - 0x%08x, val - 0x%08x", - (u32) & ugeth->p_tx_glbl_pram->tqptr, - in_be32(&ugeth->p_tx_glbl_pram->tqptr)); + pr_info("TX global param:\n"); + pr_info("Base address: 0x%08x\n", (u32)ugeth->p_tx_glbl_pram); + pr_info("temoder : addr - 0x%08x, val - 0x%04x\n", + (u32)&ugeth->p_tx_glbl_pram->temoder, + in_be16(&ugeth->p_tx_glbl_pram->temoder)); + pr_info("sqptr : addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->p_tx_glbl_pram->sqptr, + in_be32(&ugeth->p_tx_glbl_pram->sqptr)); + pr_info("schedulerbasepointer: addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->p_tx_glbl_pram->schedulerbasepointer, + in_be32(&ugeth->p_tx_glbl_pram->schedulerbasepointer)); + pr_info("txrmonbaseptr: addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->p_tx_glbl_pram->txrmonbaseptr, + in_be32(&ugeth->p_tx_glbl_pram->txrmonbaseptr)); + pr_info("tstate : addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->p_tx_glbl_pram->tstate, + in_be32(&ugeth->p_tx_glbl_pram->tstate)); + pr_info("iphoffset[0] : addr - 0x%08x, val - 0x%02x\n", + (u32)&ugeth->p_tx_glbl_pram->iphoffset[0], + ugeth->p_tx_glbl_pram->iphoffset[0]); + pr_info("iphoffset[1] : addr - 0x%08x, val - 0x%02x\n", + (u32)&ugeth->p_tx_glbl_pram->iphoffset[1], + ugeth->p_tx_glbl_pram->iphoffset[1]); + pr_info("iphoffset[2] : addr - 0x%08x, val - 0x%02x\n", + (u32)&ugeth->p_tx_glbl_pram->iphoffset[2], + ugeth->p_tx_glbl_pram->iphoffset[2]); + pr_info("iphoffset[3] : addr - 0x%08x, val - 0x%02x\n", + (u32)&ugeth->p_tx_glbl_pram->iphoffset[3], + ugeth->p_tx_glbl_pram->iphoffset[3]); + pr_info("iphoffset[4] : addr - 0x%08x, val - 0x%02x\n", + (u32)&ugeth->p_tx_glbl_pram->iphoffset[4], + ugeth->p_tx_glbl_pram->iphoffset[4]); + pr_info("iphoffset[5] : addr - 0x%08x, val - 0x%02x\n", + (u32)&ugeth->p_tx_glbl_pram->iphoffset[5], + ugeth->p_tx_glbl_pram->iphoffset[5]); + pr_info("iphoffset[6] : addr - 0x%08x, val - 0x%02x\n", + (u32)&ugeth->p_tx_glbl_pram->iphoffset[6], + ugeth->p_tx_glbl_pram->iphoffset[6]); + pr_info("iphoffset[7] : addr - 0x%08x, val - 0x%02x\n", + (u32)&ugeth->p_tx_glbl_pram->iphoffset[7], + ugeth->p_tx_glbl_pram->iphoffset[7]); + 
pr_info("vtagtable[0] : addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->p_tx_glbl_pram->vtagtable[0], + in_be32(&ugeth->p_tx_glbl_pram->vtagtable[0])); + pr_info("vtagtable[1] : addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->p_tx_glbl_pram->vtagtable[1], + in_be32(&ugeth->p_tx_glbl_pram->vtagtable[1])); + pr_info("vtagtable[2] : addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->p_tx_glbl_pram->vtagtable[2], + in_be32(&ugeth->p_tx_glbl_pram->vtagtable[2])); + pr_info("vtagtable[3] : addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->p_tx_glbl_pram->vtagtable[3], + in_be32(&ugeth->p_tx_glbl_pram->vtagtable[3])); + pr_info("vtagtable[4] : addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->p_tx_glbl_pram->vtagtable[4], + in_be32(&ugeth->p_tx_glbl_pram->vtagtable[4])); + pr_info("vtagtable[5] : addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->p_tx_glbl_pram->vtagtable[5], + in_be32(&ugeth->p_tx_glbl_pram->vtagtable[5])); + pr_info("vtagtable[6] : addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->p_tx_glbl_pram->vtagtable[6], + in_be32(&ugeth->p_tx_glbl_pram->vtagtable[6])); + pr_info("vtagtable[7] : addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->p_tx_glbl_pram->vtagtable[7], + in_be32(&ugeth->p_tx_glbl_pram->vtagtable[7])); + pr_info("tqptr : addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->p_tx_glbl_pram->tqptr, + in_be32(&ugeth->p_tx_glbl_pram->tqptr)); } if (ugeth->p_rx_glbl_pram) { - ugeth_info("RX global param:"); - ugeth_info("Base address: 0x%08x", (u32) ugeth->p_rx_glbl_pram); - ugeth_info("remoder : addr - 0x%08x, val - 0x%08x", - (u32) & ugeth->p_rx_glbl_pram->remoder, - in_be32(&ugeth->p_rx_glbl_pram->remoder)); - ugeth_info("rqptr : addr - 0x%08x, val - 0x%08x", - (u32) & ugeth->p_rx_glbl_pram->rqptr, - in_be32(&ugeth->p_rx_glbl_pram->rqptr)); - ugeth_info("typeorlen : addr - 0x%08x, val - 0x%04x", - (u32) & ugeth->p_rx_glbl_pram->typeorlen, - in_be16(&ugeth->p_rx_glbl_pram->typeorlen)); - ugeth_info("rxgstpack : addr - 0x%08x, val - 0x%02x", - (u32) & ugeth->p_rx_glbl_pram->rxgstpack, - ugeth->p_rx_glbl_pram->rxgstpack); - ugeth_info("rxrmonbaseptr : addr - 0x%08x, val - 0x%08x", - (u32) & ugeth->p_rx_glbl_pram->rxrmonbaseptr, - in_be32(&ugeth->p_rx_glbl_pram->rxrmonbaseptr)); - ugeth_info("intcoalescingptr: addr - 0x%08x, val - 0x%08x", - (u32) & ugeth->p_rx_glbl_pram->intcoalescingptr, - in_be32(&ugeth->p_rx_glbl_pram->intcoalescingptr)); - ugeth_info("rstate : addr - 0x%08x, val - 0x%02x", - (u32) & ugeth->p_rx_glbl_pram->rstate, - ugeth->p_rx_glbl_pram->rstate); - ugeth_info("mrblr : addr - 0x%08x, val - 0x%04x", - (u32) & ugeth->p_rx_glbl_pram->mrblr, - in_be16(&ugeth->p_rx_glbl_pram->mrblr)); - ugeth_info("rbdqptr : addr - 0x%08x, val - 0x%08x", - (u32) & ugeth->p_rx_glbl_pram->rbdqptr, - in_be32(&ugeth->p_rx_glbl_pram->rbdqptr)); - ugeth_info("mflr : addr - 0x%08x, val - 0x%04x", - (u32) & ugeth->p_rx_glbl_pram->mflr, - in_be16(&ugeth->p_rx_glbl_pram->mflr)); - ugeth_info("minflr : addr - 0x%08x, val - 0x%04x", - (u32) & ugeth->p_rx_glbl_pram->minflr, - in_be16(&ugeth->p_rx_glbl_pram->minflr)); - ugeth_info("maxd1 : addr - 0x%08x, val - 0x%04x", - (u32) & ugeth->p_rx_glbl_pram->maxd1, - in_be16(&ugeth->p_rx_glbl_pram->maxd1)); - ugeth_info("maxd2 : addr - 0x%08x, val - 0x%04x", - (u32) & ugeth->p_rx_glbl_pram->maxd2, - in_be16(&ugeth->p_rx_glbl_pram->maxd2)); - ugeth_info("ecamptr : addr - 0x%08x, val - 0x%08x", - (u32) & ugeth->p_rx_glbl_pram->ecamptr, - in_be32(&ugeth->p_rx_glbl_pram->ecamptr)); - ugeth_info("l2qt : addr - 0x%08x, val - 0x%08x", - (u32) & ugeth->p_rx_glbl_pram->l2qt, - 
in_be32(&ugeth->p_rx_glbl_pram->l2qt)); - ugeth_info("l3qt[0] : addr - 0x%08x, val - 0x%08x", - (u32) & ugeth->p_rx_glbl_pram->l3qt[0], - in_be32(&ugeth->p_rx_glbl_pram->l3qt[0])); - ugeth_info("l3qt[1] : addr - 0x%08x, val - 0x%08x", - (u32) & ugeth->p_rx_glbl_pram->l3qt[1], - in_be32(&ugeth->p_rx_glbl_pram->l3qt[1])); - ugeth_info("l3qt[2] : addr - 0x%08x, val - 0x%08x", - (u32) & ugeth->p_rx_glbl_pram->l3qt[2], - in_be32(&ugeth->p_rx_glbl_pram->l3qt[2])); - ugeth_info("l3qt[3] : addr - 0x%08x, val - 0x%08x", - (u32) & ugeth->p_rx_glbl_pram->l3qt[3], - in_be32(&ugeth->p_rx_glbl_pram->l3qt[3])); - ugeth_info("l3qt[4] : addr - 0x%08x, val - 0x%08x", - (u32) & ugeth->p_rx_glbl_pram->l3qt[4], - in_be32(&ugeth->p_rx_glbl_pram->l3qt[4])); - ugeth_info("l3qt[5] : addr - 0x%08x, val - 0x%08x", - (u32) & ugeth->p_rx_glbl_pram->l3qt[5], - in_be32(&ugeth->p_rx_glbl_pram->l3qt[5])); - ugeth_info("l3qt[6] : addr - 0x%08x, val - 0x%08x", - (u32) & ugeth->p_rx_glbl_pram->l3qt[6], - in_be32(&ugeth->p_rx_glbl_pram->l3qt[6])); - ugeth_info("l3qt[7] : addr - 0x%08x, val - 0x%08x", - (u32) & ugeth->p_rx_glbl_pram->l3qt[7], - in_be32(&ugeth->p_rx_glbl_pram->l3qt[7])); - ugeth_info("vlantype : addr - 0x%08x, val - 0x%04x", - (u32) & ugeth->p_rx_glbl_pram->vlantype, - in_be16(&ugeth->p_rx_glbl_pram->vlantype)); - ugeth_info("vlantci : addr - 0x%08x, val - 0x%04x", - (u32) & ugeth->p_rx_glbl_pram->vlantci, - in_be16(&ugeth->p_rx_glbl_pram->vlantci)); + pr_info("RX global param:\n"); + pr_info("Base address: 0x%08x\n", (u32)ugeth->p_rx_glbl_pram); + pr_info("remoder : addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->p_rx_glbl_pram->remoder, + in_be32(&ugeth->p_rx_glbl_pram->remoder)); + pr_info("rqptr : addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->p_rx_glbl_pram->rqptr, + in_be32(&ugeth->p_rx_glbl_pram->rqptr)); + pr_info("typeorlen : addr - 0x%08x, val - 0x%04x\n", + (u32)&ugeth->p_rx_glbl_pram->typeorlen, + in_be16(&ugeth->p_rx_glbl_pram->typeorlen)); + pr_info("rxgstpack : addr - 0x%08x, val - 0x%02x\n", + (u32)&ugeth->p_rx_glbl_pram->rxgstpack, + ugeth->p_rx_glbl_pram->rxgstpack); + pr_info("rxrmonbaseptr : addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->p_rx_glbl_pram->rxrmonbaseptr, + in_be32(&ugeth->p_rx_glbl_pram->rxrmonbaseptr)); + pr_info("intcoalescingptr: addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->p_rx_glbl_pram->intcoalescingptr, + in_be32(&ugeth->p_rx_glbl_pram->intcoalescingptr)); + pr_info("rstate : addr - 0x%08x, val - 0x%02x\n", + (u32)&ugeth->p_rx_glbl_pram->rstate, + ugeth->p_rx_glbl_pram->rstate); + pr_info("mrblr : addr - 0x%08x, val - 0x%04x\n", + (u32)&ugeth->p_rx_glbl_pram->mrblr, + in_be16(&ugeth->p_rx_glbl_pram->mrblr)); + pr_info("rbdqptr : addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->p_rx_glbl_pram->rbdqptr, + in_be32(&ugeth->p_rx_glbl_pram->rbdqptr)); + pr_info("mflr : addr - 0x%08x, val - 0x%04x\n", + (u32)&ugeth->p_rx_glbl_pram->mflr, + in_be16(&ugeth->p_rx_glbl_pram->mflr)); + pr_info("minflr : addr - 0x%08x, val - 0x%04x\n", + (u32)&ugeth->p_rx_glbl_pram->minflr, + in_be16(&ugeth->p_rx_glbl_pram->minflr)); + pr_info("maxd1 : addr - 0x%08x, val - 0x%04x\n", + (u32)&ugeth->p_rx_glbl_pram->maxd1, + in_be16(&ugeth->p_rx_glbl_pram->maxd1)); + pr_info("maxd2 : addr - 0x%08x, val - 0x%04x\n", + (u32)&ugeth->p_rx_glbl_pram->maxd2, + in_be16(&ugeth->p_rx_glbl_pram->maxd2)); + pr_info("ecamptr : addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->p_rx_glbl_pram->ecamptr, + in_be32(&ugeth->p_rx_glbl_pram->ecamptr)); + pr_info("l2qt : addr - 0x%08x, val - 0x%08x\n", + 
(u32)&ugeth->p_rx_glbl_pram->l2qt, + in_be32(&ugeth->p_rx_glbl_pram->l2qt)); + pr_info("l3qt[0] : addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->p_rx_glbl_pram->l3qt[0], + in_be32(&ugeth->p_rx_glbl_pram->l3qt[0])); + pr_info("l3qt[1] : addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->p_rx_glbl_pram->l3qt[1], + in_be32(&ugeth->p_rx_glbl_pram->l3qt[1])); + pr_info("l3qt[2] : addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->p_rx_glbl_pram->l3qt[2], + in_be32(&ugeth->p_rx_glbl_pram->l3qt[2])); + pr_info("l3qt[3] : addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->p_rx_glbl_pram->l3qt[3], + in_be32(&ugeth->p_rx_glbl_pram->l3qt[3])); + pr_info("l3qt[4] : addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->p_rx_glbl_pram->l3qt[4], + in_be32(&ugeth->p_rx_glbl_pram->l3qt[4])); + pr_info("l3qt[5] : addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->p_rx_glbl_pram->l3qt[5], + in_be32(&ugeth->p_rx_glbl_pram->l3qt[5])); + pr_info("l3qt[6] : addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->p_rx_glbl_pram->l3qt[6], + in_be32(&ugeth->p_rx_glbl_pram->l3qt[6])); + pr_info("l3qt[7] : addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->p_rx_glbl_pram->l3qt[7], + in_be32(&ugeth->p_rx_glbl_pram->l3qt[7])); + pr_info("vlantype : addr - 0x%08x, val - 0x%04x\n", + (u32)&ugeth->p_rx_glbl_pram->vlantype, + in_be16(&ugeth->p_rx_glbl_pram->vlantype)); + pr_info("vlantci : addr - 0x%08x, val - 0x%04x\n", + (u32)&ugeth->p_rx_glbl_pram->vlantci, + in_be16(&ugeth->p_rx_glbl_pram->vlantci)); for (i = 0; i < 64; i++) - ugeth_info - ("addressfiltering[%d]: addr - 0x%08x, val - 0x%02x", - i, - (u32) & ugeth->p_rx_glbl_pram->addressfiltering[i], - ugeth->p_rx_glbl_pram->addressfiltering[i]); - ugeth_info("exfGlobalParam : addr - 0x%08x, val - 0x%08x", - (u32) & ugeth->p_rx_glbl_pram->exfGlobalParam, - in_be32(&ugeth->p_rx_glbl_pram->exfGlobalParam)); + pr_info("addressfiltering[%d]: addr - 0x%08x, val - 0x%02x\n", + i, + (u32)&ugeth->p_rx_glbl_pram->addressfiltering[i], + ugeth->p_rx_glbl_pram->addressfiltering[i]); + pr_info("exfGlobalParam : addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->p_rx_glbl_pram->exfGlobalParam, + in_be32(&ugeth->p_rx_glbl_pram->exfGlobalParam)); } if (ugeth->p_send_q_mem_reg) { - ugeth_info("Send Q memory registers:"); - ugeth_info("Base address: 0x%08x", - (u32) ugeth->p_send_q_mem_reg); + pr_info("Send Q memory registers:\n"); + pr_info("Base address: 0x%08x\n", (u32)ugeth->p_send_q_mem_reg); for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) { - ugeth_info("SQQD[%d]:", i); - ugeth_info("Base address: 0x%08x", - (u32) & ugeth->p_send_q_mem_reg->sqqd[i]); + pr_info("SQQD[%d]:\n", i); + pr_info("Base address: 0x%08x\n", + (u32)&ugeth->p_send_q_mem_reg->sqqd[i]); mem_disp((u8 *) & ugeth->p_send_q_mem_reg->sqqd[i], sizeof(struct ucc_geth_send_queue_qd)); } } if (ugeth->p_scheduler) { - ugeth_info("Scheduler:"); - ugeth_info("Base address: 0x%08x", (u32) ugeth->p_scheduler); + pr_info("Scheduler:\n"); + pr_info("Base address: 0x%08x\n", (u32)ugeth->p_scheduler); mem_disp((u8 *) ugeth->p_scheduler, sizeof(*ugeth->p_scheduler)); } if (ugeth->p_tx_fw_statistics_pram) { - ugeth_info("TX FW statistics pram:"); - ugeth_info("Base address: 0x%08x", - (u32) ugeth->p_tx_fw_statistics_pram); + pr_info("TX FW statistics pram:\n"); + pr_info("Base address: 0x%08x\n", + (u32)ugeth->p_tx_fw_statistics_pram); mem_disp((u8 *) ugeth->p_tx_fw_statistics_pram, sizeof(*ugeth->p_tx_fw_statistics_pram)); } if (ugeth->p_rx_fw_statistics_pram) { - ugeth_info("RX FW statistics pram:"); - ugeth_info("Base address: 0x%08x", - (u32) 
ugeth->p_rx_fw_statistics_pram); + pr_info("RX FW statistics pram:\n"); + pr_info("Base address: 0x%08x\n", + (u32)ugeth->p_rx_fw_statistics_pram); mem_disp((u8 *) ugeth->p_rx_fw_statistics_pram, sizeof(*ugeth->p_rx_fw_statistics_pram)); } if (ugeth->p_rx_irq_coalescing_tbl) { - ugeth_info("RX IRQ coalescing tables:"); - ugeth_info("Base address: 0x%08x", - (u32) ugeth->p_rx_irq_coalescing_tbl); + pr_info("RX IRQ coalescing tables:\n"); + pr_info("Base address: 0x%08x\n", + (u32)ugeth->p_rx_irq_coalescing_tbl); for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) { - ugeth_info("RX IRQ coalescing table entry[%d]:", i); - ugeth_info("Base address: 0x%08x", - (u32) & ugeth->p_rx_irq_coalescing_tbl-> - coalescingentry[i]); - ugeth_info - ("interruptcoalescingmaxvalue: addr - 0x%08x, val - 0x%08x", - (u32) & ugeth->p_rx_irq_coalescing_tbl-> - coalescingentry[i].interruptcoalescingmaxvalue, - in_be32(&ugeth->p_rx_irq_coalescing_tbl-> - coalescingentry[i]. - interruptcoalescingmaxvalue)); - ugeth_info - ("interruptcoalescingcounter : addr - 0x%08x, val - 0x%08x", - (u32) & ugeth->p_rx_irq_coalescing_tbl-> - coalescingentry[i].interruptcoalescingcounter, - in_be32(&ugeth->p_rx_irq_coalescing_tbl-> - coalescingentry[i]. - interruptcoalescingcounter)); + pr_info("RX IRQ coalescing table entry[%d]:\n", i); + pr_info("Base address: 0x%08x\n", + (u32)&ugeth->p_rx_irq_coalescing_tbl-> + coalescingentry[i]); + pr_info("interruptcoalescingmaxvalue: addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->p_rx_irq_coalescing_tbl-> + coalescingentry[i].interruptcoalescingmaxvalue, + in_be32(&ugeth->p_rx_irq_coalescing_tbl-> + coalescingentry[i]. + interruptcoalescingmaxvalue)); + pr_info("interruptcoalescingcounter : addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->p_rx_irq_coalescing_tbl-> + coalescingentry[i].interruptcoalescingcounter, + in_be32(&ugeth->p_rx_irq_coalescing_tbl-> + coalescingentry[i]. + interruptcoalescingcounter)); } } if (ugeth->p_rx_bd_qs_tbl) { - ugeth_info("RX BD QS tables:"); - ugeth_info("Base address: 0x%08x", (u32) ugeth->p_rx_bd_qs_tbl); + pr_info("RX BD QS tables:\n"); + pr_info("Base address: 0x%08x\n", (u32)ugeth->p_rx_bd_qs_tbl); for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) { - ugeth_info("RX BD QS table[%d]:", i); - ugeth_info("Base address: 0x%08x", - (u32) & ugeth->p_rx_bd_qs_tbl[i]); - ugeth_info - ("bdbaseptr : addr - 0x%08x, val - 0x%08x", - (u32) & ugeth->p_rx_bd_qs_tbl[i].bdbaseptr, - in_be32(&ugeth->p_rx_bd_qs_tbl[i].bdbaseptr)); - ugeth_info - ("bdptr : addr - 0x%08x, val - 0x%08x", - (u32) & ugeth->p_rx_bd_qs_tbl[i].bdptr, - in_be32(&ugeth->p_rx_bd_qs_tbl[i].bdptr)); - ugeth_info - ("externalbdbaseptr: addr - 0x%08x, val - 0x%08x", - (u32) & ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr, - in_be32(&ugeth->p_rx_bd_qs_tbl[i]. - externalbdbaseptr)); - ugeth_info - ("externalbdptr : addr - 0x%08x, val - 0x%08x", - (u32) & ugeth->p_rx_bd_qs_tbl[i].externalbdptr, - in_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdptr)); - ugeth_info("ucode RX Prefetched BDs:"); - ugeth_info("Base address: 0x%08x", - (u32) - qe_muram_addr(in_be32 - (&ugeth->p_rx_bd_qs_tbl[i]. 
- bdbaseptr))); + pr_info("RX BD QS table[%d]:\n", i); + pr_info("Base address: 0x%08x\n", + (u32)&ugeth->p_rx_bd_qs_tbl[i]); + pr_info("bdbaseptr : addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->p_rx_bd_qs_tbl[i].bdbaseptr, + in_be32(&ugeth->p_rx_bd_qs_tbl[i].bdbaseptr)); + pr_info("bdptr : addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->p_rx_bd_qs_tbl[i].bdptr, + in_be32(&ugeth->p_rx_bd_qs_tbl[i].bdptr)); + pr_info("externalbdbaseptr: addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr, + in_be32(&ugeth->p_rx_bd_qs_tbl[i]. + externalbdbaseptr)); + pr_info("externalbdptr : addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->p_rx_bd_qs_tbl[i].externalbdptr, + in_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdptr)); + pr_info("ucode RX Prefetched BDs:\n"); + pr_info("Base address: 0x%08x\n", + (u32)qe_muram_addr(in_be32 + (&ugeth->p_rx_bd_qs_tbl[i]. + bdbaseptr))); mem_disp((u8 *) qe_muram_addr(in_be32 (&ugeth->p_rx_bd_qs_tbl[i]. @@ -1014,9 +993,9 @@ static void dump_regs(struct ucc_geth_private *ugeth) } if (ugeth->p_init_enet_param_shadow) { int size; - ugeth_info("Init enet param shadow:"); - ugeth_info("Base address: 0x%08x", - (u32) ugeth->p_init_enet_param_shadow); + pr_info("Init enet param shadow:\n"); + pr_info("Base address: 0x%08x\n", + (u32) ugeth->p_init_enet_param_shadow); mem_disp((u8 *) ugeth->p_init_enet_param_shadow, sizeof(*ugeth->p_init_enet_param_shadow)); @@ -1396,12 +1375,11 @@ static int adjust_enet_interface(struct ucc_geth_private *ugeth) struct phy_device *tbiphy; if (!ug_info->tbi_node) - ugeth_warn("TBI mode requires that the device " - "tree specify a tbi-handle\n"); + pr_warn("TBI mode requires that the device tree specify a tbi-handle\n"); tbiphy = of_phy_find_device(ug_info->tbi_node); if (!tbiphy) - ugeth_warn("Could not get TBI device\n"); + pr_warn("Could not get TBI device\n"); value = phy_read(tbiphy, ENET_TBI_MII_CR); value &= ~0x1000; /* Turn off autonegotiation */ @@ -1413,8 +1391,7 @@ static int adjust_enet_interface(struct ucc_geth_private *ugeth) ret_val = init_preamble_length(ug_info->prel, &ug_regs->maccfg2); if (ret_val != 0) { if (netif_msg_probe(ugeth)) - ugeth_err("%s: Preamble length must be between 3 and 7 inclusive.", - __func__); + pr_err("Preamble length must be between 3 and 7 inclusive\n"); return ret_val; } @@ -1524,7 +1501,7 @@ static int ugeth_enable(struct ucc_geth_private *ugeth, enum comm_dir mode) /* check if the UCC number is in range. */ if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) { if (netif_msg_probe(ugeth)) - ugeth_err("%s: ucc_num out of range.", __func__); + pr_err("ucc_num out of range\n"); return -EINVAL; } @@ -1553,7 +1530,7 @@ static int ugeth_disable(struct ucc_geth_private *ugeth, enum comm_dir mode) /* check if the UCC number is in range. */ if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) { if (netif_msg_probe(ugeth)) - ugeth_err("%s: ucc_num out of range.", __func__); + pr_err("ucc_num out of range\n"); return -EINVAL; } @@ -1652,7 +1629,7 @@ static void adjust_link(struct net_device *dev) break; default: if (netif_msg_link(ugeth)) - ugeth_warn( + pr_warn( "%s: Ack! 
Speed (%d) is not 10/100/1000!", dev->name, phydev->speed); break; @@ -1751,9 +1728,6 @@ static int init_phy(struct net_device *dev) phydev = of_phy_connect(dev, ug_info->phy_node, &adjust_link, 0, priv->phy_interface); - if (!phydev) - phydev = of_phy_connect_fixed_link(dev, &adjust_link, - priv->phy_interface); if (!phydev) { dev_err(&dev->dev, "Could not attach to PHY\n"); return -ENODEV; @@ -1857,11 +1831,93 @@ static int ugeth_82xx_filtering_clear_addr_in_paddr(struct ucc_geth_private *uge return hw_clear_addr_in_paddr(ugeth, paddr_num);/* clear in hardware */ } -static void ucc_geth_memclean(struct ucc_geth_private *ugeth) +static void ucc_geth_free_rx(struct ucc_geth_private *ugeth) +{ + struct ucc_geth_info *ug_info; + struct ucc_fast_info *uf_info; + u16 i, j; + u8 __iomem *bd; + + + ug_info = ugeth->ug_info; + uf_info = &ug_info->uf_info; + + for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) { + if (ugeth->p_rx_bd_ring[i]) { + /* Return existing data buffers in ring */ + bd = ugeth->p_rx_bd_ring[i]; + for (j = 0; j < ugeth->ug_info->bdRingLenRx[i]; j++) { + if (ugeth->rx_skbuff[i][j]) { + dma_unmap_single(ugeth->dev, + in_be32(&((struct qe_bd __iomem *)bd)->buf), + ugeth->ug_info-> + uf_info.max_rx_buf_length + + UCC_GETH_RX_DATA_BUF_ALIGNMENT, + DMA_FROM_DEVICE); + dev_kfree_skb_any( + ugeth->rx_skbuff[i][j]); + ugeth->rx_skbuff[i][j] = NULL; + } + bd += sizeof(struct qe_bd); + } + + kfree(ugeth->rx_skbuff[i]); + + if (ugeth->ug_info->uf_info.bd_mem_part == + MEM_PART_SYSTEM) + kfree((void *)ugeth->rx_bd_ring_offset[i]); + else if (ugeth->ug_info->uf_info.bd_mem_part == + MEM_PART_MURAM) + qe_muram_free(ugeth->rx_bd_ring_offset[i]); + ugeth->p_rx_bd_ring[i] = NULL; + } + } + +} + +static void ucc_geth_free_tx(struct ucc_geth_private *ugeth) { + struct ucc_geth_info *ug_info; + struct ucc_fast_info *uf_info; u16 i, j; u8 __iomem *bd; + ug_info = ugeth->ug_info; + uf_info = &ug_info->uf_info; + + for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) { + bd = ugeth->p_tx_bd_ring[i]; + if (!bd) + continue; + for (j = 0; j < ugeth->ug_info->bdRingLenTx[i]; j++) { + if (ugeth->tx_skbuff[i][j]) { + dma_unmap_single(ugeth->dev, + in_be32(&((struct qe_bd __iomem *)bd)->buf), + (in_be32((u32 __iomem *)bd) & + BD_LENGTH_MASK), + DMA_TO_DEVICE); + dev_kfree_skb_any(ugeth->tx_skbuff[i][j]); + ugeth->tx_skbuff[i][j] = NULL; + } + } + + kfree(ugeth->tx_skbuff[i]); + + if (ugeth->p_tx_bd_ring[i]) { + if (ugeth->ug_info->uf_info.bd_mem_part == + MEM_PART_SYSTEM) + kfree((void *)ugeth->tx_bd_ring_offset[i]); + else if (ugeth->ug_info->uf_info.bd_mem_part == + MEM_PART_MURAM) + qe_muram_free(ugeth->tx_bd_ring_offset[i]); + ugeth->p_tx_bd_ring[i] = NULL; + } + } + +} + +static void ucc_geth_memclean(struct ucc_geth_private *ugeth) +{ if (!ugeth) return; @@ -1928,64 +1984,8 @@ static void ucc_geth_memclean(struct ucc_geth_private *ugeth) kfree(ugeth->p_init_enet_param_shadow); ugeth->p_init_enet_param_shadow = NULL; } - for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) { - bd = ugeth->p_tx_bd_ring[i]; - if (!bd) - continue; - for (j = 0; j < ugeth->ug_info->bdRingLenTx[i]; j++) { - if (ugeth->tx_skbuff[i][j]) { - dma_unmap_single(ugeth->dev, - in_be32(&((struct qe_bd __iomem *)bd)->buf), - (in_be32((u32 __iomem *)bd) & - BD_LENGTH_MASK), - DMA_TO_DEVICE); - dev_kfree_skb_any(ugeth->tx_skbuff[i][j]); - ugeth->tx_skbuff[i][j] = NULL; - } - } - - kfree(ugeth->tx_skbuff[i]); - - if (ugeth->p_tx_bd_ring[i]) { - if (ugeth->ug_info->uf_info.bd_mem_part == - MEM_PART_SYSTEM) - kfree((void 
*)ugeth->tx_bd_ring_offset[i]); - else if (ugeth->ug_info->uf_info.bd_mem_part == - MEM_PART_MURAM) - qe_muram_free(ugeth->tx_bd_ring_offset[i]); - ugeth->p_tx_bd_ring[i] = NULL; - } - } - for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) { - if (ugeth->p_rx_bd_ring[i]) { - /* Return existing data buffers in ring */ - bd = ugeth->p_rx_bd_ring[i]; - for (j = 0; j < ugeth->ug_info->bdRingLenRx[i]; j++) { - if (ugeth->rx_skbuff[i][j]) { - dma_unmap_single(ugeth->dev, - in_be32(&((struct qe_bd __iomem *)bd)->buf), - ugeth->ug_info-> - uf_info.max_rx_buf_length + - UCC_GETH_RX_DATA_BUF_ALIGNMENT, - DMA_FROM_DEVICE); - dev_kfree_skb_any( - ugeth->rx_skbuff[i][j]); - ugeth->rx_skbuff[i][j] = NULL; - } - bd += sizeof(struct qe_bd); - } - - kfree(ugeth->rx_skbuff[i]); - - if (ugeth->ug_info->uf_info.bd_mem_part == - MEM_PART_SYSTEM) - kfree((void *)ugeth->rx_bd_ring_offset[i]); - else if (ugeth->ug_info->uf_info.bd_mem_part == - MEM_PART_MURAM) - qe_muram_free(ugeth->rx_bd_ring_offset[i]); - ugeth->p_rx_bd_ring[i] = NULL; - } - } + ucc_geth_free_tx(ugeth); + ucc_geth_free_rx(ugeth); while (!list_empty(&ugeth->group_hash_q)) put_enet_addr_container(ENET_ADDR_CONT_ENTRY (dequeue(&ugeth->group_hash_q))); @@ -1996,8 +1996,6 @@ static void ucc_geth_memclean(struct ucc_geth_private *ugeth) iounmap(ugeth->ug_regs); ugeth->ug_regs = NULL; } - - skb_queue_purge(&ugeth->rx_recycle); } static void ucc_geth_set_multi(struct net_device *dev) @@ -2083,8 +2081,7 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth) if (!((uf_info->bd_mem_part == MEM_PART_SYSTEM) || (uf_info->bd_mem_part == MEM_PART_MURAM))) { if (netif_msg_probe(ugeth)) - ugeth_err("%s: Bad memory partition value.", - __func__); + pr_err("Bad memory partition value\n"); return -EINVAL; } @@ -2094,9 +2091,7 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth) (ug_info->bdRingLenRx[i] % UCC_GETH_RX_BD_RING_SIZE_ALIGNMENT)) { if (netif_msg_probe(ugeth)) - ugeth_err - ("%s: Rx BD ring length must be multiple of 4, no smaller than 8.", - __func__); + pr_err("Rx BD ring length must be multiple of 4, no smaller than 8\n"); return -EINVAL; } } @@ -2105,9 +2100,7 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth) for (i = 0; i < ug_info->numQueuesTx; i++) { if (ug_info->bdRingLenTx[i] < UCC_GETH_TX_BD_RING_SIZE_MIN) { if (netif_msg_probe(ugeth)) - ugeth_err - ("%s: Tx BD ring length must be no smaller than 2.", - __func__); + pr_err("Tx BD ring length must be no smaller than 2\n"); return -EINVAL; } } @@ -2116,23 +2109,21 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth) if ((uf_info->max_rx_buf_length == 0) || (uf_info->max_rx_buf_length % UCC_GETH_MRBLR_ALIGNMENT)) { if (netif_msg_probe(ugeth)) - ugeth_err - ("%s: max_rx_buf_length must be non-zero multiple of 128.", - __func__); + pr_err("max_rx_buf_length must be non-zero multiple of 128\n"); return -EINVAL; } /* num Tx queues */ if (ug_info->numQueuesTx > NUM_TX_QUEUES) { if (netif_msg_probe(ugeth)) - ugeth_err("%s: number of tx queues too large.", __func__); + pr_err("number of tx queues too large\n"); return -EINVAL; } /* num Rx queues */ if (ug_info->numQueuesRx > NUM_RX_QUEUES) { if (netif_msg_probe(ugeth)) - ugeth_err("%s: number of rx queues too large.", __func__); + pr_err("number of rx queues too large\n"); return -EINVAL; } @@ -2140,10 +2131,7 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth) for (i = 0; i < UCC_GETH_VLAN_PRIORITY_MAX; i++) { if (ug_info->l2qt[i] >= ug_info->numQueuesRx) { if (netif_msg_probe(ugeth)) - ugeth_err - ("%s: 
VLAN priority table entry must not be" - " larger than number of Rx queues.", - __func__); + pr_err("VLAN priority table entry must not be larger than number of Rx queues\n"); return -EINVAL; } } @@ -2152,18 +2140,14 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth) for (i = 0; i < UCC_GETH_IP_PRIORITY_MAX; i++) { if (ug_info->l3qt[i] >= ug_info->numQueuesRx) { if (netif_msg_probe(ugeth)) - ugeth_err - ("%s: IP priority table entry must not be" - " larger than number of Rx queues.", - __func__); + pr_err("IP priority table entry must not be larger than number of Rx queues\n"); return -EINVAL; } } if (ug_info->cam && !ug_info->ecamptr) { if (netif_msg_probe(ugeth)) - ugeth_err("%s: If cam mode is chosen, must supply cam ptr.", - __func__); + pr_err("If cam mode is chosen, must supply cam ptr\n"); return -EINVAL; } @@ -2171,9 +2155,7 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth) UCC_GETH_NUM_OF_STATION_ADDRESSES_1) && ug_info->rxExtendedFiltering) { if (netif_msg_probe(ugeth)) - ugeth_err("%s: Number of station addresses greater than 1 " - "not allowed in extended parsing mode.", - __func__); + pr_err("Number of station addresses greater than 1 not allowed in extended parsing mode\n"); return -EINVAL; } @@ -2187,7 +2169,7 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth) /* Initialize the general fast UCC block. */ if (ucc_fast_init(uf_info, &ugeth->uccf)) { if (netif_msg_probe(ugeth)) - ugeth_err("%s: Failed to init uccf.", __func__); + pr_err("Failed to init uccf\n"); return -ENOMEM; } @@ -2202,11 +2184,168 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth) ugeth->ug_regs = ioremap(uf_info->regs, sizeof(*ugeth->ug_regs)); if (!ugeth->ug_regs) { if (netif_msg_probe(ugeth)) - ugeth_err("%s: Failed to ioremap regs.", __func__); + pr_err("Failed to ioremap regs\n"); return -ENOMEM; } - skb_queue_head_init(&ugeth->rx_recycle); + return 0; +} + +static int ucc_geth_alloc_tx(struct ucc_geth_private *ugeth) +{ + struct ucc_geth_info *ug_info; + struct ucc_fast_info *uf_info; + int length; + u16 i, j; + u8 __iomem *bd; + + ug_info = ugeth->ug_info; + uf_info = &ug_info->uf_info; + + /* Allocate Tx bds */ + for (j = 0; j < ug_info->numQueuesTx; j++) { + /* Allocate in multiple of + UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT, + according to spec */ + length = ((ug_info->bdRingLenTx[j] * sizeof(struct qe_bd)) + / UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT) + * UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT; + if ((ug_info->bdRingLenTx[j] * sizeof(struct qe_bd)) % + UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT) + length += UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT; + if (uf_info->bd_mem_part == MEM_PART_SYSTEM) { + u32 align = 4; + if (UCC_GETH_TX_BD_RING_ALIGNMENT > 4) + align = UCC_GETH_TX_BD_RING_ALIGNMENT; + ugeth->tx_bd_ring_offset[j] = + (u32) kmalloc((u32) (length + align), GFP_KERNEL); + + if (ugeth->tx_bd_ring_offset[j] != 0) + ugeth->p_tx_bd_ring[j] = + (u8 __iomem *)((ugeth->tx_bd_ring_offset[j] + + align) & ~(align - 1)); + } else if (uf_info->bd_mem_part == MEM_PART_MURAM) { + ugeth->tx_bd_ring_offset[j] = + qe_muram_alloc(length, + UCC_GETH_TX_BD_RING_ALIGNMENT); + if (!IS_ERR_VALUE(ugeth->tx_bd_ring_offset[j])) + ugeth->p_tx_bd_ring[j] = + (u8 __iomem *) qe_muram_addr(ugeth-> + tx_bd_ring_offset[j]); + } + if (!ugeth->p_tx_bd_ring[j]) { + if (netif_msg_ifup(ugeth)) + pr_err("Can not allocate memory for Tx bd rings\n"); + return -ENOMEM; + } + /* Zero unused end of bd ring, according to spec */ + memset_io((void __iomem *)(ugeth->p_tx_bd_ring[j] + 
+ ug_info->bdRingLenTx[j] * sizeof(struct qe_bd)), 0, + length - ug_info->bdRingLenTx[j] * sizeof(struct qe_bd)); + } + + /* Init Tx bds */ + for (j = 0; j < ug_info->numQueuesTx; j++) { + /* Setup the skbuff rings */ + ugeth->tx_skbuff[j] = kmalloc(sizeof(struct sk_buff *) * + ugeth->ug_info->bdRingLenTx[j], + GFP_KERNEL); + + if (ugeth->tx_skbuff[j] == NULL) { + if (netif_msg_ifup(ugeth)) + pr_err("Could not allocate tx_skbuff\n"); + return -ENOMEM; + } + + for (i = 0; i < ugeth->ug_info->bdRingLenTx[j]; i++) + ugeth->tx_skbuff[j][i] = NULL; + + ugeth->skb_curtx[j] = ugeth->skb_dirtytx[j] = 0; + bd = ugeth->confBd[j] = ugeth->txBd[j] = ugeth->p_tx_bd_ring[j]; + for (i = 0; i < ug_info->bdRingLenTx[j]; i++) { + /* clear bd buffer */ + out_be32(&((struct qe_bd __iomem *)bd)->buf, 0); + /* set bd status and length */ + out_be32((u32 __iomem *)bd, 0); + bd += sizeof(struct qe_bd); + } + bd -= sizeof(struct qe_bd); + /* set bd status and length */ + out_be32((u32 __iomem *)bd, T_W); /* for last BD set Wrap bit */ + } + + return 0; +} + +static int ucc_geth_alloc_rx(struct ucc_geth_private *ugeth) +{ + struct ucc_geth_info *ug_info; + struct ucc_fast_info *uf_info; + int length; + u16 i, j; + u8 __iomem *bd; + + ug_info = ugeth->ug_info; + uf_info = &ug_info->uf_info; + + /* Allocate Rx bds */ + for (j = 0; j < ug_info->numQueuesRx; j++) { + length = ug_info->bdRingLenRx[j] * sizeof(struct qe_bd); + if (uf_info->bd_mem_part == MEM_PART_SYSTEM) { + u32 align = 4; + if (UCC_GETH_RX_BD_RING_ALIGNMENT > 4) + align = UCC_GETH_RX_BD_RING_ALIGNMENT; + ugeth->rx_bd_ring_offset[j] = + (u32) kmalloc((u32) (length + align), GFP_KERNEL); + if (ugeth->rx_bd_ring_offset[j] != 0) + ugeth->p_rx_bd_ring[j] = + (u8 __iomem *)((ugeth->rx_bd_ring_offset[j] + + align) & ~(align - 1)); + } else if (uf_info->bd_mem_part == MEM_PART_MURAM) { + ugeth->rx_bd_ring_offset[j] = + qe_muram_alloc(length, + UCC_GETH_RX_BD_RING_ALIGNMENT); + if (!IS_ERR_VALUE(ugeth->rx_bd_ring_offset[j])) + ugeth->p_rx_bd_ring[j] = + (u8 __iomem *) qe_muram_addr(ugeth-> + rx_bd_ring_offset[j]); + } + if (!ugeth->p_rx_bd_ring[j]) { + if (netif_msg_ifup(ugeth)) + pr_err("Can not allocate memory for Rx bd rings\n"); + return -ENOMEM; + } + } + + /* Init Rx bds */ + for (j = 0; j < ug_info->numQueuesRx; j++) { + /* Setup the skbuff rings */ + ugeth->rx_skbuff[j] = kmalloc(sizeof(struct sk_buff *) * + ugeth->ug_info->bdRingLenRx[j], + GFP_KERNEL); + + if (ugeth->rx_skbuff[j] == NULL) { + if (netif_msg_ifup(ugeth)) + pr_err("Could not allocate rx_skbuff\n"); + return -ENOMEM; + } + + for (i = 0; i < ugeth->ug_info->bdRingLenRx[j]; i++) + ugeth->rx_skbuff[j][i] = NULL; + + ugeth->skb_currx[j] = 0; + bd = ugeth->rxBd[j] = ugeth->p_rx_bd_ring[j]; + for (i = 0; i < ug_info->bdRingLenRx[j]; i++) { + /* set bd status and length */ + out_be32((u32 __iomem *)bd, R_I); + /* clear bd buffer */ + out_be32(&((struct qe_bd __iomem *)bd)->buf, 0); + bd += sizeof(struct qe_bd); + } + bd -= sizeof(struct qe_bd); + /* set bd status and length */ + out_be32((u32 __iomem *)bd, R_W); /* for last BD set Wrap bit */ + } return 0; } @@ -2223,11 +2362,10 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) int ret_val = -EINVAL; u32 remoder = UCC_GETH_REMODER_INIT; u32 init_enet_pram_offset, cecr_subblock, command; - u32 ifstat, i, j, size, l2qt, l3qt, length; + u32 ifstat, i, j, size, l2qt, l3qt; u16 temoder = UCC_GETH_TEMODER_INIT; u16 test; u8 function_code = 0; - u8 __iomem *bd; u8 __iomem *endOfRing; u8 numThreadsRxNumerical, numThreadsTxNumerical; @@ 
-2256,8 +2394,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) break; default: if (netif_msg_ifup(ugeth)) - ugeth_err("%s: Bad number of Rx threads value.", - __func__); + pr_err("Bad number of Rx threads value\n"); return -EINVAL; break; } @@ -2280,8 +2417,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) break; default: if (netif_msg_ifup(ugeth)) - ugeth_err("%s: Bad number of Tx threads value.", - __func__); + pr_err("Bad number of Tx threads value\n"); return -EINVAL; break; } @@ -2330,8 +2466,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) &ug_regs->ipgifg); if (ret_val != 0) { if (netif_msg_ifup(ugeth)) - ugeth_err("%s: IPGIFG initialization parameter too large.", - __func__); + pr_err("IPGIFG initialization parameter too large\n"); return ret_val; } @@ -2347,8 +2482,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) &ug_regs->hafdup); if (ret_val != 0) { if (netif_msg_ifup(ugeth)) - ugeth_err("%s: Half Duplex initialization parameter too large.", - __func__); + pr_err("Half Duplex initialization parameter too large\n"); return ret_val; } @@ -2367,142 +2501,13 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE), 0, &uf_regs->upsmr, &ug_regs->uescr); - /* Allocate Tx bds */ - for (j = 0; j < ug_info->numQueuesTx; j++) { - /* Allocate in multiple of - UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT, - according to spec */ - length = ((ug_info->bdRingLenTx[j] * sizeof(struct qe_bd)) - / UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT) - * UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT; - if ((ug_info->bdRingLenTx[j] * sizeof(struct qe_bd)) % - UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT) - length += UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT; - if (uf_info->bd_mem_part == MEM_PART_SYSTEM) { - u32 align = 4; - if (UCC_GETH_TX_BD_RING_ALIGNMENT > 4) - align = UCC_GETH_TX_BD_RING_ALIGNMENT; - ugeth->tx_bd_ring_offset[j] = - (u32) kmalloc((u32) (length + align), GFP_KERNEL); - - if (ugeth->tx_bd_ring_offset[j] != 0) - ugeth->p_tx_bd_ring[j] = - (u8 __iomem *)((ugeth->tx_bd_ring_offset[j] + - align) & ~(align - 1)); - } else if (uf_info->bd_mem_part == MEM_PART_MURAM) { - ugeth->tx_bd_ring_offset[j] = - qe_muram_alloc(length, - UCC_GETH_TX_BD_RING_ALIGNMENT); - if (!IS_ERR_VALUE(ugeth->tx_bd_ring_offset[j])) - ugeth->p_tx_bd_ring[j] = - (u8 __iomem *) qe_muram_addr(ugeth-> - tx_bd_ring_offset[j]); - } - if (!ugeth->p_tx_bd_ring[j]) { - if (netif_msg_ifup(ugeth)) - ugeth_err - ("%s: Can not allocate memory for Tx bd rings.", - __func__); - return -ENOMEM; - } - /* Zero unused end of bd ring, according to spec */ - memset_io((void __iomem *)(ugeth->p_tx_bd_ring[j] + - ug_info->bdRingLenTx[j] * sizeof(struct qe_bd)), 0, - length - ug_info->bdRingLenTx[j] * sizeof(struct qe_bd)); - } - - /* Allocate Rx bds */ - for (j = 0; j < ug_info->numQueuesRx; j++) { - length = ug_info->bdRingLenRx[j] * sizeof(struct qe_bd); - if (uf_info->bd_mem_part == MEM_PART_SYSTEM) { - u32 align = 4; - if (UCC_GETH_RX_BD_RING_ALIGNMENT > 4) - align = UCC_GETH_RX_BD_RING_ALIGNMENT; - ugeth->rx_bd_ring_offset[j] = - (u32) kmalloc((u32) (length + align), GFP_KERNEL); - if (ugeth->rx_bd_ring_offset[j] != 0) - ugeth->p_rx_bd_ring[j] = - (u8 __iomem *)((ugeth->rx_bd_ring_offset[j] + - align) & ~(align - 1)); - } else if (uf_info->bd_mem_part == MEM_PART_MURAM) { - ugeth->rx_bd_ring_offset[j] = - qe_muram_alloc(length, - UCC_GETH_RX_BD_RING_ALIGNMENT); - if (!IS_ERR_VALUE(ugeth->rx_bd_ring_offset[j])) - ugeth->p_rx_bd_ring[j] = 
- (u8 __iomem *) qe_muram_addr(ugeth-> - rx_bd_ring_offset[j]); - } - if (!ugeth->p_rx_bd_ring[j]) { - if (netif_msg_ifup(ugeth)) - ugeth_err - ("%s: Can not allocate memory for Rx bd rings.", - __func__); - return -ENOMEM; - } - } - - /* Init Tx bds */ - for (j = 0; j < ug_info->numQueuesTx; j++) { - /* Setup the skbuff rings */ - ugeth->tx_skbuff[j] = kmalloc(sizeof(struct sk_buff *) * - ugeth->ug_info->bdRingLenTx[j], - GFP_KERNEL); - - if (ugeth->tx_skbuff[j] == NULL) { - if (netif_msg_ifup(ugeth)) - ugeth_err("%s: Could not allocate tx_skbuff", - __func__); - return -ENOMEM; - } - - for (i = 0; i < ugeth->ug_info->bdRingLenTx[j]; i++) - ugeth->tx_skbuff[j][i] = NULL; - - ugeth->skb_curtx[j] = ugeth->skb_dirtytx[j] = 0; - bd = ugeth->confBd[j] = ugeth->txBd[j] = ugeth->p_tx_bd_ring[j]; - for (i = 0; i < ug_info->bdRingLenTx[j]; i++) { - /* clear bd buffer */ - out_be32(&((struct qe_bd __iomem *)bd)->buf, 0); - /* set bd status and length */ - out_be32((u32 __iomem *)bd, 0); - bd += sizeof(struct qe_bd); - } - bd -= sizeof(struct qe_bd); - /* set bd status and length */ - out_be32((u32 __iomem *)bd, T_W); /* for last BD set Wrap bit */ - } - - /* Init Rx bds */ - for (j = 0; j < ug_info->numQueuesRx; j++) { - /* Setup the skbuff rings */ - ugeth->rx_skbuff[j] = kmalloc(sizeof(struct sk_buff *) * - ugeth->ug_info->bdRingLenRx[j], - GFP_KERNEL); - - if (ugeth->rx_skbuff[j] == NULL) { - if (netif_msg_ifup(ugeth)) - ugeth_err("%s: Could not allocate rx_skbuff", - __func__); - return -ENOMEM; - } - - for (i = 0; i < ugeth->ug_info->bdRingLenRx[j]; i++) - ugeth->rx_skbuff[j][i] = NULL; + ret_val = ucc_geth_alloc_tx(ugeth); + if (ret_val != 0) + return ret_val; - ugeth->skb_currx[j] = 0; - bd = ugeth->rxBd[j] = ugeth->p_rx_bd_ring[j]; - for (i = 0; i < ug_info->bdRingLenRx[j]; i++) { - /* set bd status and length */ - out_be32((u32 __iomem *)bd, R_I); - /* clear bd buffer */ - out_be32(&((struct qe_bd __iomem *)bd)->buf, 0); - bd += sizeof(struct qe_bd); - } - bd -= sizeof(struct qe_bd); - /* set bd status and length */ - out_be32((u32 __iomem *)bd, R_W); /* for last BD set Wrap bit */ - } + ret_val = ucc_geth_alloc_rx(ugeth); + if (ret_val != 0) + return ret_val; /* * Global PRAM @@ -2514,9 +2519,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) UCC_GETH_TX_GLOBAL_PRAM_ALIGNMENT); if (IS_ERR_VALUE(ugeth->tx_glbl_pram_offset)) { if (netif_msg_ifup(ugeth)) - ugeth_err - ("%s: Can not allocate DPRAM memory for p_tx_glbl_pram.", - __func__); + pr_err("Can not allocate DPRAM memory for p_tx_glbl_pram\n"); return -ENOMEM; } ugeth->p_tx_glbl_pram = @@ -2536,9 +2539,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) UCC_GETH_THREAD_DATA_ALIGNMENT); if (IS_ERR_VALUE(ugeth->thread_dat_tx_offset)) { if (netif_msg_ifup(ugeth)) - ugeth_err - ("%s: Can not allocate DPRAM memory for p_thread_data_tx.", - __func__); + pr_err("Can not allocate DPRAM memory for p_thread_data_tx\n"); return -ENOMEM; } @@ -2565,9 +2566,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) UCC_GETH_SEND_QUEUE_QUEUE_DESCRIPTOR_ALIGNMENT); if (IS_ERR_VALUE(ugeth->send_q_mem_reg_offset)) { if (netif_msg_ifup(ugeth)) - ugeth_err - ("%s: Can not allocate DPRAM memory for p_send_q_mem_reg.", - __func__); + pr_err("Can not allocate DPRAM memory for p_send_q_mem_reg\n"); return -ENOMEM; } @@ -2608,9 +2607,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) UCC_GETH_SCHEDULER_ALIGNMENT); if (IS_ERR_VALUE(ugeth->scheduler_offset)) { if (netif_msg_ifup(ugeth)) - ugeth_err - ("%s: Can not 
allocate DPRAM memory for p_scheduler.", - __func__); + pr_err("Can not allocate DPRAM memory for p_scheduler\n"); return -ENOMEM; } @@ -2657,10 +2654,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) UCC_GETH_TX_STATISTICS_ALIGNMENT); if (IS_ERR_VALUE(ugeth->tx_fw_statistics_pram_offset)) { if (netif_msg_ifup(ugeth)) - ugeth_err - ("%s: Can not allocate DPRAM memory for" - " p_tx_fw_statistics_pram.", - __func__); + pr_err("Can not allocate DPRAM memory for p_tx_fw_statistics_pram\n"); return -ENOMEM; } ugeth->p_tx_fw_statistics_pram = @@ -2697,9 +2691,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) UCC_GETH_RX_GLOBAL_PRAM_ALIGNMENT); if (IS_ERR_VALUE(ugeth->rx_glbl_pram_offset)) { if (netif_msg_ifup(ugeth)) - ugeth_err - ("%s: Can not allocate DPRAM memory for p_rx_glbl_pram.", - __func__); + pr_err("Can not allocate DPRAM memory for p_rx_glbl_pram\n"); return -ENOMEM; } ugeth->p_rx_glbl_pram = @@ -2718,9 +2710,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) UCC_GETH_THREAD_DATA_ALIGNMENT); if (IS_ERR_VALUE(ugeth->thread_dat_rx_offset)) { if (netif_msg_ifup(ugeth)) - ugeth_err - ("%s: Can not allocate DPRAM memory for p_thread_data_rx.", - __func__); + pr_err("Can not allocate DPRAM memory for p_thread_data_rx\n"); return -ENOMEM; } @@ -2741,9 +2731,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) UCC_GETH_RX_STATISTICS_ALIGNMENT); if (IS_ERR_VALUE(ugeth->rx_fw_statistics_pram_offset)) { if (netif_msg_ifup(ugeth)) - ugeth_err - ("%s: Can not allocate DPRAM memory for" - " p_rx_fw_statistics_pram.", __func__); + pr_err("Can not allocate DPRAM memory for p_rx_fw_statistics_pram\n"); return -ENOMEM; } ugeth->p_rx_fw_statistics_pram = @@ -2763,9 +2751,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) + 4, UCC_GETH_RX_INTERRUPT_COALESCING_ALIGNMENT); if (IS_ERR_VALUE(ugeth->rx_irq_coalescing_tbl_offset)) { if (netif_msg_ifup(ugeth)) - ugeth_err - ("%s: Can not allocate DPRAM memory for" - " p_rx_irq_coalescing_tbl.", __func__); + pr_err("Can not allocate DPRAM memory for p_rx_irq_coalescing_tbl\n"); return -ENOMEM; } @@ -2831,9 +2817,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) UCC_GETH_RX_BD_QUEUES_ALIGNMENT); if (IS_ERR_VALUE(ugeth->rx_bd_qs_tbl_offset)) { if (netif_msg_ifup(ugeth)) - ugeth_err - ("%s: Can not allocate DPRAM memory for p_rx_bd_qs_tbl.", - __func__); + pr_err("Can not allocate DPRAM memory for p_rx_bd_qs_tbl\n"); return -ENOMEM; } @@ -2908,8 +2892,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) if (ug_info->rxExtendedFiltering) { if (!ug_info->extendedFilteringChainPointer) { if (netif_msg_ifup(ugeth)) - ugeth_err("%s: Null Extended Filtering Chain Pointer.", - __func__); + pr_err("Null Extended Filtering Chain Pointer\n"); return -EINVAL; } @@ -2920,9 +2903,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) UCC_GETH_RX_EXTENDED_FILTERING_GLOBAL_PARAMETERS_ALIGNMENT); if (IS_ERR_VALUE(ugeth->exf_glbl_param_offset)) { if (netif_msg_ifup(ugeth)) - ugeth_err - ("%s: Can not allocate DPRAM memory for" - " p_exf_glbl_param.", __func__); + pr_err("Can not allocate DPRAM memory for p_exf_glbl_param\n"); return -ENOMEM; } @@ -2967,9 +2948,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) if (!(ugeth->p_init_enet_param_shadow = kmalloc(sizeof(struct ucc_geth_init_pram), GFP_KERNEL))) { if (netif_msg_ifup(ugeth)) - ugeth_err - ("%s: Can not allocate memory for" - " p_UccInitEnetParamShadows.", __func__); + pr_err("Can not allocate memory for 
p_UccInitEnetParamShadows\n"); return -ENOMEM; } /* Zero out *p_init_enet_param_shadow */ @@ -3002,8 +2981,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) (ug_info->largestexternallookupkeysize != QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_16_BYTES)) { if (netif_msg_ifup(ugeth)) - ugeth_err("%s: Invalid largest External Lookup Key Size.", - __func__); + pr_err("Invalid largest External Lookup Key Size\n"); return -EINVAL; } ugeth->p_init_enet_param_shadow->largestexternallookupkeysize = @@ -3012,11 +2990,11 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) if (ug_info->rxExtendedFiltering) { size += THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING; if (ug_info->largestexternallookupkeysize == - QE_FLTR_TABLE_LOOKUP_KEY_SIZE_8_BYTES) + QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_8_BYTES) size += THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8; if (ug_info->largestexternallookupkeysize == - QE_FLTR_TABLE_LOOKUP_KEY_SIZE_16_BYTES) + QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_16_BYTES) size += THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16; } @@ -3028,8 +3006,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) , size, UCC_GETH_THREAD_RX_PRAM_ALIGNMENT, ug_info->riscRx, 1)) != 0) { if (netif_msg_ifup(ugeth)) - ugeth_err("%s: Can not fill p_init_enet_param_shadow.", - __func__); + pr_err("Can not fill p_init_enet_param_shadow\n"); return ret_val; } @@ -3043,8 +3020,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) UCC_GETH_THREAD_TX_PRAM_ALIGNMENT, ug_info->riscTx, 0)) != 0) { if (netif_msg_ifup(ugeth)) - ugeth_err("%s: Can not fill p_init_enet_param_shadow.", - __func__); + pr_err("Can not fill p_init_enet_param_shadow\n"); return ret_val; } @@ -3052,8 +3028,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) for (i = 0; i < ug_info->numQueuesRx; i++) { if ((ret_val = rx_bd_buffer_set(ugeth, (u8) i)) != 0) { if (netif_msg_ifup(ugeth)) - ugeth_err("%s: Can not fill Rx bds with buffers.", - __func__); + pr_err("Can not fill Rx bds with buffers\n"); return ret_val; } } @@ -3062,9 +3037,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) init_enet_pram_offset = qe_muram_alloc(sizeof(struct ucc_geth_init_pram), 4); if (IS_ERR_VALUE(init_enet_pram_offset)) { if (netif_msg_ifup(ugeth)) - ugeth_err - ("%s: Can not allocate DPRAM memory for p_init_enet_pram.", - __func__); + pr_err("Can not allocate DPRAM memory for p_init_enet_pram\n"); return -ENOMEM; } p_init_enet_pram = @@ -3213,14 +3186,9 @@ static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit (!(bd_status & (R_F | R_L))) || (bd_status & R_ERRORS_FATAL)) { if (netif_msg_rx_err(ugeth)) - ugeth_err("%s, %d: ERROR!!! skb - 0x%08x", - __func__, __LINE__, (u32) skb); - if (skb) { - skb->data = skb->head + NET_SKB_PAD; - skb->len = 0; - skb_reset_tail_pointer(skb); - __skb_queue_head(&ugeth->rx_recycle, skb); - } + pr_err("%d: ERROR!!! 
skb - 0x%08x\n", + __LINE__, (u32)skb); + dev_kfree_skb(skb); ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]] = NULL; dev->stats.rx_dropped++; @@ -3242,7 +3210,7 @@ static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit skb = get_new_skb(ugeth, bd); if (!skb) { if (netif_msg_rx_err(ugeth)) - ugeth_warn("%s: No Rx Data Buffer", __func__); + pr_warn("No Rx Data Buffer\n"); dev->stats.rx_dropped++; break; } @@ -3290,13 +3258,7 @@ static int ucc_geth_tx(struct net_device *dev, u8 txQ) dev->stats.tx_packets++; - if (skb_queue_len(&ugeth->rx_recycle) < RX_BD_RING_LEN && - skb_recycle_check(skb, - ugeth->ug_info->uf_info.max_rx_buf_length + - UCC_GETH_RX_DATA_BUF_ALIGNMENT)) - __skb_queue_head(&ugeth->rx_recycle, skb); - else - dev_kfree_skb(skb); + dev_consume_skb_any(skb); ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]] = NULL; ugeth->skb_dirtytx[txQ] = @@ -3439,25 +3401,19 @@ static int ucc_geth_init_mac(struct ucc_geth_private *ugeth) err = ucc_struct_init(ugeth); if (err) { - if (netif_msg_ifup(ugeth)) - ugeth_err("%s: Cannot configure internal struct, " - "aborting.", dev->name); + netif_err(ugeth, ifup, dev, "Cannot configure internal struct, aborting\n"); goto err; } err = ucc_geth_startup(ugeth); if (err) { - if (netif_msg_ifup(ugeth)) - ugeth_err("%s: Cannot configure net device, aborting.", - dev->name); + netif_err(ugeth, ifup, dev, "Cannot configure net device, aborting\n"); goto err; } err = adjust_enet_interface(ugeth); if (err) { - if (netif_msg_ifup(ugeth)) - ugeth_err("%s: Cannot configure net device, aborting.", - dev->name); + netif_err(ugeth, ifup, dev, "Cannot configure net device, aborting\n"); goto err; } @@ -3474,8 +3430,7 @@ static int ucc_geth_init_mac(struct ucc_geth_private *ugeth) err = ugeth_enable(ugeth, COMM_DIR_RX_AND_TX); if (err) { - if (netif_msg_ifup(ugeth)) - ugeth_err("%s: Cannot enable net device, aborting.", dev->name); + netif_err(ugeth, ifup, dev, "Cannot enable net device, aborting\n"); goto err; } @@ -3496,35 +3451,27 @@ static int ucc_geth_open(struct net_device *dev) /* Test station address */ if (dev->dev_addr[0] & ENET_GROUP_ADDR) { - if (netif_msg_ifup(ugeth)) - ugeth_err("%s: Multicast address used for station " - "address - is this what you wanted?", - __func__); + netif_err(ugeth, ifup, dev, + "Multicast address used for station address - is this what you wanted?\n"); return -EINVAL; } err = init_phy(dev); if (err) { - if (netif_msg_ifup(ugeth)) - ugeth_err("%s: Cannot initialize PHY, aborting.", - dev->name); + netif_err(ugeth, ifup, dev, "Cannot initialize PHY, aborting\n"); return err; } err = ucc_geth_init_mac(ugeth); if (err) { - if (netif_msg_ifup(ugeth)) - ugeth_err("%s: Cannot initialize MAC, aborting.", - dev->name); + netif_err(ugeth, ifup, dev, "Cannot initialize MAC, aborting\n"); goto err; } err = request_irq(ugeth->ug_info->uf_info.irq, ucc_geth_irq_handler, 0, "UCC Geth", dev); if (err) { - if (netif_msg_ifup(ugeth)) - ugeth_err("%s: Cannot get IRQ for net device, aborting.", - dev->name); + netif_err(ugeth, ifup, dev, "Cannot get IRQ for net device, aborting\n"); goto err; } @@ -3611,7 +3558,7 @@ static void ucc_geth_timeout(struct net_device *dev) static int ucc_geth_suspend(struct platform_device *ofdev, pm_message_t state) { - struct net_device *ndev = dev_get_drvdata(&ofdev->dev); + struct net_device *ndev = platform_get_drvdata(ofdev); struct ucc_geth_private *ugeth = netdev_priv(ndev); if (!netif_running(ndev)) @@ -3639,7 +3586,7 @@ static int ucc_geth_suspend(struct platform_device *ofdev, 
pm_message_t state) static int ucc_geth_resume(struct platform_device *ofdev) { - struct net_device *ndev = dev_get_drvdata(&ofdev->dev); + struct net_device *ndev = platform_get_drvdata(ofdev); struct ucc_geth_private *ugeth = netdev_priv(ndev); int err; @@ -3662,8 +3609,7 @@ static int ucc_geth_resume(struct platform_device *ofdev) err = ucc_geth_init_mac(ugeth); if (err) { - ugeth_err("%s: Cannot initialize MAC, aborting.", - ndev->name); + netdev_err(ndev, "Cannot initialize MAC, aborting\n"); return err; } } @@ -3783,8 +3729,7 @@ static int ucc_geth_probe(struct platform_device* ofdev) ug_info = &ugeth_info[ucc_num]; if (ug_info == NULL) { if (netif_msg_probe(&debug)) - ugeth_err("%s: [%d] Missing additional data!", - __func__, ucc_num); + pr_err("[%d] Missing additional data!\n", ucc_num); return -ENODEV; } @@ -3795,8 +3740,7 @@ static int ucc_geth_probe(struct platform_device* ofdev) ug_info->uf_info.rx_clock = qe_clock_source(sprop); if ((ug_info->uf_info.rx_clock < QE_CLK_NONE) || (ug_info->uf_info.rx_clock > QE_CLK24)) { - printk(KERN_ERR - "ucc_geth: invalid rx-clock-name property\n"); + pr_err("invalid rx-clock-name property\n"); return -EINVAL; } } else { @@ -3804,13 +3748,11 @@ static int ucc_geth_probe(struct platform_device* ofdev) if (!prop) { /* If both rx-clock-name and rx-clock are missing, we want to tell people to use rx-clock-name. */ - printk(KERN_ERR - "ucc_geth: missing rx-clock-name property\n"); + pr_err("missing rx-clock-name property\n"); return -EINVAL; } if ((*prop < QE_CLK_NONE) || (*prop > QE_CLK24)) { - printk(KERN_ERR - "ucc_geth: invalid rx-clock propperty\n"); + pr_err("invalid rx-clock property\n"); return -EINVAL; } ug_info->uf_info.rx_clock = *prop; @@ -3821,20 +3763,17 @@ static int ucc_geth_probe(struct platform_device* ofdev) ug_info->uf_info.tx_clock = qe_clock_source(sprop); if ((ug_info->uf_info.tx_clock < QE_CLK_NONE) || (ug_info->uf_info.tx_clock > QE_CLK24)) { - printk(KERN_ERR - "ucc_geth: invalid tx-clock-name property\n"); + pr_err("invalid tx-clock-name property\n"); return -EINVAL; } } else { prop = of_get_property(np, "tx-clock", NULL); if (!prop) { - printk(KERN_ERR - "ucc_geth: missing tx-clock-name property\n"); + pr_err("missing tx-clock-name property\n"); return -EINVAL; } if ((*prop < QE_CLK_NONE) || (*prop > QE_CLK24)) { - printk(KERN_ERR - "ucc_geth: invalid tx-clock property\n"); + pr_err("invalid tx-clock property\n"); return -EINVAL; } ug_info->uf_info.tx_clock = *prop; @@ -3848,6 +3787,17 @@ static int ucc_geth_probe(struct platform_device* ofdev) ug_info->uf_info.irq = irq_of_parse_and_map(np, 0); ug_info->phy_node = of_parse_phandle(np, "phy-handle", 0); + if (!ug_info->phy_node) { + /* In the case of a fixed PHY, the DT node associated + * to the PHY is the Ethernet MAC DT node. + */ + if (of_phy_is_fixed_link(np)) { + err = of_phy_register_fixed_link(np); + if (err) + return err; + } + ug_info->phy_node = np; + } /* Find the TBI PHY node.
If it's not there, we don't support SGMII */ ug_info->tbi_node = of_parse_phandle(np, "tbi-handle", 0); @@ -3885,6 +3835,8 @@ static int ucc_geth_probe(struct platform_device* ofdev) } if (max_speed == SPEED_1000) { + unsigned int snums = qe_get_num_of_snums(); + /* configure muram FIFOs for gigabit operation */ ug_info->uf_info.urfs = UCC_GETH_URFS_GIGA_INIT; ug_info->uf_info.urfet = UCC_GETH_URFET_GIGA_INIT; @@ -3894,18 +3846,18 @@ static int ucc_geth_probe(struct platform_device* ofdev) ug_info->uf_info.utftt = UCC_GETH_UTFTT_GIGA_INIT; ug_info->numThreadsTx = UCC_GETH_NUM_OF_THREADS_4; - /* If QE's snum number is 46 which means we need to support + /* If QE's snum number is 46/76 which means we need to support * 4 UECs at 1000Base-T simultaneously, we need to allocate * more Threads to Rx. */ - if (qe_get_num_of_snums() == 46) + if ((snums == 76) || (snums == 46)) ug_info->numThreadsRx = UCC_GETH_NUM_OF_THREADS_6; else ug_info->numThreadsRx = UCC_GETH_NUM_OF_THREADS_4; } if (netif_msg_probe(&debug)) - printk(KERN_INFO "ucc_geth: UCC%1d at 0x%8x (irq = %d)\n", + pr_info("UCC%1d at 0x%8x (irq = %d)\n", ug_info->uf_info.ucc_num + 1, ug_info->uf_info.regs, ug_info->uf_info.irq); @@ -3944,15 +3896,15 @@ static int ucc_geth_probe(struct platform_device* ofdev) err = register_netdev(dev); if (err) { if (netif_msg_probe(ugeth)) - ugeth_err("%s: Cannot register net device, aborting.", - dev->name); + pr_err("%s: Cannot register net device, aborting\n", + dev->name); free_netdev(dev); return err; } mac_addr = of_get_mac_address(np); if (mac_addr) - memcpy(dev->dev_addr, mac_addr, 6); + memcpy(dev->dev_addr, mac_addr, ETH_ALEN); ugeth->ug_info = ug_info; ugeth->dev = device; @@ -3964,14 +3916,12 @@ static int ucc_geth_probe(struct platform_device* ofdev) static int ucc_geth_remove(struct platform_device* ofdev) { - struct device *device = &ofdev->dev; - struct net_device *dev = dev_get_drvdata(device); + struct net_device *dev = platform_get_drvdata(ofdev); struct ucc_geth_private *ugeth = netdev_priv(dev); unregister_netdev(dev); free_netdev(dev); ucc_geth_memclean(ugeth); - dev_set_drvdata(device, NULL); return 0; } @@ -4003,7 +3953,7 @@ static int __init ucc_geth_init(void) int i, ret; if (netif_msg_drv(&debug)) - printk(KERN_INFO "ucc_geth: " DRV_DESC "\n"); + pr_info(DRV_DESC "\n"); for (i = 0; i < 8; i++) memcpy(&(ugeth_info[i]), &ugeth_primary_info, sizeof(ugeth_primary_info)); diff --git a/drivers/net/ethernet/freescale/ucc_geth.h b/drivers/net/ethernet/freescale/ucc_geth.h index 2e395a2566b..75f337163ce 100644 --- a/drivers/net/ethernet/freescale/ucc_geth.h +++ b/drivers/net/ethernet/freescale/ucc_geth.h @@ -877,7 +877,7 @@ struct ucc_geth_hardware_statistics { /* Driver definitions */ #define TX_BD_RING_LEN 0x10 -#define RX_BD_RING_LEN 0x10 +#define RX_BD_RING_LEN 0x20 #define TX_RING_MOD_MASK(size) (size-1) #define RX_RING_MOD_MASK(size) (size-1) @@ -1214,8 +1214,6 @@ struct ucc_geth_private { /* index of the first skb which hasn't been transmitted yet. 
*/ u16 skb_dirtytx[NUM_TX_QUEUES]; - struct sk_buff_head rx_recycle; - struct ugeth_mii_info *mii_info; struct phy_device *phydev; phy_interface_t phy_interface; diff --git a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c index a97257f91a3..cc83350d56b 100644 --- a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c +++ b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c @@ -16,7 +16,6 @@ */ #include <linux/kernel.h> -#include <linux/init.h> #include <linux/errno.h> #include <linux/stddef.h> #include <linux/interrupt.h> @@ -38,7 +37,7 @@ #include "ucc_geth.h" -static char hw_stat_gstrings[][ETH_GSTRING_LEN] = { +static const char hw_stat_gstrings[][ETH_GSTRING_LEN] = { "tx-64-frames", "tx-65-127-frames", "tx-128-255-frames", @@ -59,7 +58,7 @@ static char hw_stat_gstrings[][ETH_GSTRING_LEN] = { "rx-dropped-frames", }; -static char tx_fw_stat_gstrings[][ETH_GSTRING_LEN] = { +static const char tx_fw_stat_gstrings[][ETH_GSTRING_LEN] = { "tx-single-collision", "tx-multiple-collision", "tx-late-collsion", @@ -74,7 +73,7 @@ static char tx_fw_stat_gstrings[][ETH_GSTRING_LEN] = { "tx-jumbo-frames", }; -static char rx_fw_stat_gstrings[][ETH_GSTRING_LEN] = { +static const char rx_fw_stat_gstrings[][ETH_GSTRING_LEN] = { "rx-crc-errors", "rx-alignment-errors", "rx-in-range-length-errors", @@ -160,8 +159,7 @@ uec_set_pauseparam(struct net_device *netdev, if (ugeth->phydev->autoneg) { if (netif_running(netdev)) { /* FIXME: automatically restart */ - printk(KERN_INFO - "Please re-open the interface.\n"); + netdev_info(netdev, "Please re-open the interface\n"); } } else { struct ucc_geth_info *ug_info = ugeth->ug_info; @@ -240,18 +238,18 @@ uec_set_ringparam(struct net_device *netdev, int queue = 0, ret = 0; if (ring->rx_pending < UCC_GETH_RX_BD_RING_SIZE_MIN) { - printk("%s: RxBD ring size must be no smaller than %d.\n", - netdev->name, UCC_GETH_RX_BD_RING_SIZE_MIN); + netdev_info(netdev, "RxBD ring size must be no smaller than %d\n", + UCC_GETH_RX_BD_RING_SIZE_MIN); return -EINVAL; } if (ring->rx_pending % UCC_GETH_RX_BD_RING_SIZE_ALIGNMENT) { - printk("%s: RxBD ring size must be multiple of %d.\n", - netdev->name, UCC_GETH_RX_BD_RING_SIZE_ALIGNMENT); + netdev_info(netdev, "RxBD ring size must be multiple of %d\n", + UCC_GETH_RX_BD_RING_SIZE_ALIGNMENT); return -EINVAL; } if (ring->tx_pending < UCC_GETH_TX_BD_RING_SIZE_MIN) { - printk("%s: TxBD ring size must be no smaller than %d.\n", - netdev->name, UCC_GETH_TX_BD_RING_SIZE_MIN); + netdev_info(netdev, "TxBD ring size must be no smaller than %d\n", + UCC_GETH_TX_BD_RING_SIZE_MIN); return -EINVAL; } @@ -260,8 +258,7 @@ uec_set_ringparam(struct net_device *netdev, if (netif_running(netdev)) { /* FIXME: restart automatically */ - printk(KERN_INFO - "Please re-open the interface.\n"); + netdev_info(netdev, "Please re-open the interface\n"); } return ret; @@ -350,10 +347,10 @@ static void uec_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) { - strncpy(drvinfo->driver, DRV_NAME, 32); - strncpy(drvinfo->version, DRV_VERSION, 32); - strncpy(drvinfo->fw_version, "N/A", 32); - strncpy(drvinfo->bus_info, "QUICC ENGINE", 32); + strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version)); + strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version)); + strlcpy(drvinfo->bus_info, "QUICC ENGINE", sizeof(drvinfo->bus_info)); drvinfo->eedump_len = 0; drvinfo->regdump_len = uec_get_regs_len(netdev); } @@ -415,9 +412,10 @@ 
static const struct ethtool_ops uec_ethtool_ops = { .get_ethtool_stats = uec_get_ethtool_stats, .get_wol = uec_get_wol, .set_wol = uec_set_wol, + .get_ts_info = ethtool_op_get_ts_info, }; void uec_set_ethtool_ops(struct net_device *netdev) { - SET_ETHTOOL_OPS(netdev, &uec_ethtool_ops); + netdev->ethtool_ops = &uec_ethtool_ops; } diff --git a/drivers/net/ethernet/freescale/xgmac_mdio.c b/drivers/net/ethernet/freescale/xgmac_mdio.c new file mode 100644 index 00000000000..0c9d55c862a --- /dev/null +++ b/drivers/net/ethernet/freescale/xgmac_mdio.c @@ -0,0 +1,277 @@ +/* + * QorIQ 10G MDIO Controller + * + * Copyright 2012 Freescale Semiconductor, Inc. + * + * Authors: Andy Fleming <afleming@freescale.com> + * Timur Tabi <timur@freescale.com> + * + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any + * kind, whether express or implied. + */ + +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/interrupt.h> +#include <linux/module.h> +#include <linux/phy.h> +#include <linux/mdio.h> +#include <linux/of_address.h> +#include <linux/of_platform.h> +#include <linux/of_mdio.h> + +/* Number of microseconds to wait for a register to respond */ +#define TIMEOUT 1000 + +struct tgec_mdio_controller { + __be32 reserved[12]; + __be32 mdio_stat; /* MDIO configuration and status */ + __be32 mdio_ctl; /* MDIO control */ + __be32 mdio_data; /* MDIO data */ + __be32 mdio_addr; /* MDIO address */ +} __packed; + +#define MDIO_STAT_CLKDIV(x) (((x>>1) & 0xff) << 8) +#define MDIO_STAT_BSY (1 << 0) +#define MDIO_STAT_RD_ER (1 << 1) +#define MDIO_CTL_DEV_ADDR(x) (x & 0x1f) +#define MDIO_CTL_PORT_ADDR(x) ((x & 0x1f) << 5) +#define MDIO_CTL_PRE_DIS (1 << 10) +#define MDIO_CTL_SCAN_EN (1 << 11) +#define MDIO_CTL_POST_INC (1 << 14) +#define MDIO_CTL_READ (1 << 15) + +#define MDIO_DATA(x) (x & 0xffff) +#define MDIO_DATA_BSY (1 << 31) + +/* + * Wait until the MDIO bus is free + */ +static int xgmac_wait_until_free(struct device *dev, + struct tgec_mdio_controller __iomem *regs) +{ + uint32_t status; + + /* Wait till the bus is free */ + status = spin_event_timeout( + !((in_be32(&regs->mdio_stat)) & MDIO_STAT_BSY), TIMEOUT, 0); + if (!status) { + dev_err(dev, "timeout waiting for bus to be free\n"); + return -ETIMEDOUT; + } + + return 0; +} + +/* + * Wait till the MDIO read or write operation is complete + */ +static int xgmac_wait_until_done(struct device *dev, + struct tgec_mdio_controller __iomem *regs) +{ + uint32_t status; + + /* Wait till the MDIO operation is complete */ + status = spin_event_timeout( + !((in_be32(&regs->mdio_data)) & MDIO_DATA_BSY), TIMEOUT, 0); + if (!status) { + dev_err(dev, "timeout waiting for operation to complete\n"); + return -ETIMEDOUT; + } + + return 0; +} + +/* + * Write value to the PHY for this device to the register at regnum, waiting + * until the write is done before it returns.
+ */ +static int xgmac_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value) +{ + struct tgec_mdio_controller __iomem *regs = bus->priv; + uint16_t dev_addr = regnum >> 16; + int ret; + + /* Setup the MII Mgmt clock speed */ + out_be32(&regs->mdio_stat, MDIO_STAT_CLKDIV(100)); + + ret = xgmac_wait_until_free(&bus->dev, regs); + if (ret) + return ret; + + /* Set the port and dev addr */ + out_be32(&regs->mdio_ctl, + MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr)); + + /* Set the register address */ + out_be32(&regs->mdio_addr, regnum & 0xffff); + + ret = xgmac_wait_until_free(&bus->dev, regs); + if (ret) + return ret; + + /* Write the value to the register */ + out_be32(&regs->mdio_data, MDIO_DATA(value)); + + ret = xgmac_wait_until_done(&bus->dev, regs); + if (ret) + return ret; + + return 0; +} + +/* + * Reads from the register at regnum in the PHY for device dev_addr, waiting + * until the read is done before returning the value. + */ +static int xgmac_mdio_read(struct mii_bus *bus, int phy_id, int regnum) +{ + struct tgec_mdio_controller __iomem *regs = bus->priv; + uint16_t dev_addr = regnum >> 16; + uint32_t mdio_ctl; + uint16_t value; + int ret; + + /* Setup the MII Mgmt clock speed */ + out_be32(&regs->mdio_stat, MDIO_STAT_CLKDIV(100)); + + ret = xgmac_wait_until_free(&bus->dev, regs); + if (ret) + return ret; + + /* Set the Port and Device Addrs */ + mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr); + out_be32(&regs->mdio_ctl, mdio_ctl); + + /* Set the register address */ + out_be32(&regs->mdio_addr, regnum & 0xffff); + + ret = xgmac_wait_until_free(&bus->dev, regs); + if (ret) + return ret; + + /* Initiate the read */ + out_be32(&regs->mdio_ctl, mdio_ctl | MDIO_CTL_READ); + + ret = xgmac_wait_until_done(&bus->dev, regs); + if (ret) + return ret; + + /* Return all Fs if nothing was there */ + if (in_be32(&regs->mdio_stat) & MDIO_STAT_RD_ER) { + dev_err(&bus->dev, + "Error while reading PHY%d reg at %d.%d\n", + phy_id, dev_addr, regnum); + return 0xffff; + } + + value = in_be32(&regs->mdio_data) & 0xffff; + dev_dbg(&bus->dev, "read %04x\n", value); + + return value; +} + +/* Reset the MIIM registers, and wait for the bus to free */ +static int xgmac_mdio_reset(struct mii_bus *bus) +{ + struct tgec_mdio_controller __iomem *regs = bus->priv; + int ret; + + mutex_lock(&bus->mdio_lock); + + /* Setup the MII Mgmt clock speed */ + out_be32(&regs->mdio_stat, MDIO_STAT_CLKDIV(100)); + + ret = xgmac_wait_until_free(&bus->dev, regs); + + mutex_unlock(&bus->mdio_lock); + + return ret; +} + +static int xgmac_mdio_probe(struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + struct mii_bus *bus; + struct resource res; + int ret; + + ret = of_address_to_resource(np, 0, &res); + if (ret) { + dev_err(&pdev->dev, "could not obtain address\n"); + return ret; + } + + bus = mdiobus_alloc_size(PHY_MAX_ADDR * sizeof(int)); + if (!bus) + return -ENOMEM; + + bus->name = "Freescale XGMAC MDIO Bus"; + bus->read = xgmac_mdio_read; + bus->write = xgmac_mdio_write; + bus->reset = xgmac_mdio_reset; + bus->irq = bus->priv; + bus->parent = &pdev->dev; + snprintf(bus->id, MII_BUS_ID_SIZE, "%llx", (unsigned long long)res.start); + + /* Map the MDIO controller registers */ + bus->priv = of_iomap(np, 0); + if (!bus->priv) { + ret = -ENOMEM; + goto err_ioremap; + } + + ret = of_mdiobus_register(bus, np); + if (ret) { + dev_err(&pdev->dev, "cannot register MDIO bus\n"); + goto err_registration; + } + + platform_set_drvdata(pdev, bus); + + return 0; +
+err_registration: + iounmap(bus->priv); + +err_ioremap: + mdiobus_free(bus); + + return ret; +} + +static int xgmac_mdio_remove(struct platform_device *pdev) +{ + struct mii_bus *bus = platform_get_drvdata(pdev); + + mdiobus_unregister(bus); + iounmap(bus->priv); + mdiobus_free(bus); + + return 0; +} + +static struct of_device_id xgmac_mdio_match[] = { + { + .compatible = "fsl,fman-xmdio", + }, + {}, +}; +MODULE_DEVICE_TABLE(of, xgmac_mdio_match); + +static struct platform_driver xgmac_mdio_driver = { + .driver = { + .name = "fsl-fman_xmdio", + .of_match_table = xgmac_mdio_match, + }, + .probe = xgmac_mdio_probe, + .remove = xgmac_mdio_remove, +}; + +module_platform_driver(xgmac_mdio_driver); + +MODULE_DESCRIPTION("Freescale QorIQ 10G MDIO Controller"); +MODULE_LICENSE("GPL v2");
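
A note on the fixed-link handling added to ucc_geth_probe() above: when no "phy-handle" property is present, the patch registers a fixed link and reuses the MAC's own device-tree node as the PHY node, so the existing of_phy_connect() call in init_phy() works unchanged. Below is a minimal sketch of that pattern in a generic MAC driver; the names example_attach_phy and example_adjust_link are illustrative, not part of this patch.

#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>

/* Hypothetical link-state callback; a real driver reprograms its MAC here. */
static void example_adjust_link(struct net_device *ndev)
{
}

/* Resolve and attach a PHY for the MAC node np, falling back to a
 * fixed link when no "phy-handle" property is present. */
static int example_attach_phy(struct net_device *ndev, struct device_node *np,
			      phy_interface_t iface)
{
	struct device_node *phy_node;
	struct phy_device *phydev;

	phy_node = of_parse_phandle(np, "phy-handle", 0);
	if (!phy_node) {
		/* With a fixed PHY, the MAC node itself stands in for
		 * the PHY node once the fixed link is registered. */
		if (of_phy_is_fixed_link(np)) {
			int err = of_phy_register_fixed_link(np);

			if (err)
				return err;
		}
		phy_node = np;
	}

	phydev = of_phy_connect(ndev, phy_node, example_adjust_link, 0, iface);
	return phydev ? 0 : -ENODEV;
}

This is why the patch simply stores np in ug_info->phy_node rather than teaching init_phy() a second lookup path.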

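Likewise, xgmac_mdio_read() and xgmac_mdio_write() above treat the upper 16 bits of regnum as the Clause-45 MMD device address (dev_addr = regnum >> 16) and the lower 16 bits as the register number. A sketch of how a caller might pack that encoding; the helper names here are hypothetical, not part of the driver.

#include <linux/mdio.h>
#include <linux/phy.h>

/* Illustrative helper: pack an MMD device address and a register number
 * the way xgmac_mdio_read()/xgmac_mdio_write() unpack them
 * (dev_addr = regnum >> 16, register = regnum & 0xffff). */
static inline int example_xgmac_regnum(int dev_addr, int regnum)
{
	return (dev_addr << 16) | (regnum & 0xffff);
}

/* Example: read the PMA/PMD status register (MMD 1, register 1)
 * from the PHY at phy_id on a bus registered by this driver. */
static int example_read_pma_status(struct mii_bus *bus, int phy_id)
{
	return mdiobus_read(bus, phy_id,
			    example_xgmac_regnum(MDIO_MMD_PMAPMD, MDIO_STAT1));
}

For reference, the MDIO_STAT_CLKDIV(100) writes in both paths program bits 8-15 of mdio_stat with (100 >> 1) & 0xff = 50, the management-clock divider.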