Diffstat (limited to 'drivers/net/ethernet/ti')
-rw-r--r--  drivers/net/ethernet/ti/Kconfig          |   12
-rw-r--r--  drivers/net/ethernet/ti/Makefile         |    1
-rw-r--r--  drivers/net/ethernet/ti/cpmac.c          |   32
-rw-r--r--  drivers/net/ethernet/ti/cpsw-phy-sel.c   |  221
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c           | 1520
-rw-r--r--  drivers/net/ethernet/ti/cpsw.h           |   44
-rw-r--r--  drivers/net/ethernet/ti/cpsw_ale.c       |  125
-rw-r--r--  drivers/net/ethernet/ti/cpsw_ale.h       |   26
-rw-r--r--  drivers/net/ethernet/ti/cpts.c           |   26
-rw-r--r--  drivers/net/ethernet/ti/cpts.h           |    9
-rw-r--r--  drivers/net/ethernet/ti/davinci_cpdma.c  |  169
-rw-r--r--  drivers/net/ethernet/ti/davinci_cpdma.h  |   12
-rw-r--r--  drivers/net/ethernet/ti/davinci_emac.c   |  241
-rw-r--r--  drivers/net/ethernet/ti/davinci_mdio.c   |   87
-rw-r--r--  drivers/net/ethernet/ti/tlan.c           |    9
-rw-r--r--  drivers/net/ethernet/ti/tlan.h           |    1
16 files changed, 1894 insertions, 641 deletions
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig index 4426151d4ac..53150c25a96 100644 --- a/drivers/net/ethernet/ti/Kconfig +++ b/drivers/net/ethernet/ti/Kconfig @@ -49,11 +49,19 @@ config TI_DAVINCI_CPDMA To compile this driver as a module, choose M here: the module will be called davinci_cpdma. This is recommended. +config TI_CPSW_PHY_SEL + boolean "TI CPSW Switch Phy sel Support" + depends on TI_CPSW + ---help--- + This driver supports configuring of the phy mode connected to + the CPSW. + config TI_CPSW tristate "TI CPSW Switch Support" depends on ARM && (ARCH_DAVINCI || SOC_AM33XX) select TI_DAVINCI_CPDMA select TI_DAVINCI_MDIO + select TI_CPSW_PHY_SEL ---help--- This driver supports TI's CPSW Ethernet Switch. @@ -88,8 +96,8 @@ config TLAN Please email feedback to <torben.mathiasen@compaq.com>. config CPMAC - tristate "TI AR7 CPMAC Ethernet support (EXPERIMENTAL)" - depends on EXPERIMENTAL && AR7 + tristate "TI AR7 CPMAC Ethernet support" + depends on AR7 select PHYLIB ---help--- TI AR7 CPMAC Ethernet support diff --git a/drivers/net/ethernet/ti/Makefile b/drivers/net/ethernet/ti/Makefile index c65148e8aa1..9cfaab8152b 100644 --- a/drivers/net/ethernet/ti/Makefile +++ b/drivers/net/ethernet/ti/Makefile @@ -7,5 +7,6 @@ obj-$(CONFIG_CPMAC) += cpmac.o obj-$(CONFIG_TI_DAVINCI_EMAC) += davinci_emac.o obj-$(CONFIG_TI_DAVINCI_MDIO) += davinci_mdio.o obj-$(CONFIG_TI_DAVINCI_CPDMA) += davinci_cpdma.o +obj-$(CONFIG_TI_CPSW_PHY_SEL) += cpsw-phy-sel.o obj-$(CONFIG_TI_CPSW) += ti_cpsw.o ti_cpsw-y := cpsw_ale.o cpsw.o cpts.o diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c index d9625f62b02..7399a52f7c2 100644 --- a/drivers/net/ethernet/ti/cpmac.c +++ b/drivers/net/ethernet/ti/cpmac.c @@ -17,7 +17,6 @@ */ #include <linux/module.h> -#include <linux/init.h> #include <linux/interrupt.h> #include <linux/moduleparam.h> @@ -314,19 +313,6 @@ static int mii_irqs[PHY_MAX_ADDR] = { PHY_POLL, }; static struct mii_bus *cpmac_mii; -static int cpmac_config(struct net_device *dev, struct ifmap *map) -{ - if (dev->flags & IFF_UP) - return -EBUSY; - - /* Don't allow changing the I/O address */ - if (map->base_addr != dev->base_addr) - return -EOPNOTSUPP; - - /* ignore other fields */ - return 0; -} - static void cpmac_set_multicast_list(struct net_device *dev) { struct netdev_hw_addr *ha; @@ -636,7 +622,7 @@ static void cpmac_hw_stop(struct net_device *dev) { int i; struct cpmac_priv *priv = netdev_priv(dev); - struct plat_cpmac_data *pdata = priv->pdev->dev.platform_data; + struct plat_cpmac_data *pdata = dev_get_platdata(&priv->pdev->dev); ar7_device_reset(pdata->reset_bit); cpmac_write(priv->regs, CPMAC_RX_CONTROL, @@ -659,7 +645,7 @@ static void cpmac_hw_start(struct net_device *dev) { int i; struct cpmac_priv *priv = netdev_priv(dev); - struct plat_cpmac_data *pdata = priv->pdev->dev.platform_data; + struct plat_cpmac_data *pdata = dev_get_platdata(&priv->pdev->dev); ar7_device_reset(pdata->reset_bit); for (i = 0; i < 8; i++) { @@ -904,10 +890,9 @@ static int cpmac_set_ringparam(struct net_device *dev, static void cpmac_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { - strcpy(info->driver, "cpmac"); - strcpy(info->version, CPMAC_VERSION); - info->fw_version[0] = '\0'; - sprintf(info->bus_info, "%s", "cpmac"); + strlcpy(info->driver, "cpmac", sizeof(info->driver)); + strlcpy(info->version, CPMAC_VERSION, sizeof(info->version)); + snprintf(info->bus_info, sizeof(info->bus_info), "%s", "cpmac"); info->regdump_len = 0; } @@ -1102,7 
+1087,6 @@ static const struct net_device_ops cpmac_netdev_ops = { .ndo_tx_timeout = cpmac_tx_timeout, .ndo_set_rx_mode = cpmac_set_multicast_list, .ndo_do_ioctl = cpmac_ioctl, - .ndo_set_config = cpmac_config, .ndo_change_mtu = eth_change_mtu, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = eth_mac_addr, @@ -1119,7 +1103,7 @@ static int cpmac_probe(struct platform_device *pdev) struct net_device *dev; struct plat_cpmac_data *pdata; - pdata = pdev->dev.platform_data; + pdata = dev_get_platdata(&pdev->dev); if (external_switch || dumb_switch) { strncpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE); /* fixed phys bus */ @@ -1173,8 +1157,8 @@ static int cpmac_probe(struct platform_device *pdev) snprintf(priv->phy_name, MII_BUS_ID_SIZE, PHY_ID_FMT, mdio_bus_id, phy_id); - priv->phy = phy_connect(dev, priv->phy_name, cpmac_adjust_link, 0, - PHY_INTERFACE_MODE_MII); + priv->phy = phy_connect(dev, priv->phy_name, cpmac_adjust_link, + PHY_INTERFACE_MODE_MII); if (IS_ERR(priv->phy)) { if (netif_msg_drv(priv)) diff --git a/drivers/net/ethernet/ti/cpsw-phy-sel.c b/drivers/net/ethernet/ti/cpsw-phy-sel.c new file mode 100644 index 00000000000..aa8bf45e53d --- /dev/null +++ b/drivers/net/ethernet/ti/cpsw-phy-sel.c @@ -0,0 +1,221 @@ +/* Texas Instruments Ethernet Switch Driver + * + * Copyright (C) 2013 Texas Instruments + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include <linux/platform_device.h> +#include <linux/module.h> +#include <linux/netdevice.h> +#include <linux/phy.h> +#include <linux/of.h> +#include <linux/of_device.h> + +#include "cpsw.h" + +/* AM33xx SoC specific definitions for the CONTROL port */ +#define AM33XX_GMII_SEL_MODE_MII 0 +#define AM33XX_GMII_SEL_MODE_RMII 1 +#define AM33XX_GMII_SEL_MODE_RGMII 2 + +#define AM33XX_GMII_SEL_RMII2_IO_CLK_EN BIT(7) +#define AM33XX_GMII_SEL_RMII1_IO_CLK_EN BIT(6) + +#define GMII_SEL_MODE_MASK 0x3 + +struct cpsw_phy_sel_priv { + struct device *dev; + u32 __iomem *gmii_sel; + bool rmii_clock_external; + void (*cpsw_phy_sel)(struct cpsw_phy_sel_priv *priv, + phy_interface_t phy_mode, int slave); +}; + + +static void cpsw_gmii_sel_am3352(struct cpsw_phy_sel_priv *priv, + phy_interface_t phy_mode, int slave) +{ + u32 reg; + u32 mask; + u32 mode = 0; + + reg = readl(priv->gmii_sel); + + switch (phy_mode) { + case PHY_INTERFACE_MODE_RMII: + mode = AM33XX_GMII_SEL_MODE_RMII; + break; + + case PHY_INTERFACE_MODE_RGMII: + case PHY_INTERFACE_MODE_RGMII_ID: + case PHY_INTERFACE_MODE_RGMII_RXID: + case PHY_INTERFACE_MODE_RGMII_TXID: + mode = AM33XX_GMII_SEL_MODE_RGMII; + break; + + case PHY_INTERFACE_MODE_MII: + default: + mode = AM33XX_GMII_SEL_MODE_MII; + break; + }; + + mask = GMII_SEL_MODE_MASK << (slave * 2) | BIT(slave + 6); + mode <<= slave * 2; + + if (priv->rmii_clock_external) { + if (slave == 0) + mode |= AM33XX_GMII_SEL_RMII1_IO_CLK_EN; + else + mode |= AM33XX_GMII_SEL_RMII2_IO_CLK_EN; + } + + reg &= ~mask; + reg |= mode; + + writel(reg, priv->gmii_sel); +} + +static void cpsw_gmii_sel_dra7xx(struct cpsw_phy_sel_priv *priv, + phy_interface_t phy_mode, int slave) +{ + u32 reg; + u32 mask; + u32 mode = 0; + + reg = readl(priv->gmii_sel); + + switch (phy_mode) { + case PHY_INTERFACE_MODE_RMII: + mode = AM33XX_GMII_SEL_MODE_RMII; + break; + + case PHY_INTERFACE_MODE_RGMII: + case PHY_INTERFACE_MODE_RGMII_ID: + case PHY_INTERFACE_MODE_RGMII_RXID: + case PHY_INTERFACE_MODE_RGMII_TXID: + mode = AM33XX_GMII_SEL_MODE_RGMII; + break; + + case PHY_INTERFACE_MODE_MII: + default: + mode = AM33XX_GMII_SEL_MODE_MII; + break; + }; + + switch (slave) { + case 0: + mask = GMII_SEL_MODE_MASK; + break; + case 1: + mask = GMII_SEL_MODE_MASK << 4; + mode <<= 4; + break; + default: + dev_err(priv->dev, "invalid slave number...\n"); + return; + } + + if (priv->rmii_clock_external) + dev_err(priv->dev, "RMII External clock is not supported\n"); + + reg &= ~mask; + reg |= mode; + + writel(reg, priv->gmii_sel); +} + +static struct platform_driver cpsw_phy_sel_driver; +static int match(struct device *dev, void *data) +{ + struct device_node *node = (struct device_node *)data; + return dev->of_node == node && + dev->driver == &cpsw_phy_sel_driver.driver; +} + +void cpsw_phy_sel(struct device *dev, phy_interface_t phy_mode, int slave) +{ + struct device_node *node; + struct cpsw_phy_sel_priv *priv; + + node = of_get_child_by_name(dev->of_node, "cpsw-phy-sel"); + if (!node) { + dev_err(dev, "Phy mode driver DT not found\n"); + return; + } + + dev = bus_find_device(&platform_bus_type, NULL, node, match); + priv = dev_get_drvdata(dev); + + priv->cpsw_phy_sel(priv, phy_mode, slave); +} +EXPORT_SYMBOL_GPL(cpsw_phy_sel); + +static const struct of_device_id cpsw_phy_sel_id_table[] = { + { + .compatible = "ti,am3352-cpsw-phy-sel", + .data = &cpsw_gmii_sel_am3352, + }, + { + .compatible = "ti,dra7xx-cpsw-phy-sel", + .data = &cpsw_gmii_sel_dra7xx, + }, + { + .compatible = "ti,am43xx-cpsw-phy-sel", + .data = 
&cpsw_gmii_sel_am3352, + }, + {} +}; +MODULE_DEVICE_TABLE(of, cpsw_phy_sel_id_table); + +static int cpsw_phy_sel_probe(struct platform_device *pdev) +{ + struct resource *res; + const struct of_device_id *of_id; + struct cpsw_phy_sel_priv *priv; + + of_id = of_match_node(cpsw_phy_sel_id_table, pdev->dev.of_node); + if (!of_id) + return -EINVAL; + + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) { + dev_err(&pdev->dev, "unable to alloc memory for cpsw phy sel\n"); + return -ENOMEM; + } + + priv->dev = &pdev->dev; + priv->cpsw_phy_sel = of_id->data; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gmii-sel"); + priv->gmii_sel = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(priv->gmii_sel)) + return PTR_ERR(priv->gmii_sel); + + if (of_find_property(pdev->dev.of_node, "rmii-clock-ext", NULL)) + priv->rmii_clock_external = true; + + dev_set_drvdata(&pdev->dev, priv); + + return 0; +} + +static struct platform_driver cpsw_phy_sel_driver = { + .probe = cpsw_phy_sel_probe, + .driver = { + .name = "cpsw-phy-sel", + .owner = THIS_MODULE, + .of_match_table = cpsw_phy_sel_id_table, + }, +}; + +module_platform_driver(cpsw_phy_sel_driver); +MODULE_AUTHOR("Mugunthan V N <mugunthanvnm@ti.com>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 40aff684aa2..b988d16cd34 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -32,9 +32,11 @@ #include <linux/of.h> #include <linux/of_net.h> #include <linux/of_device.h> +#include <linux/if_vlan.h> -#include <linux/platform_data/cpsw.h> +#include <linux/pinctrl/consumer.h> +#include "cpsw.h" #include "cpsw_ale.h" #include "cpts.h" #include "davinci_cpdma.h" @@ -80,6 +82,8 @@ do { \ #define CPSW_VERSION_1 0x19010a #define CPSW_VERSION_2 0x19010c +#define CPSW_VERSION_3 0x19010f +#define CPSW_VERSION_4 0x190112 #define HOST_PORT_NUM 0 #define SLIVER_SIZE 0x40 @@ -89,6 +93,7 @@ do { \ #define CPSW1_SLAVE_SIZE 0x040 #define CPSW1_CPDMA_OFFSET 0x100 #define CPSW1_STATERAM_OFFSET 0x200 +#define CPSW1_HW_STATS 0x400 #define CPSW1_CPTS_OFFSET 0x500 #define CPSW1_ALE_OFFSET 0x600 #define CPSW1_SLIVER_OFFSET 0x700 @@ -97,6 +102,7 @@ do { \ #define CPSW2_SLAVE_OFFSET 0x200 #define CPSW2_SLAVE_SIZE 0x100 #define CPSW2_CPDMA_OFFSET 0x800 +#define CPSW2_HW_STATS 0x900 #define CPSW2_STATERAM_OFFSET 0xa00 #define CPSW2_CPTS_OFFSET 0xc00 #define CPSW2_ALE_OFFSET 0xd00 @@ -118,18 +124,36 @@ do { \ #define TX_PRIORITY_MAPPING 0x33221100 #define CPDMA_TX_PRIORITY_MAP 0x76543210 +#define CPSW_VLAN_AWARE BIT(1) +#define CPSW_ALE_VLAN_AWARE 1 + +#define CPSW_FIFO_NORMAL_MODE (0 << 15) +#define CPSW_FIFO_DUAL_MAC_MODE (1 << 15) +#define CPSW_FIFO_RATE_LIMIT_MODE (2 << 15) + +#define CPSW_INTPACEEN (0x3f << 16) +#define CPSW_INTPRESCALE_MASK (0x7FF << 0) +#define CPSW_CMINTMAX_CNT 63 +#define CPSW_CMINTMIN_CNT 2 +#define CPSW_CMINTMAX_INTVL (1000 / CPSW_CMINTMIN_CNT) +#define CPSW_CMINTMIN_INTVL ((1000 / CPSW_CMINTMAX_CNT) + 1) + #define cpsw_enable_irq(priv) \ do { \ u32 i; \ for (i = 0; i < priv->num_irqs; i++) \ enable_irq(priv->irqs_table[i]); \ - } while (0); + } while (0) #define cpsw_disable_irq(priv) \ do { \ u32 i; \ for (i = 0; i < priv->num_irqs; i++) \ disable_irq_nosync(priv->irqs_table[i]); \ - } while (0); + } while (0) + +#define cpsw_slave_index(priv) \ + ((priv->data.dual_emac) ? 
priv->emac_port : \ + priv->data.active_slave) static int debug_level; module_param(debug_level, int, 0); @@ -152,6 +176,15 @@ struct cpsw_wr_regs { u32 rx_en; u32 tx_en; u32 misc_en; + u32 mem_allign1[8]; + u32 rx_thresh_stat; + u32 rx_stat; + u32 tx_stat; + u32 misc_stat; + u32 mem_allign2[8]; + u32 rx_imax; + u32 tx_imax; + }; struct cpsw_ss_regs { @@ -215,20 +248,31 @@ struct cpsw_ss_regs { #define TS_131 (1<<11) /* Time Sync Dest IP Addr 131 enable */ #define TS_130 (1<<10) /* Time Sync Dest IP Addr 130 enable */ #define TS_129 (1<<9) /* Time Sync Dest IP Addr 129 enable */ -#define TS_BIT8 (1<<8) /* ts_ttl_nonzero? */ +#define TS_TTL_NONZERO (1<<8) /* Time Sync Time To Live Non-zero enable */ +#define TS_ANNEX_F_EN (1<<6) /* Time Sync Annex F enable */ #define TS_ANNEX_D_EN (1<<4) /* Time Sync Annex D enable */ #define TS_LTYPE2_EN (1<<3) /* Time Sync LTYPE 2 enable */ #define TS_LTYPE1_EN (1<<2) /* Time Sync LTYPE 1 enable */ #define TS_TX_EN (1<<1) /* Time Sync Transmit Enable */ #define TS_RX_EN (1<<0) /* Time Sync Receive Enable */ -#define CTRL_TS_BITS \ - (TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 | TS_BIT8 | \ - TS_ANNEX_D_EN | TS_LTYPE1_EN) +#define CTRL_V2_TS_BITS \ + (TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 |\ + TS_TTL_NONZERO | TS_ANNEX_D_EN | TS_LTYPE1_EN) -#define CTRL_ALL_TS_MASK (CTRL_TS_BITS | TS_TX_EN | TS_RX_EN) -#define CTRL_TX_TS_BITS (CTRL_TS_BITS | TS_TX_EN) -#define CTRL_RX_TS_BITS (CTRL_TS_BITS | TS_RX_EN) +#define CTRL_V2_ALL_TS_MASK (CTRL_V2_TS_BITS | TS_TX_EN | TS_RX_EN) +#define CTRL_V2_TX_TS_BITS (CTRL_V2_TS_BITS | TS_TX_EN) +#define CTRL_V2_RX_TS_BITS (CTRL_V2_TS_BITS | TS_RX_EN) + + +#define CTRL_V3_TS_BITS \ + (TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 |\ + TS_TTL_NONZERO | TS_ANNEX_F_EN | TS_ANNEX_D_EN |\ + TS_LTYPE1_EN) + +#define CTRL_V3_ALL_TS_MASK (CTRL_V3_TS_BITS | TS_TX_EN | TS_RX_EN) +#define CTRL_V3_TX_TS_BITS (CTRL_V3_TS_BITS | TS_TX_EN) +#define CTRL_V3_RX_TS_BITS (CTRL_V3_TS_BITS | TS_RX_EN) /* Bit definitions for the CPSW2_TS_SEQ_MTYPE register */ #define TS_SEQ_ID_OFFSET_SHIFT (16) /* Time Sync Sequence ID Offset */ @@ -250,7 +294,7 @@ struct cpsw_ss_regs { struct cpsw_host_regs { u32 max_blks; u32 blk_cnt; - u32 flow_thresh; + u32 tx_in_ctl; u32 port_vlan; u32 tx_pri_map; u32 cpdma_tx_pri_map; @@ -270,6 +314,44 @@ struct cpsw_sliver_regs { u32 rx_pri_map; }; +struct cpsw_hw_stats { + u32 rxgoodframes; + u32 rxbroadcastframes; + u32 rxmulticastframes; + u32 rxpauseframes; + u32 rxcrcerrors; + u32 rxaligncodeerrors; + u32 rxoversizedframes; + u32 rxjabberframes; + u32 rxundersizedframes; + u32 rxfragments; + u32 __pad_0[2]; + u32 rxoctets; + u32 txgoodframes; + u32 txbroadcastframes; + u32 txmulticastframes; + u32 txpauseframes; + u32 txdeferredframes; + u32 txcollisionframes; + u32 txsinglecollframes; + u32 txmultcollframes; + u32 txexcessivecollisions; + u32 txlatecollisions; + u32 txunderrun; + u32 txcarriersenseerrors; + u32 txoctets; + u32 octetframes64; + u32 octetframes65t127; + u32 octetframes128t255; + u32 octetframes256t511; + u32 octetframes512t1023; + u32 octetframes1024tup; + u32 netoctets; + u32 rxsofoverruns; + u32 rxmofoverruns; + u32 rxdmaoverruns; +}; + struct cpsw_slave { void __iomem *regs; struct cpsw_sliver_regs __iomem *sliver; @@ -277,6 +359,9 @@ struct cpsw_slave { u32 mac_control; struct cpsw_slave_data *data; struct phy_device *phy; + struct net_device *ndev; + u32 port_vlan; + u32 open_stat; }; static inline u32 slave_read(struct cpsw_slave *slave, u32 offset) @@ -293,17 +378,17 
@@ struct cpsw_priv { spinlock_t lock; struct platform_device *pdev; struct net_device *ndev; - struct resource *cpsw_res; - struct resource *cpsw_wr_res; struct napi_struct napi; struct device *dev; struct cpsw_platform_data data; struct cpsw_ss_regs __iomem *regs; struct cpsw_wr_regs __iomem *wr_regs; + u8 __iomem *hw_stats; struct cpsw_host_regs __iomem *host_port_regs; u32 msg_enable; u32 version; - struct net_device_stats stats; + u32 coal_intvl; + u32 bus_freq_mhz; int rx_packet_max; int host_port; struct clk *clk; @@ -315,25 +400,244 @@ struct cpsw_priv { /* snapshot of IRQ numbers */ u32 irqs_table[4]; u32 num_irqs; - struct cpts cpts; + bool irq_enabled; + struct cpts *cpts; + u32 emac_port; }; +struct cpsw_stats { + char stat_string[ETH_GSTRING_LEN]; + int type; + int sizeof_stat; + int stat_offset; +}; + +enum { + CPSW_STATS, + CPDMA_RX_STATS, + CPDMA_TX_STATS, +}; + +#define CPSW_STAT(m) CPSW_STATS, \ + sizeof(((struct cpsw_hw_stats *)0)->m), \ + offsetof(struct cpsw_hw_stats, m) +#define CPDMA_RX_STAT(m) CPDMA_RX_STATS, \ + sizeof(((struct cpdma_chan_stats *)0)->m), \ + offsetof(struct cpdma_chan_stats, m) +#define CPDMA_TX_STAT(m) CPDMA_TX_STATS, \ + sizeof(((struct cpdma_chan_stats *)0)->m), \ + offsetof(struct cpdma_chan_stats, m) + +static const struct cpsw_stats cpsw_gstrings_stats[] = { + { "Good Rx Frames", CPSW_STAT(rxgoodframes) }, + { "Broadcast Rx Frames", CPSW_STAT(rxbroadcastframes) }, + { "Multicast Rx Frames", CPSW_STAT(rxmulticastframes) }, + { "Pause Rx Frames", CPSW_STAT(rxpauseframes) }, + { "Rx CRC Errors", CPSW_STAT(rxcrcerrors) }, + { "Rx Align/Code Errors", CPSW_STAT(rxaligncodeerrors) }, + { "Oversize Rx Frames", CPSW_STAT(rxoversizedframes) }, + { "Rx Jabbers", CPSW_STAT(rxjabberframes) }, + { "Undersize (Short) Rx Frames", CPSW_STAT(rxundersizedframes) }, + { "Rx Fragments", CPSW_STAT(rxfragments) }, + { "Rx Octets", CPSW_STAT(rxoctets) }, + { "Good Tx Frames", CPSW_STAT(txgoodframes) }, + { "Broadcast Tx Frames", CPSW_STAT(txbroadcastframes) }, + { "Multicast Tx Frames", CPSW_STAT(txmulticastframes) }, + { "Pause Tx Frames", CPSW_STAT(txpauseframes) }, + { "Deferred Tx Frames", CPSW_STAT(txdeferredframes) }, + { "Collisions", CPSW_STAT(txcollisionframes) }, + { "Single Collision Tx Frames", CPSW_STAT(txsinglecollframes) }, + { "Multiple Collision Tx Frames", CPSW_STAT(txmultcollframes) }, + { "Excessive Collisions", CPSW_STAT(txexcessivecollisions) }, + { "Late Collisions", CPSW_STAT(txlatecollisions) }, + { "Tx Underrun", CPSW_STAT(txunderrun) }, + { "Carrier Sense Errors", CPSW_STAT(txcarriersenseerrors) }, + { "Tx Octets", CPSW_STAT(txoctets) }, + { "Rx + Tx 64 Octet Frames", CPSW_STAT(octetframes64) }, + { "Rx + Tx 65-127 Octet Frames", CPSW_STAT(octetframes65t127) }, + { "Rx + Tx 128-255 Octet Frames", CPSW_STAT(octetframes128t255) }, + { "Rx + Tx 256-511 Octet Frames", CPSW_STAT(octetframes256t511) }, + { "Rx + Tx 512-1023 Octet Frames", CPSW_STAT(octetframes512t1023) }, + { "Rx + Tx 1024-Up Octet Frames", CPSW_STAT(octetframes1024tup) }, + { "Net Octets", CPSW_STAT(netoctets) }, + { "Rx Start of Frame Overruns", CPSW_STAT(rxsofoverruns) }, + { "Rx Middle of Frame Overruns", CPSW_STAT(rxmofoverruns) }, + { "Rx DMA Overruns", CPSW_STAT(rxdmaoverruns) }, + { "Rx DMA chan: head_enqueue", CPDMA_RX_STAT(head_enqueue) }, + { "Rx DMA chan: tail_enqueue", CPDMA_RX_STAT(tail_enqueue) }, + { "Rx DMA chan: pad_enqueue", CPDMA_RX_STAT(pad_enqueue) }, + { "Rx DMA chan: misqueued", CPDMA_RX_STAT(misqueued) }, + { "Rx DMA chan: desc_alloc_fail", 
CPDMA_RX_STAT(desc_alloc_fail) }, + { "Rx DMA chan: pad_alloc_fail", CPDMA_RX_STAT(pad_alloc_fail) }, + { "Rx DMA chan: runt_receive_buf", CPDMA_RX_STAT(runt_receive_buff) }, + { "Rx DMA chan: runt_transmit_buf", CPDMA_RX_STAT(runt_transmit_buff) }, + { "Rx DMA chan: empty_dequeue", CPDMA_RX_STAT(empty_dequeue) }, + { "Rx DMA chan: busy_dequeue", CPDMA_RX_STAT(busy_dequeue) }, + { "Rx DMA chan: good_dequeue", CPDMA_RX_STAT(good_dequeue) }, + { "Rx DMA chan: requeue", CPDMA_RX_STAT(requeue) }, + { "Rx DMA chan: teardown_dequeue", CPDMA_RX_STAT(teardown_dequeue) }, + { "Tx DMA chan: head_enqueue", CPDMA_TX_STAT(head_enqueue) }, + { "Tx DMA chan: tail_enqueue", CPDMA_TX_STAT(tail_enqueue) }, + { "Tx DMA chan: pad_enqueue", CPDMA_TX_STAT(pad_enqueue) }, + { "Tx DMA chan: misqueued", CPDMA_TX_STAT(misqueued) }, + { "Tx DMA chan: desc_alloc_fail", CPDMA_TX_STAT(desc_alloc_fail) }, + { "Tx DMA chan: pad_alloc_fail", CPDMA_TX_STAT(pad_alloc_fail) }, + { "Tx DMA chan: runt_receive_buf", CPDMA_TX_STAT(runt_receive_buff) }, + { "Tx DMA chan: runt_transmit_buf", CPDMA_TX_STAT(runt_transmit_buff) }, + { "Tx DMA chan: empty_dequeue", CPDMA_TX_STAT(empty_dequeue) }, + { "Tx DMA chan: busy_dequeue", CPDMA_TX_STAT(busy_dequeue) }, + { "Tx DMA chan: good_dequeue", CPDMA_TX_STAT(good_dequeue) }, + { "Tx DMA chan: requeue", CPDMA_TX_STAT(requeue) }, + { "Tx DMA chan: teardown_dequeue", CPDMA_TX_STAT(teardown_dequeue) }, +}; + +#define CPSW_STATS_LEN ARRAY_SIZE(cpsw_gstrings_stats) + #define napi_to_priv(napi) container_of(napi, struct cpsw_priv, napi) -#define for_each_slave(priv, func, arg...) \ - do { \ - int idx; \ - for (idx = 0; idx < (priv)->data.slaves; idx++) \ - (func)((priv)->slaves + idx, ##arg); \ +#define for_each_slave(priv, func, arg...) \ + do { \ + struct cpsw_slave *slave; \ + int n; \ + if (priv->data.dual_emac) \ + (func)((priv)->slaves + priv->emac_port, ##arg);\ + else \ + for (n = (priv)->data.slaves, \ + slave = (priv)->slaves; \ + n; n--) \ + (func)(slave++, ##arg); \ + } while (0) +#define cpsw_get_slave_ndev(priv, __slave_no__) \ + (priv->slaves[__slave_no__].ndev) +#define cpsw_get_slave_priv(priv, __slave_no__) \ + ((priv->slaves[__slave_no__].ndev) ? 
\ + netdev_priv(priv->slaves[__slave_no__].ndev) : NULL) \ + +#define cpsw_dual_emac_src_port_detect(status, priv, ndev, skb) \ + do { \ + if (!priv->data.dual_emac) \ + break; \ + if (CPDMA_RX_SOURCE_PORT(status) == 1) { \ + ndev = cpsw_get_slave_ndev(priv, 0); \ + priv = netdev_priv(ndev); \ + skb->dev = ndev; \ + } else if (CPDMA_RX_SOURCE_PORT(status) == 2) { \ + ndev = cpsw_get_slave_ndev(priv, 1); \ + priv = netdev_priv(ndev); \ + skb->dev = ndev; \ + } \ + } while (0) +#define cpsw_add_mcast(priv, addr) \ + do { \ + if (priv->data.dual_emac) { \ + struct cpsw_slave *slave = priv->slaves + \ + priv->emac_port; \ + int slave_port = cpsw_get_slave_port(priv, \ + slave->slave_num); \ + cpsw_ale_add_mcast(priv->ale, addr, \ + 1 << slave_port | 1 << priv->host_port, \ + ALE_VLAN, slave->port_vlan, 0); \ + } else { \ + cpsw_ale_add_mcast(priv->ale, addr, \ + ALE_ALL_PORTS << priv->host_port, \ + 0, 0, 0); \ + } \ } while (0) +static inline int cpsw_get_slave_port(struct cpsw_priv *priv, u32 slave_num) +{ + if (priv->host_port == 0) + return slave_num + 1; + else + return slave_num; +} + +static void cpsw_set_promiscious(struct net_device *ndev, bool enable) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + struct cpsw_ale *ale = priv->ale; + int i; + + if (priv->data.dual_emac) { + bool flag = false; + + /* Enabling promiscuous mode for one interface will be + * common for both the interface as the interface shares + * the same hardware resource. + */ + for (i = 0; i < priv->data.slaves; i++) + if (priv->slaves[i].ndev->flags & IFF_PROMISC) + flag = true; + + if (!enable && flag) { + enable = true; + dev_err(&ndev->dev, "promiscuity not disabled as the other interface is still in promiscuity mode\n"); + } + + if (enable) { + /* Enable Bypass */ + cpsw_ale_control_set(ale, 0, ALE_BYPASS, 1); + + dev_dbg(&ndev->dev, "promiscuity enabled\n"); + } else { + /* Disable Bypass */ + cpsw_ale_control_set(ale, 0, ALE_BYPASS, 0); + dev_dbg(&ndev->dev, "promiscuity disabled\n"); + } + } else { + if (enable) { + unsigned long timeout = jiffies + HZ; + + /* Disable Learn for all ports */ + for (i = 0; i < priv->data.slaves; i++) { + cpsw_ale_control_set(ale, i, + ALE_PORT_NOLEARN, 1); + cpsw_ale_control_set(ale, i, + ALE_PORT_NO_SA_UPDATE, 1); + } + + /* Clear All Untouched entries */ + cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1); + do { + cpu_relax(); + if (cpsw_ale_control_get(ale, 0, ALE_AGEOUT)) + break; + } while (time_after(timeout, jiffies)); + cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1); + + /* Clear all mcast from ALE */ + cpsw_ale_flush_multicast(ale, ALE_ALL_PORTS << + priv->host_port); + + /* Flood All Unicast Packets to Host port */ + cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 1); + dev_dbg(&ndev->dev, "promiscuity enabled\n"); + } else { + /* Flood All Unicast Packets to Host port */ + cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 0); + + /* Enable Learn for all ports */ + for (i = 0; i < priv->data.slaves; i++) { + cpsw_ale_control_set(ale, i, + ALE_PORT_NOLEARN, 0); + cpsw_ale_control_set(ale, i, + ALE_PORT_NO_SA_UPDATE, 0); + } + dev_dbg(&ndev->dev, "promiscuity disabled\n"); + } + } +} + static void cpsw_ndo_set_rx_mode(struct net_device *ndev) { struct cpsw_priv *priv = netdev_priv(ndev); if (ndev->flags & IFF_PROMISC) { /* Enable promiscuous mode */ - dev_err(priv->dev, "Ignoring Promiscuous mode\n"); + cpsw_set_promiscious(ndev, true); return; + } else { + /* Disable promiscuous mode */ + cpsw_set_promiscious(ndev, false); } /* Clear all mcast from ALE */ @@ -344,8 +648,7 @@ 
static void cpsw_ndo_set_rx_mode(struct net_device *ndev) /* program multicast address list into ALE register */ netdev_for_each_mc_addr(ha, ndev) { - cpsw_ale_add_mcast(priv->ale, (u8 *)ha->addr, - ALE_ALL_PORTS << priv->host_port, 0, 0); + cpsw_add_mcast(priv, (u8 *)ha->addr); } } } @@ -368,78 +671,82 @@ static void cpsw_intr_disable(struct cpsw_priv *priv) return; } -void cpsw_tx_handler(void *token, int len, int status) +static void cpsw_tx_handler(void *token, int len, int status) { struct sk_buff *skb = token; struct net_device *ndev = skb->dev; struct cpsw_priv *priv = netdev_priv(ndev); + /* Check whether the queue is stopped due to stalled tx dma, if the + * queue is stopped then start the queue as we have free desc for tx + */ if (unlikely(netif_queue_stopped(ndev))) - netif_start_queue(ndev); - cpts_tx_timestamp(&priv->cpts, skb); - priv->stats.tx_packets++; - priv->stats.tx_bytes += len; + netif_wake_queue(ndev); + cpts_tx_timestamp(priv->cpts, skb); + ndev->stats.tx_packets++; + ndev->stats.tx_bytes += len; dev_kfree_skb_any(skb); } -void cpsw_rx_handler(void *token, int len, int status) +static void cpsw_rx_handler(void *token, int len, int status) { struct sk_buff *skb = token; + struct sk_buff *new_skb; struct net_device *ndev = skb->dev; struct cpsw_priv *priv = netdev_priv(ndev); int ret = 0; - /* free and bail if we are shutting down */ - if (unlikely(!netif_running(ndev)) || - unlikely(!netif_carrier_ok(ndev))) { + cpsw_dual_emac_src_port_detect(status, priv, ndev, skb); + + if (unlikely(status < 0) || unlikely(!netif_running(ndev))) { + /* the interface is going down, skbs are purged */ dev_kfree_skb_any(skb); return; } - if (likely(status >= 0)) { + + new_skb = netdev_alloc_skb_ip_align(ndev, priv->rx_packet_max); + if (new_skb) { skb_put(skb, len); - cpts_rx_timestamp(&priv->cpts, skb); + cpts_rx_timestamp(priv->cpts, skb); skb->protocol = eth_type_trans(skb, ndev); netif_receive_skb(skb); - priv->stats.rx_bytes += len; - priv->stats.rx_packets++; - skb = NULL; - } - - if (unlikely(!netif_running(ndev))) { - if (skb) - dev_kfree_skb_any(skb); - return; + ndev->stats.rx_bytes += len; + ndev->stats.rx_packets++; + } else { + ndev->stats.rx_dropped++; + new_skb = skb; } - if (likely(!skb)) { - skb = netdev_alloc_skb_ip_align(ndev, priv->rx_packet_max); - if (WARN_ON(!skb)) - return; - - ret = cpdma_chan_submit(priv->rxch, skb, skb->data, - skb_tailroom(skb), GFP_KERNEL); - } - WARN_ON(ret < 0); + ret = cpdma_chan_submit(priv->rxch, new_skb, new_skb->data, + skb_tailroom(new_skb), 0); + if (WARN_ON(ret < 0)) + dev_kfree_skb_any(new_skb); } static irqreturn_t cpsw_interrupt(int irq, void *dev_id) { struct cpsw_priv *priv = dev_id; - if (likely(netif_running(priv->ndev))) { - cpsw_intr_disable(priv); + cpsw_intr_disable(priv); + if (priv->irq_enabled == true) { cpsw_disable_irq(priv); + priv->irq_enabled = false; + } + + if (netif_running(priv->ndev)) { napi_schedule(&priv->napi); + return IRQ_HANDLED; } - return IRQ_HANDLED; -} -static inline int cpsw_get_slave_port(struct cpsw_priv *priv, u32 slave_num) -{ - if (priv->host_port == 0) - return slave_num + 1; - else - return slave_num; + priv = cpsw_get_slave_priv(priv, 1); + if (!priv) + return IRQ_NONE; + + if (netif_running(priv->ndev)) { + napi_schedule(&priv->napi); + return IRQ_HANDLED; + } + return IRQ_NONE; } static int cpsw_poll(struct napi_struct *napi, int budget) @@ -448,19 +755,27 @@ static int cpsw_poll(struct napi_struct *napi, int budget) int num_tx, num_rx; num_tx = cpdma_chan_process(priv->txch, 128); - 
num_rx = cpdma_chan_process(priv->rxch, budget); - - if (num_rx || num_tx) - cpsw_dbg(priv, intr, "poll %d rx, %d tx pkts\n", - num_rx, num_tx); + if (num_tx) + cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX); + num_rx = cpdma_chan_process(priv->rxch, budget); if (num_rx < budget) { + struct cpsw_priv *prim_cpsw; + napi_complete(napi); cpsw_intr_enable(priv); - cpdma_ctlr_eoi(priv->dma); - cpsw_enable_irq(priv); + cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX); + prim_cpsw = cpsw_get_slave_priv(priv, 0); + if (prim_cpsw->irq_enabled == false) { + prim_cpsw->irq_enabled = true; + cpsw_enable_irq(priv); + } } + if (num_rx || num_tx) + cpsw_dbg(priv, intr, "poll %d rx, %d tx pkts\n", + num_rx, num_tx); + return num_rx; } @@ -514,6 +829,8 @@ static void _cpsw_adjust_link(struct cpsw_slave *slave, /* set speed_in input in case RMII mode is used in 100Mbps */ if (phy->speed == 100) mac_control |= BIT(15); + else if (phy->speed == 10) + mac_control |= BIT(18); /* In Band mode */ *link = true; } else { @@ -548,6 +865,140 @@ static void cpsw_adjust_link(struct net_device *ndev) } } +static int cpsw_get_coalesce(struct net_device *ndev, + struct ethtool_coalesce *coal) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + + coal->rx_coalesce_usecs = priv->coal_intvl; + return 0; +} + +static int cpsw_set_coalesce(struct net_device *ndev, + struct ethtool_coalesce *coal) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + u32 int_ctrl; + u32 num_interrupts = 0; + u32 prescale = 0; + u32 addnl_dvdr = 1; + u32 coal_intvl = 0; + + if (!coal->rx_coalesce_usecs) + return -EINVAL; + + coal_intvl = coal->rx_coalesce_usecs; + + int_ctrl = readl(&priv->wr_regs->int_control); + prescale = priv->bus_freq_mhz * 4; + + if (coal_intvl < CPSW_CMINTMIN_INTVL) + coal_intvl = CPSW_CMINTMIN_INTVL; + + if (coal_intvl > CPSW_CMINTMAX_INTVL) { + /* Interrupt pacer works with 4us Pulse, we can + * throttle further by dilating the 4us pulse. 
+ */ + addnl_dvdr = CPSW_INTPRESCALE_MASK / prescale; + + if (addnl_dvdr > 1) { + prescale *= addnl_dvdr; + if (coal_intvl > (CPSW_CMINTMAX_INTVL * addnl_dvdr)) + coal_intvl = (CPSW_CMINTMAX_INTVL + * addnl_dvdr); + } else { + addnl_dvdr = 1; + coal_intvl = CPSW_CMINTMAX_INTVL; + } + } + + num_interrupts = (1000 * addnl_dvdr) / coal_intvl; + writel(num_interrupts, &priv->wr_regs->rx_imax); + writel(num_interrupts, &priv->wr_regs->tx_imax); + + int_ctrl |= CPSW_INTPACEEN; + int_ctrl &= (~CPSW_INTPRESCALE_MASK); + int_ctrl |= (prescale & CPSW_INTPRESCALE_MASK); + writel(int_ctrl, &priv->wr_regs->int_control); + + cpsw_notice(priv, timer, "Set coalesce to %d usecs.\n", coal_intvl); + if (priv->data.dual_emac) { + int i; + + for (i = 0; i < priv->data.slaves; i++) { + priv = netdev_priv(priv->slaves[i].ndev); + priv->coal_intvl = coal_intvl; + } + } else { + priv->coal_intvl = coal_intvl; + } + + return 0; +} + +static int cpsw_get_sset_count(struct net_device *ndev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return CPSW_STATS_LEN; + default: + return -EOPNOTSUPP; + } +} + +static void cpsw_get_strings(struct net_device *ndev, u32 stringset, u8 *data) +{ + u8 *p = data; + int i; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < CPSW_STATS_LEN; i++) { + memcpy(p, cpsw_gstrings_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + break; + } +} + +static void cpsw_get_ethtool_stats(struct net_device *ndev, + struct ethtool_stats *stats, u64 *data) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + struct cpdma_chan_stats rx_stats; + struct cpdma_chan_stats tx_stats; + u32 val; + u8 *p; + int i; + + /* Collect Davinci CPDMA stats for Rx and Tx Channel */ + cpdma_chan_get_stats(priv->rxch, &rx_stats); + cpdma_chan_get_stats(priv->txch, &tx_stats); + + for (i = 0; i < CPSW_STATS_LEN; i++) { + switch (cpsw_gstrings_stats[i].type) { + case CPSW_STATS: + val = readl(priv->hw_stats + + cpsw_gstrings_stats[i].stat_offset); + data[i] = val; + break; + + case CPDMA_RX_STATS: + p = (u8 *)&rx_stats + + cpsw_gstrings_stats[i].stat_offset; + data[i] = *(u32 *)p; + break; + + case CPDMA_TX_STATS: + p = (u8 *)&tx_stats + + cpsw_gstrings_stats[i].stat_offset; + data[i] = *(u32 *)p; + break; + } + } +} + static inline int __show_stat(char *buf, int maxlen, const char *name, u32 val) { static char *leader = "........................................"; @@ -559,14 +1010,67 @@ static inline int __show_stat(char *buf, int maxlen, const char *name, u32 val) leader + strlen(name), val); } -static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv) +static int cpsw_common_res_usage_state(struct cpsw_priv *priv) { - char name[32]; - u32 slave_port; + u32 i; + u32 usage_count = 0; + + if (!priv->data.dual_emac) + return 0; + + for (i = 0; i < priv->data.slaves; i++) + if (priv->slaves[i].open_stat) + usage_count++; + + return usage_count; +} + +static inline int cpsw_tx_packet_submit(struct net_device *ndev, + struct cpsw_priv *priv, struct sk_buff *skb) +{ + if (!priv->data.dual_emac) + return cpdma_chan_submit(priv->txch, skb, skb->data, + skb->len, 0); + + if (ndev == cpsw_get_slave_ndev(priv, 0)) + return cpdma_chan_submit(priv->txch, skb, skb->data, + skb->len, 1); + else + return cpdma_chan_submit(priv->txch, skb, skb->data, + skb->len, 2); +} + +static inline void cpsw_add_dual_emac_def_ale_entries( + struct cpsw_priv *priv, struct cpsw_slave *slave, + u32 slave_port) +{ + u32 port_mask = 1 << slave_port | 1 << priv->host_port; - sprintf(name, "slave-%d", 
slave->slave_num); + if (priv->version == CPSW_VERSION_1) + slave_write(slave, slave->port_vlan, CPSW1_PORT_VLAN); + else + slave_write(slave, slave->port_vlan, CPSW2_PORT_VLAN); + cpsw_ale_add_vlan(priv->ale, slave->port_vlan, port_mask, + port_mask, port_mask, 0); + cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast, + port_mask, ALE_VLAN, slave->port_vlan, 0); + cpsw_ale_add_ucast(priv->ale, priv->mac_addr, + priv->host_port, ALE_VLAN, slave->port_vlan); +} + +static void soft_reset_slave(struct cpsw_slave *slave) +{ + char name[32]; + snprintf(name, sizeof(name), "slave-%d", slave->slave_num); soft_reset(name, &slave->sliver->soft_reset); +} + +static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv) +{ + u32 slave_port; + + soft_reset_slave(slave); /* setup priority mapping */ __raw_writel(RX_PRIORITY_MAPPING, &slave->sliver->rx_pri_map); @@ -576,6 +1080,8 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv) slave_write(slave, TX_PRIORITY_MAPPING, CPSW1_TX_PRI_MAP); break; case CPSW_VERSION_2: + case CPSW_VERSION_3: + case CPSW_VERSION_4: slave_write(slave, TX_PRIORITY_MAPPING, CPSW2_TX_PRI_MAP); break; } @@ -588,11 +1094,14 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv) slave_port = cpsw_get_slave_port(priv, slave->slave_num); - cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast, - 1 << slave_port, 0, ALE_MCAST_FWD_2); + if (priv->data.dual_emac) + cpsw_add_dual_emac_def_ale_entries(priv, slave, slave_port); + else + cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast, + 1 << slave_port, 0, 0, ALE_MCAST_FWD_2); slave->phy = phy_connect(priv->ndev, slave->data->phy_id, - &cpsw_adjust_link, 0, slave->data->phy_if); + &cpsw_adjust_link, slave->data->phy_if); if (IS_ERR(slave->phy)) { dev_err(priv->dev, "phy %s not found on slave %d\n", slave->data->phy_id, slave->slave_num); @@ -601,17 +1110,51 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv) dev_info(priv->dev, "phy found : id is : 0x%x\n", slave->phy->phy_id); phy_start(slave->phy); + + /* Configure GMII_SEL register */ + cpsw_phy_sel(&priv->pdev->dev, slave->phy->interface, + slave->slave_num); } } +static inline void cpsw_add_default_vlan(struct cpsw_priv *priv) +{ + const int vlan = priv->data.default_vlan; + const int port = priv->host_port; + u32 reg; + int i; + + reg = (priv->version == CPSW_VERSION_1) ? CPSW1_PORT_VLAN : + CPSW2_PORT_VLAN; + + writel(vlan, &priv->host_port_regs->port_vlan); + + for (i = 0; i < priv->data.slaves; i++) + slave_write(priv->slaves + i, vlan, reg); + + cpsw_ale_add_vlan(priv->ale, vlan, ALE_ALL_PORTS << port, + ALE_ALL_PORTS << port, ALE_ALL_PORTS << port, + (ALE_PORT_1 | ALE_PORT_2) << port); +} + static void cpsw_init_host_port(struct cpsw_priv *priv) { + u32 control_reg; + u32 fifo_mode; + /* soft reset the controller and initialize ale */ soft_reset("cpsw", &priv->regs->soft_reset); cpsw_ale_start(priv->ale); /* switch to vlan unaware mode */ - cpsw_ale_control_set(priv->ale, 0, ALE_VLAN_AWARE, 0); + cpsw_ale_control_set(priv->ale, priv->host_port, ALE_VLAN_AWARE, + CPSW_ALE_VLAN_AWARE); + control_reg = readl(&priv->regs->control); + control_reg |= CPSW_VLAN_AWARE; + writel(control_reg, &priv->regs->control); + fifo_mode = (priv->data.dual_emac) ? 
CPSW_FIFO_DUAL_MAC_MODE : + CPSW_FIFO_NORMAL_MODE; + writel(fifo_mode, &priv->host_port_regs->tx_in_ctl); /* setup host port priority mapping */ __raw_writel(CPDMA_TX_PRIORITY_MAP, @@ -621,18 +1164,38 @@ static void cpsw_init_host_port(struct cpsw_priv *priv) cpsw_ale_control_set(priv->ale, priv->host_port, ALE_PORT_STATE, ALE_PORT_STATE_FORWARD); - cpsw_ale_add_ucast(priv->ale, priv->mac_addr, priv->host_port, 0); - cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast, - 1 << priv->host_port, 0, ALE_MCAST_FWD_2); + if (!priv->data.dual_emac) { + cpsw_ale_add_ucast(priv->ale, priv->mac_addr, priv->host_port, + 0, 0); + cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast, + 1 << priv->host_port, 0, 0, ALE_MCAST_FWD_2); + } +} + +static void cpsw_slave_stop(struct cpsw_slave *slave, struct cpsw_priv *priv) +{ + u32 slave_port; + + slave_port = cpsw_get_slave_port(priv, slave->slave_num); + + if (!slave->phy) + return; + phy_stop(slave->phy); + phy_disconnect(slave->phy); + slave->phy = NULL; + cpsw_ale_control_set(priv->ale, slave_port, + ALE_PORT_STATE, ALE_PORT_STATE_DISABLE); } static int cpsw_ndo_open(struct net_device *ndev) { struct cpsw_priv *priv = netdev_priv(ndev); + struct cpsw_priv *prim_cpsw; int i, ret; u32 reg; - cpsw_intr_disable(priv); + if (!cpsw_common_res_usage_state(priv)) + cpsw_intr_disable(priv); netif_carrier_off(ndev); pm_runtime_get_sync(&priv->pdev->dev); @@ -644,53 +1207,91 @@ static int cpsw_ndo_open(struct net_device *ndev) CPSW_RTL_VERSION(reg)); /* initialize host and slave ports */ - cpsw_init_host_port(priv); + if (!cpsw_common_res_usage_state(priv)) + cpsw_init_host_port(priv); for_each_slave(priv, cpsw_slave_open, priv); - /* setup tx dma to fixed prio and zero offset */ - cpdma_control_set(priv->dma, CPDMA_TX_PRIO_FIXED, 1); - cpdma_control_set(priv->dma, CPDMA_RX_BUFFER_OFFSET, 0); - - /* disable priority elevation and enable statistics on all ports */ - __raw_writel(0, &priv->regs->ptype); + /* Add default VLAN */ + if (!priv->data.dual_emac) + cpsw_add_default_vlan(priv); + else + cpsw_ale_add_vlan(priv->ale, priv->data.default_vlan, + ALE_ALL_PORTS << priv->host_port, + ALE_ALL_PORTS << priv->host_port, 0, 0); + + if (!cpsw_common_res_usage_state(priv)) { + /* setup tx dma to fixed prio and zero offset */ + cpdma_control_set(priv->dma, CPDMA_TX_PRIO_FIXED, 1); + cpdma_control_set(priv->dma, CPDMA_RX_BUFFER_OFFSET, 0); + + /* disable priority elevation */ + __raw_writel(0, &priv->regs->ptype); + + /* enable statistics collection only on all ports */ + __raw_writel(0x7, &priv->regs->stat_port_en); + + if (WARN_ON(!priv->data.rx_descs)) + priv->data.rx_descs = 128; + + for (i = 0; i < priv->data.rx_descs; i++) { + struct sk_buff *skb; + + ret = -ENOMEM; + skb = __netdev_alloc_skb_ip_align(priv->ndev, + priv->rx_packet_max, GFP_KERNEL); + if (!skb) + goto err_cleanup; + ret = cpdma_chan_submit(priv->rxch, skb, skb->data, + skb_tailroom(skb), 0); + if (ret < 0) { + kfree_skb(skb); + goto err_cleanup; + } + } + /* continue even if we didn't manage to submit all + * receive descs + */ + cpsw_info(priv, ifup, "submitted %d rx descriptors\n", i); - /* enable statistics collection only on the host port */ - __raw_writel(0x7, &priv->regs->stat_port_en); + if (cpts_register(&priv->pdev->dev, priv->cpts, + priv->data.cpts_clock_mult, + priv->data.cpts_clock_shift)) + dev_err(priv->dev, "error registering cpts device\n"); - if (WARN_ON(!priv->data.rx_descs)) - priv->data.rx_descs = 128; + } - for (i = 0; i < priv->data.rx_descs; i++) { - struct sk_buff *skb; + /* 
Enable Interrupt pacing if configured */ + if (priv->coal_intvl != 0) { + struct ethtool_coalesce coal; - ret = -ENOMEM; - skb = netdev_alloc_skb_ip_align(priv->ndev, - priv->rx_packet_max); - if (!skb) - break; - ret = cpdma_chan_submit(priv->rxch, skb, skb->data, - skb_tailroom(skb), GFP_KERNEL); - if (WARN_ON(ret < 0)) - break; + coal.rx_coalesce_usecs = (priv->coal_intvl << 4); + cpsw_set_coalesce(ndev, &coal); } - /* continue even if we didn't manage to submit all receive descs */ - cpsw_info(priv, ifup, "submitted %d rx descriptors\n", i); + napi_enable(&priv->napi); cpdma_ctlr_start(priv->dma); cpsw_intr_enable(priv); - napi_enable(&priv->napi); - cpdma_ctlr_eoi(priv->dma); + cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX); + cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX); + + prim_cpsw = cpsw_get_slave_priv(priv, 0); + if (prim_cpsw->irq_enabled == false) { + if ((priv == prim_cpsw) || !netif_running(prim_cpsw->ndev)) { + prim_cpsw->irq_enabled = true; + cpsw_enable_irq(prim_cpsw); + } + } + if (priv->data.dual_emac) + priv->slaves[priv->emac_port].open_stat = true; return 0; -} -static void cpsw_slave_stop(struct cpsw_slave *slave, struct cpsw_priv *priv) -{ - if (!slave->phy) - return; - phy_stop(slave->phy); - phy_disconnect(slave->phy); - slave->phy = NULL; +err_cleanup: + cpdma_ctlr_stop(priv->dma); + for_each_slave(priv, cpsw_slave_stop, priv); + pm_runtime_put_sync(&priv->pdev->dev); + netif_carrier_off(priv->ndev); + return ret; } static int cpsw_ndo_stop(struct net_device *ndev) @@ -701,12 +1302,18 @@ static int cpsw_ndo_stop(struct net_device *ndev) netif_stop_queue(priv->ndev); napi_disable(&priv->napi); netif_carrier_off(priv->ndev); - cpsw_intr_disable(priv); - cpdma_ctlr_int_ctrl(priv->dma, false); - cpdma_ctlr_stop(priv->dma); - cpsw_ale_stop(priv->ale); + + if (cpsw_common_res_usage_state(priv) <= 1) { + cpts_unregister(priv->cpts); + cpsw_intr_disable(priv); + cpdma_ctlr_int_ctrl(priv->dma, false); + cpdma_ctlr_stop(priv->dma); + cpsw_ale_stop(priv->ale); + } for_each_slave(priv, cpsw_slave_stop, priv); pm_runtime_put_sync(&priv->pdev->dev); + if (priv->data.dual_emac) + priv->slaves[priv->emac_port].open_stat = false; return 0; } @@ -720,60 +1327,43 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb, if (skb_padto(skb, CPSW_MIN_PACKET_SIZE)) { cpsw_err(priv, tx_err, "packet pad failed\n"); - priv->stats.tx_dropped++; + ndev->stats.tx_dropped++; return NETDEV_TX_OK; } - if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP && priv->cpts.tx_enable) + if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP && + priv->cpts->tx_enable) skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; skb_tx_timestamp(skb); - ret = cpdma_chan_submit(priv->txch, skb, skb->data, - skb->len, GFP_KERNEL); + ret = cpsw_tx_packet_submit(ndev, priv, skb); if (unlikely(ret != 0)) { cpsw_err(priv, tx_err, "desc submit failed\n"); goto fail; } + /* If there is no more tx desc left free then we need to + * tell the kernel to stop sending us tx frames. + */ + if (unlikely(!cpdma_check_free_tx_desc(priv->txch))) + netif_stop_queue(ndev); + return NETDEV_TX_OK; fail: - priv->stats.tx_dropped++; + ndev->stats.tx_dropped++; netif_stop_queue(ndev); return NETDEV_TX_BUSY; } -static void cpsw_ndo_change_rx_flags(struct net_device *ndev, int flags) -{ - /* - * The switch cannot operate in promiscuous mode without substantial - * headache. For promiscuous mode to work, we would need to put the - * ALE in bypass mode and route all traffic to the host port. 
- * Subsequently, the host will need to operate as a "bridge", learn, - * and flood as needed. For now, we simply complain here and - * do nothing about it :-) - */ - if ((flags & IFF_PROMISC) && (ndev->flags & IFF_PROMISC)) - dev_err(&ndev->dev, "promiscuity ignored!\n"); - - /* - * The switch cannot filter multicast traffic unless it is configured - * in "VLAN Aware" mode. Unfortunately, VLAN awareness requires a - * whole bunch of additional logic that this driver does not implement - * at present. - */ - if ((flags & IFF_ALLMULTI) && !(ndev->flags & IFF_ALLMULTI)) - dev_err(&ndev->dev, "multicast traffic cannot be filtered!\n"); -} - #ifdef CONFIG_TI_CPTS static void cpsw_hwtstamp_v1(struct cpsw_priv *priv) { - struct cpsw_slave *slave = &priv->slaves[priv->data.cpts_active_slave]; + struct cpsw_slave *slave = &priv->slaves[priv->data.active_slave]; u32 ts_en, seq_id; - if (!priv->cpts.tx_enable && !priv->cpts.rx_enable) { + if (!priv->cpts->tx_enable && !priv->cpts->rx_enable) { slave_write(slave, 0, CPSW1_TS_CTL); return; } @@ -781,10 +1371,10 @@ static void cpsw_hwtstamp_v1(struct cpsw_priv *priv) seq_id = (30 << CPSW_V1_SEQ_ID_OFS_SHIFT) | ETH_P_1588; ts_en = EVENT_MSG_BITS << CPSW_V1_MSG_TYPE_OFS; - if (priv->cpts.tx_enable) + if (priv->cpts->tx_enable) ts_en |= CPSW_V1_TS_TX_EN; - if (priv->cpts.rx_enable) + if (priv->cpts->rx_enable) ts_en |= CPSW_V1_TS_RX_EN; slave_write(slave, ts_en, CPSW1_TS_CTL); @@ -793,17 +1383,36 @@ static void cpsw_hwtstamp_v1(struct cpsw_priv *priv) static void cpsw_hwtstamp_v2(struct cpsw_priv *priv) { - struct cpsw_slave *slave = &priv->slaves[priv->data.cpts_active_slave]; + struct cpsw_slave *slave; u32 ctrl, mtype; + if (priv->data.dual_emac) + slave = &priv->slaves[priv->emac_port]; + else + slave = &priv->slaves[priv->data.active_slave]; + ctrl = slave_read(slave, CPSW2_CONTROL); - ctrl &= ~CTRL_ALL_TS_MASK; + switch (priv->version) { + case CPSW_VERSION_2: + ctrl &= ~CTRL_V2_ALL_TS_MASK; + + if (priv->cpts->tx_enable) + ctrl |= CTRL_V2_TX_TS_BITS; - if (priv->cpts.tx_enable) - ctrl |= CTRL_TX_TS_BITS; + if (priv->cpts->rx_enable) + ctrl |= CTRL_V2_RX_TS_BITS; + break; + case CPSW_VERSION_3: + default: + ctrl &= ~CTRL_V3_ALL_TS_MASK; + + if (priv->cpts->tx_enable) + ctrl |= CTRL_V3_TX_TS_BITS; - if (priv->cpts.rx_enable) - ctrl |= CTRL_RX_TS_BITS; + if (priv->cpts->rx_enable) + ctrl |= CTRL_V3_RX_TS_BITS; + break; + } mtype = (30 << TS_SEQ_ID_OFFSET_SHIFT) | EVENT_MSG_BITS; @@ -812,12 +1421,17 @@ static void cpsw_hwtstamp_v2(struct cpsw_priv *priv) __raw_writel(ETH_P_1588, &priv->regs->ts_ltype); } -static int cpsw_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr) +static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) { struct cpsw_priv *priv = netdev_priv(dev); - struct cpts *cpts = &priv->cpts; + struct cpts *cpts = priv->cpts; struct hwtstamp_config cfg; + if (priv->version != CPSW_VERSION_1 && + priv->version != CPSW_VERSION_2 && + priv->version != CPSW_VERSION_3) + return -EOPNOTSUPP; + if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg))) return -EFAULT; @@ -825,16 +1439,8 @@ static int cpsw_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr) if (cfg.flags) return -EINVAL; - switch (cfg.tx_type) { - case HWTSTAMP_TX_OFF: - cpts->tx_enable = 0; - break; - case HWTSTAMP_TX_ON: - cpts->tx_enable = 1; - break; - default: + if (cfg.tx_type != HWTSTAMP_TX_OFF && cfg.tx_type != HWTSTAMP_TX_ON) return -ERANGE; - } switch (cfg.rx_filter) { case HWTSTAMP_FILTER_NONE: @@ -861,32 +1467,64 @@ static int 
cpsw_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr) return -ERANGE; } + cpts->tx_enable = cfg.tx_type == HWTSTAMP_TX_ON; + switch (priv->version) { case CPSW_VERSION_1: cpsw_hwtstamp_v1(priv); break; case CPSW_VERSION_2: + case CPSW_VERSION_3: cpsw_hwtstamp_v2(priv); break; default: - return -ENOTSUPP; + WARN_ON(1); } return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0; } +static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr) +{ + struct cpsw_priv *priv = netdev_priv(dev); + struct cpts *cpts = priv->cpts; + struct hwtstamp_config cfg; + + if (priv->version != CPSW_VERSION_1 && + priv->version != CPSW_VERSION_2 && + priv->version != CPSW_VERSION_3) + return -EOPNOTSUPP; + + cfg.flags = 0; + cfg.tx_type = cpts->tx_enable ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF; + cfg.rx_filter = (cpts->rx_enable ? + HWTSTAMP_FILTER_PTP_V2_EVENT : HWTSTAMP_FILTER_NONE); + + return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0; +} + #endif /*CONFIG_TI_CPTS*/ static int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd) { + struct cpsw_priv *priv = netdev_priv(dev); + int slave_no = cpsw_slave_index(priv); + if (!netif_running(dev)) return -EINVAL; + switch (cmd) { #ifdef CONFIG_TI_CPTS - if (cmd == SIOCSHWTSTAMP) - return cpsw_hwtstamp_ioctl(dev, req); + case SIOCSHWTSTAMP: + return cpsw_hwtstamp_set(dev, req); + case SIOCGHWTSTAMP: + return cpsw_hwtstamp_get(dev, req); #endif - return -ENOTSUPP; + } + + if (!priv->slaves[slave_no].phy) + return -EOPNOTSUPP; + return phy_mii_ioctl(priv->slaves[slave_no].phy, req, cmd); } static void cpsw_ndo_tx_timeout(struct net_device *ndev) @@ -894,20 +1532,43 @@ static void cpsw_ndo_tx_timeout(struct net_device *ndev) struct cpsw_priv *priv = netdev_priv(ndev); cpsw_err(priv, tx_err, "transmit timeout, restarting dma\n"); - priv->stats.tx_errors++; + ndev->stats.tx_errors++; cpsw_intr_disable(priv); cpdma_ctlr_int_ctrl(priv->dma, false); cpdma_chan_stop(priv->txch); cpdma_chan_start(priv->txch); cpdma_ctlr_int_ctrl(priv->dma, true); cpsw_intr_enable(priv); - cpdma_ctlr_eoi(priv->dma); + cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX); + cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX); + } -static struct net_device_stats *cpsw_ndo_get_stats(struct net_device *ndev) +static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p) { struct cpsw_priv *priv = netdev_priv(ndev); - return &priv->stats; + struct sockaddr *addr = (struct sockaddr *)p; + int flags = 0; + u16 vid = 0; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + if (priv->data.dual_emac) { + vid = priv->slaves[priv->emac_port].port_vlan; + flags = ALE_VLAN; + } + + cpsw_ale_del_ucast(priv->ale, priv->mac_addr, priv->host_port, + flags, vid); + cpsw_ale_add_ucast(priv->ale, addr->sa_data, priv->host_port, + flags, vid); + + memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN); + memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN); + for_each_slave(priv, cpsw_set_slave_mac, priv); + + return 0; } #ifdef CONFIG_NET_POLL_CONTROLLER @@ -920,33 +1581,104 @@ static void cpsw_ndo_poll_controller(struct net_device *ndev) cpsw_interrupt(ndev->irq, priv); cpdma_ctlr_int_ctrl(priv->dma, true); cpsw_intr_enable(priv); - cpdma_ctlr_eoi(priv->dma); + cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX); + cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX); + } #endif +static inline int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv, + unsigned short vid) +{ + int ret; + + ret = cpsw_ale_add_vlan(priv->ale, vid, + ALE_ALL_PORTS << priv->host_port, + 0, ALE_ALL_PORTS << 
priv->host_port, + (ALE_PORT_1 | ALE_PORT_2) << priv->host_port); + if (ret != 0) + return ret; + + ret = cpsw_ale_add_ucast(priv->ale, priv->mac_addr, + priv->host_port, ALE_VLAN, vid); + if (ret != 0) + goto clean_vid; + + ret = cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast, + ALE_ALL_PORTS << priv->host_port, + ALE_VLAN, vid, 0); + if (ret != 0) + goto clean_vlan_ucast; + return 0; + +clean_vlan_ucast: + cpsw_ale_del_ucast(priv->ale, priv->mac_addr, + priv->host_port, ALE_VLAN, vid); +clean_vid: + cpsw_ale_del_vlan(priv->ale, vid, 0); + return ret; +} + +static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev, + __be16 proto, u16 vid) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + + if (vid == priv->data.default_vlan) + return 0; + + dev_info(priv->dev, "Adding vlanid %d to vlan filter\n", vid); + return cpsw_add_vlan_ale_entry(priv, vid); +} + +static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev, + __be16 proto, u16 vid) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + int ret; + + if (vid == priv->data.default_vlan) + return 0; + + dev_info(priv->dev, "removing vlanid %d from vlan filter\n", vid); + ret = cpsw_ale_del_vlan(priv->ale, vid, 0); + if (ret != 0) + return ret; + + ret = cpsw_ale_del_ucast(priv->ale, priv->mac_addr, + priv->host_port, ALE_VLAN, vid); + if (ret != 0) + return ret; + + return cpsw_ale_del_mcast(priv->ale, priv->ndev->broadcast, + 0, ALE_VLAN, vid); +} + static const struct net_device_ops cpsw_netdev_ops = { .ndo_open = cpsw_ndo_open, .ndo_stop = cpsw_ndo_stop, .ndo_start_xmit = cpsw_ndo_start_xmit, - .ndo_change_rx_flags = cpsw_ndo_change_rx_flags, + .ndo_set_mac_address = cpsw_ndo_set_mac_address, .ndo_do_ioctl = cpsw_ndo_ioctl, .ndo_validate_addr = eth_validate_addr, .ndo_change_mtu = eth_change_mtu, .ndo_tx_timeout = cpsw_ndo_tx_timeout, - .ndo_get_stats = cpsw_ndo_get_stats, .ndo_set_rx_mode = cpsw_ndo_set_rx_mode, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = cpsw_ndo_poll_controller, #endif + .ndo_vlan_rx_add_vid = cpsw_ndo_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = cpsw_ndo_vlan_rx_kill_vid, }; static void cpsw_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *info) { struct cpsw_priv *priv = netdev_priv(ndev); - strcpy(info->driver, "TI CPSW Driver v1.0"); - strcpy(info->version, "1.0"); - strcpy(info->bus_info, priv->pdev->name); + + strlcpy(info->driver, "TI CPSW Driver v1.0", sizeof(info->driver)); + strlcpy(info->version, "1.0", sizeof(info->version)); + strlcpy(info->bus_info, priv->pdev->name, sizeof(info->bus_info)); } static u32 cpsw_get_msglevel(struct net_device *ndev) @@ -974,7 +1706,7 @@ static int cpsw_get_ts_info(struct net_device *ndev, SOF_TIMESTAMPING_RX_SOFTWARE | SOF_TIMESTAMPING_SOFTWARE | SOF_TIMESTAMPING_RAW_HARDWARE; - info->phc_index = priv->cpts.phc_index; + info->phc_index = priv->cpts->phc_index; info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON); @@ -993,12 +1725,67 @@ static int cpsw_get_ts_info(struct net_device *ndev, return 0; } +static int cpsw_get_settings(struct net_device *ndev, + struct ethtool_cmd *ecmd) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + int slave_no = cpsw_slave_index(priv); + + if (priv->slaves[slave_no].phy) + return phy_ethtool_gset(priv->slaves[slave_no].phy, ecmd); + else + return -EOPNOTSUPP; +} + +static int cpsw_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + int slave_no = cpsw_slave_index(priv); + + if (priv->slaves[slave_no].phy) + return 
phy_ethtool_sset(priv->slaves[slave_no].phy, ecmd); + else + return -EOPNOTSUPP; +} + +static void cpsw_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + int slave_no = cpsw_slave_index(priv); + + wol->supported = 0; + wol->wolopts = 0; + + if (priv->slaves[slave_no].phy) + phy_ethtool_get_wol(priv->slaves[slave_no].phy, wol); +} + +static int cpsw_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + int slave_no = cpsw_slave_index(priv); + + if (priv->slaves[slave_no].phy) + return phy_ethtool_set_wol(priv->slaves[slave_no].phy, wol); + else + return -EOPNOTSUPP; +} + static const struct ethtool_ops cpsw_ethtool_ops = { .get_drvinfo = cpsw_get_drvinfo, .get_msglevel = cpsw_get_msglevel, .set_msglevel = cpsw_set_msglevel, .get_link = ethtool_op_get_link, .get_ts_info = cpsw_get_ts_info, + .get_settings = cpsw_get_settings, + .set_settings = cpsw_set_settings, + .get_coalesce = cpsw_get_coalesce, + .set_coalesce = cpsw_set_coalesce, + .get_sset_count = cpsw_get_sset_count, + .get_strings = cpsw_get_strings, + .get_ethtool_stats = cpsw_get_ethtool_stats, + .get_wol = cpsw_get_wol, + .set_wol = cpsw_set_wol, }; static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv, @@ -1011,6 +1798,7 @@ static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv, slave->data = data; slave->regs = regs + slave_reg_ofs; slave->sliver = regs + sliver_reg_ofs; + slave->port_vlan = data->dual_emac_res_vlan; } static int cpsw_probe_dt(struct cpsw_platform_data *data, @@ -1025,83 +1813,77 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data, return -EINVAL; if (of_property_read_u32(node, "slaves", &prop)) { - pr_err("Missing slaves property in the DT.\n"); + dev_err(&pdev->dev, "Missing slaves property in the DT.\n"); return -EINVAL; } data->slaves = prop; - if (of_property_read_u32(node, "cpts_active_slave", &prop)) { - pr_err("Missing cpts_active_slave property in the DT.\n"); - ret = -EINVAL; - goto error_ret; + if (of_property_read_u32(node, "active_slave", &prop)) { + dev_err(&pdev->dev, "Missing active_slave property in the DT.\n"); + return -EINVAL; } - data->cpts_active_slave = prop; + data->active_slave = prop; if (of_property_read_u32(node, "cpts_clock_mult", &prop)) { - pr_err("Missing cpts_clock_mult property in the DT.\n"); - ret = -EINVAL; - goto error_ret; + dev_err(&pdev->dev, "Missing cpts_clock_mult property in the DT.\n"); + return -EINVAL; } data->cpts_clock_mult = prop; if (of_property_read_u32(node, "cpts_clock_shift", &prop)) { - pr_err("Missing cpts_clock_shift property in the DT.\n"); - ret = -EINVAL; - goto error_ret; + dev_err(&pdev->dev, "Missing cpts_clock_shift property in the DT.\n"); + return -EINVAL; } data->cpts_clock_shift = prop; - data->slave_data = kzalloc(sizeof(struct cpsw_slave_data) * - data->slaves, GFP_KERNEL); - if (!data->slave_data) { - pr_err("Could not allocate slave memory.\n"); - return -EINVAL; - } + data->slave_data = devm_kzalloc(&pdev->dev, data->slaves + * sizeof(struct cpsw_slave_data), + GFP_KERNEL); + if (!data->slave_data) + return -ENOMEM; if (of_property_read_u32(node, "cpdma_channels", &prop)) { - pr_err("Missing cpdma_channels property in the DT.\n"); - ret = -EINVAL; - goto error_ret; + dev_err(&pdev->dev, "Missing cpdma_channels property in the DT.\n"); + return -EINVAL; } data->channels = prop; if (of_property_read_u32(node, "ale_entries", &prop)) { - pr_err("Missing ale_entries 
property in the DT.\n"); - ret = -EINVAL; - goto error_ret; + dev_err(&pdev->dev, "Missing ale_entries property in the DT.\n"); + return -EINVAL; } data->ale_entries = prop; if (of_property_read_u32(node, "bd_ram_size", &prop)) { - pr_err("Missing bd_ram_size property in the DT.\n"); - ret = -EINVAL; - goto error_ret; + dev_err(&pdev->dev, "Missing bd_ram_size property in the DT.\n"); + return -EINVAL; } data->bd_ram_size = prop; if (of_property_read_u32(node, "rx_descs", &prop)) { - pr_err("Missing rx_descs property in the DT.\n"); - ret = -EINVAL; - goto error_ret; + dev_err(&pdev->dev, "Missing rx_descs property in the DT.\n"); + return -EINVAL; } data->rx_descs = prop; if (of_property_read_u32(node, "mac_control", &prop)) { - pr_err("Missing mac_control property in the DT.\n"); - ret = -EINVAL; - goto error_ret; + dev_err(&pdev->dev, "Missing mac_control property in the DT.\n"); + return -EINVAL; } data->mac_control = prop; + if (of_property_read_bool(node, "dual_emac")) + data->dual_emac = 1; + /* * Populate all the child nodes here... */ ret = of_platform_populate(node, NULL, NULL, &pdev->dev); /* We do not want to force this, as in some cases may not have child */ if (ret) - pr_warn("Doesn't have any child node\n"); + dev_warn(&pdev->dev, "Doesn't have any child node\n"); - for_each_node_by_name(slave_node, "slave") { + for_each_child_of_node(node, slave_node) { struct cpsw_slave_data *slave_data = data->slave_data + i; const void *mac_addr = NULL; u32 phyid; @@ -1110,15 +1892,23 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data, struct device_node *mdio_node; struct platform_device *mdio; + /* This is no slave child node, continue */ + if (strcmp(slave_node->name, "slave")) + continue; + parp = of_get_property(slave_node, "phy_id", &lenp); - if ((parp == NULL) && (lenp != (sizeof(void *) * 2))) { - pr_err("Missing slave[%d] phy_id property\n", i); - ret = -EINVAL; - goto error_ret; + if ((parp == NULL) || (lenp != (sizeof(void *) * 2))) { + dev_err(&pdev->dev, "Missing slave[%d] phy_id property\n", i); + return -EINVAL; } mdio_node = of_find_node_by_phandle(be32_to_cpup(parp)); phyid = be32_to_cpup(parp+1); mdio = of_find_device_by_node(mdio_node); + of_node_put(mdio_node); + if (!mdio) { + pr_err("Missing mdio platform device\n"); + return -EINVAL; + } snprintf(slave_data->phy_id, sizeof(slave_data->phy_id), PHY_ID_FMT, mdio->name, phyid); @@ -1126,31 +1916,123 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data, if (mac_addr) memcpy(slave_data->mac_addr, mac_addr, ETH_ALEN); + slave_data->phy_if = of_get_phy_mode(slave_node); + if (slave_data->phy_if < 0) { + dev_err(&pdev->dev, "Missing or malformed slave[%d] phy-mode property\n", + i); + return slave_data->phy_if; + } + + if (data->dual_emac) { + if (of_property_read_u32(slave_node, "dual_emac_res_vlan", + &prop)) { + dev_err(&pdev->dev, "Missing dual_emac_res_vlan in DT.\n"); + slave_data->dual_emac_res_vlan = i+1; + dev_err(&pdev->dev, "Using %d as Reserved VLAN for %d slave\n", + slave_data->dual_emac_res_vlan, i); + } else { + slave_data->dual_emac_res_vlan = prop; + } + } + i++; + if (i == data->slaves) + break; } return 0; +} + +static int cpsw_probe_dual_emac(struct platform_device *pdev, + struct cpsw_priv *priv) +{ + struct cpsw_platform_data *data = &priv->data; + struct net_device *ndev; + struct cpsw_priv *priv_sl2; + int ret = 0, i; + + ndev = alloc_etherdev(sizeof(struct cpsw_priv)); + if (!ndev) { + dev_err(&pdev->dev, "cpsw: error allocating net_device\n"); + return -ENOMEM; + } + + priv_sl2 = 
netdev_priv(ndev); + spin_lock_init(&priv_sl2->lock); + priv_sl2->data = *data; + priv_sl2->pdev = pdev; + priv_sl2->ndev = ndev; + priv_sl2->dev = &ndev->dev; + priv_sl2->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG); + priv_sl2->rx_packet_max = max(rx_packet_max, 128); + + if (is_valid_ether_addr(data->slave_data[1].mac_addr)) { + memcpy(priv_sl2->mac_addr, data->slave_data[1].mac_addr, + ETH_ALEN); + dev_info(&pdev->dev, "cpsw: Detected MACID = %pM\n", priv_sl2->mac_addr); + } else { + random_ether_addr(priv_sl2->mac_addr); + dev_info(&pdev->dev, "cpsw: Random MACID = %pM\n", priv_sl2->mac_addr); + } + memcpy(ndev->dev_addr, priv_sl2->mac_addr, ETH_ALEN); + + priv_sl2->slaves = priv->slaves; + priv_sl2->clk = priv->clk; + + priv_sl2->coal_intvl = 0; + priv_sl2->bus_freq_mhz = priv->bus_freq_mhz; + + priv_sl2->regs = priv->regs; + priv_sl2->host_port = priv->host_port; + priv_sl2->host_port_regs = priv->host_port_regs; + priv_sl2->wr_regs = priv->wr_regs; + priv_sl2->hw_stats = priv->hw_stats; + priv_sl2->dma = priv->dma; + priv_sl2->txch = priv->txch; + priv_sl2->rxch = priv->rxch; + priv_sl2->ale = priv->ale; + priv_sl2->emac_port = 1; + priv->slaves[1].ndev = ndev; + priv_sl2->cpts = priv->cpts; + priv_sl2->version = priv->version; + + for (i = 0; i < priv->num_irqs; i++) { + priv_sl2->irqs_table[i] = priv->irqs_table[i]; + priv_sl2->num_irqs = priv->num_irqs; + } + ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; + + ndev->netdev_ops = &cpsw_netdev_ops; + ndev->ethtool_ops = &cpsw_ethtool_ops; + netif_napi_add(ndev, &priv_sl2->napi, cpsw_poll, CPSW_POLL_WEIGHT); + + /* register the network device */ + SET_NETDEV_DEV(ndev, &pdev->dev); + ret = register_netdev(ndev); + if (ret) { + dev_err(&pdev->dev, "cpsw: error registering net device\n"); + free_netdev(ndev); + ret = -ENODEV; + } -error_ret: - kfree(data->slave_data); return ret; } static int cpsw_probe(struct platform_device *pdev) { - struct cpsw_platform_data *data = pdev->dev.platform_data; + struct cpsw_platform_data *data; struct net_device *ndev; struct cpsw_priv *priv; struct cpdma_params dma_params; struct cpsw_ale_params ale_params; - void __iomem *ss_regs, *wr_regs; - struct resource *res; + void __iomem *ss_regs; + struct resource *res, *ss_res; u32 slave_offset, sliver_offset, slave_size; int ret = 0, i, k = 0; ndev = alloc_etherdev(sizeof(struct cpsw_priv)); if (!ndev) { - pr_err("error allocating net_device\n"); + dev_err(&pdev->dev, "error allocating net_device\n"); return -ENOMEM; } @@ -1162,85 +2044,82 @@ static int cpsw_probe(struct platform_device *pdev) priv->dev = &ndev->dev; priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG); priv->rx_packet_max = max(rx_packet_max, 128); + priv->cpts = devm_kzalloc(&pdev->dev, sizeof(struct cpts), GFP_KERNEL); + priv->irq_enabled = true; + if (!priv->cpts) { + dev_err(&pdev->dev, "error allocating cpts\n"); + goto clean_ndev_ret; + } /* * This may be required here for child devices. 
*/ pm_runtime_enable(&pdev->dev); + /* Select default pin state */ + pinctrl_pm_select_default_state(&pdev->dev); + if (cpsw_probe_dt(&priv->data, pdev)) { - pr_err("cpsw: platform data missing\n"); + dev_err(&pdev->dev, "cpsw: platform data missing\n"); ret = -ENODEV; - goto clean_ndev_ret; + goto clean_runtime_disable_ret; } data = &priv->data; if (is_valid_ether_addr(data->slave_data[0].mac_addr)) { memcpy(priv->mac_addr, data->slave_data[0].mac_addr, ETH_ALEN); - pr_info("Detected MACID = %pM", priv->mac_addr); + dev_info(&pdev->dev, "Detected MACID = %pM\n", priv->mac_addr); } else { eth_random_addr(priv->mac_addr); - pr_info("Random MACID = %pM", priv->mac_addr); + dev_info(&pdev->dev, "Random MACID = %pM\n", priv->mac_addr); } memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN); - priv->slaves = kzalloc(sizeof(struct cpsw_slave) * data->slaves, - GFP_KERNEL); + priv->slaves = devm_kzalloc(&pdev->dev, + sizeof(struct cpsw_slave) * data->slaves, + GFP_KERNEL); if (!priv->slaves) { - ret = -EBUSY; - goto clean_ndev_ret; + ret = -ENOMEM; + goto clean_runtime_disable_ret; } for (i = 0; i < data->slaves; i++) priv->slaves[i].slave_num = i; - priv->clk = clk_get(&pdev->dev, "fck"); + priv->slaves[0].ndev = ndev; + priv->emac_port = 0; + + priv->clk = devm_clk_get(&pdev->dev, "fck"); if (IS_ERR(priv->clk)) { - dev_err(&pdev->dev, "fck is not found\n"); + dev_err(priv->dev, "fck is not found\n"); ret = -ENODEV; - goto clean_slave_ret; - } - - priv->cpsw_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!priv->cpsw_res) { - dev_err(priv->dev, "error getting i/o resource\n"); - ret = -ENOENT; - goto clean_clk_ret; - } - if (!request_mem_region(priv->cpsw_res->start, - resource_size(priv->cpsw_res), ndev->name)) { - dev_err(priv->dev, "failed request i/o region\n"); - ret = -ENXIO; - goto clean_clk_ret; + goto clean_runtime_disable_ret; } - ss_regs = ioremap(priv->cpsw_res->start, resource_size(priv->cpsw_res)); - if (!ss_regs) { - dev_err(priv->dev, "unable to map i/o region\n"); - goto clean_cpsw_iores_ret; + priv->coal_intvl = 0; + priv->bus_freq_mhz = clk_get_rate(priv->clk) / 1000000; + + ss_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + ss_regs = devm_ioremap_resource(&pdev->dev, ss_res); + if (IS_ERR(ss_regs)) { + ret = PTR_ERR(ss_regs); + goto clean_runtime_disable_ret; } priv->regs = ss_regs; - priv->version = __raw_readl(&priv->regs->id_ver); priv->host_port = HOST_PORT_NUM; - priv->cpsw_wr_res = platform_get_resource(pdev, IORESOURCE_MEM, 1); - if (!priv->cpsw_wr_res) { - dev_err(priv->dev, "error getting i/o resource\n"); - ret = -ENOENT; - goto clean_iomap_ret; - } - if (!request_mem_region(priv->cpsw_wr_res->start, - resource_size(priv->cpsw_wr_res), ndev->name)) { - dev_err(priv->dev, "failed request i/o region\n"); - ret = -ENXIO; - goto clean_iomap_ret; - } - wr_regs = ioremap(priv->cpsw_wr_res->start, - resource_size(priv->cpsw_wr_res)); - if (!wr_regs) { - dev_err(priv->dev, "unable to map i/o region\n"); - goto clean_cpsw_wr_iores_ret; + /* Need to enable clocks with runtime PM api to access module + * registers + */ + pm_runtime_get_sync(&pdev->dev); + priv->version = readl(&priv->regs->id_ver); + pm_runtime_put_sync(&pdev->dev); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + priv->wr_regs = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(priv->wr_regs)) { + ret = PTR_ERR(priv->wr_regs); + goto clean_runtime_disable_ret; } - priv->wr_regs = wr_regs; memset(&dma_params, 0, sizeof(dma_params)); memset(&ale_params, 0, sizeof(ale_params)); @@ 
-1248,7 +2127,8 @@ static int cpsw_probe(struct platform_device *pdev) switch (priv->version) { case CPSW_VERSION_1: priv->host_port_regs = ss_regs + CPSW1_HOST_PORT_OFFSET; - priv->cpts.reg = ss_regs + CPSW1_CPTS_OFFSET; + priv->cpts->reg = ss_regs + CPSW1_CPTS_OFFSET; + priv->hw_stats = ss_regs + CPSW1_HW_STATS; dma_params.dmaregs = ss_regs + CPSW1_CPDMA_OFFSET; dma_params.txhdp = ss_regs + CPSW1_STATERAM_OFFSET; ale_params.ale_regs = ss_regs + CPSW1_ALE_OFFSET; @@ -1258,8 +2138,11 @@ static int cpsw_probe(struct platform_device *pdev) dma_params.desc_mem_phys = 0; break; case CPSW_VERSION_2: + case CPSW_VERSION_3: + case CPSW_VERSION_4: priv->host_port_regs = ss_regs + CPSW2_HOST_PORT_OFFSET; - priv->cpts.reg = ss_regs + CPSW2_CPTS_OFFSET; + priv->cpts->reg = ss_regs + CPSW2_CPTS_OFFSET; + priv->hw_stats = ss_regs + CPSW2_HW_STATS; dma_params.dmaregs = ss_regs + CPSW2_CPDMA_OFFSET; dma_params.txhdp = ss_regs + CPSW2_STATERAM_OFFSET; ale_params.ale_regs = ss_regs + CPSW2_ALE_OFFSET; @@ -1267,12 +2150,12 @@ static int cpsw_probe(struct platform_device *pdev) slave_size = CPSW2_SLAVE_SIZE; sliver_offset = CPSW2_SLIVER_OFFSET; dma_params.desc_mem_phys = - (u32 __force) priv->cpsw_res->start + CPSW2_BD_OFFSET; + (u32 __force) ss_res->start + CPSW2_BD_OFFSET; break; default: dev_err(priv->dev, "unknown version 0x%08x\n", priv->version); ret = -ENODEV; - goto clean_cpsw_wr_iores_ret; + goto clean_runtime_disable_ret; } for (i = 0; i < priv->data.slaves; i++) { struct cpsw_slave *slave = &priv->slaves[i]; @@ -1300,7 +2183,7 @@ static int cpsw_probe(struct platform_device *pdev) if (!priv->dma) { dev_err(priv->dev, "error initializing dma\n"); ret = -ENOMEM; - goto clean_wr_iomap_ret; + goto clean_runtime_disable_ret; } priv->txch = cpdma_chan_create(priv->dma, tx_chan_num(0), @@ -1335,21 +2218,21 @@ static int cpsw_probe(struct platform_device *pdev) while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k))) { for (i = res->start; i <= res->end; i++) { - if (request_irq(i, cpsw_interrupt, IRQF_DISABLED, - dev_name(&pdev->dev), priv)) { + if (devm_request_irq(&pdev->dev, i, cpsw_interrupt, 0, + dev_name(&pdev->dev), priv)) { dev_err(priv->dev, "error attaching irq\n"); goto clean_ale_ret; } priv->irqs_table[k] = i; - priv->num_irqs = k; + priv->num_irqs = k + 1; } k++; } - ndev->flags |= IFF_ALLMULTI; /* see cpsw_ndo_change_rx_flags() */ + ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; ndev->netdev_ops = &cpsw_netdev_ops; - SET_ETHTOOL_OPS(ndev, &cpsw_ethtool_ops); + ndev->ethtool_ops = &cpsw_ethtool_ops; netif_napi_add(ndev, &priv->napi, cpsw_poll, CPSW_POLL_WEIGHT); /* register the network device */ @@ -1358,43 +2241,32 @@ static int cpsw_probe(struct platform_device *pdev) if (ret) { dev_err(priv->dev, "error registering net device\n"); ret = -ENODEV; - goto clean_irq_ret; + goto clean_ale_ret; } - if (cpts_register(&pdev->dev, &priv->cpts, - data->cpts_clock_mult, data->cpts_clock_shift)) - dev_err(priv->dev, "error registering cpts device\n"); + cpsw_notice(priv, probe, "initialized device (regs %pa, irq %d)\n", + &ss_res->start, ndev->irq); - cpsw_notice(priv, probe, "initialized device (regs %x, irq %d)\n", - priv->cpsw_res->start, ndev->irq); + if (priv->data.dual_emac) { + ret = cpsw_probe_dual_emac(pdev, priv); + if (ret) { + cpsw_err(priv, probe, "error probe slave 2 emac interface\n"); + goto clean_ale_ret; + } + } return 0; -clean_irq_ret: - free_irq(ndev->irq, priv); clean_ale_ret: cpsw_ale_destroy(priv->ale); clean_dma_ret: cpdma_chan_destroy(priv->txch); 
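/*
 * Illustrative sketch, not part of the patch: how the VLAN-aware ALE API
 * added in cpsw_ale.c/cpsw_ale.h further below can reserve a VLAN for one
 * dual EMAC slave. The helper name and the untag/mcast masks are
 * assumptions; only the cpsw_ale_*() signatures, the ALE_* flags and the
 * priv/slave fields are taken from this patch.
 */
static int example_dual_emac_res_vlan_setup(struct cpsw_priv *priv,
					    struct cpsw_slave *slave)
{
	/* host port 0 plus the external port of this slave (1 or 2) */
	int port_mask = ALE_PORT_HOST | BIT(slave->slave_num + 1);
	u16 vid = slave->port_vlan;	/* dual_emac_res_vlan from the DT */
	int ret;

	/* VLAN entry restricted to these two ports, untagged on egress */
	ret = cpsw_ale_add_vlan(priv->ale, vid, port_mask, port_mask,
				port_mask, 0);
	if (ret)
		return ret;

	/* unicast entry for the slave MAC, scoped to the reserved VLAN */
	return cpsw_ale_add_ucast(priv->ale, priv->mac_addr,
				  priv->host_port, ALE_VLAN, vid);
}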
cpdma_chan_destroy(priv->rxch); cpdma_ctlr_destroy(priv->dma); -clean_wr_iomap_ret: - iounmap(priv->wr_regs); -clean_cpsw_wr_iores_ret: - release_mem_region(priv->cpsw_wr_res->start, - resource_size(priv->cpsw_wr_res)); -clean_iomap_ret: - iounmap(priv->regs); -clean_cpsw_iores_ret: - release_mem_region(priv->cpsw_res->start, - resource_size(priv->cpsw_res)); -clean_clk_ret: - clk_put(priv->clk); -clean_slave_ret: +clean_runtime_disable_ret: pm_runtime_disable(&pdev->dev); - kfree(priv->slaves); clean_ndev_ret: - free_netdev(ndev); + free_netdev(priv->ndev); return ret; } @@ -1403,26 +2275,18 @@ static int cpsw_remove(struct platform_device *pdev) struct net_device *ndev = platform_get_drvdata(pdev); struct cpsw_priv *priv = netdev_priv(ndev); - pr_info("removing device"); - platform_set_drvdata(pdev, NULL); + if (priv->data.dual_emac) + unregister_netdev(cpsw_get_slave_ndev(priv, 1)); + unregister_netdev(ndev); - cpts_unregister(&priv->cpts); - free_irq(ndev->irq, priv); cpsw_ale_destroy(priv->ale); cpdma_chan_destroy(priv->txch); cpdma_chan_destroy(priv->rxch); cpdma_ctlr_destroy(priv->dma); - iounmap(priv->regs); - release_mem_region(priv->cpsw_res->start, - resource_size(priv->cpsw_res)); - iounmap(priv->wr_regs); - release_mem_region(priv->cpsw_wr_res->start, - resource_size(priv->cpsw_wr_res)); pm_runtime_disable(&pdev->dev); - clk_put(priv->clk); - kfree(priv->slaves); + if (priv->data.dual_emac) + free_netdev(cpsw_get_slave_ndev(priv, 1)); free_netdev(ndev); - return 0; } @@ -1430,11 +2294,18 @@ static int cpsw_suspend(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct net_device *ndev = platform_get_drvdata(pdev); + struct cpsw_priv *priv = netdev_priv(ndev); if (netif_running(ndev)) cpsw_ndo_stop(ndev); + + for_each_slave(priv, soft_reset_slave); + pm_runtime_put_sync(&pdev->dev); + /* Select sleep pin state */ + pinctrl_pm_select_sleep_state(&pdev->dev); + return 0; } @@ -1444,6 +2315,10 @@ static int cpsw_resume(struct device *dev) struct net_device *ndev = platform_get_drvdata(pdev); pm_runtime_get_sync(&pdev->dev); + + /* Select default pin state */ + pinctrl_pm_select_default_state(&pdev->dev); + if (netif_running(ndev)) cpsw_ndo_open(ndev); return 0; @@ -1458,13 +2333,14 @@ static const struct of_device_id cpsw_of_mtable[] = { { .compatible = "ti,cpsw", }, { /* sentinel */ }, }; +MODULE_DEVICE_TABLE(of, cpsw_of_mtable); static struct platform_driver cpsw_driver = { .driver = { .name = "cpsw", .owner = THIS_MODULE, .pm = &cpsw_pm_ops, - .of_match_table = of_match_ptr(cpsw_of_mtable), + .of_match_table = cpsw_of_mtable, }, .probe = cpsw_probe, .remove = cpsw_remove, diff --git a/drivers/net/ethernet/ti/cpsw.h b/drivers/net/ethernet/ti/cpsw.h new file mode 100644 index 00000000000..574f49da693 --- /dev/null +++ b/drivers/net/ethernet/ti/cpsw.h @@ -0,0 +1,44 @@ +/* Texas Instruments Ethernet Switch Driver + * + * Copyright (C) 2013 Texas Instruments + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ +#ifndef __CPSW_H__ +#define __CPSW_H__ + +#include <linux/if_ether.h> + +struct cpsw_slave_data { + char phy_id[MII_BUS_ID_SIZE]; + int phy_if; + u8 mac_addr[ETH_ALEN]; + u16 dual_emac_res_vlan; /* Reserved VLAN for DualEMAC */ +}; + +struct cpsw_platform_data { + struct cpsw_slave_data *slave_data; + u32 ss_reg_ofs; /* Subsystem control register offset */ + u32 channels; /* number of cpdma channels (symmetric) */ + u32 slaves; /* number of slave cpgmac ports */ + u32 active_slave; /* time stamping, ethtool and SIOCGMIIPHY slave */ + u32 cpts_clock_mult; /* convert input clock ticks to nanoseconds */ + u32 cpts_clock_shift; /* convert input clock ticks to nanoseconds */ + u32 ale_entries; /* ale table size */ + u32 bd_ram_size; /*buffer descriptor ram size */ + u32 rx_descs; /* Number of Rx Descriptios */ + u32 mac_control; /* Mac control register */ + u16 default_vlan; /* Def VLAN for ALE lookup in VLAN aware mode*/ + bool dual_emac; /* Enable Dual EMAC mode */ +}; + +void cpsw_phy_sel(struct device *dev, phy_interface_t phy_mode, int slave); + +#endif /* __CPSW_H__ */ diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c index 0e9ccc2cf91..7f893069c41 100644 --- a/drivers/net/ethernet/ti/cpsw_ale.c +++ b/drivers/net/ethernet/ti/cpsw_ale.c @@ -148,7 +148,7 @@ static int cpsw_ale_write(struct cpsw_ale *ale, int idx, u32 *ale_entry) return idx; } -static int cpsw_ale_match_addr(struct cpsw_ale *ale, u8 *addr) +int cpsw_ale_match_addr(struct cpsw_ale *ale, u8 *addr, u16 vid) { u32 ale_entry[ALE_ENTRY_WORDS]; int type, idx; @@ -160,8 +160,26 @@ static int cpsw_ale_match_addr(struct cpsw_ale *ale, u8 *addr) type = cpsw_ale_get_entry_type(ale_entry); if (type != ALE_TYPE_ADDR && type != ALE_TYPE_VLAN_ADDR) continue; + if (cpsw_ale_get_vlan_id(ale_entry) != vid) + continue; cpsw_ale_get_addr(ale_entry, entry_addr); - if (memcmp(entry_addr, addr, 6) == 0) + if (ether_addr_equal(entry_addr, addr)) + return idx; + } + return -ENOENT; +} + +int cpsw_ale_match_vlan(struct cpsw_ale *ale, u16 vid) +{ + u32 ale_entry[ALE_ENTRY_WORDS]; + int type, idx; + + for (idx = 0; idx < ale->params.ale_entries; idx++) { + cpsw_ale_read(ale, idx, ale_entry); + type = cpsw_ale_get_entry_type(ale_entry); + if (type != ALE_TYPE_VLAN) + continue; + if (cpsw_ale_get_vlan_id(ale_entry) == vid) return idx; } return -ENOENT; @@ -274,19 +292,32 @@ int cpsw_ale_flush(struct cpsw_ale *ale, int port_mask) return 0; } -int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port, int flags) +static inline void cpsw_ale_set_vlan_entry_type(u32 *ale_entry, + int flags, u16 vid) +{ + if (flags & ALE_VLAN) { + cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_VLAN_ADDR); + cpsw_ale_set_vlan_id(ale_entry, vid); + } else { + cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_ADDR); + } +} + +int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port, + int flags, u16 vid) { u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0}; int idx; - cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_ADDR); + cpsw_ale_set_vlan_entry_type(ale_entry, flags, vid); + cpsw_ale_set_addr(ale_entry, addr); cpsw_ale_set_ucast_type(ale_entry, ALE_UCAST_PERSISTANT); cpsw_ale_set_secure(ale_entry, (flags & ALE_SECURE) ? 1 : 0); cpsw_ale_set_blocked(ale_entry, (flags & ALE_BLOCKED) ? 1 : 0); cpsw_ale_set_port_num(ale_entry, port); - idx = cpsw_ale_match_addr(ale, addr); + idx = cpsw_ale_match_addr(ale, addr, (flags & ALE_VLAN) ? 
vid : 0); if (idx < 0) idx = cpsw_ale_match_free(ale); if (idx < 0) @@ -298,12 +329,13 @@ int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port, int flags) return 0; } -int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port) +int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port, + int flags, u16 vid) { u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0}; int idx; - idx = cpsw_ale_match_addr(ale, addr); + idx = cpsw_ale_match_addr(ale, addr, (flags & ALE_VLAN) ? vid : 0); if (idx < 0) return -ENOENT; @@ -313,18 +345,19 @@ int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port) } int cpsw_ale_add_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask, - int super, int mcast_state) + int flags, u16 vid, int mcast_state) { u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0}; int idx, mask; - idx = cpsw_ale_match_addr(ale, addr); + idx = cpsw_ale_match_addr(ale, addr, (flags & ALE_VLAN) ? vid : 0); if (idx >= 0) cpsw_ale_read(ale, idx, ale_entry); - cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_ADDR); + cpsw_ale_set_vlan_entry_type(ale_entry, flags, vid); + cpsw_ale_set_addr(ale_entry, addr); - cpsw_ale_set_super(ale_entry, super); + cpsw_ale_set_super(ale_entry, (flags & ALE_BLOCKED) ? 1 : 0); cpsw_ale_set_mcast_state(ale_entry, mcast_state); mask = cpsw_ale_get_port_mask(ale_entry); @@ -342,12 +375,13 @@ int cpsw_ale_add_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask, return 0; } -int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask) +int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask, + int flags, u16 vid) { u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0}; int idx; - idx = cpsw_ale_match_addr(ale, addr); + idx = cpsw_ale_match_addr(ale, addr, (flags & ALE_VLAN) ? vid : 0); if (idx < 0) return -EINVAL; @@ -362,6 +396,55 @@ int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask) return 0; } +int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port, int untag, + int reg_mcast, int unreg_mcast) +{ + u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0}; + int idx; + + idx = cpsw_ale_match_vlan(ale, vid); + if (idx >= 0) + cpsw_ale_read(ale, idx, ale_entry); + + cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_VLAN); + cpsw_ale_set_vlan_id(ale_entry, vid); + + cpsw_ale_set_vlan_untag_force(ale_entry, untag); + cpsw_ale_set_vlan_reg_mcast(ale_entry, reg_mcast); + cpsw_ale_set_vlan_unreg_mcast(ale_entry, unreg_mcast); + cpsw_ale_set_vlan_member_list(ale_entry, port); + + if (idx < 0) + idx = cpsw_ale_match_free(ale); + if (idx < 0) + idx = cpsw_ale_find_ageable(ale); + if (idx < 0) + return -ENOMEM; + + cpsw_ale_write(ale, idx, ale_entry); + return 0; +} + +int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port_mask) +{ + u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0}; + int idx; + + idx = cpsw_ale_match_vlan(ale, vid); + if (idx < 0) + return -ENOENT; + + cpsw_ale_read(ale, idx, ale_entry); + + if (port_mask) + cpsw_ale_set_vlan_member_list(ale_entry, port_mask); + else + cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE); + + cpsw_ale_write(ale, idx, ale_entry); + return 0; +} + struct ale_control_info { const char *name; int offset, port_offset; @@ -394,6 +477,14 @@ static const struct ale_control_info ale_controls[ALE_NUM_CONTROLS] = { .port_shift = 0, .bits = 1, }, + [ALE_P0_UNI_FLOOD] = { + .name = "port0_unicast_flood", + .offset = ALE_CONTROL, + .port_offset = 0, + .shift = 8, + .port_shift = 0, + .bits = 1, + }, [ALE_VLAN_NOLEARN] = { .name = "vlan_nolearn", .offset = ALE_CONTROL, @@ -490,6 +581,14 @@ static const struct 
ale_control_info ale_controls[ALE_NUM_CONTROLS] = { .port_shift = 0, .bits = 1, }, + [ALE_PORT_NO_SA_UPDATE] = { + .name = "no_source_update", + .offset = ALE_PORTCTL, + .port_offset = 4, + .shift = 5, + .port_shift = 0, + .bits = 1, + }, [ALE_PORT_MCAST_LIMIT] = { .name = "mcast_limit", .offset = ALE_PORTCTL, diff --git a/drivers/net/ethernet/ti/cpsw_ale.h b/drivers/net/ethernet/ti/cpsw_ale.h index 2bd09cbce52..de409c33b25 100644 --- a/drivers/net/ethernet/ti/cpsw_ale.h +++ b/drivers/net/ethernet/ti/cpsw_ale.h @@ -34,6 +34,7 @@ enum cpsw_ale_control { ALE_ENABLE, ALE_CLEAR, ALE_AGEOUT, + ALE_P0_UNI_FLOOD, ALE_VLAN_NOLEARN, ALE_NO_PORT_VLAN, ALE_OUI_DENY, @@ -47,6 +48,7 @@ enum cpsw_ale_control { ALE_PORT_DROP_UNTAGGED, ALE_PORT_DROP_UNKNOWN_VLAN, ALE_PORT_NOLEARN, + ALE_PORT_NO_SA_UPDATE, ALE_PORT_UNKNOWN_VLAN_MEMBER, ALE_PORT_UNKNOWN_MCAST_FLOOD, ALE_PORT_UNKNOWN_REG_MCAST_FLOOD, @@ -64,8 +66,14 @@ enum cpsw_ale_port_state { }; /* ALE unicast entry flags - passed into cpsw_ale_add_ucast() */ -#define ALE_SECURE 1 -#define ALE_BLOCKED 2 +#define ALE_SECURE BIT(0) +#define ALE_BLOCKED BIT(1) +#define ALE_SUPER BIT(2) +#define ALE_VLAN BIT(3) + +#define ALE_PORT_HOST BIT(0) +#define ALE_PORT_1 BIT(1) +#define ALE_PORT_2 BIT(2) #define ALE_MCAST_FWD 0 #define ALE_MCAST_BLOCK_LEARN_FWD 1 @@ -81,11 +89,17 @@ void cpsw_ale_stop(struct cpsw_ale *ale); int cpsw_ale_set_ageout(struct cpsw_ale *ale, int ageout); int cpsw_ale_flush(struct cpsw_ale *ale, int port_mask); int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask); -int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port, int flags); -int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port); +int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port, + int flags, u16 vid); +int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port, + int flags, u16 vid); int cpsw_ale_add_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask, - int super, int mcast_state); -int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask); + int flags, u16 vid, int mcast_state); +int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask, + int flags, u16 vid); +int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port, int untag, + int reg_mcast, int unreg_mcast); +int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port); int cpsw_ale_control_get(struct cpsw_ale *ale, int port, int control); int cpsw_ale_control_set(struct cpsw_ale *ale, int port, diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c index 463597f919f..6b56f85951e 100644 --- a/drivers/net/ethernet/ti/cpts.c +++ b/drivers/net/ethernet/ti/cpts.c @@ -26,15 +26,13 @@ #include <linux/time.h> #include <linux/uaccess.h> #include <linux/workqueue.h> +#include <linux/if_ether.h> +#include <linux/if_vlan.h> #include "cpts.h" #ifdef CONFIG_TI_CPTS -static struct sock_filter ptp_filter[] = { - PTP_FILTER -}; - #define cpts_read32(c, r) __raw_readl(&c->reg->r) #define cpts_write32(c, v, r) __raw_writel(v, &c->reg->r) @@ -94,7 +92,7 @@ static int cpts_fifo_read(struct cpts *cpts, int match) case CPTS_EV_HW: break; default: - pr_err("cpts: unkown event type\n"); + pr_err("cpts: unknown event type\n"); break; } if (type == match) @@ -217,6 +215,7 @@ static struct ptp_clock_info cpts_info = { .name = "CTPS timer", .max_adj = 1000000, .n_ext_ts = 0, + .n_pins = 0, .pps = 0, .adjfreq = cpts_ptp_adjfreq, .adjtime = cpts_ptp_adjtime, @@ -237,13 +236,11 @@ static void cpts_overflow_check(struct work_struct *work) 
schedule_delayed_work(&cpts->overflow_work, CPTS_OVERFLOW_PERIOD); } -#define CPTS_REF_CLOCK_NAME "cpsw_cpts_rft_clk" - -static void cpts_clk_init(struct cpts *cpts) +static void cpts_clk_init(struct device *dev, struct cpts *cpts) { - cpts->refclk = clk_get(NULL, CPTS_REF_CLOCK_NAME); + cpts->refclk = devm_clk_get(dev, "cpts"); if (IS_ERR(cpts->refclk)) { - pr_err("Failed to clk_get %s\n", CPTS_REF_CLOCK_NAME); + dev_err(dev, "Failed to get cpts refclk\n"); cpts->refclk = NULL; return; } @@ -253,7 +250,6 @@ static void cpts_clk_init(struct cpts *cpts) static void cpts_clk_release(struct cpts *cpts) { clk_disable(cpts->refclk); - clk_put(cpts->refclk); } static int cpts_match(struct sk_buff *skb, unsigned int ptp_class, @@ -300,7 +296,7 @@ static u64 cpts_find_ts(struct cpts *cpts, struct sk_buff *skb, int ev_type) u64 ns = 0; struct cpts_event *event; struct list_head *this, *next; - unsigned int class = sk_run_filter(skb, ptp_filter); + unsigned int class = ptp_classify_raw(skb); unsigned long flags; u16 seqid; u8 mtype; @@ -371,10 +367,6 @@ int cpts_register(struct device *dev, struct cpts *cpts, int err, i; unsigned long flags; - if (ptp_filter_init(ptp_filter, ARRAY_SIZE(ptp_filter))) { - pr_err("cpts: bad ptp filter\n"); - return -EINVAL; - } cpts->info = cpts_info; cpts->clock = ptp_clock_register(&cpts->info, dev); if (IS_ERR(cpts->clock)) { @@ -395,7 +387,7 @@ int cpts_register(struct device *dev, struct cpts *cpts, for (i = 0; i < CPTS_MAX_EVENTS; i++) list_add(&cpts->pool_data[i].list, &cpts->pool); - cpts_clk_init(cpts); + cpts_clk_init(dev, cpts); cpts_write32(cpts, CPTS_EN, control); cpts_write32(cpts, TS_PEND_EN, int_enable); diff --git a/drivers/net/ethernet/ti/cpts.h b/drivers/net/ethernet/ti/cpts.h index fe993cdd7e2..1a581ef7eee 100644 --- a/drivers/net/ethernet/ti/cpts.h +++ b/drivers/net/ethernet/ti/cpts.h @@ -127,8 +127,8 @@ struct cpts { }; #ifdef CONFIG_TI_CPTS -extern void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb); -extern void cpts_tx_timestamp(struct cpts *cpts, struct sk_buff *skb); +void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb); +void cpts_tx_timestamp(struct cpts *cpts, struct sk_buff *skb); #else static inline void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb) { @@ -138,8 +138,7 @@ static inline void cpts_tx_timestamp(struct cpts *cpts, struct sk_buff *skb) } #endif -extern int cpts_register(struct device *dev, struct cpts *cpts, - u32 mult, u32 shift); -extern void cpts_unregister(struct cpts *cpts); +int cpts_register(struct device *dev, struct cpts *cpts, u32 mult, u32 shift); +void cpts_unregister(struct cpts *cpts); #endif diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c index 49956730cd8..4a000f6dd6f 100644 --- a/drivers/net/ethernet/ti/davinci_cpdma.c +++ b/drivers/net/ethernet/ti/davinci_cpdma.c @@ -20,6 +20,7 @@ #include <linux/err.h> #include <linux/dma-mapping.h> #include <linux/io.h> +#include <linux/delay.h> #include "davinci_cpdma.h" @@ -60,6 +61,10 @@ #define CPDMA_DESC_EOQ BIT(28) #define CPDMA_DESC_TD_COMPLETE BIT(27) #define CPDMA_DESC_PASS_CRC BIT(26) +#define CPDMA_DESC_TO_PORT_EN BIT(20) +#define CPDMA_TO_PORT_SHIFT 16 +#define CPDMA_DESC_PORT_MASK (BIT(18) | BIT(17) | BIT(16)) +#define CPDMA_DESC_CRC_LEN 4 #define CPDMA_TEARDOWN_VALUE 0xfffffffc @@ -76,7 +81,7 @@ struct cpdma_desc { }; struct cpdma_desc_pool { - u32 phys; + phys_addr_t phys; u32 hw_addr; void __iomem *iomap; /* ioremap map */ void *cpumap; /* dma_alloc map */ @@ -105,13 +110,13 @@ 
struct cpdma_ctlr { }; struct cpdma_chan { + struct cpdma_desc __iomem *head, *tail; + void __iomem *hdp, *cp, *rxfree; enum cpdma_state state; struct cpdma_ctlr *ctlr; int chan_num; spinlock_t lock; - struct cpdma_desc __iomem *head, *tail; int count; - void __iomem *hdp, *cp, *rxfree; u32 mask; cpdma_handler_fn handler; enum dma_data_direction dir; @@ -132,6 +137,14 @@ struct cpdma_chan { #define chan_write(chan, fld, v) __raw_writel(v, (chan)->fld) #define desc_write(desc, fld, v) __raw_writel((u32)(v), &(desc)->fld) +#define cpdma_desc_to_port(chan, mode, directed) \ + do { \ + if (!is_rx_chan(chan) && ((directed == 1) || \ + (directed == 2))) \ + mode |= (CPDMA_DESC_TO_PORT_EN | \ + (directed << CPDMA_TO_PORT_SHIFT)); \ + } while (0) + /* * Utility constructs for a cpdma descriptor pool. Some devices (e.g. davinci * emac) have dedicated on-chip memory for these descriptors. Some other @@ -145,9 +158,9 @@ cpdma_desc_pool_create(struct device *dev, u32 phys, u32 hw_addr, int bitmap_size; struct cpdma_desc_pool *pool; - pool = kzalloc(sizeof(*pool), GFP_KERNEL); + pool = devm_kzalloc(dev, sizeof(*pool), GFP_KERNEL); if (!pool) - return NULL; + goto fail; spin_lock_init(&pool->lock); @@ -157,7 +170,7 @@ cpdma_desc_pool_create(struct device *dev, u32 phys, u32 hw_addr, pool->num_desc = size / pool->desc_size; bitmap_size = (pool->num_desc / BITS_PER_LONG) * sizeof(long); - pool->bitmap = kzalloc(bitmap_size, GFP_KERNEL); + pool->bitmap = devm_kzalloc(dev, bitmap_size, GFP_KERNEL); if (!pool->bitmap) goto fail; @@ -174,10 +187,7 @@ cpdma_desc_pool_create(struct device *dev, u32 phys, u32 hw_addr, if (pool->iomap) return pool; - fail: - kfree(pool->bitmap); - kfree(pool); return NULL; } @@ -190,7 +200,6 @@ static void cpdma_desc_pool_destroy(struct cpdma_desc_pool *pool) spin_lock_irqsave(&pool->lock, flags); WARN_ON(pool->used_desc); - kfree(pool->bitmap); if (pool->cpumap) { dma_free_coherent(pool->dev, pool->mem_size, pool->cpumap, pool->phys); @@ -198,7 +207,6 @@ static void cpdma_desc_pool_destroy(struct cpdma_desc_pool *pool) iounmap(pool->iomap); } spin_unlock_irqrestore(&pool->lock, flags); - kfree(pool); } static inline dma_addr_t desc_phys(struct cpdma_desc_pool *pool, @@ -206,8 +214,7 @@ static inline dma_addr_t desc_phys(struct cpdma_desc_pool *pool, { if (!desc) return 0; - return pool->hw_addr + (__force dma_addr_t)desc - - (__force dma_addr_t)pool->iomap; + return pool->hw_addr + (__force long)desc - (__force long)pool->iomap; } static inline struct cpdma_desc __iomem * @@ -217,17 +224,27 @@ desc_from_phys(struct cpdma_desc_pool *pool, dma_addr_t dma) } static struct cpdma_desc __iomem * -cpdma_desc_alloc(struct cpdma_desc_pool *pool, int num_desc) +cpdma_desc_alloc(struct cpdma_desc_pool *pool, int num_desc, bool is_rx) { unsigned long flags; int index; + int desc_start; + int desc_end; struct cpdma_desc __iomem *desc = NULL; spin_lock_irqsave(&pool->lock, flags); - index = bitmap_find_next_zero_area(pool->bitmap, pool->num_desc, 0, - num_desc, 0); - if (index < pool->num_desc) { + if (is_rx) { + desc_start = 0; + desc_end = pool->num_desc/2; + } else { + desc_start = pool->num_desc/2; + desc_end = pool->num_desc; + } + + index = bitmap_find_next_zero_area(pool->bitmap, + desc_end, desc_start, num_desc, 0); + if (index < desc_end) { bitmap_set(pool->bitmap, index, num_desc); desc = pool->iomap + pool->desc_size * index; pool->used_desc++; @@ -254,7 +271,7 @@ struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params) { struct cpdma_ctlr *ctlr; - ctlr = 
kzalloc(sizeof(*ctlr), GFP_KERNEL); + ctlr = devm_kzalloc(params->dev, sizeof(*ctlr), GFP_KERNEL); if (!ctlr) return NULL; @@ -268,10 +285,8 @@ struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params) ctlr->params.desc_hw_addr, ctlr->params.desc_mem_size, ctlr->params.desc_align); - if (!ctlr->pool) { - kfree(ctlr); + if (!ctlr->pool) return NULL; - } if (WARN_ON(ctlr->num_chan > CPDMA_MAX_CHANNELS)) ctlr->num_chan = CPDMA_MAX_CHANNELS; @@ -291,14 +306,16 @@ int cpdma_ctlr_start(struct cpdma_ctlr *ctlr) } if (ctlr->params.has_soft_reset) { - unsigned long timeout = jiffies + HZ/10; + unsigned timeout = 10 * 100; dma_reg_write(ctlr, CPDMA_SOFTRESET, 1); - while (time_before(jiffies, timeout)) { + while (timeout) { if (dma_reg_read(ctlr, CPDMA_SOFTRESET) == 0) break; + udelay(10); + timeout--; } - WARN_ON(!time_before(jiffies, timeout)); + WARN_ON(!timeout); } for (i = 0; i < ctlr->num_chan; i++) { @@ -331,7 +348,7 @@ int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr) int i; spin_lock_irqsave(&ctlr->lock, flags); - if (ctlr->state != CPDMA_STATE_ACTIVE) { + if (ctlr->state == CPDMA_STATE_TEARDOWN) { spin_unlock_irqrestore(&ctlr->lock, flags); return -EINVAL; } @@ -439,14 +456,11 @@ int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr) if (ctlr->state != CPDMA_STATE_IDLE) cpdma_ctlr_stop(ctlr); - for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) { - if (ctlr->channels[i]) - cpdma_chan_destroy(ctlr->channels[i]); - } + for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) + cpdma_chan_destroy(ctlr->channels[i]); cpdma_desc_pool_destroy(ctlr->pool); spin_unlock_irqrestore(&ctlr->lock, flags); - kfree(ctlr); return ret; } EXPORT_SYMBOL_GPL(cpdma_ctlr_destroy); @@ -473,31 +487,34 @@ int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable) spin_unlock_irqrestore(&ctlr->lock, flags); return 0; } +EXPORT_SYMBOL_GPL(cpdma_ctlr_int_ctrl); -void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr) +void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr, u32 value) { - dma_reg_write(ctlr, CPDMA_MACEOIVECTOR, 0); + dma_reg_write(ctlr, CPDMA_MACEOIVECTOR, value); } +EXPORT_SYMBOL_GPL(cpdma_ctlr_eoi); struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num, cpdma_handler_fn handler) { struct cpdma_chan *chan; - int ret, offset = (chan_num % CPDMA_MAX_CHANNELS) * 4; + int offset = (chan_num % CPDMA_MAX_CHANNELS) * 4; unsigned long flags; if (__chan_linear(chan_num) >= ctlr->num_chan) return NULL; - ret = -ENOMEM; - chan = kzalloc(sizeof(*chan), GFP_KERNEL); + chan = devm_kzalloc(ctlr->dev, sizeof(*chan), GFP_KERNEL); if (!chan) - goto err_chan_alloc; + return ERR_PTR(-ENOMEM); spin_lock_irqsave(&ctlr->lock, flags); - ret = -EBUSY; - if (ctlr->channels[chan_num]) - goto err_chan_busy; + if (ctlr->channels[chan_num]) { + spin_unlock_irqrestore(&ctlr->lock, flags); + devm_kfree(ctlr->dev, chan); + return ERR_PTR(-EBUSY); + } chan->ctlr = ctlr; chan->state = CPDMA_STATE_IDLE; @@ -527,12 +544,6 @@ struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num, ctlr->channels[chan_num] = chan; spin_unlock_irqrestore(&ctlr->lock, flags); return chan; - -err_chan_busy: - spin_unlock_irqrestore(&ctlr->lock, flags); - kfree(chan); -err_chan_alloc: - return ERR_PTR(ret); } EXPORT_SYMBOL_GPL(cpdma_chan_create); @@ -566,6 +577,7 @@ int cpdma_chan_get_stats(struct cpdma_chan *chan, spin_unlock_irqrestore(&chan->lock, flags); return 0; } +EXPORT_SYMBOL_GPL(cpdma_chan_get_stats); int cpdma_chan_dump(struct cpdma_chan *chan) { @@ -652,7 +664,7 @@ static void __cpdma_chan_submit(struct cpdma_chan *chan, } int 
cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data, - int len, gfp_t gfp_mask) + int len, int directed) { struct cpdma_ctlr *ctlr = chan->ctlr; struct cpdma_desc __iomem *desc; @@ -668,7 +680,7 @@ int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data, goto unlock_ret; } - desc = cpdma_desc_alloc(ctlr->pool, 1); + desc = cpdma_desc_alloc(ctlr->pool, 1, is_rx_chan(chan)); if (!desc) { chan->stats.desc_alloc_fail++; ret = -ENOMEM; @@ -681,7 +693,15 @@ int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data, } buffer = dma_map_single(ctlr->dev, data, len, chan->dir); + ret = dma_mapping_error(ctlr->dev, buffer); + if (ret) { + cpdma_desc_free(ctlr->pool, desc, 1); + ret = -EINVAL; + goto unlock_ret; + } + mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP; + cpdma_desc_to_port(chan, mode, directed); desc_write(desc, hw_next, 0); desc_write(desc, hw_buffer, buffer); @@ -704,6 +724,29 @@ unlock_ret: } EXPORT_SYMBOL_GPL(cpdma_chan_submit); +bool cpdma_check_free_tx_desc(struct cpdma_chan *chan) +{ + unsigned long flags; + int index; + bool ret; + struct cpdma_ctlr *ctlr = chan->ctlr; + struct cpdma_desc_pool *pool = ctlr->pool; + + spin_lock_irqsave(&pool->lock, flags); + + index = bitmap_find_next_zero_area(pool->bitmap, + pool->num_desc, pool->num_desc/2, 1, 0); + + if (index < pool->num_desc) + ret = true; + else + ret = false; + + spin_unlock_irqrestore(&pool->lock, flags); + return ret; +} +EXPORT_SYMBOL_GPL(cpdma_check_free_tx_desc); + static void __cpdma_chan_free(struct cpdma_chan *chan, struct cpdma_desc __iomem *desc, int outlen, int status) @@ -728,6 +771,7 @@ static int __cpdma_chan_process(struct cpdma_chan *chan) struct cpdma_ctlr *ctlr = chan->ctlr; struct cpdma_desc __iomem *desc; int status, outlen; + int cb_status = 0; struct cpdma_desc_pool *pool = ctlr->pool; dma_addr_t desc_dma; unsigned long flags; @@ -749,7 +793,12 @@ static int __cpdma_chan_process(struct cpdma_chan *chan) status = -EBUSY; goto unlock_ret; } - status = status & (CPDMA_DESC_EOQ | CPDMA_DESC_TD_COMPLETE); + + if (status & CPDMA_DESC_PASS_CRC) + outlen -= CPDMA_DESC_CRC_LEN; + + status = status & (CPDMA_DESC_EOQ | CPDMA_DESC_TD_COMPLETE | + CPDMA_DESC_PORT_MASK); chan->head = desc_from_phys(pool, desc_read(desc, hw_next)); chan_write(chan, cp, desc_dma); @@ -762,8 +811,12 @@ static int __cpdma_chan_process(struct cpdma_chan *chan) } spin_unlock_irqrestore(&chan->lock, flags); + if (unlikely(status & CPDMA_DESC_TD_COMPLETE)) + cb_status = -ENOSYS; + else + cb_status = status; - __cpdma_chan_free(chan, desc, outlen, status); + __cpdma_chan_free(chan, desc, outlen, cb_status); return status; unlock_ret: @@ -822,10 +875,10 @@ int cpdma_chan_stop(struct cpdma_chan *chan) struct cpdma_desc_pool *pool = ctlr->pool; unsigned long flags; int ret; - unsigned long timeout; + unsigned timeout; spin_lock_irqsave(&chan->lock, flags); - if (chan->state != CPDMA_STATE_ACTIVE) { + if (chan->state == CPDMA_STATE_TEARDOWN) { spin_unlock_irqrestore(&chan->lock, flags); return -EINVAL; } @@ -837,14 +890,15 @@ int cpdma_chan_stop(struct cpdma_chan *chan) dma_reg_write(ctlr, chan->td, chan_linear(chan)); /* wait for teardown complete */ - timeout = jiffies + HZ/10; /* 100 msec */ - while (time_before(jiffies, timeout)) { + timeout = 100 * 100; /* 100 ms */ + while (timeout) { u32 cp = chan_read(chan, cp); if ((cp & CPDMA_TEARDOWN_VALUE) == CPDMA_TEARDOWN_VALUE) break; - cpu_relax(); + udelay(10); + timeout--; } - WARN_ON(!time_before(jiffies, timeout)); + WARN_ON(!timeout); 
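/*
 * Illustrative sketch, not part of the patch: a transmit handler built on
 * the reworked cpdma API, where cpdma_chan_submit() takes a directed port
 * number instead of a gfp_t and cpdma_check_free_tx_desc() reports whether
 * the TX half of the descriptor pool still has room. struct example_priv
 * and its fields are assumptions modelled on the cpsw/emac private data.
 */
static netdev_tx_t example_ndo_start_xmit(struct sk_buff *skb,
					  struct net_device *ndev)
{
	struct example_priv *priv = netdev_priv(ndev);
	/* 0 lets the switch pick the egress port, 1 or 2 forces a slave */
	int directed = priv->dual_emac ? priv->emac_port + 1 : 0;

	if (cpdma_chan_submit(priv->txch, skb, skb->data, skb->len,
			      directed)) {
		ndev->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* stop the queue once no free TX descriptor is left */
	if (unlikely(!cpdma_check_free_tx_desc(priv->txch)))
		netif_stop_queue(ndev);

	return NETDEV_TX_OK;
}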
chan_write(chan, cp, CPDMA_TEARDOWN_VALUE); /* handle completed packets */ @@ -904,7 +958,7 @@ struct cpdma_control_info { #define ACCESS_RW (ACCESS_RO | ACCESS_WO) }; -struct cpdma_control_info controls[] = { +static struct cpdma_control_info controls[] = { [CPDMA_CMD_IDLE] = {CPDMA_DMACONTROL, 3, 1, ACCESS_WO}, [CPDMA_COPY_ERROR_FRAMES] = {CPDMA_DMACONTROL, 4, 1, ACCESS_RW}, [CPDMA_RX_OFF_LEN_UPDATE] = {CPDMA_DMACONTROL, 2, 1, ACCESS_RW}, @@ -984,3 +1038,6 @@ unlock_ret: spin_unlock_irqrestore(&ctlr->lock, flags); return ret; } +EXPORT_SYMBOL_GPL(cpdma_control_set); + +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/ti/davinci_cpdma.h b/drivers/net/ethernet/ti/davinci_cpdma.h index afa19a0c0d8..86dee487f2f 100644 --- a/drivers/net/ethernet/ti/davinci_cpdma.h +++ b/drivers/net/ethernet/ti/davinci_cpdma.h @@ -24,6 +24,13 @@ #define __chan_linear(chan_num) ((chan_num) & (CPDMA_MAX_CHANNELS - 1)) #define chan_linear(chan) __chan_linear((chan)->chan_num) +#define CPDMA_RX_SOURCE_PORT(__status__) ((__status__ >> 16) & 0x7) + +#define CPDMA_EOI_RX_THRESH 0x0 +#define CPDMA_EOI_RX 0x1 +#define CPDMA_EOI_TX 0x2 +#define CPDMA_EOI_MISC 0x3 + struct cpdma_params { struct device *dev; void __iomem *dmaregs; @@ -82,12 +89,13 @@ int cpdma_chan_dump(struct cpdma_chan *chan); int cpdma_chan_get_stats(struct cpdma_chan *chan, struct cpdma_chan_stats *stats); int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data, - int len, gfp_t gfp_mask); + int len, int directed); int cpdma_chan_process(struct cpdma_chan *chan, int quota); int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable); -void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr); +void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr, u32 value); int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable); +bool cpdma_check_free_tx_desc(struct cpdma_chan *chan); enum cpdma_control { CPDMA_CMD_IDLE, /* write-only */ diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c index 2a3e2c56bc6..35a139e9a83 100644 --- a/drivers/net/ethernet/ti/davinci_emac.c +++ b/drivers/net/ethernet/ti/davinci_emac.c @@ -61,6 +61,7 @@ #include <linux/davinci_emac.h> #include <linux/of.h> #include <linux/of_address.h> +#include <linux/of_device.h> #include <linux/of_irq.h> #include <linux/of_net.h> @@ -120,7 +121,6 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1"; #define EMAC_DEF_TX_CH (0) /* Default 0th channel */ #define EMAC_DEF_RX_CH (0) /* Default 0th channel */ #define EMAC_DEF_RX_NUM_DESC (128) -#define EMAC_DEF_TX_NUM_DESC (128) #define EMAC_DEF_MAX_TX_CH (1) /* Max TX channels configured */ #define EMAC_DEF_MAX_RX_CH (1) /* Max RX channels configured */ #define EMAC_POLL_WEIGHT (64) /* Default NAPI poll weight */ @@ -342,7 +342,6 @@ struct emac_priv { u32 mac_hash2; u32 multicast_hash_cnt[EMAC_NUM_MULTICAST_BITS]; u32 rx_addr_type; - atomic_t cur_tx; const char *phy_id; #ifdef CONFIG_OF struct device_node *phy_node; @@ -480,8 +479,8 @@ static void emac_dump_regs(struct emac_priv *priv) static void emac_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *info) { - strcpy(info->driver, emac_version_string); - strcpy(info->version, EMAC_MODULE_VERSION); + strlcpy(info->driver, emac_version_string, sizeof(info->driver)); + strlcpy(info->version, EMAC_MODULE_VERSION, sizeof(info->version)); } /** @@ -878,8 +877,7 @@ static void emac_dev_mcast_set(struct net_device *ndev) netdev_mc_count(ndev) > EMAC_DEF_MAX_MULTICAST_ADDRESSES) { mbp_enable = (mbp_enable | EMAC_MBP_RXMCAST); 
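/*
 * Illustrative sketch, not part of the patch: a NAPI poll handler using the
 * new vector argument of cpdma_ctlr_eoi() to acknowledge RX and TX
 * separately. The poll structure, the TX quota of 128 and struct
 * example_priv are assumptions; only the cpdma calls and the CPDMA_EOI_*
 * values come from this patch.
 */
static int example_poll(struct napi_struct *napi, int budget)
{
	struct example_priv *priv = container_of(napi, struct example_priv,
						 napi);
	int num_rx;

	/* drain completed TX descriptors first, then spend the budget on RX */
	cpdma_chan_process(priv->txch, 128);
	num_rx = cpdma_chan_process(priv->rxch, budget);

	if (num_rx < budget) {
		napi_complete(napi);
		/* acknowledge each interrupt line with its own EOI vector */
		cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
		cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
		/* re-enable the cpdma interrupts masked in the hard IRQ */
		cpdma_ctlr_int_ctrl(priv->dma, true);
	}

	return num_rx;
}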
emac_add_mcast(priv, EMAC_ALL_MULTI_SET, NULL); - } - if (!netdev_mc_empty(ndev)) { + } else if (!netdev_mc_empty(ndev)) { struct netdev_hw_addr *ha; mbp_enable = (mbp_enable | EMAC_MBP_RXMCAST); @@ -1039,7 +1037,7 @@ static void emac_rx_handler(void *token, int len, int status) recycle: ret = cpdma_chan_submit(priv->rxchan, skb, skb->data, - skb_tailroom(skb), GFP_KERNEL); + skb_tailroom(skb), 0); WARN_ON(ret == -ENOMEM); if (unlikely(ret < 0)) @@ -1050,12 +1048,12 @@ static void emac_tx_handler(void *token, int len, int status) { struct sk_buff *skb = token; struct net_device *ndev = skb->dev; - struct emac_priv *priv = netdev_priv(ndev); - - atomic_dec(&priv->cur_tx); + /* Check whether the queue is stopped due to stalled tx dma, if the + * queue is stopped then start the queue as we have free desc for tx + */ if (unlikely(netif_queue_stopped(ndev))) - netif_start_queue(ndev); + netif_wake_queue(ndev); ndev->stats.tx_packets++; ndev->stats.tx_bytes += len; dev_kfree_skb_any(skb); @@ -1094,14 +1092,17 @@ static int emac_dev_xmit(struct sk_buff *skb, struct net_device *ndev) skb_tx_timestamp(skb); ret_code = cpdma_chan_submit(priv->txchan, skb, skb->data, skb->len, - GFP_KERNEL); + 0); if (unlikely(ret_code != 0)) { if (netif_msg_tx_err(priv) && net_ratelimit()) dev_err(emac_dev, "DaVinci EMAC: desc submit failed"); goto fail_tx; } - if (atomic_inc_return(&priv->cur_tx) >= EMAC_DEF_TX_NUM_DESC) + /* If there is no more tx desc left free then we need to + * tell the kernel to stop sending us tx frames. + */ + if (unlikely(!cpdma_check_free_tx_desc(priv->txchan))) netif_stop_queue(ndev); return NETDEV_TX_OK; @@ -1264,7 +1265,6 @@ static int emac_dev_setmac_addr(struct net_device *ndev, void *addr) /* Store mac addr in priv and rx channel and set it in EMAC hw */ memcpy(priv->mac_addr, sa->sa_data, ndev->addr_len); memcpy(ndev->dev_addr, sa->sa_data, ndev->addr_len); - ndev->addr_assign_type &= ~NET_ADDR_RANDOM; /* MAC address is configured only after the interface is enabled. 
*/ if (netif_running(ndev)) { @@ -1438,7 +1438,7 @@ static int emac_poll(struct napi_struct *napi, int budget) * Polled functionality used by netconsole and others in non interrupt mode * */ -void emac_poll_controller(struct net_device *ndev) +static void emac_poll_controller(struct net_device *ndev) { struct emac_priv *priv = netdev_priv(ndev); @@ -1533,8 +1533,8 @@ static int emac_dev_open(struct net_device *ndev) u32 cnt; struct resource *res; int q, m, ret; + int res_num = 0, irq_num = 0; int i = 0; - int k = 0; struct emac_priv *priv = netdev_priv(ndev); pm_runtime_get(&priv->pdev->dev); @@ -1558,21 +1558,29 @@ static int emac_dev_open(struct net_device *ndev) break; ret = cpdma_chan_submit(priv->rxchan, skb, skb->data, - skb_tailroom(skb), GFP_KERNEL); + skb_tailroom(skb), 0); if (WARN_ON(ret < 0)) break; } /* Request IRQ */ + while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, + res_num))) { + for (irq_num = res->start; irq_num <= res->end; irq_num++) { + if (request_irq(irq_num, emac_irq, 0, ndev->name, + ndev)) { + dev_err(emac_dev, + "DaVinci EMAC: request_irq() failed\n"); + ret = -EBUSY; - while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k))) { - for (i = res->start; i <= res->end; i++) { - if (request_irq(i, emac_irq, IRQF_DISABLED, - ndev->name, ndev)) goto rollback; + } } - k++; + res_num++; } + /* prepare counters for rollback in case of an error */ + res_num--; + irq_num--; /* Start/Enable EMAC hardware */ emac_hw_enable(priv); @@ -1600,7 +1608,7 @@ static int emac_dev_open(struct net_device *ndev) if (priv->phy_id && *priv->phy_id) { priv->phydev = phy_connect(ndev, priv->phy_id, - &emac_adjust_link, 0, + &emac_adjust_link, PHY_INTERFACE_MODE_MII); if (IS_ERR(priv->phydev)) { @@ -1639,19 +1647,23 @@ static int emac_dev_open(struct net_device *ndev) return 0; -rollback: - - dev_err(emac_dev, "DaVinci EMAC: request_irq() failed"); +err: + emac_int_disable(priv); + napi_disable(&priv->napi); - for (q = k; k >= 0; k--) { - for (m = i; m >= res->start; m--) +rollback: + for (q = res_num; q >= 0; q--) { + res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, q); + /* at the first iteration, irq_num is already set to the + * right value + */ + if (q != res_num) + irq_num = res->end; + + for (m = irq_num; m >= res->start; m--) free_irq(m, ndev); - res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k-1); - m = res->end; } - - ret = -EBUSY; -err: + cpdma_ctlr_stop(priv->dma); pm_runtime_put(&priv->pdev->dev); return ret; } @@ -1771,29 +1783,26 @@ static const struct net_device_ops emac_netdev_ops = { #endif }; -#ifdef CONFIG_OF -static struct emac_platform_data - *davinci_emac_of_get_pdata(struct platform_device *pdev, - struct emac_priv *priv) +static const struct of_device_id davinci_emac_of_match[]; + +static struct emac_platform_data * +davinci_emac_of_get_pdata(struct platform_device *pdev, struct emac_priv *priv) { struct device_node *np; + const struct of_device_id *match; + const struct emac_platform_data *auxdata; struct emac_platform_data *pdata = NULL; const u8 *mac_addr; - u32 data; - int ret; - pdata = pdev->dev.platform_data; - if (!pdata) { - pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); - if (!pdata) - goto nodata; - } + if (!IS_ENABLED(CONFIG_OF) || !pdev->dev.of_node) + return dev_get_platdata(&pdev->dev); + + pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); + if (!pdata) + return NULL; np = pdev->dev.of_node; - if (!np) - goto nodata; - else - pdata->version = EMAC_VERSION_2; + pdata->version = 
EMAC_VERSION_2; if (!is_valid_ether_addr(pdata->mac_addr)) { mac_addr = of_get_mac_address(np); @@ -1801,47 +1810,44 @@ static struct emac_platform_data memcpy(pdata->mac_addr, mac_addr, ETH_ALEN); } - ret = of_property_read_u32(np, "ti,davinci-ctrl-reg-offset", &data); - if (!ret) - pdata->ctrl_reg_offset = data; + of_property_read_u32(np, "ti,davinci-ctrl-reg-offset", + &pdata->ctrl_reg_offset); - ret = of_property_read_u32(np, "ti,davinci-ctrl-mod-reg-offset", - &data); - if (!ret) - pdata->ctrl_mod_reg_offset = data; + of_property_read_u32(np, "ti,davinci-ctrl-mod-reg-offset", + &pdata->ctrl_mod_reg_offset); - ret = of_property_read_u32(np, "ti,davinci-ctrl-ram-offset", &data); - if (!ret) - pdata->ctrl_ram_offset = data; + of_property_read_u32(np, "ti,davinci-ctrl-ram-offset", + &pdata->ctrl_ram_offset); - ret = of_property_read_u32(np, "ti,davinci-ctrl-ram-size", &data); - if (!ret) - pdata->ctrl_ram_size = data; + of_property_read_u32(np, "ti,davinci-ctrl-ram-size", + &pdata->ctrl_ram_size); - ret = of_property_read_u32(np, "ti,davinci-rmii-en", &data); - if (!ret) - pdata->rmii_en = data; + of_property_read_u8(np, "ti,davinci-rmii-en", &pdata->rmii_en); - ret = of_property_read_u32(np, "ti,davinci-no-bd-ram", &data); - if (!ret) - pdata->no_bd_ram = data; + pdata->no_bd_ram = of_property_read_bool(np, "ti,davinci-no-bd-ram"); priv->phy_node = of_parse_phandle(np, "phy-handle", 0); if (!priv->phy_node) - pdata->phy_id = ""; + pdata->phy_id = NULL; + + auxdata = pdev->dev.platform_data; + if (auxdata) { + pdata->interrupt_enable = auxdata->interrupt_enable; + pdata->interrupt_disable = auxdata->interrupt_disable; + } + + match = of_match_device(davinci_emac_of_match, &pdev->dev); + if (match && match->data) { + auxdata = match->data; + pdata->version = auxdata->version; + pdata->hw_ram_addr = auxdata->hw_ram_addr; + } pdev->dev.platform_data = pdata; -nodata: + return pdata; } -#else -static struct emac_platform_data - *davinci_emac_of_get_pdata(struct platform_device *pdev, - struct emac_priv *priv) -{ - return pdev->dev.platform_data; -} -#endif + /** * davinci_emac_probe - EMAC device probe * @pdev: The DaVinci EMAC device that we are removing @@ -1856,30 +1862,26 @@ static int davinci_emac_probe(struct platform_device *pdev) struct resource *res; struct net_device *ndev; struct emac_priv *priv; - unsigned long size, hw_ram_addr; + unsigned long hw_ram_addr; struct emac_platform_data *pdata; - struct device *emac_dev; struct cpdma_params dma_params; struct clk *emac_clk; unsigned long emac_bus_frequency; /* obtain emac clock from kernel */ - emac_clk = clk_get(&pdev->dev, NULL); + emac_clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(emac_clk)) { dev_err(&pdev->dev, "failed to get EMAC clock\n"); return -EBUSY; } emac_bus_frequency = clk_get_rate(emac_clk); - clk_put(emac_clk); /* TODO: Probe PHY here if possible */ ndev = alloc_etherdev(sizeof(struct emac_priv)); - if (!ndev) { - rc = -ENOMEM; - goto no_ndev; - } + if (!ndev) + return -ENOMEM; platform_set_drvdata(pdev, ndev); priv = netdev_priv(ndev); @@ -1893,11 +1895,11 @@ static int davinci_emac_probe(struct platform_device *pdev) if (!pdata) { dev_err(&pdev->dev, "no platform data\n"); rc = -ENODEV; - goto probe_quit; + goto no_pdata; } /* MAC addr and PHY mask , RMII enable info from platform_data */ - memcpy(priv->mac_addr, pdata->mac_addr, 6); + memcpy(priv->mac_addr, pdata->mac_addr, ETH_ALEN); priv->phy_id = pdata->phy_id; priv->rmii_en = pdata->rmii_en; priv->version = pdata->version; @@ -1907,29 +1909,13 @@ static int 
davinci_emac_probe(struct platform_device *pdev) priv->coal_intvl = 0; priv->bus_freq_mhz = (u32)(emac_bus_frequency / 1000000); - emac_dev = &ndev->dev; /* Get EMAC platform data */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - dev_err(&pdev->dev,"error getting res\n"); - rc = -ENOENT; - goto probe_quit; - } - priv->emac_base_phys = res->start + pdata->ctrl_reg_offset; - size = resource_size(res); - if (!request_mem_region(res->start, size, ndev->name)) { - dev_err(&pdev->dev, "failed request_mem_region() for regs\n"); - rc = -ENXIO; - goto probe_quit; - } - - priv->remap_addr = ioremap(res->start, size); - if (!priv->remap_addr) { - dev_err(&pdev->dev, "unable to map IO\n"); - rc = -ENOMEM; - release_mem_region(res->start, size); - goto probe_quit; + priv->remap_addr = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(priv->remap_addr)) { + rc = PTR_ERR(priv->remap_addr); + goto no_pdata; } priv->emac_base = priv->remap_addr + pdata->ctrl_reg_offset; ndev->base_addr = (unsigned long)priv->remap_addr; @@ -1941,7 +1927,7 @@ static int davinci_emac_probe(struct platform_device *pdev) hw_ram_addr = (u32 __force)res->start + pdata->ctrl_ram_offset; memset(&dma_params, 0, sizeof(dma_params)); - dma_params.dev = emac_dev; + dma_params.dev = &pdev->dev; dma_params.dmaregs = priv->emac_base; dma_params.rxthresh = priv->emac_base + 0x120; dma_params.rxfree = priv->emac_base + 0x140; @@ -1962,7 +1948,7 @@ static int davinci_emac_probe(struct platform_device *pdev) if (!priv->dma) { dev_err(&pdev->dev, "error initializing DMA\n"); rc = -ENOMEM; - goto no_dma; + goto no_pdata; } priv->txchan = cpdma_chan_create(priv->dma, tx_chan_num(EMAC_DEF_TX_CH), @@ -1971,14 +1957,14 @@ static int davinci_emac_probe(struct platform_device *pdev) emac_rx_handler); if (WARN_ON(!priv->txchan || !priv->rxchan)) { rc = -ENOMEM; - goto no_irq_res; + goto no_cpdma_chan; } res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (!res) { dev_err(&pdev->dev, "error getting irq res\n"); rc = -ENOENT; - goto no_irq_res; + goto no_cpdma_chan; } ndev->irq = res->start; @@ -1991,7 +1977,7 @@ static int davinci_emac_probe(struct platform_device *pdev) } ndev->netdev_ops = &emac_netdev_ops; - SET_ETHTOOL_OPS(ndev, &ethtool_ops); + ndev->ethtool_ops = &ethtool_ops; netif_napi_add(ndev, &priv->napi, emac_poll, EMAC_POLL_WEIGHT); /* register the network device */ @@ -2000,12 +1986,12 @@ static int davinci_emac_probe(struct platform_device *pdev) if (rc) { dev_err(&pdev->dev, "error in register_netdev\n"); rc = -ENODEV; - goto no_irq_res; + goto no_cpdma_chan; } if (netif_msg_probe(priv)) { - dev_notice(emac_dev, "DaVinci EMAC Probe found device "\ + dev_notice(&pdev->dev, "DaVinci EMAC Probe found device " "(regs: %p, irq: %d)\n", (void *)priv->emac_base_phys, ndev->irq); } @@ -2015,20 +2001,14 @@ static int davinci_emac_probe(struct platform_device *pdev) return 0; -no_irq_res: +no_cpdma_chan: if (priv->txchan) cpdma_chan_destroy(priv->txchan); if (priv->rxchan) cpdma_chan_destroy(priv->rxchan); cpdma_ctlr_destroy(priv->dma); -no_dma: - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - release_mem_region(res->start, resource_size(res)); - iounmap(priv->remap_addr); - -probe_quit: +no_pdata: free_netdev(ndev); -no_ndev: return rc; } @@ -2041,25 +2021,18 @@ no_ndev: */ static int davinci_emac_remove(struct platform_device *pdev) { - struct resource *res; struct net_device *ndev = platform_get_drvdata(pdev); struct emac_priv *priv = netdev_priv(ndev); dev_notice(&ndev->dev, "DaVinci EMAC: 
davinci_emac_remove()\n"); - platform_set_drvdata(pdev, NULL); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (priv->txchan) cpdma_chan_destroy(priv->txchan); if (priv->rxchan) cpdma_chan_destroy(priv->rxchan); cpdma_ctlr_destroy(priv->dma); - release_mem_region(res->start, resource_size(res)); - unregister_netdev(ndev); - iounmap(priv->remap_addr); free_netdev(ndev); return 0; @@ -2092,11 +2065,19 @@ static const struct dev_pm_ops davinci_emac_pm_ops = { .resume = davinci_emac_resume, }; +#if IS_ENABLED(CONFIG_OF) +static const struct emac_platform_data am3517_emac_data = { + .version = EMAC_VERSION_2, + .hw_ram_addr = 0x01e20000, +}; + static const struct of_device_id davinci_emac_of_match[] = { {.compatible = "ti,davinci-dm6467-emac", }, + {.compatible = "ti,am3517-emac", .data = &am3517_emac_data, }, {}, }; MODULE_DEVICE_TABLE(of, davinci_emac_of_match); +#endif /* davinci_emac_driver: EMAC platform driver structure */ static struct platform_driver davinci_emac_driver = { diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c index cca25509b03..735dc53d4b0 100644 --- a/drivers/net/ethernet/ti/davinci_mdio.c +++ b/drivers/net/ethernet/ti/davinci_mdio.c @@ -38,6 +38,7 @@ #include <linux/davinci_emac.h> #include <linux/of.h> #include <linux/of_device.h> +#include <linux/pinctrl/consumer.h> /* * This timeout definition is a worst-case ultra defensive measure against @@ -81,7 +82,7 @@ struct davinci_mdio_regs { } user[0]; }; -struct mdio_platform_data default_pdata = { +static const struct mdio_platform_data default_pdata = { .bus_freq = DEF_OUT_FREQ, }; @@ -291,6 +292,7 @@ static int davinci_mdio_write(struct mii_bus *bus, int phy_id, return 0; } +#if IS_ENABLED(CONFIG_OF) static int davinci_mdio_probe_dt(struct mdio_platform_data *data, struct platform_device *pdev) { @@ -301,35 +303,32 @@ static int davinci_mdio_probe_dt(struct mdio_platform_data *data, return -EINVAL; if (of_property_read_u32(node, "bus_freq", &prop)) { - pr_err("Missing bus_freq property in the DT.\n"); + dev_err(&pdev->dev, "Missing bus_freq property in the DT.\n"); return -EINVAL; } data->bus_freq = prop; return 0; } - +#endif static int davinci_mdio_probe(struct platform_device *pdev) { - struct mdio_platform_data *pdata = pdev->dev.platform_data; + struct mdio_platform_data *pdata = dev_get_platdata(&pdev->dev); struct device *dev = &pdev->dev; struct davinci_mdio_data *data; struct resource *res; struct phy_device *phy; int ret, addr; - data = kzalloc(sizeof(*data), GFP_KERNEL); - if (!data) { - dev_err(dev, "failed to alloc device data\n"); + data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); + if (!data) return -ENOMEM; - } - data->bus = mdiobus_alloc(); + data->bus = devm_mdiobus_alloc(dev); if (!data->bus) { dev_err(dev, "failed to alloc mii bus\n"); - ret = -ENOMEM; - goto bail_out; + return -ENOMEM; } if (dev->of_node) { @@ -351,7 +350,7 @@ static int davinci_mdio_probe(struct platform_device *pdev) pm_runtime_enable(&pdev->dev); pm_runtime_get_sync(&pdev->dev); - data->clk = clk_get(&pdev->dev, "fck"); + data->clk = devm_clk_get(dev, "fck"); if (IS_ERR(data->clk)) { dev_err(dev, "failed to get device clock\n"); ret = PTR_ERR(data->clk); @@ -364,24 +363,9 @@ static int davinci_mdio_probe(struct platform_device *pdev) spin_lock_init(&data->lock); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - dev_err(dev, "could not find register map resource\n"); - ret = -ENOENT; - goto bail_out; - } - - res = devm_request_mem_region(dev, 
res->start, resource_size(res), - dev_name(dev)); - if (!res) { - dev_err(dev, "could not allocate register map resource\n"); - ret = -ENXIO; - goto bail_out; - } - - data->regs = devm_ioremap_nocache(dev, res->start, resource_size(res)); - if (!data->regs) { - dev_err(dev, "could not map mdio registers\n"); - ret = -ENOMEM; + data->regs = devm_ioremap_resource(dev, res); + if (IS_ERR(data->regs)) { + ret = PTR_ERR(data->regs); goto bail_out; } @@ -403,38 +387,22 @@ static int davinci_mdio_probe(struct platform_device *pdev) return 0; bail_out: - if (data->bus) - mdiobus_free(data->bus); - - if (data->clk) - clk_put(data->clk); pm_runtime_put_sync(&pdev->dev); pm_runtime_disable(&pdev->dev); - kfree(data); - return ret; } static int davinci_mdio_remove(struct platform_device *pdev) { - struct device *dev = &pdev->dev; - struct davinci_mdio_data *data = dev_get_drvdata(dev); + struct davinci_mdio_data *data = platform_get_drvdata(pdev); - if (data->bus) { + if (data->bus) mdiobus_unregister(data->bus); - mdiobus_free(data->bus); - } - if (data->clk) - clk_put(data->clk); pm_runtime_put_sync(&pdev->dev); pm_runtime_disable(&pdev->dev); - dev_set_drvdata(dev, NULL); - - kfree(data); - return 0; } @@ -451,10 +419,12 @@ static int davinci_mdio_suspend(struct device *dev) __raw_writel(ctrl, &data->regs->control); wait_for_idle(data); - pm_runtime_put_sync(data->dev); - data->suspended = true; spin_unlock(&data->lock); + pm_runtime_put_sync(data->dev); + + /* Select sleep pin state */ + pinctrl_pm_select_sleep_state(dev); return 0; } @@ -462,15 +432,15 @@ static int davinci_mdio_suspend(struct device *dev) static int davinci_mdio_resume(struct device *dev) { struct davinci_mdio_data *data = dev_get_drvdata(dev); - u32 ctrl; - spin_lock(&data->lock); + /* Select default pin state */ + pinctrl_pm_select_default_state(dev); + pm_runtime_get_sync(data->dev); + spin_lock(&data->lock); /* restart the scan state machine */ - ctrl = __raw_readl(&data->regs->control); - ctrl |= CONTROL_ENABLE; - __raw_writel(ctrl, &data->regs->control); + __davinci_mdio_reset(data); data->suspended = false; spin_unlock(&data->lock); @@ -479,14 +449,17 @@ static int davinci_mdio_resume(struct device *dev) } static const struct dev_pm_ops davinci_mdio_pm_ops = { - .suspend = davinci_mdio_suspend, - .resume = davinci_mdio_resume, + .suspend_late = davinci_mdio_suspend, + .resume_early = davinci_mdio_resume, }; +#if IS_ENABLED(CONFIG_OF) static const struct of_device_id davinci_mdio_of_mtable[] = { { .compatible = "ti,davinci_mdio", }, { /* sentinel */ }, }; +MODULE_DEVICE_TABLE(of, davinci_mdio_of_mtable); +#endif static struct platform_driver davinci_mdio_driver = { .driver = { diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c index 22725386c5d..62b19be5183 100644 --- a/drivers/net/ethernet/ti/tlan.c +++ b/drivers/net/ethernet/ti/tlan.c @@ -319,7 +319,7 @@ static void tlan_remove_one(struct pci_dev *pdev) free_netdev(dev); - pci_set_drvdata(pdev, NULL); + cancel_work_sync(&priv->tlan_tqueue); } static void tlan_start(struct net_device *dev) @@ -371,7 +371,7 @@ static int tlan_resume(struct pci_dev *pdev) pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev); - pci_enable_wake(pdev, 0, 0); + pci_enable_wake(pdev, PCI_D0, 0); netif_device_attach(dev); if (netif_running(dev)) @@ -532,7 +532,6 @@ static int tlan_probe1(struct pci_dev *pdev, long ioaddr, int irq, int rev, /* This is a hack. 
We need to know which board structure * is suited for this adapter */ device_id = inw(ioaddr + EISA_ID2); - priv->is_eisa = 1; if (device_id == 0x20F1) { priv->adapter = &board_info[13]; /* NetFlex-3/E */ priv->adapter_rev = 23; /* TLAN 2.3 */ @@ -1911,10 +1910,8 @@ static void tlan_reset_lists(struct net_device *dev) list->frame_size = TLAN_MAX_FRAME_SIZE; list->buffer[0].count = TLAN_MAX_FRAME_SIZE | TLAN_LAST_BUFFER; skb = netdev_alloc_skb_ip_align(dev, TLAN_MAX_FRAME_SIZE + 5); - if (!skb) { - netdev_err(dev, "Out of memory for received data\n"); + if (!skb) break; - } list->buffer[0].address = pci_map_single(priv->pci_dev, skb->data, diff --git a/drivers/net/ethernet/ti/tlan.h b/drivers/net/ethernet/ti/tlan.h index 5fc98a8e488..2eb33a25078 100644 --- a/drivers/net/ethernet/ti/tlan.h +++ b/drivers/net/ethernet/ti/tlan.h @@ -207,7 +207,6 @@ struct tlan_priv { u8 tlan_full_duplex; spinlock_t lock; u8 link; - u8 is_eisa; struct work_struct tlan_tqueue; u8 neg_be_verbose; }; |
