Diffstat (limited to 'drivers/staging/octeon')
-rw-r--r--  drivers/staging/octeon/Kconfig              |   2
-rw-r--r--  drivers/staging/octeon/ethernet-defines.h   |   4
-rw-r--r--  drivers/staging/octeon/ethernet-mdio.c      |  54
-rw-r--r--  drivers/staging/octeon/ethernet-mdio.h      |   1
-rw-r--r--  drivers/staging/octeon/ethernet-mem.c       |  18
-rw-r--r--  drivers/staging/octeon/ethernet-rgmii.c     |  27
-rw-r--r--  drivers/staging/octeon/ethernet-rx.c        |  24
-rw-r--r--  drivers/staging/octeon/ethernet-spi.c       |  92
-rw-r--r--  drivers/staging/octeon/ethernet-tx.c        |  93
-rw-r--r--  drivers/staging/octeon/ethernet-util.h      |   2
-rw-r--r--  drivers/staging/octeon/ethernet.c           | 181
-rw-r--r--  drivers/staging/octeon/octeon-ethernet.h    |   5
12 files changed, 265 insertions, 238 deletions
diff --git a/drivers/staging/octeon/Kconfig b/drivers/staging/octeon/Kconfig
index 9493128e5fd..6e1d5f8d3ec 100644
--- a/drivers/staging/octeon/Kconfig
+++ b/drivers/staging/octeon/Kconfig
@@ -1,6 +1,6 @@
config OCTEON_ETHERNET
tristate "Cavium Networks Octeon Ethernet support"
- depends on CPU_CAVIUM_OCTEON && NETDEVICES
+ depends on CAVIUM_OCTEON_SOC && NETDEVICES
select PHYLIB
select MDIO_OCTEON
help
diff --git a/drivers/staging/octeon/ethernet-defines.h b/drivers/staging/octeon/ethernet-defines.h
index bdaec8d2ca0..2a98a2153e1 100644
--- a/drivers/staging/octeon/ethernet-defines.h
+++ b/drivers/staging/octeon/ethernet-defines.h
@@ -33,10 +33,6 @@
* driver will use this memory instead of kernel memory for pools. This
* allows 32bit userspace application to access the buffers, but also
* requires all received packets to be copied.
- * CONFIG_CAVIUM_OCTEON_NUM_PACKET_BUFFERS
- * This kernel config option allows the user to control the number of
- * packet and work queue buffers allocated by the driver. If this is zero,
- * the driver uses the default from below.
* USE_SKBUFFS_IN_HW
* Tells the driver to populate the packet buffers with kernel skbuffs.
* This allows the driver to receive packets without copying them. It also
diff --git a/drivers/staging/octeon/ethernet-mdio.c b/drivers/staging/octeon/ethernet-mdio.c
index 63800ba71d0..3f067f189b3 100644
--- a/drivers/staging/octeon/ethernet-mdio.c
+++ b/drivers/staging/octeon/ethernet-mdio.c
@@ -28,6 +28,7 @@
#include <linux/ethtool.h>
#include <linux/phy.h>
#include <linux/ratelimit.h>
+#include <linux/of_mdio.h>
#include <net/dst.h>
@@ -45,9 +46,9 @@
static void cvm_oct_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strcpy(info->driver, "cavium-ethernet");
- strcpy(info->version, OCTEON_ETHERNET_VERSION);
- strcpy(info->bus_info, "Builtin");
+ strlcpy(info->driver, "cavium-ethernet", sizeof(info->driver));
+ strlcpy(info->version, OCTEON_ETHERNET_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, "Builtin", sizeof(info->bus_info));
}
static int cvm_oct_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
@@ -126,23 +127,21 @@ static void cvm_oct_adjust_link(struct net_device *dev)
link_info.s.link_up = priv->last_link ? 1 : 0;
link_info.s.full_duplex = priv->phydev->duplex ? 1 : 0;
link_info.s.speed = priv->phydev->speed;
- cvmx_helper_link_set( priv->port, link_info);
+ cvmx_helper_link_set(priv->port, link_info);
if (priv->last_link) {
netif_carrier_on(dev);
if (priv->queue != -1)
printk_ratelimited("%s: %u Mbps %s duplex, "
- "port %2d, queue %2d\n",
- dev->name, priv->phydev->speed,
- priv->phydev->duplex ?
- "Full" : "Half",
- priv->port, priv->queue);
+ "port %2d, queue %2d\n", dev->name,
+ priv->phydev->speed,
+ priv->phydev->duplex ? "Full" : "Half",
+ priv->port, priv->queue);
else
printk_ratelimited("%s: %u Mbps %s duplex, "
- "port %2d, POW\n",
- dev->name, priv->phydev->speed,
- priv->phydev->duplex ?
- "Full" : "Half",
- priv->port);
+ "port %2d, POW\n", dev->name,
+ priv->phydev->speed,
+ priv->phydev->duplex ? "Full" : "Half",
+ priv->port);
} else {
netif_carrier_off(dev);
printk_ratelimited("%s: Link down\n", dev->name);
@@ -161,22 +160,23 @@ static void cvm_oct_adjust_link(struct net_device *dev)
int cvm_oct_phy_setup_device(struct net_device *dev)
{
struct octeon_ethernet *priv = netdev_priv(dev);
+ struct device_node *phy_node;
- int phy_addr = cvmx_helper_board_get_mii_address(priv->port);
- if (phy_addr != -1) {
- char phy_id[20];
+ if (!priv->of_node)
+ return 0;
- snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT, "0", phy_addr);
+ phy_node = of_parse_phandle(priv->of_node, "phy-handle", 0);
+ if (!phy_node)
+ return 0;
- priv->phydev = phy_connect(dev, phy_id, cvm_oct_adjust_link, 0,
- PHY_INTERFACE_MODE_GMII);
+ priv->phydev = of_phy_connect(dev, phy_node, cvm_oct_adjust_link, 0,
+ PHY_INTERFACE_MODE_GMII);
+
+ if (priv->phydev == NULL)
+ return -ENODEV;
+
+ priv->last_link = 0;
+ phy_start_aneg(priv->phydev);
- if (IS_ERR(priv->phydev)) {
- priv->phydev = NULL;
- return -1;
- }
- priv->last_link = 0;
- phy_start_aneg(priv->phydev);
- }
return 0;
}
diff --git a/drivers/staging/octeon/ethernet-mdio.h b/drivers/staging/octeon/ethernet-mdio.h
index a417d4fce12..eccfcc54cea 100644
--- a/drivers/staging/octeon/ethernet-mdio.h
+++ b/drivers/staging/octeon/ethernet-mdio.h
@@ -27,7 +27,6 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
-#include <linux/init.h>
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/string.h>
diff --git a/drivers/staging/octeon/ethernet-mem.c b/drivers/staging/octeon/ethernet-mem.c
index 78b6cb74376..bf666b02319 100644
--- a/drivers/staging/octeon/ethernet-mem.c
+++ b/drivers/staging/octeon/ethernet-mem.c
@@ -30,6 +30,7 @@
#include <asm/octeon/octeon.h>
+#include "ethernet-mem.h"
#include "ethernet-defines.h"
#include <asm/octeon/cvmx-fpa.h>
@@ -48,13 +49,8 @@ static int cvm_oct_fill_hw_skbuff(int pool, int size, int elements)
while (freed) {
struct sk_buff *skb = dev_alloc_skb(size + 256);
- if (unlikely(skb == NULL)) {
- pr_warning
- ("Failed to allocate skb for hardware pool %d\n",
- pool);
+ if (unlikely(skb == NULL))
break;
- }
-
skb_reserve(skb, 256 - (((unsigned long)skb->data) & 0x7f));
*(struct sk_buff **)(skb->data - sizeof(void *)) = skb;
cvmx_fpa_free(skb->data, pool, DONT_WRITEBACK(size / 128));
@@ -84,10 +80,10 @@ static void cvm_oct_free_hw_skbuff(int pool, int size, int elements)
} while (memory);
if (elements < 0)
- pr_warning("Freeing of pool %u had too many skbuffs (%d)\n",
+ pr_warn("Freeing of pool %u had too many skbuffs (%d)\n",
pool, elements);
else if (elements > 0)
- pr_warning("Freeing of pool %u is missing %d skbuffs\n",
+ pr_warn("Freeing of pool %u is missing %d skbuffs\n",
pool, elements);
}
@@ -118,7 +114,7 @@ static int cvm_oct_fill_hw_memory(int pool, int size, int elements)
*/
memory = kmalloc(size + 256, GFP_ATOMIC);
if (unlikely(memory == NULL)) {
- pr_warning("Unable to allocate %u bytes for FPA pool %d\n",
+ pr_warn("Unable to allocate %u bytes for FPA pool %d\n",
elements * size, pool);
break;
}
@@ -151,10 +147,10 @@ static void cvm_oct_free_hw_memory(int pool, int size, int elements)
} while (fpa);
if (elements < 0)
- pr_warning("Freeing of pool %u had too many buffers (%d)\n",
+ pr_warn("Freeing of pool %u had too many buffers (%d)\n",
pool, elements);
else if (elements > 0)
- pr_warning("Warning: Freeing of pool %u is missing %d buffers\n",
+ pr_warn("Warning: Freeing of pool %u is missing %d buffers\n",
pool, elements);
}
diff --git a/drivers/staging/octeon/ethernet-rgmii.c b/drivers/staging/octeon/ethernet-rgmii.c
index d8f5f694ec3..0ec0da32821 100644
--- a/drivers/staging/octeon/ethernet-rgmii.c
+++ b/drivers/staging/octeon/ethernet-rgmii.c
@@ -43,7 +43,7 @@
#include <asm/octeon/cvmx-npi-defs.h>
#include <asm/octeon/cvmx-gmxx-defs.h>
-DEFINE_SPINLOCK(global_register_lock);
+static DEFINE_SPINLOCK(global_register_lock);
static int number_rgmii_ports;
@@ -72,7 +72,8 @@ static void cvm_oct_rgmii_poll(struct net_device *dev)
* If the 10Mbps preamble workaround is supported and we're
* at 10Mbps we may need to do some special checking.
*/
- if (USE_10MBPS_PREAMBLE_WORKAROUND && (link_info.s.speed == 10)) {
+ if (USE_10MBPS_PREAMBLE_WORKAROUND &&
+ (link_info.s.speed == 10)) {
/*
* Read the GMXX_RXX_INT_REG[PCTERR] bit and
@@ -166,9 +167,8 @@ static void cvm_oct_rgmii_poll(struct net_device *dev)
if (use_global_register_lock)
spin_unlock_irqrestore(&global_register_lock, flags);
- else {
+ else
mutex_unlock(&priv->phydev->bus->mdio_lock);
- }
if (priv->phydev == NULL) {
/* Tell core. */
@@ -232,8 +232,10 @@ static irqreturn_t cvm_oct_rgmii_rml_interrupt(int cpl, void *dev_id)
(interface, index)];
struct octeon_ethernet *priv = netdev_priv(dev);
- if (dev && !atomic_read(&cvm_oct_poll_queue_stopping))
- queue_work(cvm_oct_poll_queue, &priv->port_work);
+ if (dev &&
+ !atomic_read(&cvm_oct_poll_queue_stopping))
+ queue_work(cvm_oct_poll_queue,
+ &priv->port_work);
gmx_rx_int_reg.u64 = 0;
gmx_rx_int_reg.s.phy_dupx = 1;
@@ -274,8 +276,10 @@ static irqreturn_t cvm_oct_rgmii_rml_interrupt(int cpl, void *dev_id)
(interface, index)];
struct octeon_ethernet *priv = netdev_priv(dev);
- if (dev && !atomic_read(&cvm_oct_poll_queue_stopping))
- queue_work(cvm_oct_poll_queue, &priv->port_work);
+ if (dev &&
+ !atomic_read(&cvm_oct_poll_queue_stopping))
+ queue_work(cvm_oct_poll_queue,
+ &priv->port_work);
gmx_rx_int_reg.u64 = 0;
gmx_rx_int_reg.s.phy_dupx = 1;
@@ -327,7 +331,8 @@ int cvm_oct_rgmii_stop(struct net_device *dev)
static void cvm_oct_rgmii_immediate_poll(struct work_struct *work)
{
- struct octeon_ethernet *priv = container_of(work, struct octeon_ethernet, port_work);
+ struct octeon_ethernet *priv =
+ container_of(work, struct octeon_ethernet, port_work);
cvm_oct_rgmii_poll(cvm_oct_device[priv->port]);
}
@@ -373,9 +378,7 @@ int cvm_oct_rgmii_init(struct net_device *dev)
* Enable interrupts on inband status changes
* for this port.
*/
- gmx_rx_int_en.u64 =
- cvmx_read_csr(CVMX_GMXX_RXX_INT_EN
- (index, interface));
+ gmx_rx_int_en.u64 = 0;
gmx_rx_int_en.s.phy_dupx = 1;
gmx_rx_int_en.s.phy_link = 1;
gmx_rx_int_en.s.phy_spd = 1;
diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
index 400df8cbee5..a0f4868cfa1 100644
--- a/drivers/staging/octeon/ethernet-rx.c
+++ b/drivers/staging/octeon/ethernet-rx.c
@@ -29,13 +29,13 @@
#include <linux/cache.h>
#include <linux/cpumask.h>
#include <linux/netdevice.h>
-#include <linux/init.h>
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/string.h>
#include <linux/prefetch.h>
#include <linux/ratelimit.h>
#include <linux/smp.h>
+#include <linux/interrupt.h>
#include <net/dst.h>
#ifdef CONFIG_XFRM
#include <linux/xfrm.h>
@@ -71,7 +71,7 @@ struct cvm_oct_core_state {
int baseline_cores;
/*
* The number of additional cores that could be processing
- * input packtes.
+ * input packets.
*/
atomic_t available_cores;
cpumask_t cpu_state;
@@ -79,6 +79,8 @@ struct cvm_oct_core_state {
static struct cvm_oct_core_state core_state __cacheline_aligned_in_smp;
+static int cvm_irq_cpu;
+
static void cvm_oct_enable_napi(void *_)
{
int cpu = smp_processor_id();
@@ -111,11 +113,7 @@ static void cvm_oct_no_more_work(void)
{
int cpu = smp_processor_id();
- /*
- * CPU zero is special. It always has the irq enabled when
- * waiting for incoming packets.
- */
- if (cpu == 0) {
+ if (cpu == cvm_irq_cpu) {
enable_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group);
return;
}
@@ -134,6 +132,7 @@ static irqreturn_t cvm_oct_do_interrupt(int cpl, void *dev_id)
{
/* Disable the IRQ and start napi_poll. */
disable_irq_nosync(OCTEON_IRQ_WORKQ0 + pow_receive_group);
+ cvm_irq_cpu = smp_processor_id();
cvm_oct_enable_napi(NULL);
return IRQ_HANDLED;
@@ -162,7 +161,7 @@ static inline int cvm_oct_check_rcv_error(cvmx_wqe_t *work)
/*
* We received a packet with either an alignment error
* or a FCS error. This may be signalling that we are
- * running 10Mbps with GMXX_RXX_FRM_CTL[PRE_CHK}
+ * running 10Mbps with GMXX_RXX_FRM_CTL[PRE_CHK]
* off. If this is the case we need to parse the
* packet to determine if we can remove a non spec
* preamble and generate a correct packet.
@@ -302,6 +301,7 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
if (backlog > budget * cores_in_use && napi != NULL)
cvm_oct_enable_one_cpu();
}
+ rx_count++;
skb_in_hw = USE_SKBUFFS_IN_HW && work->word2.s.bufs == 1;
if (likely(skb_in_hw)) {
@@ -335,9 +335,6 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
*/
skb = dev_alloc_skb(work->len);
if (!skb) {
- printk_ratelimited("Port %d failed to allocate "
- "skbuff, packet dropped\n",
- work->ipprt);
cvm_oct_free_work(work);
continue;
}
@@ -428,7 +425,6 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
#endif
}
netif_receive_skb(skb);
- rx_count++;
} else {
/* Drop any packet received for a device that isn't up */
/*
@@ -516,7 +512,7 @@ void cvm_oct_rx_initialize(void)
if (NULL == dev_for_napi)
panic("No net_devices were allocated.");
- if (max_rx_cpus > 1 && max_rx_cpus < num_online_cpus())
+ if (max_rx_cpus >= 1 && max_rx_cpus < num_online_cpus())
atomic_set(&core_state.available_cores, max_rx_cpus);
else
atomic_set(&core_state.available_cores, num_online_cpus());
@@ -528,7 +524,7 @@ void cvm_oct_rx_initialize(void)
cvm_oct_napi_poll, rx_napi_weight);
napi_enable(&cvm_oct_napi[i].napi);
}
- /* Register an IRQ hander for to receive POW interrupts */
+ /* Register an IRQ handler to receive POW interrupts */
i = request_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group,
cvm_oct_do_interrupt, 0, "Ethernet", cvm_oct_device);
diff --git a/drivers/staging/octeon/ethernet-spi.c b/drivers/staging/octeon/ethernet-spi.c
index af8d62818f1..5108bc0bb57 100644
--- a/drivers/staging/octeon/ethernet-spi.c
+++ b/drivers/staging/octeon/ethernet-spi.c
@@ -64,31 +64,23 @@ static irqreturn_t cvm_oct_spi_rml_interrupt(int cpl, void *dev_id)
if (spx_int_reg.s.spf)
pr_err("SPI1: SRX Spi4 interface down\n");
if (spx_int_reg.s.calerr)
- pr_err("SPI1: SRX Spi4 Calendar table "
- "parity error\n");
+ pr_err("SPI1: SRX Spi4 Calendar table parity error\n");
if (spx_int_reg.s.syncerr)
- pr_err("SPI1: SRX Consecutive Spi4 DIP4 "
- "errors have exceeded "
- "SPX_ERR_CTL[ERRCNT]\n");
+ pr_err("SPI1: SRX Consecutive Spi4 DIP4 errors have exceeded SPX_ERR_CTL[ERRCNT]\n");
if (spx_int_reg.s.diperr)
pr_err("SPI1: SRX Spi4 DIP4 error\n");
if (spx_int_reg.s.tpaovr)
- pr_err("SPI1: SRX Selected port has hit "
- "TPA overflow\n");
+ pr_err("SPI1: SRX Selected port has hit TPA overflow\n");
if (spx_int_reg.s.rsverr)
- pr_err("SPI1: SRX Spi4 reserved control "
- "word detected\n");
+ pr_err("SPI1: SRX Spi4 reserved control word detected\n");
if (spx_int_reg.s.drwnng)
- pr_err("SPI1: SRX Spi4 receive FIFO "
- "drowning/overflow\n");
+ pr_err("SPI1: SRX Spi4 receive FIFO drowning/overflow\n");
if (spx_int_reg.s.clserr)
- pr_err("SPI1: SRX Spi4 packet closed on "
- "non-16B alignment without EOP\n");
+ pr_err("SPI1: SRX Spi4 packet closed on non-16B alignment without EOP\n");
if (spx_int_reg.s.spiovr)
pr_err("SPI1: SRX Spi4 async FIFO overflow\n");
if (spx_int_reg.s.abnorm)
- pr_err("SPI1: SRX Abnormal packet "
- "termination (ERR bit)\n");
+ pr_err("SPI1: SRX Abnormal packet termination (ERR bit)\n");
if (spx_int_reg.s.prtnxa)
pr_err("SPI1: SRX Port out of range\n");
}
@@ -99,31 +91,23 @@ static irqreturn_t cvm_oct_spi_rml_interrupt(int cpl, void *dev_id)
stx_int_reg.u64 &= cvmx_read_csr(CVMX_STXX_INT_MSK(1));
if (stx_int_reg.s.syncerr)
- pr_err("SPI1: STX Interface encountered a "
- "fatal error\n");
+ pr_err("SPI1: STX Interface encountered a fatal error\n");
if (stx_int_reg.s.frmerr)
- pr_err("SPI1: STX FRMCNT has exceeded "
- "STX_DIP_CNT[MAXFRM]\n");
+ pr_err("SPI1: STX FRMCNT has exceeded STX_DIP_CNT[MAXFRM]\n");
if (stx_int_reg.s.unxfrm)
- pr_err("SPI1: STX Unexpected framing "
- "sequence\n");
+ pr_err("SPI1: STX Unexpected framing sequence\n");
if (stx_int_reg.s.nosync)
- pr_err("SPI1: STX ERRCNT has exceeded "
- "STX_DIP_CNT[MAXDIP]\n");
+ pr_err("SPI1: STX ERRCNT has exceeded STX_DIP_CNT[MAXDIP]\n");
if (stx_int_reg.s.diperr)
- pr_err("SPI1: STX DIP2 error on the Spi4 "
- "Status channel\n");
+ pr_err("SPI1: STX DIP2 error on the Spi4 Status channel\n");
if (stx_int_reg.s.datovr)
pr_err("SPI1: STX Spi4 FIFO overflow error\n");
if (stx_int_reg.s.ovrbst)
- pr_err("SPI1: STX Transmit packet burst "
- "too big\n");
+ pr_err("SPI1: STX Transmit packet burst too big\n");
if (stx_int_reg.s.calpar1)
- pr_err("SPI1: STX Calendar Table Parity "
- "Error Bank1\n");
+ pr_err("SPI1: STX Calendar Table Parity Error Bank1\n");
if (stx_int_reg.s.calpar0)
- pr_err("SPI1: STX Calendar Table Parity "
- "Error Bank0\n");
+ pr_err("SPI1: STX Calendar Table Parity Error Bank0\n");
}
cvmx_write_csr(CVMX_SPXX_INT_MSK(1), 0);
@@ -144,31 +128,23 @@ static irqreturn_t cvm_oct_spi_rml_interrupt(int cpl, void *dev_id)
if (spx_int_reg.s.spf)
pr_err("SPI0: SRX Spi4 interface down\n");
if (spx_int_reg.s.calerr)
- pr_err("SPI0: SRX Spi4 Calendar table "
- "parity error\n");
+ pr_err("SPI0: SRX Spi4 Calendar table parity error\n");
if (spx_int_reg.s.syncerr)
- pr_err("SPI0: SRX Consecutive Spi4 DIP4 "
- "errors have exceeded "
- "SPX_ERR_CTL[ERRCNT]\n");
+ pr_err("SPI0: SRX Consecutive Spi4 DIP4 errors have exceeded SPX_ERR_CTL[ERRCNT]\n");
if (spx_int_reg.s.diperr)
pr_err("SPI0: SRX Spi4 DIP4 error\n");
if (spx_int_reg.s.tpaovr)
- pr_err("SPI0: SRX Selected port has hit "
- "TPA overflow\n");
+ pr_err("SPI0: SRX Selected port has hit TPA overflow\n");
if (spx_int_reg.s.rsverr)
- pr_err("SPI0: SRX Spi4 reserved control "
- "word detected\n");
+ pr_err("SPI0: SRX Spi4 reserved control word detected\n");
if (spx_int_reg.s.drwnng)
- pr_err("SPI0: SRX Spi4 receive FIFO "
- "drowning/overflow\n");
+ pr_err("SPI0: SRX Spi4 receive FIFO drowning/overflow\n");
if (spx_int_reg.s.clserr)
- pr_err("SPI0: SRX Spi4 packet closed on "
- "non-16B alignment without EOP\n");
+ pr_err("SPI0: SRX Spi4 packet closed on non-16B alignment without EOP\n");
if (spx_int_reg.s.spiovr)
pr_err("SPI0: SRX Spi4 async FIFO overflow\n");
if (spx_int_reg.s.abnorm)
- pr_err("SPI0: SRX Abnormal packet "
- "termination (ERR bit)\n");
+ pr_err("SPI0: SRX Abnormal packet termination (ERR bit)\n");
if (spx_int_reg.s.prtnxa)
pr_err("SPI0: SRX Port out of range\n");
}
@@ -179,31 +155,23 @@ static irqreturn_t cvm_oct_spi_rml_interrupt(int cpl, void *dev_id)
stx_int_reg.u64 &= cvmx_read_csr(CVMX_STXX_INT_MSK(0));
if (stx_int_reg.s.syncerr)
- pr_err("SPI0: STX Interface encountered a "
- "fatal error\n");
+ pr_err("SPI0: STX Interface encountered a fatal error\n");
if (stx_int_reg.s.frmerr)
- pr_err("SPI0: STX FRMCNT has exceeded "
- "STX_DIP_CNT[MAXFRM]\n");
+ pr_err("SPI0: STX FRMCNT has exceeded STX_DIP_CNT[MAXFRM]\n");
if (stx_int_reg.s.unxfrm)
- pr_err("SPI0: STX Unexpected framing "
- "sequence\n");
+ pr_err("SPI0: STX Unexpected framing sequence\n");
if (stx_int_reg.s.nosync)
- pr_err("SPI0: STX ERRCNT has exceeded "
- "STX_DIP_CNT[MAXDIP]\n");
+ pr_err("SPI0: STX ERRCNT has exceeded STX_DIP_CNT[MAXDIP]\n");
if (stx_int_reg.s.diperr)
- pr_err("SPI0: STX DIP2 error on the Spi4 "
- "Status channel\n");
+ pr_err("SPI0: STX DIP2 error on the Spi4 Status channel\n");
if (stx_int_reg.s.datovr)
pr_err("SPI0: STX Spi4 FIFO overflow error\n");
if (stx_int_reg.s.ovrbst)
- pr_err("SPI0: STX Transmit packet burst "
- "too big\n");
+ pr_err("SPI0: STX Transmit packet burst too big\n");
if (stx_int_reg.s.calpar1)
- pr_err("SPI0: STX Calendar Table Parity "
- "Error Bank1\n");
+ pr_err("SPI0: STX Calendar Table Parity Error Bank1\n");
if (stx_int_reg.s.calpar0)
- pr_err("SPI0: STX Calendar Table Parity "
- "Error Bank0\n");
+ pr_err("SPI0: STX Calendar Table Parity Error Bank0\n");
}
cvmx_write_csr(CVMX_SPXX_INT_MSK(0), 0);
diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c
index 56d74dc2fbd..4e54d854021 100644
--- a/drivers/staging/octeon/ethernet-tx.c
+++ b/drivers/staging/octeon/ethernet-tx.c
@@ -27,11 +27,11 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
-#include <linux/init.h>
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/ratelimit.h>
#include <linux/string.h>
+#include <linux/interrupt.h>
#include <net/dst.h>
#ifdef CONFIG_XFRM
#include <linux/xfrm.h>
@@ -61,7 +61,7 @@
* You can define GET_SKBUFF_QOS() to override how the skbuff output
* function determines which output queue is used. The default
* implementation always uses the base queue for the port. If, for
- * example, you wanted to use the skb->priority fieid, define
+ * example, you wanted to use the skb->priority field, define
* GET_SKBUFF_QOS as: #define GET_SKBUFF_QOS(skb) ((skb)->priority)
*/
#ifndef GET_SKBUFF_QOS
@@ -77,10 +77,12 @@ static DECLARE_TASKLET(cvm_oct_tx_cleanup_tasklet, cvm_oct_tx_do_cleanup, 0);
static inline int32_t cvm_oct_adjust_skb_to_free(int32_t skb_to_free, int fau)
{
int32_t undo;
- undo = skb_to_free > 0 ? MAX_SKB_TO_FREE : skb_to_free + MAX_SKB_TO_FREE;
+ undo = skb_to_free > 0 ? MAX_SKB_TO_FREE : skb_to_free +
+ MAX_SKB_TO_FREE;
if (undo > 0)
cvmx_fau_atomic_add32(fau, -undo);
- skb_to_free = -skb_to_free > MAX_SKB_TO_FREE ? MAX_SKB_TO_FREE : -skb_to_free;
+ skb_to_free = -skb_to_free > MAX_SKB_TO_FREE ? MAX_SKB_TO_FREE :
+ -skb_to_free;
return skb_to_free;
}
@@ -93,7 +95,7 @@ static void cvm_oct_kick_tx_poll_watchdog(void)
cvmx_write_csr(CVMX_CIU_TIMX(1), ciu_timx.u64);
}
-void cvm_oct_free_tx_skbs(struct net_device *dev)
+static void cvm_oct_free_tx_skbs(struct net_device *dev)
{
int32_t skb_to_free;
int qos, queues_per_port;
@@ -107,8 +109,10 @@ void cvm_oct_free_tx_skbs(struct net_device *dev)
for (qos = 0; qos < queues_per_port; qos++) {
if (skb_queue_len(&priv->tx_free_list[qos]) == 0)
continue;
- skb_to_free = cvmx_fau_fetch_and_add32(priv->fau+qos*4, MAX_SKB_TO_FREE);
- skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free, priv->fau+qos*4);
+ skb_to_free = cvmx_fau_fetch_and_add32(priv->fau+qos*4,
+ MAX_SKB_TO_FREE);
+ skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free,
+ priv->fau+qos*4);
total_freed += skb_to_free;
@@ -116,12 +120,14 @@ void cvm_oct_free_tx_skbs(struct net_device *dev)
struct sk_buff *to_free_list = NULL;
spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
while (skb_to_free > 0) {
- struct sk_buff *t = __skb_dequeue(&priv->tx_free_list[qos]);
+ struct sk_buff *t;
+ t = __skb_dequeue(&priv->tx_free_list[qos]);
t->next = to_free_list;
to_free_list = t;
skb_to_free--;
}
- spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, flags);
+ spin_unlock_irqrestore(&priv->tx_free_list[qos].lock,
+ flags);
/* Do the actual freeing outside of the lock. */
while (to_free_list) {
struct sk_buff *t = to_free_list;
@@ -164,8 +170,8 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
#endif
/*
- * Prefetch the private data structure. It is larger that one
- * cache line.
+ * Prefetch the private data structure. It is larger than the
+ * one cache line.
*/
prefetch(priv);
@@ -210,15 +216,23 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(__skb_linearize(skb))) {
queue_type = QUEUE_DROP;
if (USE_ASYNC_IOBDMA) {
- /* Get the number of skbuffs in use by the hardware */
+ /*
+ * Get the number of skbuffs in use
+ * by the hardware
+ */
CVMX_SYNCIOBDMA;
- skb_to_free = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
+ skb_to_free =
+ cvmx_scratch_read64(CVMX_SCR_SCRATCH);
} else {
- /* Get the number of skbuffs in use by the hardware */
- skb_to_free = cvmx_fau_fetch_and_add32(priv->fau + qos * 4,
- MAX_SKB_TO_FREE);
+ /*
+ * Get the number of skbuffs in use
+ * by the hardware
+ */
+ skb_to_free = cvmx_fau_fetch_and_add32(
+ priv->fau + qos * 4, MAX_SKB_TO_FREE);
}
- skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free, priv->fau + qos * 4);
+ skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free,
+ priv->fau + qos * 4);
spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
goto skip_xmit;
}
@@ -275,7 +289,9 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
CVM_OCT_SKB_CB(skb)[0] = hw_buffer.u64;
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
struct skb_frag_struct *fs = skb_shinfo(skb)->frags + i;
- hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)(page_address(fs->page.p) + fs->page_offset));
+ hw_buffer.s.addr = XKPHYS_TO_PHYS(
+ (u64)(page_address(fs->page.p) +
+ fs->page_offset));
hw_buffer.s.size = fs->size;
CVM_OCT_SKB_CB(skb)[i + 1] = hw_buffer.u64;
}
@@ -290,8 +306,8 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
* See if we can put this skb in the FPA pool. Any strange
* behavior from the Linux networking stack will most likely
* be caused by a bug in the following code. If some field is
- * in use by the network stack and get carried over when a
- * buffer is reused, bad thing may happen. If in doubt and
+ * in use by the network stack and gets carried over when a
+ * buffer is reused, bad things may happen. If in doubt and
* you dont need the absolute best performance, disable the
* define REUSE_SKBUFFS_WITHOUT_FREE. The reuse of buffers has
* shown a 25% increase in performance under some loads.
@@ -344,7 +360,7 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
}
if (unlikely
(skb->truesize !=
- sizeof(*skb) + skb_end_pointer(skb) - skb->head)) {
+ sizeof(*skb) + skb_end_offset(skb))) {
/*
printk("TX buffer truesize has been changed\n");
*/
@@ -357,7 +373,9 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
*/
pko_command.s.dontfree = 0;
- hw_buffer.s.back = ((unsigned long)skb->data >> 7) - ((unsigned long)fpa_head >> 7);
+ hw_buffer.s.back = ((unsigned long)skb->data >> 7) -
+ ((unsigned long)fpa_head >> 7);
+
*(struct sk_buff **)(fpa_head - sizeof(void *)) = skb;
/*
@@ -421,17 +439,22 @@ dont_put_skbuff_in_hw:
queue_type = QUEUE_HW;
}
if (USE_ASYNC_IOBDMA)
- cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH, FAU_TOTAL_TX_TO_CLEAN, 1);
+ cvmx_fau_async_fetch_and_add32(
+ CVMX_SCR_SCRATCH, FAU_TOTAL_TX_TO_CLEAN, 1);
spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
/* Drop this packet if we have too many already queued to the HW */
- if (unlikely(skb_queue_len(&priv->tx_free_list[qos]) >= MAX_OUT_QUEUE_DEPTH)) {
+ if (unlikely(skb_queue_len(&priv->tx_free_list[qos]) >=
+ MAX_OUT_QUEUE_DEPTH)) {
+
if (dev->tx_queue_len != 0) {
/* Drop the lock when notifying the core. */
- spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, flags);
+ spin_unlock_irqrestore(&priv->tx_free_list[qos].lock,
+ flags);
netif_stop_queue(dev);
- spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
+ spin_lock_irqsave(&priv->tx_free_list[qos].lock,
+ flags);
} else {
/* If not using normal queueing. */
queue_type = QUEUE_DROP;
@@ -447,7 +470,8 @@ dont_put_skbuff_in_hw:
priv->queue + qos,
pko_command, hw_buffer,
CVMX_PKO_LOCK_NONE))) {
- printk_ratelimited("%s: Failed to send the packet\n", dev->name);
+ printk_ratelimited("%s: Failed to send the packet\n",
+ dev->name);
queue_type = QUEUE_DROP;
}
skip_xmit:
@@ -492,7 +516,8 @@ skip_xmit:
cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch);
cvmx_scratch_write64(CVMX_SCR_SCRATCH + 8, old_scratch2);
} else {
- total_to_clean = cvmx_fau_fetch_and_add32(FAU_TOTAL_TX_TO_CLEAN, 1);
+ total_to_clean = cvmx_fau_fetch_and_add32(
+ FAU_TOTAL_TX_TO_CLEAN, 1);
}
if (total_to_clean & 0x3ff) {
@@ -526,10 +551,10 @@ int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev)
/* Get a work queue entry */
cvmx_wqe_t *work = cvmx_fpa_alloc(CVMX_FPA_WQE_POOL);
if (unlikely(work == NULL)) {
- printk_ratelimited("%s: Failed to allocate a work "
- "queue entry\n", dev->name);
+ printk_ratelimited("%s: Failed to allocate a work queue entry\n",
+ dev->name);
priv->stats.tx_dropped++;
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return 0;
}
@@ -540,7 +565,7 @@ int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev)
dev->name);
cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, DONT_WRITEBACK(1));
priv->stats.tx_dropped++;
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return 0;
}
@@ -657,7 +682,7 @@ int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev)
work->grp);
priv->stats.tx_packets++;
priv->stats.tx_bytes += skb->len;
- dev_kfree_skb(skb);
+ dev_consume_skb_any(skb);
return 0;
}
@@ -708,7 +733,7 @@ void cvm_oct_tx_initialize(void)
/* Disable the interrupt. */
cvmx_write_csr(CVMX_CIU_TIMX(1), 0);
- /* Register an IRQ hander for to receive CIU_TIMX(1) interrupts */
+ /* Register an IRQ handler to receive CIU_TIMX(1) interrupts */
i = request_irq(OCTEON_IRQ_TIMER1,
cvm_oct_tx_cleanup_watchdog, 0,
"Ethernet", cvm_oct_device);
diff --git a/drivers/staging/octeon/ethernet-util.h b/drivers/staging/octeon/ethernet-util.h
index 144fb99bf50..2da5ce17ead 100644
--- a/drivers/staging/octeon/ethernet-util.h
+++ b/drivers/staging/octeon/ethernet-util.h
@@ -38,7 +38,7 @@ static inline void *cvm_oct_get_buffer_ptr(union cvmx_buf_ptr packet_ptr)
}
/**
- * INTERFACE - convert IPD port to locgical interface
+ * INTERFACE - convert IPD port to logical interface
* @ipd_port: Port to check
*
* Returns Logical interface
diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
index 9112cd88215..da9dd6bc566 100644
--- a/drivers/staging/octeon/ethernet.c
+++ b/drivers/staging/octeon/ethernet.c
@@ -24,13 +24,15 @@
* This file may also be available under a different license from Cavium.
* Contact Cavium Networks for more information
**********************************************************************/
+#include <linux/platform_device.h>
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/phy.h>
#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/of_net.h>
#include <net/dst.h>
@@ -53,23 +55,17 @@
#include <asm/octeon/cvmx-gmxx-defs.h>
#include <asm/octeon/cvmx-smix-defs.h>
-#if defined(CONFIG_CAVIUM_OCTEON_NUM_PACKET_BUFFERS) \
- && CONFIG_CAVIUM_OCTEON_NUM_PACKET_BUFFERS
-int num_packet_buffers = CONFIG_CAVIUM_OCTEON_NUM_PACKET_BUFFERS;
-#else
-int num_packet_buffers = 1024;
-#endif
+static int num_packet_buffers = 1024;
module_param(num_packet_buffers, int, 0444);
MODULE_PARM_DESC(num_packet_buffers, "\n"
"\tNumber of packet buffers to allocate and store in the\n"
- "\tFPA. By default, 1024 packet buffers are used unless\n"
- "\tCONFIG_CAVIUM_OCTEON_NUM_PACKET_BUFFERS is defined.");
+ "\tFPA. By default, 1024 packet buffers are used.\n");
int pow_receive_group = 15;
module_param(pow_receive_group, int, 0444);
MODULE_PARM_DESC(pow_receive_group, "\n"
"\tPOW group to receive packets from. All ethernet hardware\n"
- "\twill be configured to send incomming packets to this POW\n"
+ "\twill be configured to send incoming packets to this POW\n"
"\tgroup. Also any other software can submit packets to this\n"
"\tgroup for the kernel to process.");
@@ -112,15 +108,6 @@ int rx_napi_weight = 32;
module_param(rx_napi_weight, int, 0444);
MODULE_PARM_DESC(rx_napi_weight, "The NAPI WEIGHT parameter.");
-/*
- * The offset from mac_addr_base that should be used for the next port
- * that is configured. By convention, if any mgmt ports exist on the
- * chip, they get the first mac addresses, The ports controlled by
- * this driver are numbered sequencially following any mgmt addresses
- * that may exist.
- */
-static unsigned int cvm_oct_mac_addr_offset;
-
/**
* cvm_oct_poll_queue - Workqueue for polling operations.
*/
@@ -169,13 +156,15 @@ static void cvm_oct_periodic_worker(struct work_struct *work)
if (priv->poll)
priv->poll(cvm_oct_device[priv->port]);
- cvm_oct_device[priv->port]->netdev_ops->ndo_get_stats(cvm_oct_device[priv->port]);
+ cvm_oct_device[priv->port]->netdev_ops->ndo_get_stats(
+ cvm_oct_device[priv->port]);
if (!atomic_read(&cvm_oct_poll_queue_stopping))
- queue_delayed_work(cvm_oct_poll_queue, &priv->port_periodic_work, HZ);
- }
+ queue_delayed_work(cvm_oct_poll_queue,
+ &priv->port_periodic_work, HZ);
+}
-static __init void cvm_oct_configure_common_hw(void)
+static void cvm_oct_configure_common_hw(void)
{
/* Setup the FPA */
cvmx_fpa_enable();
@@ -356,7 +345,7 @@ static void cvm_oct_common_set_multicast_list(struct net_device *dev)
/* Force accept multicast packets */
control.s.mcst = 2;
else
- /* Force reject multicat packets */
+ /* Force reject multicast packets */
control.s.mcst = 1;
if (dev->flags & IFF_PROMISC)
@@ -395,23 +384,21 @@ static void cvm_oct_common_set_multicast_list(struct net_device *dev)
* Returns Zero on success
*/
-static int cvm_oct_common_set_mac_address(struct net_device *dev, void *addr)
+static int cvm_oct_set_mac_filter(struct net_device *dev)
{
struct octeon_ethernet *priv = netdev_priv(dev);
union cvmx_gmxx_prtx_cfg gmx_cfg;
int interface = INTERFACE(priv->port);
int index = INDEX(priv->port);
- memcpy(dev->dev_addr, addr + 2, 6);
-
if ((interface < 2)
&& (cvmx_helper_interface_get_mode(interface) !=
CVMX_HELPER_INTERFACE_MODE_SPI)) {
int i;
- uint8_t *ptr = addr;
+ uint8_t *ptr = dev->dev_addr;
uint64_t mac = 0;
for (i = 0; i < 6; i++)
- mac = (mac << 8) | (uint64_t) (ptr[i + 2]);
+ mac = (mac << 8) | (uint64_t)ptr[i];
gmx_cfg.u64 =
cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
@@ -420,17 +407,17 @@ static int cvm_oct_common_set_mac_address(struct net_device *dev, void *addr)
cvmx_write_csr(CVMX_GMXX_SMACX(index, interface), mac);
cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM0(index, interface),
- ptr[2]);
+ ptr[0]);
cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM1(index, interface),
- ptr[3]);
+ ptr[1]);
cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM2(index, interface),
- ptr[4]);
+ ptr[2]);
cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM3(index, interface),
- ptr[5]);
+ ptr[3]);
cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM4(index, interface),
- ptr[6]);
+ ptr[4]);
cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM5(index, interface),
- ptr[7]);
+ ptr[5]);
cvm_oct_common_set_multicast_list(dev);
cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
gmx_cfg.u64);
@@ -438,6 +425,15 @@ static int cvm_oct_common_set_mac_address(struct net_device *dev, void *addr)
return 0;
}
+static int cvm_oct_common_set_mac_address(struct net_device *dev, void *addr)
+{
+ int r = eth_mac_addr(dev, addr);
+
+ if (r)
+ return r;
+ return cvm_oct_set_mac_filter(dev);
+}
+
/**
* cvm_oct_common_init - per network device initialization
* @dev: Device to initialize
@@ -447,26 +443,15 @@ static int cvm_oct_common_set_mac_address(struct net_device *dev, void *addr)
int cvm_oct_common_init(struct net_device *dev)
{
struct octeon_ethernet *priv = netdev_priv(dev);
- struct sockaddr sa;
- u64 mac = ((u64)(octeon_bootinfo->mac_addr_base[0] & 0xff) << 40) |
- ((u64)(octeon_bootinfo->mac_addr_base[1] & 0xff) << 32) |
- ((u64)(octeon_bootinfo->mac_addr_base[2] & 0xff) << 24) |
- ((u64)(octeon_bootinfo->mac_addr_base[3] & 0xff) << 16) |
- ((u64)(octeon_bootinfo->mac_addr_base[4] & 0xff) << 8) |
- (u64)(octeon_bootinfo->mac_addr_base[5] & 0xff);
-
- mac += cvm_oct_mac_addr_offset;
- sa.sa_data[0] = (mac >> 40) & 0xff;
- sa.sa_data[1] = (mac >> 32) & 0xff;
- sa.sa_data[2] = (mac >> 24) & 0xff;
- sa.sa_data[3] = (mac >> 16) & 0xff;
- sa.sa_data[4] = (mac >> 8) & 0xff;
- sa.sa_data[5] = mac & 0xff;
-
- if (cvm_oct_mac_addr_offset >= octeon_bootinfo->mac_addr_count)
- printk(KERN_DEBUG "%s: Using MAC outside of the assigned range:"
- " %pM\n", dev->name, sa.sa_data);
- cvm_oct_mac_addr_offset++;
+ const u8 *mac = NULL;
+
+ if (priv->of_node)
+ mac = of_get_mac_address(priv->of_node);
+
+ if (mac)
+ memcpy(dev->dev_addr, mac, ETH_ALEN);
+ else
+ eth_hw_addr_random(dev);
/*
* Force the interface to use the POW send if always_use_pow
@@ -484,10 +469,10 @@ int cvm_oct_common_init(struct net_device *dev)
/* We do our own locking, Linux doesn't need to */
dev->features |= NETIF_F_LLTX;
- SET_ETHTOOL_OPS(dev, &cvm_oct_ethtool_ops);
+ dev->ethtool_ops = &cvm_oct_ethtool_ops;
cvm_oct_phy_setup_device(dev);
- dev->netdev_ops->ndo_set_mac_address(dev, &sa);
+ cvm_oct_set_mac_filter(dev);
dev->netdev_ops->ndo_change_mtu(dev, dev->mtu);
/*
@@ -594,22 +579,55 @@ static const struct net_device_ops cvm_oct_pow_netdev_ops = {
extern void octeon_mdiobus_force_mod_depencency(void);
-static int __init cvm_oct_init_module(void)
+static struct device_node *cvm_oct_of_get_child(
+ const struct device_node *parent, int reg_val)
+{
+ struct device_node *node = NULL;
+ int size;
+ const __be32 *addr;
+
+ for (;;) {
+ node = of_get_next_child(parent, node);
+ if (!node)
+ break;
+ addr = of_get_property(node, "reg", &size);
+ if (addr && (be32_to_cpu(*addr) == reg_val))
+ break;
+ }
+ return node;
+}
+
+static struct device_node *cvm_oct_node_for_port(struct device_node *pip,
+ int interface, int port)
+{
+ struct device_node *ni, *np;
+
+ ni = cvm_oct_of_get_child(pip, interface);
+ if (!ni)
+ return NULL;
+
+ np = cvm_oct_of_get_child(ni, port);
+ of_node_put(ni);
+
+ return np;
+}
+
+static int cvm_oct_probe(struct platform_device *pdev)
{
int num_interfaces;
int interface;
int fau = FAU_NUM_PACKET_BUFFERS_TO_FREE;
int qos;
+ struct device_node *pip;
octeon_mdiobus_force_mod_depencency();
pr_notice("cavium-ethernet %s\n", OCTEON_ETHERNET_VERSION);
- if (OCTEON_IS_MODEL(OCTEON_CN52XX))
- cvm_oct_mac_addr_offset = 2; /* First two are the mgmt ports. */
- else if (OCTEON_IS_MODEL(OCTEON_CN56XX))
- cvm_oct_mac_addr_offset = 1; /* First one is the mgmt port. */
- else
- cvm_oct_mac_addr_offset = 0;
+ pip = pdev->dev.of_node;
+ if (!pip) {
+ pr_err("Error: No 'pip' in /aliases\n");
+ return -EINVAL;
+ }
cvm_oct_poll_queue = create_singlethread_workqueue("octeon-ethernet");
if (cvm_oct_poll_queue == NULL) {
@@ -688,10 +706,12 @@ static int __init cvm_oct_init_module(void)
cvmx_helper_interface_get_mode(interface);
int num_ports = cvmx_helper_ports_on_interface(interface);
int port;
+ int port_index;
- for (port = cvmx_helper_get_ipd_port(interface, 0);
+ for (port_index = 0,
+ port = cvmx_helper_get_ipd_port(interface, 0);
port < cvmx_helper_get_ipd_port(interface, num_ports);
- port++) {
+ port_index++, port++) {
struct octeon_ethernet *priv;
struct net_device *dev =
alloc_etherdev(sizeof(struct octeon_ethernet));
@@ -702,6 +722,8 @@ static int __init cvm_oct_init_module(void)
/* Initialize the device private structure. */
priv = netdev_priv(dev);
+ priv->of_node = cvm_oct_node_for_port(pip, interface,
+ port_index);
INIT_DELAYED_WORK(&priv->port_periodic_work,
cvm_oct_periodic_worker);
@@ -768,7 +790,7 @@ static int __init cvm_oct_init_module(void)
cvmx_pko_get_num_queues(priv->port) *
sizeof(uint32_t);
queue_delayed_work(cvm_oct_poll_queue,
- &priv->port_periodic_work, HZ);
+ &priv->port_periodic_work, HZ);
}
}
}
@@ -786,7 +808,7 @@ static int __init cvm_oct_init_module(void)
return 0;
}
-static void __exit cvm_oct_cleanup_module(void)
+static int cvm_oct_remove(struct platform_device *pdev)
{
int port;
@@ -834,10 +856,29 @@ static void __exit cvm_oct_cleanup_module(void)
if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL)
cvm_oct_mem_empty_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL,
CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 128);
+ return 0;
}
+static struct of_device_id cvm_oct_match[] = {
+ {
+ .compatible = "cavium,octeon-3860-pip",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, cvm_oct_match);
+
+static struct platform_driver cvm_oct_driver = {
+ .probe = cvm_oct_probe,
+ .remove = cvm_oct_remove,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = KBUILD_MODNAME,
+ .of_match_table = cvm_oct_match,
+ },
+};
+
+module_platform_driver(cvm_oct_driver);
+
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Cavium Networks <support@caviumnetworks.com>");
MODULE_DESCRIPTION("Cavium Networks Octeon ethernet driver.");
-module_init(cvm_oct_init_module);
-module_exit(cvm_oct_cleanup_module);
diff --git a/drivers/staging/octeon/octeon-ethernet.h b/drivers/staging/octeon/octeon-ethernet.h
index d5819256355..4cf3884070f 100644
--- a/drivers/staging/octeon/octeon-ethernet.h
+++ b/drivers/staging/octeon/octeon-ethernet.h
@@ -31,6 +31,8 @@
#ifndef OCTEON_ETHERNET_H
#define OCTEON_ETHERNET_H
+#include <linux/of.h>
+
/**
* This is the definition of the Ethernet driver's private
* driver state stored in netdev_priv(dev).
@@ -56,9 +58,10 @@ struct octeon_ethernet {
/* Last negotiated link state */
uint64_t link_info;
/* Called periodically to check link status */
- void (*poll) (struct net_device *dev);
+ void (*poll)(struct net_device *dev);
struct delayed_work port_periodic_work;
struct work_struct port_work; /* may be unused. */
+ struct device_node *of_node;
};
int cvm_oct_free_work(void *work_queue_entry);