Diffstat (limited to 'drivers/staging/octeon/ethernet-rx.c')
-rw-r--r--  drivers/staging/octeon/ethernet-rx.c | 24 ++++++++++--------------
1 file changed, 10 insertions(+), 14 deletions(-)
diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
index 400df8cbee5..a0f4868cfa1 100644
--- a/drivers/staging/octeon/ethernet-rx.c
+++ b/drivers/staging/octeon/ethernet-rx.c
@@ -29,13 +29,13 @@
#include <linux/cache.h>
#include <linux/cpumask.h>
#include <linux/netdevice.h>
-#include <linux/init.h>
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/string.h>
#include <linux/prefetch.h>
#include <linux/ratelimit.h>
#include <linux/smp.h>
+#include <linux/interrupt.h>
#include <net/dst.h>
#ifdef CONFIG_XFRM
#include <linux/xfrm.h>
@@ -71,7 +71,7 @@ struct cvm_oct_core_state {
int baseline_cores;
/*
* The number of additional cores that could be processing
- * input packtes.
+ * input packets.
*/
atomic_t available_cores;
cpumask_t cpu_state;
@@ -79,6 +79,8 @@ struct cvm_oct_core_state {
static struct cvm_oct_core_state core_state __cacheline_aligned_in_smp;
+static int cvm_irq_cpu;
+
static void cvm_oct_enable_napi(void *_)
{
int cpu = smp_processor_id();
@@ -111,11 +113,7 @@ static void cvm_oct_no_more_work(void)
{
int cpu = smp_processor_id();
- /*
- * CPU zero is special. It always has the irq enabled when
- * waiting for incoming packets.
- */
- if (cpu == 0) {
+ if (cpu == cvm_irq_cpu) {
enable_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group);
return;
}
@@ -134,6 +132,7 @@ static irqreturn_t cvm_oct_do_interrupt(int cpl, void *dev_id)
{
/* Disable the IRQ and start napi_poll. */
disable_irq_nosync(OCTEON_IRQ_WORKQ0 + pow_receive_group);
+ cvm_irq_cpu = smp_processor_id();
cvm_oct_enable_napi(NULL);
return IRQ_HANDLED;
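
Taken together, the hunks above drop the hard-coded assumption that CPU 0 owns the receive interrupt: the handler records the CPU it ran on in cvm_irq_cpu, and cvm_oct_no_more_work() re-enables the IRQ only from that CPU. A rough sketch of the resulting flow, reconstructed from the lines shown here (everything else in both functions is unchanged and elided):

static int cvm_irq_cpu;	/* CPU that last took the POW interrupt */

static irqreturn_t cvm_oct_do_interrupt(int cpl, void *dev_id)
{
	/* Disable the IRQ and start napi_poll. */
	disable_irq_nosync(OCTEON_IRQ_WORKQ0 + pow_receive_group);
	/* Remember which CPU now owns the disabled IRQ line. */
	cvm_irq_cpu = smp_processor_id();
	cvm_oct_enable_napi(NULL);
	return IRQ_HANDLED;
}

static void cvm_oct_no_more_work(void)
{
	int cpu = smp_processor_id();

	/* Only the CPU that took (and disabled) the IRQ re-enables it. */
	if (cpu == cvm_irq_cpu) {
		enable_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group);
		return;
	}
	/* Other CPUs drop out of the polling set here (elided). */
}
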
@@ -162,7 +161,7 @@ static inline int cvm_oct_check_rcv_error(cvmx_wqe_t *work)
/*
* We received a packet with either an alignment error
* or a FCS error. This may be signalling that we are
- * running 10Mbps with GMXX_RXX_FRM_CTL[PRE_CHK}
+ * running 10Mbps with GMXX_RXX_FRM_CTL[PRE_CHK]
* off. If this is the case we need to parse the
* packet to determine if we can remove a non spec
* preamble and generate a correct packet.
@@ -302,6 +301,7 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
if (backlog > budget * cores_in_use && napi != NULL)
cvm_oct_enable_one_cpu();
}
+ rx_count++;
skb_in_hw = USE_SKBUFFS_IN_HW && work->word2.s.bufs == 1;
if (likely(skb_in_hw)) {
@@ -335,9 +335,6 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
*/
skb = dev_alloc_skb(work->len);
if (!skb) {
- printk_ratelimited("Port %d failed to allocate "
- "skbuff, packet dropped\n",
- work->ipprt);
cvm_oct_free_work(work);
continue;
}
@@ -428,7 +425,6 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
#endif
}
netif_receive_skb(skb);
- rx_count++;
} else {
/* Drop any packet received for a device that isn't up */
/*
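
Relocating rx_count++ (added near the top of the loop in an earlier hunk, removed after netif_receive_skb() here) changes what is charged against the NAPI budget: every work-queue entry pulled from the POW now counts, not only packets that are actually delivered, so drops and failed skb allocations can no longer let the loop overrun its budget. A schematic of the loop shape after the change, assuming the driver's existing cvmx_pow_work_request_sync() helper and with all packet handling elided:

static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
{
	int rx_count = 0;

	while (rx_count < budget) {
		cvmx_wqe_t *work = cvmx_pow_work_request_sync(CVMX_POW_NO_WAIT);

		if (work == NULL)
			break;

		/* Charge the budget for every entry taken from the POW ... */
		rx_count++;

		/* ... even if the packet is later dropped, e.g. because
		 * dev_alloc_skb() fails or the target device is not up. */
	}

	/* When the work queue drains before the budget is spent, the poll
	 * completes and the IRQ is re-armed via cvm_oct_no_more_work()
	 * (elided). */
	return rx_count;
}
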
@@ -516,7 +512,7 @@ void cvm_oct_rx_initialize(void)
if (NULL == dev_for_napi)
panic("No net_devices were allocated.");
- if (max_rx_cpus > 1 && max_rx_cpus < num_online_cpus())
+ if (max_rx_cpus >= 1 && max_rx_cpus < num_online_cpus())
atomic_set(&core_state.available_cores, max_rx_cpus);
else
atomic_set(&core_state.available_cores, num_online_cpus());
@@ -528,7 +524,7 @@ void cvm_oct_rx_initialize(void)
cvm_oct_napi_poll, rx_napi_weight);
napi_enable(&cvm_oct_napi[i].napi);
}
- /* Register an IRQ hander for to receive POW interrupts */
+ /* Register an IRQ handler to receive POW interrupts */
i = request_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group,
cvm_oct_do_interrupt, 0, "Ethernet", cvm_oct_device);