author     Francois Romieu <romieu@fr.zoreil.com>      2006-12-17 23:03:15 +0100
committer  Jeff Garzik <jeff@garzik.org>               2006-12-26 16:24:11 -0500
commit     d15e9c4d9a75702b30e00cdf95c71c88e3f3f51e (patch)
tree       e7ba1469eac6f732a7d2b9debc7713d8173dc0a2 /drivers/net/forcedeth.c
parent     79f3d3996f06ee339c6f173e573826eccd3914ab (diff)
netpoll: drivers must not enable IRQ unconditionally in their NAPI handler
net/core/netpoll.c::netpoll_send_skb() calls the poll handler when
it is available. As netconsole can be used from almost any context,
IRQ must not be enabled blindly in the NAPI handler of a driver which
supports netpoll.
b57bd06655a028aba7b92e1c19c2093e7fcfb341 fixed the issue for the
8139too.c driver.
Signed-off-by: Francois Romieu <romieu@fr.zoreil.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
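The locking change in the diff below generalizes to any driver whose NAPI poll handler is reachable through netpoll: the handler may be entered with interrupts already disabled, so it must save and restore the caller's interrupt state rather than re-enable interrupts outright. Here is a minimal sketch of the unsafe and the corrected pattern, using a hypothetical driver private structure (not forcedeth's actual fe_priv):

```c
#include <linux/spinlock.h>
#include <linux/types.h>

/* Hypothetical driver private data -- illustrative only. */
struct foo_priv {
	spinlock_t lock;
	u32 irqmask;
};

/* UNSAFE when reachable via netpoll: spin_unlock_irq() re-enables
 * interrupts unconditionally, even if the caller (e.g. netconsole via
 * netpoll_send_skb()) entered the poll handler with IRQs disabled.
 */
static void foo_reenable_rx_irq_unsafe(struct foo_priv *np)
{
	spin_lock_irq(&np->lock);
	/* ... write np->irqmask back to the hardware ... */
	spin_unlock_irq(&np->lock);
}

/* SAFE: save the caller's interrupt state and restore it afterwards,
 * which is what the patch below does with np->lock in nv_napi_poll().
 */
static void foo_reenable_rx_irq_safe(struct foo_priv *np)
{
	unsigned long flags;

	spin_lock_irqsave(&np->lock, flags);
	/* ... write np->irqmask back to the hardware ... */
	spin_unlock_irqrestore(&np->lock, flags);
}
```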
Diffstat (limited to 'drivers/net/forcedeth.c')
-rw-r--r-- | drivers/net/forcedeth.c | 11
1 file changed, 7 insertions(+), 4 deletions(-)
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 439f4133829..820f8c79842 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -2576,14 +2576,15 @@ static int nv_napi_poll(struct net_device *dev, int *budget)
 	int pkts, limit = min(*budget, dev->quota);
 	struct fe_priv *np = netdev_priv(dev);
 	u8 __iomem *base = get_hwbase(dev);
+	unsigned long flags;
 
 	pkts = nv_rx_process(dev, limit);
 
 	if (nv_alloc_rx(dev)) {
-		spin_lock_irq(&np->lock);
+		spin_lock_irqsave(&np->lock, flags);
 		if (!np->in_shutdown)
 			mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
-		spin_unlock_irq(&np->lock);
+		spin_unlock_irqrestore(&np->lock, flags);
 	}
 
 	if (pkts < limit) {
@@ -2591,13 +2592,15 @@ static int nv_napi_poll(struct net_device *dev, int *budget)
 		netif_rx_complete(dev);
 
 		/* re-enable receive interrupts */
-		spin_lock_irq(&np->lock);
+		spin_lock_irqsave(&np->lock, flags);
+
 		np->irqmask |= NVREG_IRQ_RX_ALL;
 		if (np->msi_flags & NV_MSI_X_ENABLED)
 			writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
 		else
 			writel(np->irqmask, base + NvRegIrqMask);
-		spin_unlock_irq(&np->lock);
+
+		spin_unlock_irqrestore(&np->lock, flags);
 		return 0;
 	} else {
 		/* used up our quantum, so reschedule */
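For context on why the poll handler can run with interrupts already off: netconsole sends its messages through netpoll from almost any context, and when the TX queue is stopped netpoll drives the device's poll handler directly so descriptors get reclaimed. The following is a rough, hypothetical sketch of that situation using the pre-2.6.24 ->poll() interface; it is not the real net/core/netpoll.c code, and the function name is made up.

```c
#include <linux/netpoll.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>

/* Hypothetical sketch of a netpoll-style transmit attempt; NOT the real
 * netpoll_send_skb().  It only illustrates that dev->poll() may be
 * reached with local interrupts disabled.
 */
static void example_netpoll_xmit(struct netpoll *np, struct sk_buff *skb)
{
	unsigned long flags;
	int budget = 16;

	/* Console output can happen in IRQ context or with IRQs already
	 * masked, so the whole attempt runs with interrupts off.
	 */
	local_irq_save(flags);

	while (netif_queue_stopped(np->dev)) {
		/* TX ring full: call the driver's poll handler so it can
		 * clean up.  If that handler uses spin_unlock_irq(), it
		 * re-enables interrupts right here, contradicting the
		 * state saved in 'flags' above.
		 */
		np->dev->poll(np->dev, &budget);
	}

	np->dev->hard_start_xmit(skb, np->dev);

	local_irq_restore(flags);
}
```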