| author | David S. Miller <davem@davemloft.net> | 2008-07-31 16:58:50 -0700 |
| --- | --- | --- |
| committer | David S. Miller <davem@davemloft.net> | 2008-07-31 16:58:50 -0700 |
| commit | c3f26a269c2421f97f10cf8ed05d5099b573af4d | |
| tree | d0602cbb48742b3e39ab6bdcaa08c342d4cd2cae /net | |
| parent | 967ab999a090b1a4e7d3c7febfd6d89b42fb4cf4 | |
netdev: Fix lockdep warnings in multiqueue configurations.
When support for multiple TX queues was added, the netif_tx_lock()
routines were converted to iterate over all TX queues and grab each
queue's spinlock.

This triggers lockdep warnings, and it is not a healthy thing to do
with lots of TX queues anyway.

So modify this to take a single top-level lock (dev->tx_global_lock)
and set a "frozen" state on the individual TX queues instead.
Signed-off-by: David S. Miller <davem@davemloft.net>
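The header-side half of this change is not shown below because the diffstat is limited to 'net', but the description above is easier to follow with it in view. What follows is only a rough sketch, reconstructed from memory, of what the new netif_tx_lock()/netif_tx_unlock() pair in include/linux/netdevice.h looks like after this commit; the bit name __QUEUE_STATE_FROZEN and the exact rescheduling details should be checked against the tree.

```c
/*
 * Hedged sketch of the header-side change, assuming the names used in
 * the hunks below (dev->tx_global_lock, netif_tx_queue_frozen()); the
 * real code lives in include/linux/netdevice.h and is not part of the
 * 'net'-limited diff on this page.
 */
static inline void netif_tx_lock(struct net_device *dev)
{
	unsigned int i;
	int cpu;

	/* One top-level lock instead of nesting every queue's lock. */
	spin_lock(&dev->tx_global_lock);
	cpu = smp_processor_id();
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		/* Briefly take each queue's _xmit_lock so that a CPU
		 * already inside ->hard_start_xmit() is synchronized
		 * with before the frozen bit is set; the lock is
		 * dropped again at once, so only one per-queue lock
		 * is ever held at a time.
		 */
		__netif_tx_lock(txq, cpu);
		set_bit(__QUEUE_STATE_FROZEN, &txq->state);
		__netif_tx_unlock(txq);
	}
}

static inline void netif_tx_unlock(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		/* Unfreeze, and kick the qdisc if the queue is not
		 * stopped for some other reason.
		 */
		clear_bit(__QUEUE_STATE_FROZEN, &txq->state);
		if (!test_bit(__QUEUE_STATE_XOFF, &txq->state))
			__netif_schedule(txq->qdisc);
	}
	spin_unlock(&dev->tx_global_lock);
}
```

With this scheme a netif_tx_lock() holder still touches every queue's _xmit_lock, but never holds more than one of them at a time, which is what keeps lockdep quiet on devices with many TX queues.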
Diffstat (limited to 'net')
-rw-r--r-- | net/core/dev.c | 1
-rw-r--r-- | net/core/netpoll.c | 1
-rw-r--r-- | net/core/pktgen.c | 7
-rw-r--r-- | net/sched/sch_generic.c | 6
-rw-r--r-- | net/sched/sch_teql.c | 9

5 files changed, 16 insertions, 8 deletions
diff --git a/net/core/dev.c b/net/core/dev.c
index 63d6bcddbf4..69320a56a08 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4200,6 +4200,7 @@ static void netdev_init_queues(struct net_device *dev)
 {
         netdev_init_one_queue(dev, &dev->rx_queue, NULL);
         netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
+        spin_lock_init(&dev->tx_global_lock);
 }
 
 /**
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index c12720895ec..6c7af390be0 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -70,6 +70,7 @@ static void queue_process(struct work_struct *work)
                         local_irq_save(flags);
                         __netif_tx_lock(txq, smp_processor_id());
                         if (netif_tx_queue_stopped(txq) ||
+                            netif_tx_queue_frozen(txq) ||
                             dev->hard_start_xmit(skb, dev) != NETDEV_TX_OK) {
                                 skb_queue_head(&npinfo->txq, skb);
                                 __netif_tx_unlock(txq);
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index c7d484f7e1c..3284605f2ec 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -3305,6 +3305,7 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
 
         txq = netdev_get_tx_queue(odev, queue_map);
         if (netif_tx_queue_stopped(txq) ||
+            netif_tx_queue_frozen(txq) ||
             need_resched()) {
                 idle_start = getCurUs();
 
@@ -3320,7 +3321,8 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
 
                 pkt_dev->idle_acc += getCurUs() - idle_start;
 
-                if (netif_tx_queue_stopped(txq)) {
+                if (netif_tx_queue_stopped(txq) ||
+                    netif_tx_queue_frozen(txq)) {
                         pkt_dev->next_tx_us = getCurUs();  /* TODO */
                         pkt_dev->next_tx_ns = 0;
                         goto out;  /* Try the next interface */
@@ -3352,7 +3354,8 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
 
         txq = netdev_get_tx_queue(odev, queue_map);
         __netif_tx_lock_bh(txq);
-        if (!netif_tx_queue_stopped(txq)) {
+        if (!netif_tx_queue_stopped(txq) &&
+            !netif_tx_queue_frozen(txq)) {
 
                 atomic_inc(&(pkt_dev->skb->users));
               retry_now:
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 345838a2e36..9c9cd4d9489 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -135,7 +135,8 @@ static inline int qdisc_restart(struct Qdisc *q)
         txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
 
         HARD_TX_LOCK(dev, txq, smp_processor_id());
-        if (!netif_subqueue_stopped(dev, skb))
+        if (!netif_tx_queue_stopped(txq) &&
+            !netif_tx_queue_frozen(txq))
                 ret = dev_hard_start_xmit(skb, dev, txq);
         HARD_TX_UNLOCK(dev, txq);
 
@@ -162,7 +163,8 @@ static inline int qdisc_restart(struct Qdisc *q)
                 break;
         }
 
-        if (ret && netif_tx_queue_stopped(txq))
+        if (ret && (netif_tx_queue_stopped(txq) ||
+                    netif_tx_queue_frozen(txq)))
                 ret = 0;
 
         return ret;
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index 537223642b6..2c35c678563 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -305,10 +305,11 @@ restart:
 
                 switch (teql_resolve(skb, skb_res, slave)) {
                 case 0:
-                        if (netif_tx_trylock(slave)) {
-                                if (!__netif_subqueue_stopped(slave, subq) &&
+                        if (__netif_tx_trylock(slave_txq)) {
+                                if (!netif_tx_queue_stopped(slave_txq) &&
+                                    !netif_tx_queue_frozen(slave_txq) &&
                                     slave->hard_start_xmit(skb, slave) == 0) {
-                                        netif_tx_unlock(slave);
+                                        __netif_tx_unlock(slave_txq);
                                         master->slaves = NEXT_SLAVE(q);
                                         netif_wake_queue(dev);
                                         master->stats.tx_packets++;
@@ -316,7 +317,7 @@ restart:
                                                 qdisc_pkt_len(skb);
                                         return 0;
                                 }
-                                netif_tx_unlock(slave);
+                                __netif_tx_unlock(slave_txq);
                         }
                         if (netif_queue_stopped(dev))
                                 busy = 1;
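All of the hunks above gate the transmit fast paths on the same new per-queue predicate. As an illustration only: netif_tx_queue_frozen() is the real helper used throughout the diff (defined in include/linux/netdevice.h, outside the 'net'-limited diffstat, roughly as sketched here), while try_xmit_one() is a made-up wrapper, not kernel code, showing the call-site pattern the hunks converge on.

```c
/*
 * Assumed shape of the new predicate: a queue is "frozen" while some
 * other thread holds dev->tx_global_lock via netif_tx_lock().
 */
static inline int netif_tx_queue_frozen(const struct netdev_queue *dev_queue)
{
	return test_bit(__QUEUE_STATE_FROZEN, &dev_queue->state);
}

/* Hypothetical wrapper illustrating the converted call sites. */
static int try_xmit_one(struct sk_buff *skb, struct net_device *dev,
			struct netdev_queue *txq)
{
	int ret = NETDEV_TX_BUSY;

	/* Only the per-queue _xmit_lock is taken on the fast path; a
	 * concurrent netif_tx_lock() holder is respected by checking
	 * the frozen bit rather than by nested lock acquisition.
	 */
	__netif_tx_lock(txq, smp_processor_id());
	if (!netif_tx_queue_stopped(txq) &&
	    !netif_tx_queue_frozen(txq))
		ret = dev->hard_start_xmit(skb, dev);
	__netif_tx_unlock(txq);

	return ret;
}
```

Compare this with the qdisc_restart() hunk in net/sched/sch_generic.c above, which applies the same stopped-or-frozen check under HARD_TX_LOCK().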