| author    | David S. Miller <davem@davemloft.net>            | 2008-07-08 23:13:53 -0700 |
|-----------|--------------------------------------------------|---------------------------|
| committer | David S. Miller <davem@davemloft.net>            | 2008-07-08 23:13:53 -0700 |
| commit    | c773e847ea8f6812804e40f52399c6921a00eab1 (patch)  |                           |
| tree      | 952e0e262cc0b0f2136bc2a62938ae1d186f896a /net     |                           |
| parent    | eb6aafe3f843cb0e939546c03540a3b4911b6964 (diff)   |                           |
netdev: Move _xmit_lock and xmit_lock_owner into netdev_queue.
Accesses are mostly structured so that, when there are multiple TX queues, the code transformations will be a little bit simpler.
Signed-off-by: David S. Miller <davem@davemloft.net>
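
For readers of the diff below: txq->_xmit_lock and txq->xmit_lock_owner now live in struct netdev_queue rather than struct net_device, and HARD_TX_LOCK()/HARD_TX_UNLOCK() take the queue as an extra argument. The include/linux/netdevice.h side of the change is outside this net-only diffstat, so the following is only a sketch of the shape implied by the call sites in the patch; the surrounding struct members and the NETIF_F_LLTX guard are assumptions carried over from the pre-patch code, not part of this diff.

```c
/* Sketch only: the struct layout and the LLTX check are assumptions
 * inferred from the call sites in this patch, not the actual
 * include/linux/netdevice.h hunk (which is outside the net/ diffstat). */
struct netdev_queue {
	spinlock_t		lock;		/* qdisc queue lock (already per-queue) */
	struct net_device	*dev;
	struct Qdisc		*qdisc;
	spinlock_t		_xmit_lock;	/* moved here from struct net_device */
	int			xmit_lock_owner; /* moved here from struct net_device */
};

/* The TX lock helpers now operate on a queue instead of the device. */
#define HARD_TX_LOCK(dev, txq, cpu) {			\
	if (((dev)->features & NETIF_F_LLTX) == 0) {	\
		spin_lock(&(txq)->_xmit_lock);		\
		(txq)->xmit_lock_owner = (cpu);		\
	}						\
}

#define HARD_TX_UNLOCK(dev, txq) {			\
	if (((dev)->features & NETIF_F_LLTX) == 0) {	\
		(txq)->xmit_lock_owner = -1;		\
		spin_unlock(&(txq)->_xmit_lock);	\
	}						\
}
```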
Diffstat (limited to 'net')
-rw-r--r-- | net/8021q/vlan_dev.c    | 15 |
-rw-r--r-- | net/core/dev.c          | 28 |
-rw-r--r-- | net/netrom/af_netrom.c  | 12 |
-rw-r--r-- | net/rose/af_rose.c      | 12 |
-rw-r--r-- | net/sched/sch_generic.c |  9 |
5 files changed, 59 insertions, 17 deletions
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index b6e52c025fd..8efa399823e 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -627,6 +627,18 @@ static void vlan_dev_set_rx_mode(struct net_device *vlan_dev)
  */
 static struct lock_class_key vlan_netdev_xmit_lock_key;
 
+static void vlan_dev_set_lockdep_one(struct netdev_queue *txq,
+                                     int subclass)
+{
+        lockdep_set_class_and_subclass(&txq->_xmit_lock,
+                                       &vlan_netdev_xmit_lock_key, subclass);
+}
+
+static void vlan_dev_set_lockdep_class(struct net_device *dev, int subclass)
+{
+        vlan_dev_set_lockdep_one(&dev->tx_queue, subclass);
+}
+
 static const struct header_ops vlan_header_ops = {
         .create  = vlan_dev_hard_header,
         .rebuild = vlan_dev_rebuild_header,
@@ -668,8 +680,7 @@ static int vlan_dev_init(struct net_device *dev)
         if (is_vlan_dev(real_dev))
                 subclass = 1;
 
-        lockdep_set_class_and_subclass(&dev->_xmit_lock,
-                                       &vlan_netdev_xmit_lock_key, subclass);
+        vlan_dev_set_lockdep_class(dev, subclass);
         return 0;
 }
diff --git a/net/core/dev.c b/net/core/dev.c
index 0218b0b9be8..a29a359b15d 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -258,7 +258,7 @@ DEFINE_PER_CPU(struct softnet_data, softnet_data);
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 /*
- * register_netdevice() inits dev->_xmit_lock and sets lockdep class
+ * register_netdevice() inits txq->_xmit_lock and sets lockdep class
  * according to dev->type
  */
 static const unsigned short netdev_lock_type[] =
@@ -1758,19 +1758,19 @@ gso:
         if (dev->flags & IFF_UP) {
                 int cpu = smp_processor_id(); /* ok because BHs are off */
 
-                if (dev->xmit_lock_owner != cpu) {
+                if (txq->xmit_lock_owner != cpu) {
 
-                        HARD_TX_LOCK(dev, cpu);
+                        HARD_TX_LOCK(dev, txq, cpu);
 
                         if (!netif_queue_stopped(dev) &&
                             !netif_subqueue_stopped(dev, skb)) {
                                 rc = 0;
                                 if (!dev_hard_start_xmit(skb, dev)) {
-                                        HARD_TX_UNLOCK(dev);
+                                        HARD_TX_UNLOCK(dev, txq);
                                         goto out;
                                 }
                         }
-                        HARD_TX_UNLOCK(dev);
+                        HARD_TX_UNLOCK(dev, txq);
                         if (net_ratelimit())
                                 printk(KERN_CRIT "Virtual device %s asks to "
                                        "queue packet!\n", dev->name);
@@ -3761,6 +3761,20 @@ static void rollback_registered(struct net_device *dev)
         dev_put(dev);
 }
 
+static void __netdev_init_queue_locks_one(struct netdev_queue *dev_queue,
+                                          struct net_device *dev)
+{
+        spin_lock_init(&dev_queue->_xmit_lock);
+        netdev_set_lockdep_class(&dev_queue->_xmit_lock, dev->type);
+        dev_queue->xmit_lock_owner = -1;
+}
+
+static void netdev_init_queue_locks(struct net_device *dev)
+{
+        __netdev_init_queue_locks_one(&dev->tx_queue, dev);
+        __netdev_init_queue_locks_one(&dev->rx_queue, dev);
+}
+
 /**
  *      register_netdevice      - register a network device
  *      @dev: device to register
@@ -3795,9 +3809,7 @@ int register_netdevice(struct net_device *dev)
         BUG_ON(!dev_net(dev));
         net = dev_net(dev);
 
-        spin_lock_init(&dev->_xmit_lock);
-        netdev_set_lockdep_class(&dev->_xmit_lock, dev->type);
-        dev->xmit_lock_owner = -1;
+        netdev_init_queue_locks(dev);
 
         dev->iflink = -1;
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index 74884f4a625..819afc449e1 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -74,6 +74,16 @@ static const struct proto_ops nr_proto_ops;
  */
 static struct lock_class_key nr_netdev_xmit_lock_key;
 
+static void nr_set_lockdep_one(struct netdev_queue *txq)
+{
+        lockdep_set_class(&txq->_xmit_lock, &nr_netdev_xmit_lock_key);
+}
+
+static void nr_set_lockdep_key(struct net_device *dev)
+{
+        nr_set_lockdep_one(&dev->tx_queue);
+}
+
 /*
  *      Socket removal during an interrupt is now safe.
  */
@@ -1430,7 +1440,7 @@ static int __init nr_proto_init(void)
                         free_netdev(dev);
                         goto fail;
                 }
-                lockdep_set_class(&dev->_xmit_lock, &nr_netdev_xmit_lock_key);
+                nr_set_lockdep_key(dev);
                 dev_nr[i] = dev;
         }
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index 46461a69cd0..7dbbc089162 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -75,6 +75,16 @@ ax25_address rose_callsign;
  */
 static struct lock_class_key rose_netdev_xmit_lock_key;
 
+static void rose_set_lockdep_one(struct netdev_queue *txq)
+{
+        lockdep_set_class(&txq->_xmit_lock, &rose_netdev_xmit_lock_key);
+}
+
+static void rose_set_lockdep_key(struct net_device *dev)
+{
+        rose_set_lockdep_one(&dev->tx_queue);
+}
+
 /*
  *      Convert a ROSE address into text.
  */
@@ -1576,7 +1586,7 @@ static int __init rose_proto_init(void)
                         free_netdev(dev);
                         goto fail;
                 }
-                lockdep_set_class(&dev->_xmit_lock, &rose_netdev_xmit_lock_key);
+                rose_set_lockdep_key(dev);
                 dev_rose[i] = dev;
         }
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index fcc7533f0bc..b6a36d39466 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -92,10 +92,9 @@ static inline int handle_dev_cpu_collision(struct sk_buff *skb,
                                            struct netdev_queue *dev_queue,
                                            struct Qdisc *q)
 {
-        struct net_device *dev = dev_queue->dev;
         int ret;
 
-        if (unlikely(dev->xmit_lock_owner == smp_processor_id())) {
+        if (unlikely(dev_queue->xmit_lock_owner == smp_processor_id())) {
                 /*
                  * Same CPU holding the lock. It may be a transient
                  * configuration error, when hard_start_xmit() recurses. We
@@ -105,7 +104,7 @@ static inline int handle_dev_cpu_collision(struct sk_buff *skb,
                 kfree_skb(skb);
                 if (net_ratelimit())
                         printk(KERN_WARNING "Dead loop on netdevice %s, "
-                               "fix it urgently!\n", dev->name);
+                               "fix it urgently!\n", dev_queue->dev->name);
                 ret = qdisc_qlen(q);
         } else {
                 /*
@@ -155,10 +154,10 @@ static inline int qdisc_restart(struct netdev_queue *txq)
         dev = txq->dev;
 
-        HARD_TX_LOCK(dev, smp_processor_id());
+        HARD_TX_LOCK(dev, txq, smp_processor_id());
         if (!netif_subqueue_stopped(dev, skb))
                 ret = dev_hard_start_xmit(skb, dev);
-        HARD_TX_UNLOCK(dev);
+        HARD_TX_UNLOCK(dev, txq);
 
         spin_lock(&txq->lock);
         q = txq->qdisc;
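
The per-queue wrappers introduced above (__netdev_init_queue_locks_one(), vlan_dev_set_lockdep_one(), nr_set_lockdep_one(), rose_set_lockdep_one()) are what the commit message is pointing at: once a device carries more than one TX queue, only the outer helper has to change. A hedged sketch, using a hypothetical num_tx_queues count and _tx queue array that this patch does not introduce:

```c
/* Hypothetical multi-queue follow-up; dev->num_tx_queues and dev->_tx
 * are assumptions for illustration, not fields added by this patch. */
static void netdev_init_queue_locks(struct net_device *dev)
{
	unsigned int i;

	/* Initialize the per-queue _xmit_lock for every TX queue. */
	for (i = 0; i < dev->num_tx_queues; i++)
		__netdev_init_queue_locks_one(&dev->_tx[i], dev);
	__netdev_init_queue_locks_one(&dev->rx_queue, dev);
}
```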