author    David S. Miller <davem@davemloft.net>  2013-01-29 15:32:13 -0500
committer David S. Miller <davem@davemloft.net>  2013-01-29 15:32:13 -0500
commit    f1e7b73acc26e8908af783bcd3a9900fd80688f5 (patch)
tree      9a9382fb7f12f1889020efb4bffa3f4a88589fc5 /drivers/net/virtio_net.c
parent    218774dc341f219bfcf940304a081b121a0e8099 (diff)
parent    fc16e884a2320198b8cb7bc2fdcf6b4485e79709 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Bring in the 'net' tree so that we can get some ipv4/ipv6 bug fixes that
some net-next work will build upon.

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/virtio_net.c')
-rw-r--r--  drivers/net/virtio_net.c  |  118
1 file changed, 98 insertions(+), 20 deletions(-)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 58914c8ea68..eda2042a646 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -26,6 +26,7 @@
#include <linux/scatterlist.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>
+#include <linux/cpu.h>
static int napi_weight = 128;
module_param(napi_weight, int, 0444);
@@ -123,6 +124,12 @@ struct virtnet_info {
/* Is the affinity hint set for virtqueues? */
bool affinity_hint_set;
+
+ /* Per-cpu variable to show the mapping from CPU to virtqueue */
+ int __percpu *vq_index;
+
+ /* CPU hot plug notifier */
+ struct notifier_block nb;
};
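The two new fields give the driver a per-CPU integer (vq_index) recording which
virtqueue pair the executing CPU should use, plus a notifier block for CPU
hotplug events. A minimal sketch of the dynamic per-cpu storage lifecycle used
here; the patch itself fills the values via its affinity helpers, so the -1
initialization below is an illustrative assumption:

	/* Sketch only, not the patch itself. */
	static int __percpu *vq_index;   /* same type as the new field */

	static int alloc_vq_index(void)
	{
		int cpu;

		vq_index = alloc_percpu(int);   /* one int per possible CPU */
		if (!vq_index)
			return -ENOMEM;

		for_each_possible_cpu(cpu)
			*per_cpu_ptr(vq_index, cpu) = -1;  /* assumed: -1 = unmapped */
		return 0;
	}

	/* teardown: free_percpu(vq_index); */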
struct skb_vnet_hdr {
@@ -1030,32 +1037,75 @@ static int virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid)
return 0;
}
-static void virtnet_set_affinity(struct virtnet_info *vi, bool set)
+static void virtnet_clean_affinity(struct virtnet_info *vi, long hcpu)
{
int i;
+ int cpu;
+
+ if (vi->affinity_hint_set) {
+ for (i = 0; i < vi->max_queue_pairs; i++) {
+ virtqueue_set_affinity(vi->rq[i].vq, -1);
+ virtqueue_set_affinity(vi->sq[i].vq, -1);
+ }
+
+ vi->affinity_hint_set = false;
+ }
+
+ i = 0;
+ for_each_online_cpu(cpu) {
+ if (cpu == hcpu) {
+ *per_cpu_ptr(vi->vq_index, cpu) = -1;
+ } else {
+ *per_cpu_ptr(vi->vq_index, cpu) =
+ ++i % vi->curr_queue_pairs;
+ }
+ }
+}
+
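virtnet_clean_affinity() drops all affinity hints, then rebuilds the
CPU-to-queue map round-robin while marking the departing CPU (hcpu) as
unmapped. A standalone illustration of that remapping loop, assuming 4 online
CPUs, 2 queue pairs, and CPU 2 going offline:

	#include <stdio.h>

	int main(void)
	{
		int curr_queue_pairs = 2, hcpu = 2;   /* assumed values */
		int i = 0, cpu;

		for (cpu = 0; cpu < 4; cpu++) {
			/* same expression as the patch: ++i % curr_queue_pairs */
			int vq = (cpu == hcpu) ? -1 : ++i % curr_queue_pairs;
			printf("cpu %d -> queue pair %d\n", cpu, vq);
		}
		return 0;   /* prints 1, 0, -1, 1 */
	}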
+static void virtnet_set_affinity(struct virtnet_info *vi)
+{
+ int i;
+ int cpu;
/* In multiqueue mode, when the number of CPUs is equal to the number of
 * queue pairs, we let each queue pair be private to one CPU by
 * setting the affinity hint to eliminate the contention.
 */
- if ((vi->curr_queue_pairs == 1 ||
- vi->max_queue_pairs != num_online_cpus()) && set) {
- if (vi->affinity_hint_set)
- set = false;
- else
- return;
+ if (vi->curr_queue_pairs == 1 ||
+ vi->max_queue_pairs != num_online_cpus()) {
+ virtnet_clean_affinity(vi, -1);
+ return;
}
- for (i = 0; i < vi->max_queue_pairs; i++) {
- int cpu = set ? i : -1;
+ i = 0;
+ for_each_online_cpu(cpu) {
virtqueue_set_affinity(vi->rq[i].vq, cpu);
virtqueue_set_affinity(vi->sq[i].vq, cpu);
+ *per_cpu_ptr(vi->vq_index, cpu) = i;
+ i++;
}
- if (set)
- vi->affinity_hint_set = true;
- else
- vi->affinity_hint_set = false;
+ vi->affinity_hint_set = true;
+}
+
+static int virtnet_cpu_callback(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ struct virtnet_info *vi = container_of(nfb, struct virtnet_info, nb);
+
+ switch (action & ~CPU_TASKS_FROZEN) {
+ case CPU_ONLINE:
+ case CPU_DOWN_FAILED:
+ case CPU_DEAD:
+ virtnet_set_affinity(vi);
+ break;
+ case CPU_DOWN_PREPARE:
+ virtnet_clean_affinity(vi, (long)hcpu);
+ break;
+ default:
+ break;
+ }
+ return NOTIFY_OK;
}
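This is the classic (pre-cpuhp state machine) hotplug notifier pattern: mask
off CPU_TASKS_FROZEN so suspend/resume transitions take the same paths as real
hotplug, rebuild affinity once a CPU has come or gone, and proactively unmap a
CPU at CPU_DOWN_PREPARE. A hedged sketch of wiring such a notifier into a
driver of that era; the struct and function names here are illustrative:

	#include <linux/kernel.h>
	#include <linux/cpu.h>
	#include <linux/notifier.h>

	struct my_dev {
		struct notifier_block nb;
		/* ... driver state ... */
	};

	static int my_cpu_callback(struct notifier_block *nfb,
				   unsigned long action, void *hcpu)
	{
		struct my_dev *d = container_of(nfb, struct my_dev, nb);

		switch (action & ~CPU_TASKS_FROZEN) {
		case CPU_ONLINE:
		case CPU_DEAD:
			pr_debug("cpu %ld changed for %p\n", (long)hcpu, d);
			/* rebalance per-cpu state here */
			break;
		case CPU_DOWN_PREPARE:
			/* unmap the departing cpu before it goes away */
			break;
		}
		return NOTIFY_OK;
	}

	static int my_probe(struct my_dev *d)
	{
		d->nb.notifier_call = my_cpu_callback;
		return register_hotcpu_notifier(&d->nb);
	}

	static void my_remove(struct my_dev *d)
	{
		unregister_hotcpu_notifier(&d->nb);
	}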
static void virtnet_get_ringparam(struct net_device *dev,
@@ -1099,13 +1149,15 @@ static int virtnet_set_channels(struct net_device *dev,
if (queue_pairs > vi->max_queue_pairs)
return -EINVAL;
+ get_online_cpus();
err = virtnet_set_queues(vi, queue_pairs);
if (!err) {
netif_set_real_num_tx_queues(dev, queue_pairs);
netif_set_real_num_rx_queues(dev, queue_pairs);
- virtnet_set_affinity(vi, true);
+ virtnet_set_affinity(vi);
}
+ put_online_cpus();
return err;
}
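The get_online_cpus()/put_online_cpus() pair pins the online-CPU set so the
new affinity map cannot race with a concurrent hotplug operation; any CPU
change arriving afterwards is handled by the notifier rerunning
virtnet_set_affinity(). The general pattern, sketched with a hypothetical
helper:

	static void rebuild_cpu_state(void)   /* hypothetical */
	{
		int cpu;

		get_online_cpus();                 /* hold off cpu hotplug */
		for_each_online_cpu(cpu)
			setup_per_cpu_state(cpu);  /* hypothetical per-cpu work */
		put_online_cpus();                 /* allow hotplug again */
	}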
@@ -1144,12 +1196,19 @@ static int virtnet_change_mtu(struct net_device *dev, int new_mtu)
/* To avoid contending a lock held by a vcpu that might exit to the host,
 * select the txq based on the processor id.
- * TODO: handle cpu hotplug.
*/
static u16 virtnet_select_queue(struct net_device *dev, struct sk_buff *skb)
{
- int txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) :
- smp_processor_id();
+ int txq;
+ struct virtnet_info *vi = netdev_priv(dev);
+
+ if (skb_rx_queue_recorded(skb)) {
+ txq = skb_get_rx_queue(skb);
+ } else {
+ txq = *__this_cpu_ptr(vi->vq_index);
+ if (txq == -1)
+ txq = 0;
+ }
while (unlikely(txq >= dev->real_num_tx_queues))
txq -= dev->real_num_tx_queues;
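With the per-cpu map in place, virtnet_select_queue() no longer uses
smp_processor_id() directly: it reads the executing CPU's slot and falls back
to queue 0 if that CPU was unmapped mid-hotplug, and the trailing while loop
is a cheap modulo for the case where real_num_tx_queues has shrunk. Recapped
as a fragment, with names taken from the patch:

	txq = *__this_cpu_ptr(vi->vq_index);   /* this CPU's assigned pair */
	if (txq == -1)                         /* unmapped, e.g. mid-hotplug */
		txq = 0;
	while (txq >= dev->real_num_tx_queues)
		txq -= dev->real_num_tx_queues;    /* clamp after a queue-count drop */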
@@ -1265,7 +1324,7 @@ static void virtnet_del_vqs(struct virtnet_info *vi)
{
struct virtio_device *vdev = vi->vdev;
- virtnet_set_affinity(vi, false);
+ virtnet_clean_affinity(vi, -1);
vdev->config->del_vqs(vdev);
@@ -1388,7 +1447,10 @@ static int init_vqs(struct virtnet_info *vi)
if (ret)
goto err_free;
- virtnet_set_affinity(vi, true);
+ get_online_cpus();
+ virtnet_set_affinity(vi);
+ put_online_cpus();
+
return 0;
err_free:
@@ -1470,6 +1532,10 @@ static int virtnet_probe(struct virtio_device *vdev)
if (vi->stats == NULL)
goto free;
+ vi->vq_index = alloc_percpu(int);
+ if (vi->vq_index == NULL)
+ goto free_stats;
+
mutex_init(&vi->config_lock);
vi->config_enable = true;
INIT_WORK(&vi->config_work, virtnet_config_changed_work);
@@ -1493,7 +1559,7 @@ static int virtnet_probe(struct virtio_device *vdev)
/* Allocate/initialize the rx/tx queues, and invoke find_vqs */
err = init_vqs(vi);
if (err)
- goto free_stats;
+ goto free_index;
netif_set_real_num_tx_queues(dev, 1);
netif_set_real_num_rx_queues(dev, 1);
@@ -1516,6 +1582,13 @@ static int virtnet_probe(struct virtio_device *vdev)
}
}
+ vi->nb.notifier_call = &virtnet_cpu_callback;
+ err = register_hotcpu_notifier(&vi->nb);
+ if (err) {
+ pr_debug("virtio_net: registering cpu notifier failed\n");
+ goto free_recv_bufs;
+ }
+
/* Assume link up if device can't report link status,
otherwise get link status from config. */
if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
@@ -1537,6 +1610,8 @@ free_recv_bufs:
free_vqs:
cancel_delayed_work_sync(&vi->refill);
virtnet_del_vqs(vi);
+free_index:
+ free_percpu(vi->vq_index);
free_stats:
free_percpu(vi->stats);
free:
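The probe error path gains a free_index label between free_vqs and free_stats,
keeping the unwinding strictly in reverse order of allocation. A skeleton of
that order, with labels from the patch and the unrelated steps elided:

	vi->stats = alloc_percpu(struct virtnet_stats);
	if (!vi->stats)
		goto free;
	vi->vq_index = alloc_percpu(int);
	if (!vi->vq_index)
		goto free_stats;
	if (init_vqs(vi))
		goto free_index;
	/* ... */
	free_index:
		free_percpu(vi->vq_index);
	free_stats:
		free_percpu(vi->stats);
	free:
		free_netdev(dev);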
@@ -1560,6 +1635,8 @@ static void virtnet_remove(struct virtio_device *vdev)
{
struct virtnet_info *vi = vdev->priv;
+ unregister_hotcpu_notifier(&vi->nb);
+
/* Prevent config work handler from accessing the device. */
mutex_lock(&vi->config_lock);
vi->config_enable = false;
@@ -1571,6 +1648,7 @@ static void virtnet_remove(struct virtio_device *vdev)
flush_work(&vi->config_work);
+ free_percpu(vi->vq_index);
free_percpu(vi->stats);
free_netdev(vi->dev);
}