Diffstat (limited to 'drivers/vhost/test.c')
-rw-r--r--   drivers/vhost/test.c   76
1 file changed, 47 insertions(+), 29 deletions(-)
diff --git a/drivers/vhost/test.c b/drivers/vhost/test.c
index 099f30230d0..d9c501eaa6c 100644
--- a/drivers/vhost/test.c
+++ b/drivers/vhost/test.c
@@ -13,12 +13,11 @@
 #include <linux/module.h>
 #include <linux/mutex.h>
 #include <linux/workqueue.h>
-#include <linux/rcupdate.h>
 #include <linux/file.h>
 #include <linux/slab.h>
 
 #include "test.h"
-#include "vhost.c"
+#include "vhost.h"
 
 /* Max number of bytes transferred before requeueing the job.
  * Using this limit prevents one virtqueue from starving others. */
@@ -38,21 +37,23 @@ struct vhost_test {
  * read-size critical section for our kind of RCU. */
 static void handle_vq(struct vhost_test *n)
 {
-	struct vhost_virtqueue *vq = &n->dev.vqs[VHOST_TEST_VQ];
+	struct vhost_virtqueue *vq = &n->vqs[VHOST_TEST_VQ];
 	unsigned out, in;
 	int head;
 	size_t len, total_len = 0;
 	void *private;
 
-	private = rcu_dereference_check(vq->private_data, 1);
-	if (!private)
+	mutex_lock(&vq->mutex);
+	private = vq->private_data;
+	if (!private) {
+		mutex_unlock(&vq->mutex);
 		return;
+	}
 
-	mutex_lock(&vq->mutex);
-	vhost_disable_notify(vq);
+	vhost_disable_notify(&n->dev, vq);
 
 	for (;;) {
-		head = vhost_get_vq_desc(&n->dev, vq, vq->iov,
+		head = vhost_get_vq_desc(vq, vq->iov,
 					 ARRAY_SIZE(vq->iov),
 					 &out, &in,
 					 NULL, NULL);
@@ -61,8 +62,8 @@ static void handle_vq(struct vhost_test *n)
 			break;
 		/* Nothing new? Wait for eventfd to tell us they refilled. */
 		if (head == vq->num) {
-			if (unlikely(vhost_enable_notify(vq))) {
-				vhost_disable_notify(vq);
+			if (unlikely(vhost_enable_notify(&n->dev, vq))) {
+				vhost_disable_notify(&n->dev, vq);
 				continue;
 			}
 			break;
@@ -102,18 +103,20 @@ static int vhost_test_open(struct inode *inode, struct file *f)
 {
 	struct vhost_test *n = kmalloc(sizeof *n, GFP_KERNEL);
 	struct vhost_dev *dev;
-	int r;
+	struct vhost_virtqueue **vqs;
 
 	if (!n)
 		return -ENOMEM;
+	vqs = kmalloc(VHOST_TEST_VQ_MAX * sizeof(*vqs), GFP_KERNEL);
+	if (!vqs) {
+		kfree(n);
+		return -ENOMEM;
+	}
 
 	dev = &n->dev;
+	vqs[VHOST_TEST_VQ] = &n->vqs[VHOST_TEST_VQ];
 	n->vqs[VHOST_TEST_VQ].handle_kick = handle_vq_kick;
-	r = vhost_dev_init(dev, n->vqs, VHOST_TEST_VQ_MAX);
-	if (r < 0) {
-		kfree(n);
-		return r;
-	}
+	vhost_dev_init(dev, vqs, VHOST_TEST_VQ_MAX);
 
 	f->private_data = n;
 
@@ -126,9 +129,8 @@ static void *vhost_test_stop_vq(struct vhost_test *n,
 	void *private;
 
 	mutex_lock(&vq->mutex);
-	private = rcu_dereference_protected(vq->private_data,
-					 lockdep_is_held(&vq->mutex));
-	rcu_assign_pointer(vq->private_data, NULL);
+	private = vq->private_data;
+	vq->private_data = NULL;
 	mutex_unlock(&vq->mutex);
 	return private;
 }
@@ -140,7 +142,7 @@ static void vhost_test_stop(struct vhost_test *n, void **privatep)
 
 static void vhost_test_flush_vq(struct vhost_test *n, int index)
 {
-	vhost_poll_flush(&n->dev.vqs[index].poll);
+	vhost_poll_flush(&n->vqs[index].poll);
 }
 
 static void vhost_test_flush(struct vhost_test *n)
@@ -155,7 +157,7 @@ static int vhost_test_release(struct inode *inode, struct file *f)
 
 	vhost_test_stop(n, &private);
 	vhost_test_flush(n);
-	vhost_dev_cleanup(&n->dev);
+	vhost_dev_cleanup(&n->dev, false);
 	/* We do an extra flush before freeing memory,
 	 * since jobs can re-queue themselves. */
 	vhost_test_flush(n);
@@ -191,12 +193,16 @@ static long vhost_test_run(struct vhost_test *n, int test)
 		priv = test ? n : NULL;
 
 		/* start polling new socket */
-		oldpriv = rcu_dereference_protected(vq->private_data,
-						    lockdep_is_held(&vq->mutex));
-		rcu_assign_pointer(vq->private_data, priv);
+		oldpriv = vq->private_data;
+		vq->private_data = priv;
+
+		r = vhost_init_used(&n->vqs[index]);
 
 		mutex_unlock(&vq->mutex);
 
+		if (r)
+			goto err;
+
 		if (oldpriv) {
 			vhost_test_flush_vq(n, index);
 		}
@@ -214,13 +220,20 @@ static long vhost_test_reset_owner(struct vhost_test *n)
 {
 	void *priv = NULL;
 	long err;
+	struct vhost_memory *memory;
+
 	mutex_lock(&n->dev.mutex);
 	err = vhost_dev_check_owner(&n->dev);
 	if (err)
 		goto done;
+	memory = vhost_dev_reset_owner_prepare();
+	if (!memory) {
+		err = -ENOMEM;
+		goto done;
+	}
 	vhost_test_stop(n, &priv);
 	vhost_test_flush(n);
-	err = vhost_dev_reset_owner(&n->dev);
+	vhost_dev_reset_owner(&n->dev, memory);
 done:
 	mutex_unlock(&n->dev.mutex);
 	return err;
@@ -228,15 +241,18 @@ done:
 
 static int vhost_test_set_features(struct vhost_test *n, u64 features)
 {
+	struct vhost_virtqueue *vq;
+
 	mutex_lock(&n->dev.mutex);
 	if ((features & (1 << VHOST_F_LOG_ALL)) &&
 	    !vhost_log_access_ok(&n->dev)) {
 		mutex_unlock(&n->dev.mutex);
 		return -EFAULT;
 	}
-	n->dev.acked_features = features;
-	smp_wmb();
-	vhost_test_flush(n);
+	vq = &n->vqs[VHOST_TEST_VQ];
+	mutex_lock(&vq->mutex);
+	vq->acked_features = features;
+	mutex_unlock(&vq->mutex);
 	mutex_unlock(&n->dev.mutex);
 	return 0;
 }
@@ -270,7 +286,9 @@ static long vhost_test_ioctl(struct file *f, unsigned int ioctl,
 		return vhost_test_reset_owner(n);
 	default:
 		mutex_lock(&n->dev.mutex);
-		r = vhost_dev_ioctl(&n->dev, ioctl, arg);
+		r = vhost_dev_ioctl(&n->dev, ioctl, argp);
+		if (r == -ENOIOCTLCMD)
+			r = vhost_vring_ioctl(&n->dev, ioctl, argp);
 		vhost_test_flush(n);
 		mutex_unlock(&n->dev.mutex);
 		return r;
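For context, the pattern the hunks above converge on: vq->private_data becomes a plain pointer that is read and written only under vq->mutex (the RCU accessors and smp_wmb() go away), and outstanding work is flushed after the old backend is detached. Below is a minimal sketch of that handoff; the helper names my_backend_start() and my_backend_stop() are hypothetical and not part of drivers/vhost/test.c, while vq->mutex, vq->private_data and vhost_init_used() are the vhost core interfaces used in the diff.

/* Hypothetical helpers illustrating the locking pattern above;
 * a sketch, not part of the patched file. */
#include "vhost.h"

/* Install a new backend pointer. The worker (handle_vq) also takes
 * vq->mutex before reading private_data, so a plain store is enough. */
static int my_backend_start(struct vhost_virtqueue *vq, void *priv)
{
	int r;

	mutex_lock(&vq->mutex);
	vq->private_data = priv;
	/* Re-read used ring state for the new backend, as vhost_test_run does. */
	r = vhost_init_used(vq);
	mutex_unlock(&vq->mutex);
	return r;
}

/* Detach the backend and return the old pointer so the caller can
 * flush pending work (vhost_poll_flush) before freeing it. */
static void *my_backend_stop(struct vhost_virtqueue *vq)
{
	void *old;

	mutex_lock(&vq->mutex);
	old = vq->private_data;
	vq->private_data = NULL;
	mutex_unlock(&vq->mutex);
	return old;
}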
