Diffstat (limited to 'drivers/vhost/net.c')
-rw-r--r--	drivers/vhost/net.c	115
1 file changed, 68 insertions(+), 47 deletions(-)
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 831eb4fd197..8dae2f724a3 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -17,6 +17,7 @@
 #include <linux/workqueue.h>
 #include <linux/file.h>
 #include <linux/slab.h>
+#include <linux/vmalloc.h>
 
 #include <linux/net.h>
 #include <linux/if_packet.h>
@@ -70,7 +71,12 @@ enum {
 };
 
 struct vhost_net_ubuf_ref {
-	struct kref kref;
+	/* refcount follows semantics similar to kref:
+	 *  0: object is released
+	 *  1: no outstanding ubufs
+	 * >1: outstanding ubufs
+	 */
+	atomic_t refcount;
 	wait_queue_head_t wait;
 	struct vhost_virtqueue *vq;
 };
@@ -116,14 +122,6 @@ static void vhost_net_enable_zcopy(int vq)
 	vhost_net_zcopy_mask |= 0x1 << vq;
 }
 
-static void vhost_net_zerocopy_done_signal(struct kref *kref)
-{
-	struct vhost_net_ubuf_ref *ubufs;
-
-	ubufs = container_of(kref, struct vhost_net_ubuf_ref, kref);
-	wake_up(&ubufs->wait);
-}
-
 static struct vhost_net_ubuf_ref *
 vhost_net_ubuf_alloc(struct vhost_virtqueue *vq, bool zcopy)
 {
@@ -134,21 +132,24 @@ vhost_net_ubuf_alloc(struct vhost_virtqueue *vq, bool zcopy)
 	ubufs = kmalloc(sizeof(*ubufs), GFP_KERNEL);
 	if (!ubufs)
 		return ERR_PTR(-ENOMEM);
-	kref_init(&ubufs->kref);
+	atomic_set(&ubufs->refcount, 1);
 	init_waitqueue_head(&ubufs->wait);
 	ubufs->vq = vq;
 	return ubufs;
 }
 
-static void vhost_net_ubuf_put(struct vhost_net_ubuf_ref *ubufs)
+static int vhost_net_ubuf_put(struct vhost_net_ubuf_ref *ubufs)
 {
-	kref_put(&ubufs->kref, vhost_net_zerocopy_done_signal);
+	int r = atomic_sub_return(1, &ubufs->refcount);
+
+	if (unlikely(!r))
+		wake_up(&ubufs->wait);
+	return r;
 }
 
 static void vhost_net_ubuf_put_and_wait(struct vhost_net_ubuf_ref *ubufs)
 {
-	kref_put(&ubufs->kref, vhost_net_zerocopy_done_signal);
-	wait_event(ubufs->wait, !atomic_read(&ubufs->kref.refcount));
+	vhost_net_ubuf_put(ubufs);
+	wait_event(ubufs->wait, !atomic_read(&ubufs->refcount));
 }
 
 static void vhost_net_ubuf_put_wait_and_free(struct vhost_net_ubuf_ref *ubufs)
@@ -306,23 +307,26 @@ static void vhost_zerocopy_callback(struct ubuf_info *ubuf, bool success)
 {
 	struct vhost_net_ubuf_ref *ubufs = ubuf->ctx;
 	struct vhost_virtqueue *vq = ubufs->vq;
-	int cnt = atomic_read(&ubufs->kref.refcount);
+	int cnt;
+
+	rcu_read_lock_bh();
 
 	/* set len to mark this desc buffers done DMA */
 	vq->heads[ubuf->desc].len = success ?
 		VHOST_DMA_DONE_LEN : VHOST_DMA_FAILED_LEN;
-	vhost_net_ubuf_put(ubufs);
+	cnt = vhost_net_ubuf_put(ubufs);
 
 	/*
 	 * Trigger polling thread if guest stopped submitting new buffers:
-	 * in this case, the refcount after decrement will eventually reach 1
-	 * so here it is 2.
+	 * in this case, the refcount after decrement will eventually reach 1.
 	 * We also trigger polling periodically after each 16 packets
 	 * (the value 16 here is more or less arbitrary, it's tuned to trigger
 	 * less than 10% of times).
 	 */
-	if (cnt <= 2 || !(cnt % 16))
+	if (cnt <= 1 || !(cnt % 16))
 		vhost_poll_queue(&vq->poll);
+
+	rcu_read_unlock_bh();
 }
 
 /* Expects to be always run from workqueue - which acts as
@@ -370,7 +374,7 @@ static void handle_tx(struct vhost_net *net)
 			      % UIO_MAXIOV == nvq->done_idx))
 			break;
 
-		head = vhost_get_vq_desc(&net->dev, vq, vq->iov,
+		head = vhost_get_vq_desc(vq, vq->iov,
 					 ARRAY_SIZE(vq->iov),
 					 &out, &in,
 					 NULL, NULL);
@@ -420,7 +424,7 @@ static void handle_tx(struct vhost_net *net)
 			msg.msg_control = ubuf;
 			msg.msg_controllen = sizeof(ubuf);
 			ubufs = nvq->ubufs;
-			kref_get(&ubufs->kref);
+			atomic_inc(&ubufs->refcount);
 			nvq->upend_idx = (nvq->upend_idx + 1) % UIO_MAXIOV;
 		} else {
 			msg.msg_control = NULL;
@@ -502,9 +506,13 @@ static int get_rx_bufs(struct vhost_virtqueue *vq,
 			r = -ENOBUFS;
 			goto err;
 		}
-		d = vhost_get_vq_desc(vq->dev, vq, vq->iov + seg,
+		r = vhost_get_vq_desc(vq, vq->iov + seg,
 				      ARRAY_SIZE(vq->iov) - seg, &out,
 				      &in, log, log_num);
+		if (unlikely(r < 0))
+			goto err;
+
+		d = r;
 		if (d == vq->num) {
 			r = 0;
 			goto err;
@@ -529,6 +537,12 @@ static int get_rx_bufs(struct vhost_virtqueue *vq,
 	*iovcount = seg;
 	if (unlikely(log))
 		*log_num = nlogs;
+
+	/* Detect overrun */
+	if (unlikely(datalen > 0)) {
+		r = UIO_MAXIOV + 1;
+		goto err;
+	}
 	return headcount;
 err:
 	vhost_discard_vq_desc(vq, headcount);
@@ -571,9 +585,9 @@ static void handle_rx(struct vhost_net *net)
 	vhost_hlen = nvq->vhost_hlen;
 	sock_hlen = nvq->sock_hlen;
 
-	vq_log = unlikely(vhost_has_feature(&net->dev, VHOST_F_LOG_ALL)) ?
+	vq_log = unlikely(vhost_has_feature(vq, VHOST_F_LOG_ALL)) ?
 		vq->log : NULL;
-	mergeable = vhost_has_feature(&net->dev, VIRTIO_NET_F_MRG_RXBUF);
+	mergeable = vhost_has_feature(vq, VIRTIO_NET_F_MRG_RXBUF);
 
 	while ((sock_len = peek_head_len(sock->sk))) {
 		sock_len += sock_hlen;
@@ -584,6 +598,14 @@ static void handle_rx(struct vhost_net *net)
 		/* On error, stop handling until the next kick. */
 		if (unlikely(headcount < 0))
 			break;
+		/* On overrun, truncate and discard */
+		if (unlikely(headcount > UIO_MAXIOV)) {
+			msg.msg_iovlen = 1;
+			err = sock->ops->recvmsg(NULL, sock, &msg,
+						 1, MSG_DONTWAIT | MSG_TRUNC);
+			pr_debug("Discarded rx packet: len %zd\n", sock_len);
+			continue;
+		}
 		/* OK, now we need to know about added descriptors. */
 		if (!headcount) {
 			if (unlikely(vhost_enable_notify(&net->dev, vq))) {
@@ -680,16 +702,20 @@ static void handle_rx_net(struct vhost_work *work)
 
 static int vhost_net_open(struct inode *inode, struct file *f)
 {
-	struct vhost_net *n = kmalloc(sizeof *n, GFP_KERNEL);
+	struct vhost_net *n;
 	struct vhost_dev *dev;
 	struct vhost_virtqueue **vqs;
-	int r, i;
+	int i;
 
-	if (!n)
-		return -ENOMEM;
+	n = kmalloc(sizeof *n, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
+	if (!n) {
+		n = vmalloc(sizeof *n);
+		if (!n)
+			return -ENOMEM;
+	}
 	vqs = kmalloc(VHOST_NET_VQ_MAX * sizeof(*vqs), GFP_KERNEL);
 	if (!vqs) {
-		kfree(n);
+		kvfree(n);
 		return -ENOMEM;
 	}
 
@@ -706,12 +732,7 @@ static int vhost_net_open(struct inode *inode, struct file *f)
 		n->vqs[i].vhost_hlen = 0;
 		n->vqs[i].sock_hlen = 0;
 	}
-	r = vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX);
-	if (r < 0) {
-		kfree(n);
-		kfree(vqs);
-		return r;
-	}
+	vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX);
 
 	vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, POLLOUT, dev);
 	vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, POLLIN, dev);
@@ -785,7 +806,7 @@ static void vhost_net_flush(struct vhost_net *n)
 		vhost_net_ubuf_put_and_wait(n->vqs[VHOST_NET_VQ_TX].ubufs);
 		mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
 		n->tx_flush = false;
-		kref_init(&n->vqs[VHOST_NET_VQ_TX].ubufs->kref);
+		atomic_set(&n->vqs[VHOST_NET_VQ_TX].ubufs->refcount, 1);
 		mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
 	}
 }
@@ -802,14 +823,16 @@ static int vhost_net_release(struct inode *inode, struct file *f)
 	vhost_dev_cleanup(&n->dev, false);
 	vhost_net_vq_reset(n);
 	if (tx_sock)
-		fput(tx_sock->file);
+		sockfd_put(tx_sock);
 	if (rx_sock)
-		fput(rx_sock->file);
+		sockfd_put(rx_sock);
+	/* Make sure no callbacks are outstanding */
+	synchronize_rcu_bh();
 	/* We do an extra flush before freeing memory,
 	 * since jobs can re-queue themselves. */
 	vhost_net_flush(n);
 	kfree(n->dev.vqs);
-	kfree(n);
+	kvfree(n);
 	return 0;
 }
 
@@ -842,7 +865,7 @@ static struct socket *get_raw_socket(int fd)
 	}
 	return sock;
 err:
-	fput(sock->file);
+	sockfd_put(sock);
 	return ERR_PTR(r);
 }
 
@@ -948,7 +971,7 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
 
 	if (oldsock) {
 		vhost_net_flush_vq(n, index);
-		fput(oldsock->file);
+		sockfd_put(oldsock);
 	}
 
 	mutex_unlock(&n->dev.mutex);
@@ -960,7 +983,7 @@ err_used:
 	if (ubufs)
 		vhost_net_ubuf_put_wait_and_free(ubufs);
 err_ubufs:
-	fput(sock->file);
+	sockfd_put(sock);
 err_vq:
 	mutex_unlock(&vq->mutex);
 err:
@@ -991,9 +1014,9 @@ static long vhost_net_reset_owner(struct vhost_net *n)
 done:
 	mutex_unlock(&n->dev.mutex);
 	if (tx_sock)
-		fput(tx_sock->file);
+		sockfd_put(tx_sock);
 	if (rx_sock)
-		fput(rx_sock->file);
+		sockfd_put(rx_sock);
 	return err;
 }
 
@@ -1020,15 +1043,13 @@ static int vhost_net_set_features(struct vhost_net *n, u64 features)
 		mutex_unlock(&n->dev.mutex);
 		return -EFAULT;
 	}
-	n->dev.acked_features = features;
-	smp_wmb();
 	for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
 		mutex_lock(&n->vqs[i].vq.mutex);
+		n->vqs[i].vq.acked_features = features;
 		n->vqs[i].vhost_hlen = vhost_hlen;
 		n->vqs[i].sock_hlen = sock_hlen;
 		mutex_unlock(&n->vqs[i].vq.mutex);
 	}
-	vhost_net_flush(n);
 	mutex_unlock(&n->dev.mutex);
 	return 0;
 }
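
A note on the kref-to-atomic_t conversion: the patch keeps kref-like semantics but makes the decrement return the new count, so vhost_zerocopy_callback() can test the post-decrement value directly instead of doing a separate atomic_read() before the put. The sketch below is illustrative only and not part of the patch (the function name is hypothetical, and the bare kfree() at the end mirrors what vhost_net_ubuf_put_wait_and_free() does); it walks one zerocopy TX cycle through the counter states documented in the new struct comment:

/* Illustrative only: one zerocopy TX cycle under the new refcount.
 * Real call sites, locking and error handling are omitted. */
static void ubuf_refcount_walkthrough(struct vhost_virtqueue *vq)
{
	struct vhost_net_ubuf_ref *ubufs;
	int cnt;

	ubufs = vhost_net_ubuf_alloc(vq, true);	/* refcount == 1 */

	atomic_inc(&ubufs->refcount);		/* TX submitted: == 2 */
	cnt = vhost_net_ubuf_put(ubufs);	/* DMA done: cnt == 1 */

	/* cnt == 1 means no ubufs are outstanding, which is why the
	 * callback's trigger condition became "cnt <= 1": the count is
	 * now sampled from the decrement itself rather than read
	 * beforehand. */

	vhost_net_ubuf_put_and_wait(ubufs);	/* drops to 0, wakes waiter */
	kfree(ubufs);				/* 0 == released */
}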
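
The rcu_read_lock_bh()/rcu_read_unlock_bh() pair added to vhost_zerocopy_callback() and the synchronize_rcu_bh() added to vhost_net_release() work as a pair: release cannot free the device until every in-flight callback has left its read-side critical section. A generic sketch of that pattern, with hypothetical names (some_ctx, completion_cb and teardown are not from the patch):

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct some_ctx {		/* hypothetical stand-in for vhost_net */
	int state;
};

static void completion_cb(struct some_ctx *ctx)
{
	rcu_read_lock_bh();
	/* ... touch ctx, drop a refcount, maybe kick the poller ... */
	rcu_read_unlock_bh();
}

static void teardown(struct some_ctx *ctx)
{
	/* Returns only after every completion_cb() that had already
	 * entered its read-side section has exited it. */
	synchronize_rcu_bh();
	kfree(ctx);		/* now safe from concurrent callbacks */
}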
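
Also worth noting is the new return-value convention in get_rx_bufs(): alongside a negative value for a real error and 0 for an empty ring, it now returns UIO_MAXIOV + 1, a count no legitimate headcount can reach, when its buffer quota is exhausted while part of the packet remains unplaced (datalen > 0 after the loop); the gathered descriptors are discarded on the err path as before. handle_rx() maps that sentinel to a one-byte recvmsg() with MSG_DONTWAIT | MSG_TRUNC, which consumes and drops the oversized packet rather than retrying it forever.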
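
The allocation change in vhost_net_open() is a common kernel pattern for large structures: try kmalloc first, quietly (__GFP_NOWARN) and with extra retries (__GFP_REPEAT), then fall back to vmalloc when physically contiguous memory is too fragmented. A minimal sketch of the pattern with a hypothetical helper name (the patch open-codes it instead):

#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>

/* Hypothetical helper; vhost_net_open() open-codes this inline. */
static void *vhost_net_alloc(size_t size)
{
	/* Prefer the slab allocator: kmalloc memory is physically
	 * contiguous and cheaper to access. */
	void *p = kmalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);

	if (!p)
		p = vmalloc(size);	/* fall back to vmap'ed pages */
	return p;
}

Since the pointer may now come from either allocator, every free of the device switches from kfree() to kvfree(), which dispatches to kfree() or vfree() as appropriate; that is why both the error path in vhost_net_open() and the tail of vhost_net_release() change.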
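
Finally, the fput(sock->file) to sockfd_put(sock) conversions are one-for-one cleanups rather than behavior changes: sockfd_put() is the existing <linux/net.h> helper for dropping the file reference taken by sockfd_lookup(), defined at the time of this patch as:

static inline void sockfd_put(struct socket *sock)
{
	fput(sock->file);
}

Using the helper keeps the acquire/release pairing with sockfd_lookup() visible at the call sites (get_raw_socket(), vhost_net_set_backend(), and the release and reset-owner paths).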
