Diffstat (limited to 'virt/kvm/eventfd.c')
-rw-r--r--	virt/kvm/eventfd.c	376
1 file changed, 324 insertions(+), 52 deletions(-)
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index c1f1e3c6298..20c3af7692c 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -31,10 +31,12 @@
 #include <linux/list.h>
 #include <linux/eventfd.h>
 #include <linux/kernel.h>
+#include <linux/srcu.h>
 #include <linux/slab.h>

 #include "iodev.h"

+#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
 /*
  * --------------------------------------------------------------------
  * irqfd: Allows an fd to be used to inject an interrupt to the guest
@@ -43,15 +45,51 @@
  * --------------------------------------------------------------------
  */

+/*
+ * Resampling irqfds are a special variety of irqfds used to emulate
+ * level triggered interrupts.  The interrupt is asserted on eventfd
+ * trigger.  On acknowledgement through the irq ack notifier, the
+ * interrupt is de-asserted and userspace is notified through the
+ * resamplefd.  All resamplers on the same gsi are de-asserted
+ * together, so we don't need to track the state of each individual
+ * user.  We can also therefore share the same irq source ID.
+ */
+struct _irqfd_resampler {
+	struct kvm *kvm;
+	/*
+	 * List of resampling struct _irqfd objects sharing this gsi.
+	 * RCU list modified under kvm->irqfds.resampler_lock
+	 */
+	struct list_head list;
+	struct kvm_irq_ack_notifier notifier;
+	/*
+	 * Entry in list of kvm->irqfd.resampler_list.  Use for sharing
+	 * resamplers among irqfds on the same gsi.
+	 * Accessed and modified under kvm->irqfds.resampler_lock
+	 */
+	struct list_head link;
+};
+
 struct _irqfd {
-	struct kvm               *kvm;
-	struct eventfd_ctx       *eventfd;
-	int                       gsi;
-	struct list_head          list;
-	poll_table                pt;
-	wait_queue_t              wait;
-	struct work_struct        inject;
-	struct work_struct        shutdown;
+	/* Used for MSI fast-path */
+	struct kvm *kvm;
+	wait_queue_t wait;
+	/* Update side is protected by irqfds.lock */
+	struct kvm_kernel_irq_routing_entry __rcu *irq_entry;
+	/* Used for level IRQ fast-path */
+	int gsi;
+	struct work_struct inject;
+	/* The resampler used by this irqfd (resampler-only) */
+	struct _irqfd_resampler *resampler;
+	/* Eventfd notified on resample (resampler-only) */
+	struct eventfd_ctx *resamplefd;
+	/* Entry in list of irqfds for a resampler (resampler-only) */
+	struct list_head resampler_link;
+	/* Used for setup/shutdown */
+	struct eventfd_ctx *eventfd;
+	struct list_head list;
+	poll_table pt;
+	struct work_struct shutdown;
 };

 static struct workqueue_struct *irqfd_cleanup_wq;
@@ -62,8 +100,63 @@ irqfd_inject(struct work_struct *work)
 	struct _irqfd *irqfd = container_of(work, struct _irqfd, inject);
 	struct kvm *kvm = irqfd->kvm;

-	kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 1);
-	kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 0);
+	if (!irqfd->resampler) {
+		kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 1,
+				false);
+		kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 0,
+				false);
+	} else
+		kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
+			    irqfd->gsi, 1, false);
+}
+
+/*
+ * Since resampler irqfds share an IRQ source ID, we de-assert once
+ * then notify all of the resampler irqfds using this GSI.  We can't
+ * do multiple de-asserts or we risk racing with incoming re-asserts.
+ */
+static void
+irqfd_resampler_ack(struct kvm_irq_ack_notifier *kian)
+{
+	struct _irqfd_resampler *resampler;
+	struct kvm *kvm;
+	struct _irqfd *irqfd;
+	int idx;
+
+	resampler = container_of(kian, struct _irqfd_resampler, notifier);
+	kvm = resampler->kvm;
+
+	kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
+		    resampler->notifier.gsi, 0, false);
+
+	idx = srcu_read_lock(&kvm->irq_srcu);
+
+	list_for_each_entry_rcu(irqfd, &resampler->list, resampler_link)
+		eventfd_signal(irqfd->resamplefd, 1);
+
+	srcu_read_unlock(&kvm->irq_srcu, idx);
+}
+
+static void
+irqfd_resampler_shutdown(struct _irqfd *irqfd)
+{
+	struct _irqfd_resampler *resampler = irqfd->resampler;
+	struct kvm *kvm = resampler->kvm;
+
+	mutex_lock(&kvm->irqfds.resampler_lock);
+
+	list_del_rcu(&irqfd->resampler_link);
+	synchronize_srcu(&kvm->irq_srcu);
+
+	if (list_empty(&resampler->list)) {
+		list_del(&resampler->link);
+		kvm_unregister_irq_ack_notifier(kvm, &resampler->notifier);
+		kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
+			    resampler->notifier.gsi, 0, false);
+		kfree(resampler);
+	}
+
+	mutex_unlock(&kvm->irqfds.resampler_lock);
 }

 /*
@@ -87,6 +180,11 @@ irqfd_shutdown(struct work_struct *work)
 	 */
 	flush_work(&irqfd->inject);

+	if (irqfd->resampler) {
+		irqfd_resampler_shutdown(irqfd);
+		eventfd_ctx_put(irqfd->resamplefd);
+	}
+
 	/*
 	 * It is now safe to release the object's resources
 	 */
@@ -125,14 +223,24 @@ irqfd_wakeup(wait_queue_t *wait, unsigned mode, int sync, void *key)
 {
 	struct _irqfd *irqfd = container_of(wait, struct _irqfd, wait);
 	unsigned long flags = (unsigned long)key;
+	struct kvm_kernel_irq_routing_entry *irq;
+	struct kvm *kvm = irqfd->kvm;
+	int idx;

-	if (flags & POLLIN)
+	if (flags & POLLIN) {
+		idx = srcu_read_lock(&kvm->irq_srcu);
+		irq = srcu_dereference(irqfd->irq_entry, &kvm->irq_srcu);
 		/* An event has been signaled, inject an interrupt */
-		schedule_work(&irqfd->inject);
+		if (irq)
+			kvm_set_msi(irq, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1,
+					false);
+		else
+			schedule_work(&irqfd->inject);
+		srcu_read_unlock(&kvm->irq_srcu, idx);
+	}

 	if (flags & POLLHUP) {
 		/* The eventfd is closing, detach from KVM */
-		struct kvm *kvm = irqfd->kvm;
 		unsigned long flags;

 		spin_lock_irqsave(&kvm->irqfds.lock, flags);
@@ -163,12 +271,33 @@ irqfd_ptable_queue_proc(struct file *file, wait_queue_head_t *wqh,
 	add_wait_queue(wqh, &irqfd->wait);
 }

+/* Must be called under irqfds.lock */
+static void irqfd_update(struct kvm *kvm, struct _irqfd *irqfd,
+			 struct kvm_irq_routing_table *irq_rt)
+{
+	struct kvm_kernel_irq_routing_entry *e;
+
+	if (irqfd->gsi >= irq_rt->nr_rt_entries) {
+		rcu_assign_pointer(irqfd->irq_entry, NULL);
+		return;
+	}
+
+	hlist_for_each_entry(e, &irq_rt->map[irqfd->gsi], link) {
+		/* Only fast-path MSI. */
+		if (e->type == KVM_IRQ_ROUTING_MSI)
+			rcu_assign_pointer(irqfd->irq_entry, e);
+		else
+			rcu_assign_pointer(irqfd->irq_entry, NULL);
+	}
+}
+
 static int
-kvm_irqfd_assign(struct kvm *kvm, int fd, int gsi)
+kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
 {
+	struct kvm_irq_routing_table *irq_rt;
 	struct _irqfd *irqfd, *tmp;
-	struct file *file = NULL;
-	struct eventfd_ctx *eventfd = NULL;
+	struct fd f;
+	struct eventfd_ctx *eventfd = NULL, *resamplefd = NULL;
 	int ret;
 	unsigned int events;

@@ -177,18 +306,18 @@ kvm_irqfd_assign(struct kvm *kvm, int fd, int gsi)
 		return -ENOMEM;

 	irqfd->kvm = kvm;
-	irqfd->gsi = gsi;
+	irqfd->gsi = args->gsi;
 	INIT_LIST_HEAD(&irqfd->list);
 	INIT_WORK(&irqfd->inject, irqfd_inject);
 	INIT_WORK(&irqfd->shutdown, irqfd_shutdown);

-	file = eventfd_fget(fd);
-	if (IS_ERR(file)) {
-		ret = PTR_ERR(file);
-		goto fail;
+	f = fdget(args->fd);
+	if (!f.file) {
+		ret = -EBADF;
+		goto out;
 	}

-	eventfd = eventfd_ctx_fileget(file);
+	eventfd = eventfd_ctx_fileget(f.file);
 	if (IS_ERR(eventfd)) {
 		ret = PTR_ERR(eventfd);
 		goto fail;
@@ -196,6 +325,54 @@ kvm_irqfd_assign(struct kvm *kvm, int fd, int gsi)

 	irqfd->eventfd = eventfd;

+	if (args->flags & KVM_IRQFD_FLAG_RESAMPLE) {
+		struct _irqfd_resampler *resampler;
+
+		resamplefd = eventfd_ctx_fdget(args->resamplefd);
+		if (IS_ERR(resamplefd)) {
+			ret = PTR_ERR(resamplefd);
+			goto fail;
+		}
+
+		irqfd->resamplefd = resamplefd;
+		INIT_LIST_HEAD(&irqfd->resampler_link);
+
+		mutex_lock(&kvm->irqfds.resampler_lock);
+
+		list_for_each_entry(resampler,
+				    &kvm->irqfds.resampler_list, link) {
+			if (resampler->notifier.gsi == irqfd->gsi) {
+				irqfd->resampler = resampler;
+				break;
+			}
+		}
+
+		if (!irqfd->resampler) {
+			resampler = kzalloc(sizeof(*resampler), GFP_KERNEL);
+			if (!resampler) {
+				ret = -ENOMEM;
+				mutex_unlock(&kvm->irqfds.resampler_lock);
+				goto fail;
+			}
+
+			resampler->kvm = kvm;
+			INIT_LIST_HEAD(&resampler->list);
+			resampler->notifier.gsi = irqfd->gsi;
+			resampler->notifier.irq_acked = irqfd_resampler_ack;
+			INIT_LIST_HEAD(&resampler->link);
+
+			list_add(&resampler->link, &kvm->irqfds.resampler_list);
+			kvm_register_irq_ack_notifier(kvm,
+						      &resampler->notifier);
+			irqfd->resampler = resampler;
+		}
+
+		list_add_rcu(&irqfd->resampler_link, &irqfd->resampler->list);
+		synchronize_srcu(&kvm->irq_srcu);
+
+		mutex_unlock(&kvm->irqfds.resampler_lock);
+	}
+
 	/*
 	 * Install our own custom wake-up handling so we are notified via
 	 * a callback whenever someone signals the underlying eventfd
@@ -215,64 +392,90 @@ kvm_irqfd_assign(struct kvm *kvm, int fd, int gsi)
 		goto fail;
 	}

-	events = file->f_op->poll(file, &irqfd->pt);
+	irq_rt = rcu_dereference_protected(kvm->irq_routing,
+					   lockdep_is_held(&kvm->irqfds.lock));
+	irqfd_update(kvm, irqfd, irq_rt);

 	list_add_tail(&irqfd->list, &kvm->irqfds.items);

+	spin_unlock_irq(&kvm->irqfds.lock);
+
 	/*
 	 * Check if there was an event already pending on the eventfd
 	 * before we registered, and trigger it as if we didn't miss it.
 	 */
+	events = f.file->f_op->poll(f.file, &irqfd->pt);
+
 	if (events & POLLIN)
 		schedule_work(&irqfd->inject);

-	spin_unlock_irq(&kvm->irqfds.lock);
-
 	/*
 	 * do not drop the file until the irqfd is fully initialized, otherwise
 	 * we might race against the POLLHUP
 	 */
-	fput(file);
+	fdput(f);

 	return 0;

 fail:
+	if (irqfd->resampler)
+		irqfd_resampler_shutdown(irqfd);
+
+	if (resamplefd && !IS_ERR(resamplefd))
+		eventfd_ctx_put(resamplefd);
+
 	if (eventfd && !IS_ERR(eventfd))
 		eventfd_ctx_put(eventfd);

-	if (!IS_ERR(file))
-		fput(file);
+	fdput(f);
+out:
 	kfree(irqfd);
 	return ret;
 }
+#endif

 void
 kvm_eventfd_init(struct kvm *kvm)
 {
+#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
 	spin_lock_init(&kvm->irqfds.lock);
 	INIT_LIST_HEAD(&kvm->irqfds.items);
+	INIT_LIST_HEAD(&kvm->irqfds.resampler_list);
+	mutex_init(&kvm->irqfds.resampler_lock);
+#endif
 	INIT_LIST_HEAD(&kvm->ioeventfds);
 }

+#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
 /*
  * shutdown any irqfd's that match fd+gsi
  */
 static int
-kvm_irqfd_deassign(struct kvm *kvm, int fd, int gsi)
+kvm_irqfd_deassign(struct kvm *kvm, struct kvm_irqfd *args)
 {
 	struct _irqfd *irqfd, *tmp;
 	struct eventfd_ctx *eventfd;

-	eventfd = eventfd_ctx_fdget(fd);
+	eventfd = eventfd_ctx_fdget(args->fd);
 	if (IS_ERR(eventfd))
 		return PTR_ERR(eventfd);

 	spin_lock_irq(&kvm->irqfds.lock);

 	list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list) {
-		if (irqfd->eventfd == eventfd && irqfd->gsi == gsi)
+		if (irqfd->eventfd == eventfd && irqfd->gsi == args->gsi) {
+			/*
+			 * This rcu_assign_pointer is needed for when
+			 * another thread calls kvm_irq_routing_update before
+			 * we flush workqueue below (we synchronize with
+			 * kvm_irq_routing_update using irqfds.lock).
+			 * It is paired with synchronize_srcu done by caller
+			 * of that function.
+			 */
+			rcu_assign_pointer(irqfd->irq_entry, NULL);
 			irqfd_deactivate(irqfd);
+		}
 	}

 	spin_unlock_irq(&kvm->irqfds.lock);
@@ -289,12 +492,15 @@ kvm_irqfd_deassign(struct kvm *kvm, int fd, int gsi)
 }

 int
-kvm_irqfd(struct kvm *kvm, int fd, int gsi, int flags)
+kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
 {
-	if (flags & KVM_IRQFD_FLAG_DEASSIGN)
-		return kvm_irqfd_deassign(kvm, fd, gsi);
+	if (args->flags & ~(KVM_IRQFD_FLAG_DEASSIGN | KVM_IRQFD_FLAG_RESAMPLE))
+		return -EINVAL;
+
+	if (args->flags & KVM_IRQFD_FLAG_DEASSIGN)
+		return kvm_irqfd_deassign(kvm, args);

-	return kvm_irqfd_assign(kvm, fd, gsi);
+	return kvm_irqfd_assign(kvm, args);
 }

 /*
@@ -322,11 +528,30 @@ kvm_irqfd_release(struct kvm *kvm)
 }

 /*
+ * Change irq_routing and irqfd.
+ * Caller must invoke synchronize_srcu(&kvm->irq_srcu) afterwards.
+ */
+void kvm_irq_routing_update(struct kvm *kvm,
+			    struct kvm_irq_routing_table *irq_rt)
+{
+	struct _irqfd *irqfd;
+
+	spin_lock_irq(&kvm->irqfds.lock);
+
+	rcu_assign_pointer(kvm->irq_routing, irq_rt);
+
+	list_for_each_entry(irqfd, &kvm->irqfds.items, list)
+		irqfd_update(kvm, irqfd, irq_rt);
+
+	spin_unlock_irq(&kvm->irqfds.lock);
+}
+
+/*
  * create a host-wide workqueue for issuing deferred shutdown requests
  * aggregated from all vm* instances. We need our own isolated single-thread
  * queue to prevent deadlock against flushing the normal work-queue.
  */
-static int __init irqfd_module_init(void)
+int kvm_irqfd_init(void)
 {
 	irqfd_cleanup_wq = create_singlethread_workqueue("kvm-irqfd-cleanup");
 	if (!irqfd_cleanup_wq)
@@ -335,13 +560,11 @@ static int __init irqfd_module_init(void)
 	return 0;
 }

-static void __exit irqfd_module_exit(void)
+void kvm_irqfd_exit(void)
 {
 	destroy_workqueue(irqfd_cleanup_wq);
 }
-
-module_init(irqfd_module_init);
-module_exit(irqfd_module_exit);
+#endif

 /*
  * --------------------------------------------------------------------
@@ -359,6 +582,7 @@ struct _ioeventfd {
 	struct eventfd_ctx  *eventfd;
 	u64                  datamatch;
 	struct kvm_io_device dev;
+	u8                   bus_idx;
 	bool                 wildcard;
 };

@@ -381,7 +605,15 @@ ioeventfd_in_range(struct _ioeventfd *p, gpa_t addr, int len, const void *val)
 {
 	u64 _val;

-	if (!(addr == p->addr && len == p->length))
+	if (addr != p->addr)
+		/* address must be precise for a hit */
+		return false;
+
+	if (!p->length)
+		/* length = 0 means only look at the address, so always a hit */
+		return true;
+
+	if (len != p->length)
 		/* address-range must be precise for a hit */
 		return false;

@@ -451,25 +683,38 @@ ioeventfd_check_collision(struct kvm *kvm, struct _ioeventfd *p)
 	struct _ioeventfd *_p;

 	list_for_each_entry(_p, &kvm->ioeventfds, list)
-		if (_p->addr == p->addr && _p->length == p->length &&
-		    (_p->wildcard || p->wildcard ||
-		     _p->datamatch == p->datamatch))
+		if (_p->bus_idx == p->bus_idx &&
+		    _p->addr == p->addr &&
+		    (!_p->length || !p->length ||
+		     (_p->length == p->length &&
+		      (_p->wildcard || p->wildcard ||
+		       _p->datamatch == p->datamatch))))
 			return true;

 	return false;
 }

+static enum kvm_bus ioeventfd_bus_from_flags(__u32 flags)
+{
+	if (flags & KVM_IOEVENTFD_FLAG_PIO)
+		return KVM_PIO_BUS;
+	if (flags & KVM_IOEVENTFD_FLAG_VIRTIO_CCW_NOTIFY)
+		return KVM_VIRTIO_CCW_NOTIFY_BUS;
+	return KVM_MMIO_BUS;
+}
+
 static int
 kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
 {
-	int                       pio = args->flags & KVM_IOEVENTFD_FLAG_PIO;
-	enum kvm_bus              bus_idx = pio ? KVM_PIO_BUS : KVM_MMIO_BUS;
+	enum kvm_bus              bus_idx;
 	struct _ioeventfd        *p;
 	struct eventfd_ctx       *eventfd;
 	int                       ret;

-	/* must be natural-word sized */
+	bus_idx = ioeventfd_bus_from_flags(args->flags);
+	/* must be natural-word sized, or 0 to ignore length */
 	switch (args->len) {
+	case 0:
 	case 1:
 	case 2:
 	case 4:
@@ -487,6 +732,12 @@ kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
 	if (args->flags & ~KVM_IOEVENTFD_VALID_FLAG_MASK)
 		return -EINVAL;

+	/* ioeventfd with no length can't be combined with DATAMATCH */
+	if (!args->len &&
+	    args->flags & (KVM_IOEVENTFD_FLAG_PIO |
+			   KVM_IOEVENTFD_FLAG_DATAMATCH))
+		return -EINVAL;
+
 	eventfd = eventfd_ctx_fdget(args->fd);
 	if (IS_ERR(eventfd))
 		return PTR_ERR(eventfd);
@@ -499,6 +750,7 @@ kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
 	INIT_LIST_HEAD(&p->list);
 	p->addr    = args->addr;
+	p->bus_idx = bus_idx;
 	p->length  = args->len;
 	p->eventfd = eventfd;

@@ -510,7 +762,7 @@ kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)

 	mutex_lock(&kvm->slots_lock);

-	/* Verify that there isnt a match already */
+	/* Verify that there isn't a match already */
 	if (ioeventfd_check_collision(kvm, p)) {
 		ret = -EEXIST;
 		goto unlock_fail;
@@ -518,16 +770,30 @@ kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)

 	kvm_iodevice_init(&p->dev, &ioeventfd_ops);

-	ret = kvm_io_bus_register_dev(kvm, bus_idx, &p->dev);
+	ret = kvm_io_bus_register_dev(kvm, bus_idx, p->addr, p->length,
+				      &p->dev);
 	if (ret < 0)
 		goto unlock_fail;

+	/* When length is ignored, MMIO is also put on a separate bus, for
+	 * faster lookups.
+	 */
+	if (!args->len && !(args->flags & KVM_IOEVENTFD_FLAG_PIO)) {
+		ret = kvm_io_bus_register_dev(kvm, KVM_FAST_MMIO_BUS,
+					      p->addr, 0, &p->dev);
+		if (ret < 0)
+			goto register_fail;
+	}
+
+	kvm->buses[bus_idx]->ioeventfd_count++;
 	list_add_tail(&p->list, &kvm->ioeventfds);

 	mutex_unlock(&kvm->slots_lock);

 	return 0;

+register_fail:
+	kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev);
 unlock_fail:
 	mutex_unlock(&kvm->slots_lock);

@@ -541,12 +807,12 @@ fail:
 static int
 kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
 {
-	int                       pio = args->flags & KVM_IOEVENTFD_FLAG_PIO;
-	enum kvm_bus              bus_idx = pio ? KVM_PIO_BUS : KVM_MMIO_BUS;
+	enum kvm_bus              bus_idx;
 	struct _ioeventfd        *p, *tmp;
 	struct eventfd_ctx       *eventfd;
 	int                       ret = -ENOENT;

+	bus_idx = ioeventfd_bus_from_flags(args->flags);
 	eventfd = eventfd_ctx_fdget(args->fd);
 	if (IS_ERR(eventfd))
 		return PTR_ERR(eventfd);

@@ -556,7 +822,8 @@ kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
 	list_for_each_entry_safe(p, tmp, &kvm->ioeventfds, list) {
 		bool wildcard = !(args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH);

-		if (p->eventfd != eventfd  ||
+		if (p->bus_idx != bus_idx ||
+		    p->eventfd != eventfd  ||
 		    p->addr != args->addr  ||
 		    p->length != args->len ||
 		    p->wildcard != wildcard)
@@ -566,6 +833,11 @@ kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
 			continue;

 		kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev);
+		if (!p->length) {
+			kvm_io_bus_unregister_dev(kvm, KVM_FAST_MMIO_BUS,
+						  &p->dev);
+		}
+		kvm->buses[bus_idx]->ioeventfd_count--;
 		ioeventfd_release(p);
 		ret = 0;
 		break;
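
For context, a minimal userspace sketch of the resampling irqfd ABI introduced above, assuming a VM fd obtained via KVM_CREATE_VM and a host that advertises KVM_CAP_IRQFD_RESAMPLE; the GSI number and the helper name are illustrative, not part of the patch:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Hypothetical helper: wire a level-triggered (resampling) irqfd to GSI 24.
 * Writing to trigger_fd (an eventfd) asserts the interrupt; on guest ack,
 * KVM de-asserts the line and signals resample_fd, so userspace can
 * re-check its device and re-trigger if the level is still high.
 */
static int wire_resample_irqfd(int vm_fd, int trigger_fd, int resample_fd)
{
	struct kvm_irqfd irqfd;

	memset(&irqfd, 0, sizeof(irqfd));
	irqfd.fd = trigger_fd;			/* from eventfd(0, 0) */
	irqfd.gsi = 24;				/* example GSI */
	irqfd.flags = KVM_IRQFD_FLAG_RESAMPLE;
	irqfd.resamplefd = resample_fd;		/* from eventfd(0, 0) */

	return ioctl(vm_fd, KVM_IRQFD, &irqfd);
}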
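Deassignment reuses the same ioctl. A sketch under the same assumptions as above: the fd/gsi pair must match the original assignment, and kvm_irqfd() rejects any flag outside DEASSIGN and RESAMPLE.

/* Hypothetical helper: detach the irqfd wired above. */
static int unwire_irqfd(int vm_fd, int trigger_fd, unsigned int gsi)
{
	struct kvm_irqfd irqfd;

	memset(&irqfd, 0, sizeof(irqfd));
	irqfd.fd = trigger_fd;
	irqfd.gsi = gsi;
	irqfd.flags = KVM_IRQFD_FLAG_DEASSIGN;

	return ioctl(vm_fd, KVM_IRQFD, &irqfd);
}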
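Similarly, a sketch of the zero-length ioeventfd path added above: with len == 0 the match is on address alone, the device is additionally registered on KVM_FAST_MMIO_BUS, and combining len == 0 with PIO or DATAMATCH is rejected by kvm_assign_ioeventfd(). The doorbell address and helper name are again illustrative:

/* Hypothetical helper: wildcard MMIO doorbell that ignores length and data. */
static int wire_fast_mmio_doorbell(int vm_fd, int efd, __u64 doorbell_gpa)
{
	struct kvm_ioeventfd ioeventfd;

	memset(&ioeventfd, 0, sizeof(ioeventfd));
	ioeventfd.addr = doorbell_gpa;	/* guest-physical doorbell address */
	ioeventfd.len = 0;		/* any access width at this address hits */
	ioeventfd.fd = efd;		/* eventfd signaled on guest write */
	ioeventfd.flags = 0;		/* wildcard: no DATAMATCH, MMIO bus */

	return ioctl(vm_fd, KVM_IOEVENTFD, &ioeventfd);
}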
