Diffstat (limited to 'drivers/virtio')
-rw-r--r--  drivers/virtio/Kconfig           |   8
-rw-r--r--  drivers/virtio/virtio.c          |  57
-rw-r--r--  drivers/virtio/virtio_balloon.c  | 209
-rw-r--r--  drivers/virtio/virtio_mmio.c     |  45
-rw-r--r--  drivers/virtio/virtio_pci.c      |  56
-rw-r--r--  drivers/virtio/virtio_ring.c     | 421
6 files changed, 527 insertions(+), 269 deletions(-)
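
The bulk of this range is the virtio_ring rework that retires virtqueue_add_buf() in favour of virtqueue_add_outbuf(), virtqueue_add_inbuf() and virtqueue_add_sgs(), which return 0 on success (or -ENOSPC/-ENOMEM/-EIO) rather than the ring's remaining capacity, plus the matching driver conversions. A minimal sketch of the new calling convention as the diffs below use it; example_send(), buf and token are illustrative names, not part of the patch:

#include <linux/scatterlist.h>
#include <linux/virtio.h>

/* Illustrative driver code, not from the patch: queue one
 * device-readable buffer and notify the host. */
static int example_send(struct virtqueue *vq, void *buf, unsigned int len,
			void *token)
{
	struct scatterlist sg;
	int err;

	sg_init_one(&sg, buf, len);

	/* "outbuf" = readable by the device; virtqueue_add_inbuf() is the
	 * device-writable counterpart. */
	err = virtqueue_add_outbuf(vq, &sg, 1, token, GFP_KERNEL);
	if (err)
		return err;

	virtqueue_kick(vq);	/* now returns false on a broken queue */
	return 0;
}
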
diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig index 8d5bddb56cb..c6683f2e396 100644 --- a/drivers/virtio/Kconfig +++ b/drivers/virtio/Kconfig @@ -8,8 +8,8 @@ config VIRTIO menu "Virtio drivers" config VIRTIO_PCI - tristate "PCI driver for virtio devices (EXPERIMENTAL)" - depends on PCI && EXPERIMENTAL + tristate "PCI driver for virtio devices" + depends on PCI select VIRTIO ---help--- This drivers provides support for virtio based paravirtual device @@ -32,8 +32,8 @@ config VIRTIO_BALLOON If unsure, say M. config VIRTIO_MMIO - tristate "Platform bus driver for memory mapped virtio devices (EXPERIMENTAL)" - depends on HAS_IOMEM && EXPERIMENTAL + tristate "Platform bus driver for memory mapped virtio devices" + depends on HAS_IOMEM select VIRTIO ---help--- This drivers provides support for memory mapped virtio diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c index 809b0de59c0..fed0ce198ae 100644 --- a/drivers/virtio/virtio.c +++ b/drivers/virtio/virtio.c @@ -10,33 +10,40 @@ static DEFINE_IDA(virtio_index_ida); static ssize_t device_show(struct device *_d, struct device_attribute *attr, char *buf) { - struct virtio_device *dev = container_of(_d,struct virtio_device,dev); + struct virtio_device *dev = dev_to_virtio(_d); return sprintf(buf, "0x%04x\n", dev->id.device); } +static DEVICE_ATTR_RO(device); + static ssize_t vendor_show(struct device *_d, struct device_attribute *attr, char *buf) { - struct virtio_device *dev = container_of(_d,struct virtio_device,dev); + struct virtio_device *dev = dev_to_virtio(_d); return sprintf(buf, "0x%04x\n", dev->id.vendor); } +static DEVICE_ATTR_RO(vendor); + static ssize_t status_show(struct device *_d, struct device_attribute *attr, char *buf) { - struct virtio_device *dev = container_of(_d,struct virtio_device,dev); + struct virtio_device *dev = dev_to_virtio(_d); return sprintf(buf, "0x%08x\n", dev->config->get_status(dev)); } +static DEVICE_ATTR_RO(status); + static ssize_t modalias_show(struct device *_d, struct device_attribute *attr, char *buf) { - struct virtio_device *dev = container_of(_d,struct virtio_device,dev); - + struct virtio_device *dev = dev_to_virtio(_d); return sprintf(buf, "virtio:d%08Xv%08X\n", dev->id.device, dev->id.vendor); } +static DEVICE_ATTR_RO(modalias); + static ssize_t features_show(struct device *_d, struct device_attribute *attr, char *buf) { - struct virtio_device *dev = container_of(_d, struct virtio_device, dev); + struct virtio_device *dev = dev_to_virtio(_d); unsigned int i; ssize_t len = 0; @@ -48,14 +55,17 @@ static ssize_t features_show(struct device *_d, len += sprintf(buf+len, "\n"); return len; } -static struct device_attribute virtio_dev_attrs[] = { - __ATTR_RO(device), - __ATTR_RO(vendor), - __ATTR_RO(status), - __ATTR_RO(modalias), - __ATTR_RO(features), - __ATTR_NULL +static DEVICE_ATTR_RO(features); + +static struct attribute *virtio_dev_attrs[] = { + &dev_attr_device.attr, + &dev_attr_vendor.attr, + &dev_attr_status.attr, + &dev_attr_modalias.attr, + &dev_attr_features.attr, + NULL, }; +ATTRIBUTE_GROUPS(virtio_dev); static inline int virtio_id_match(const struct virtio_device *dev, const struct virtio_device_id *id) @@ -71,10 +81,10 @@ static inline int virtio_id_match(const struct virtio_device *dev, static int virtio_dev_match(struct device *_dv, struct device_driver *_dr) { unsigned int i; - struct virtio_device *dev = container_of(_dv,struct virtio_device,dev); + struct virtio_device *dev = dev_to_virtio(_dv); const struct virtio_device_id *ids; - ids = container_of(_dr, 
struct virtio_driver, driver)->id_table; + ids = drv_to_virtio(_dr)->id_table; for (i = 0; ids[i].device; i++) if (virtio_id_match(dev, &ids[i])) return 1; @@ -83,7 +93,7 @@ static int virtio_dev_match(struct device *_dv, struct device_driver *_dr) static int virtio_uevent(struct device *_dv, struct kobj_uevent_env *env) { - struct virtio_device *dev = container_of(_dv,struct virtio_device,dev); + struct virtio_device *dev = dev_to_virtio(_dv); return add_uevent_var(env, "MODALIAS=virtio:d%08Xv%08X", dev->id.device, dev->id.vendor); @@ -98,8 +108,7 @@ void virtio_check_driver_offered_feature(const struct virtio_device *vdev, unsigned int fbit) { unsigned int i; - struct virtio_driver *drv = container_of(vdev->dev.driver, - struct virtio_driver, driver); + struct virtio_driver *drv = drv_to_virtio(vdev->dev.driver); for (i = 0; i < drv->feature_table_size; i++) if (drv->feature_table[i] == fbit) @@ -111,9 +120,8 @@ EXPORT_SYMBOL_GPL(virtio_check_driver_offered_feature); static int virtio_dev_probe(struct device *_d) { int err, i; - struct virtio_device *dev = container_of(_d,struct virtio_device,dev); - struct virtio_driver *drv = container_of(dev->dev.driver, - struct virtio_driver, driver); + struct virtio_device *dev = dev_to_virtio(_d); + struct virtio_driver *drv = drv_to_virtio(dev->dev.driver); u32 device_features; /* We have a driver! */ @@ -152,9 +160,8 @@ static int virtio_dev_probe(struct device *_d) static int virtio_dev_remove(struct device *_d) { - struct virtio_device *dev = container_of(_d,struct virtio_device,dev); - struct virtio_driver *drv = container_of(dev->dev.driver, - struct virtio_driver, driver); + struct virtio_device *dev = dev_to_virtio(_d); + struct virtio_driver *drv = drv_to_virtio(dev->dev.driver); drv->remove(dev); @@ -169,7 +176,7 @@ static int virtio_dev_remove(struct device *_d) static struct bus_type virtio_bus = { .name = "virtio", .match = virtio_dev_match, - .dev_attrs = virtio_dev_attrs, + .dev_groups = virtio_dev_groups, .uevent = virtio_uevent, .probe = virtio_dev_probe, .remove = virtio_dev_remove, diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c index 0908e604433..25ebe8eecdb 100644 --- a/drivers/virtio/virtio_balloon.c +++ b/drivers/virtio/virtio_balloon.c @@ -27,13 +27,15 @@ #include <linux/delay.h> #include <linux/slab.h> #include <linux/module.h> +#include <linux/balloon_compaction.h> /* * Balloon device works in 4K page units. So each page is pointed to by * multiple balloon pages. All memory counters in this driver are in balloon * page units. */ -#define VIRTIO_BALLOON_PAGES_PER_PAGE (PAGE_SIZE >> VIRTIO_BALLOON_PFN_SHIFT) +#define VIRTIO_BALLOON_PAGES_PER_PAGE (unsigned)(PAGE_SIZE >> VIRTIO_BALLOON_PFN_SHIFT) +#define VIRTIO_BALLOON_ARRAY_PFNS_MAX 256 struct virtio_balloon { @@ -52,15 +54,19 @@ struct virtio_balloon /* Number of balloon pages we've told the Host we're not using. */ unsigned int num_pages; /* - * The pages we've told the Host we're not using. + * The pages we've told the Host we're not using are enqueued + * at vb_dev_info->pages list. * Each page on this list adds VIRTIO_BALLOON_PAGES_PER_PAGE * to num_pages above. */ - struct list_head pages; + struct balloon_dev_info *vb_dev_info; + + /* Synchronize access/update to this struct virtio_balloon elements */ + struct mutex balloon_lock; /* The array of pfns we tell the Host about. 
*/ unsigned int num_pfns; - u32 pfns[256]; + u32 pfns[VIRTIO_BALLOON_ARRAY_PFNS_MAX]; /* Memory statistics */ int need_stats_update; @@ -102,8 +108,7 @@ static void tell_host(struct virtio_balloon *vb, struct virtqueue *vq) sg_init_one(&sg, vb->pfns, sizeof(vb->pfns[0]) * vb->num_pfns); /* We should always be able to add one buffer to an empty queue. */ - if (virtqueue_add_buf(vq, &sg, 1, 0, vb, GFP_KERNEL) < 0) - BUG(); + virtqueue_add_outbuf(vq, &sg, 1, vb, GFP_KERNEL); virtqueue_kick(vq); /* When host has read buffer, this completes via balloon_ack */ @@ -122,33 +127,33 @@ static void set_page_pfns(u32 pfns[], struct page *page) static void fill_balloon(struct virtio_balloon *vb, size_t num) { + struct balloon_dev_info *vb_dev_info = vb->vb_dev_info; + /* We can only do one array worth at a time. */ num = min(num, ARRAY_SIZE(vb->pfns)); + mutex_lock(&vb->balloon_lock); for (vb->num_pfns = 0; vb->num_pfns < num; vb->num_pfns += VIRTIO_BALLOON_PAGES_PER_PAGE) { - struct page *page = alloc_page(GFP_HIGHUSER | __GFP_NORETRY | - __GFP_NOMEMALLOC | __GFP_NOWARN); + struct page *page = balloon_page_enqueue(vb_dev_info); + if (!page) { - if (printk_ratelimit()) - dev_printk(KERN_INFO, &vb->vdev->dev, - "Out of puff! Can't get %zu pages\n", - num); + dev_info_ratelimited(&vb->vdev->dev, + "Out of puff! Can't get %u pages\n", + VIRTIO_BALLOON_PAGES_PER_PAGE); /* Sleep for at least 1/5 of a second before retry. */ msleep(200); break; } set_page_pfns(vb->pfns + vb->num_pfns, page); vb->num_pages += VIRTIO_BALLOON_PAGES_PER_PAGE; - totalram_pages--; - list_add(&page->lru, &vb->pages); + adjust_managed_page_count(page, -1); } - /* Didn't get any? Oh well. */ - if (vb->num_pfns == 0) - return; - - tell_host(vb, vb->inflate_vq); + /* Did we get any? */ + if (vb->num_pfns != 0) + tell_host(vb, vb->inflate_vq); + mutex_unlock(&vb->balloon_lock); } static void release_pages_by_pfn(const u32 pfns[], unsigned int num) @@ -157,22 +162,26 @@ static void release_pages_by_pfn(const u32 pfns[], unsigned int num) /* Find pfns pointing at start of each page, get pages and free them. */ for (i = 0; i < num; i += VIRTIO_BALLOON_PAGES_PER_PAGE) { - __free_page(balloon_pfn_to_page(pfns[i])); - totalram_pages++; + struct page *page = balloon_pfn_to_page(pfns[i]); + balloon_page_free(page); + adjust_managed_page_count(page, 1); } } static void leak_balloon(struct virtio_balloon *vb, size_t num) { struct page *page; + struct balloon_dev_info *vb_dev_info = vb->vb_dev_info; /* We can only do one array worth at a time. 
*/ num = min(num, ARRAY_SIZE(vb->pfns)); + mutex_lock(&vb->balloon_lock); for (vb->num_pfns = 0; vb->num_pfns < num; vb->num_pfns += VIRTIO_BALLOON_PAGES_PER_PAGE) { - page = list_first_entry(&vb->pages, struct page, lru); - list_del(&page->lru); + page = balloon_page_dequeue(vb_dev_info); + if (!page) + break; set_page_pfns(vb->pfns + vb->num_pfns, page); vb->num_pages -= VIRTIO_BALLOON_PAGES_PER_PAGE; } @@ -182,7 +191,9 @@ static void leak_balloon(struct virtio_balloon *vb, size_t num) * virtio_has_feature(vdev, VIRTIO_BALLOON_F_MUST_TELL_HOST); * is true, we *have* to do it in this order */ - tell_host(vb, vb->deflate_vq); + if (vb->num_pfns != 0) + tell_host(vb, vb->deflate_vq); + mutex_unlock(&vb->balloon_lock); release_pages_by_pfn(vb->pfns, vb->num_pfns); } @@ -246,8 +257,7 @@ static void stats_handle_request(struct virtio_balloon *vb) if (!virtqueue_get_buf(vq, &len)) return; sg_init_one(&sg, vb->stats, sizeof(vb->stats)); - if (virtqueue_add_buf(vq, &sg, 1, 0, vb, GFP_KERNEL) < 0) - BUG(); + virtqueue_add_outbuf(vq, &sg, 1, vb, GFP_KERNEL); virtqueue_kick(vq); } @@ -263,9 +273,8 @@ static inline s64 towards_target(struct virtio_balloon *vb) __le32 v; s64 target; - vb->vdev->config->get(vb->vdev, - offsetof(struct virtio_balloon_config, num_pages), - &v, sizeof(v)); + virtio_cread(vb->vdev, struct virtio_balloon_config, num_pages, &v); + target = le32_to_cpu(v); return target - vb->num_pages; } @@ -274,9 +283,8 @@ static void update_balloon_size(struct virtio_balloon *vb) { __le32 actual = cpu_to_le32(vb->num_pages); - vb->vdev->config->set(vb->vdev, - offsetof(struct virtio_balloon_config, actual), - &actual, sizeof(actual)); + virtio_cwrite(vb->vdev, struct virtio_balloon_config, actual, + &actual); } static int balloon(void *_vballoon) @@ -300,6 +308,12 @@ static int balloon(void *_vballoon) else if (diff < 0) leak_balloon(vb, -diff); update_balloon_size(vb); + + /* + * For large balloon changes, we could spend a lot of time + * and always have work to do. Be nice if preempt disabled. + */ + cond_resched(); } return 0; } @@ -328,10 +342,10 @@ static int init_vqs(struct virtio_balloon *vb) /* * Prime this virtqueue with one buffer so the hypervisor can - * use it to signal us later. + * use it to signal us later (it can't be broken yet!). */ sg_init_one(&sg, vb->stats, sizeof vb->stats); - if (virtqueue_add_buf(vb->stats_vq, &sg, 1, 0, vb, GFP_KERNEL) + if (virtqueue_add_outbuf(vb->stats_vq, &sg, 1, vb, GFP_KERNEL) < 0) BUG(); virtqueue_kick(vb->stats_vq); @@ -339,9 +353,84 @@ static int init_vqs(struct virtio_balloon *vb) return 0; } +static const struct address_space_operations virtio_balloon_aops; +#ifdef CONFIG_BALLOON_COMPACTION +/* + * virtballoon_migratepage - perform the balloon page migration on behalf of + * a compation thread. (called under page lock) + * @mapping: the page->mapping which will be assigned to the new migrated page. + * @newpage: page that will replace the isolated page after migration finishes. + * @page : the isolated (old) page that is about to be migrated to newpage. + * @mode : compaction mode -- not used for balloon page migration. 
+ * + * After a ballooned page gets isolated by compaction procedures, this is the + * function that performs the page migration on behalf of a compaction thread + * The page migration for virtio balloon is done in a simple swap fashion which + * follows these two macro steps: + * 1) insert newpage into vb->pages list and update the host about it; + * 2) update the host about the old page removed from vb->pages list; + * + * This function preforms the balloon page migration task. + * Called through balloon_mapping->a_ops->migratepage + */ +static int virtballoon_migratepage(struct address_space *mapping, + struct page *newpage, struct page *page, enum migrate_mode mode) +{ + struct balloon_dev_info *vb_dev_info = balloon_page_device(page); + struct virtio_balloon *vb; + unsigned long flags; + + BUG_ON(!vb_dev_info); + + vb = vb_dev_info->balloon_device; + + /* + * In order to avoid lock contention while migrating pages concurrently + * to leak_balloon() or fill_balloon() we just give up the balloon_lock + * this turn, as it is easier to retry the page migration later. + * This also prevents fill_balloon() getting stuck into a mutex + * recursion in the case it ends up triggering memory compaction + * while it is attempting to inflate the ballon. + */ + if (!mutex_trylock(&vb->balloon_lock)) + return -EAGAIN; + + /* balloon's page migration 1st step -- inflate "newpage" */ + spin_lock_irqsave(&vb_dev_info->pages_lock, flags); + balloon_page_insert(newpage, mapping, &vb_dev_info->pages); + vb_dev_info->isolated_pages--; + spin_unlock_irqrestore(&vb_dev_info->pages_lock, flags); + vb->num_pfns = VIRTIO_BALLOON_PAGES_PER_PAGE; + set_page_pfns(vb->pfns, newpage); + tell_host(vb, vb->inflate_vq); + + /* + * balloon's page migration 2nd step -- deflate "page" + * + * It's safe to delete page->lru here because this page is at + * an isolated migration list, and this step is expected to happen here + */ + balloon_page_delete(page); + vb->num_pfns = VIRTIO_BALLOON_PAGES_PER_PAGE; + set_page_pfns(vb->pfns, page); + tell_host(vb, vb->deflate_vq); + + mutex_unlock(&vb->balloon_lock); + + return MIGRATEPAGE_BALLOON_SUCCESS; +} + +/* define the balloon_mapping->a_ops callback to allow balloon page migration */ +static const struct address_space_operations virtio_balloon_aops = { + .migratepage = virtballoon_migratepage, +}; +#endif /* CONFIG_BALLOON_COMPACTION */ + static int virtballoon_probe(struct virtio_device *vdev) { struct virtio_balloon *vb; + struct address_space *vb_mapping; + struct balloon_dev_info *vb_devinfo; int err; vdev->priv = vb = kmalloc(sizeof(*vb), GFP_KERNEL); @@ -350,16 +439,37 @@ static int virtballoon_probe(struct virtio_device *vdev) goto out; } - INIT_LIST_HEAD(&vb->pages); vb->num_pages = 0; + mutex_init(&vb->balloon_lock); init_waitqueue_head(&vb->config_change); init_waitqueue_head(&vb->acked); vb->vdev = vdev; vb->need_stats_update = 0; + vb_devinfo = balloon_devinfo_alloc(vb); + if (IS_ERR(vb_devinfo)) { + err = PTR_ERR(vb_devinfo); + goto out_free_vb; + } + + vb_mapping = balloon_mapping_alloc(vb_devinfo, + (balloon_compaction_check()) ? + &virtio_balloon_aops : NULL); + if (IS_ERR(vb_mapping)) { + /* + * IS_ERR(vb_mapping) && PTR_ERR(vb_mapping) == -EOPNOTSUPP + * This means !CONFIG_BALLOON_COMPACTION, otherwise we get off. 
+ */ + err = PTR_ERR(vb_mapping); + if (err != -EOPNOTSUPP) + goto out_free_vb_devinfo; + } + + vb->vb_dev_info = vb_devinfo; + err = init_vqs(vb); if (err) - goto out_free_vb; + goto out_free_vb_mapping; vb->thread = kthread_run(balloon, vb, "vballoon"); if (IS_ERR(vb->thread)) { @@ -371,6 +481,10 @@ static int virtballoon_probe(struct virtio_device *vdev) out_del_vqs: vdev->config->del_vqs(vdev); +out_free_vb_mapping: + balloon_mapping_free(vb_mapping); +out_free_vb_devinfo: + balloon_devinfo_free(vb_devinfo); out_free_vb: kfree(vb); out: @@ -390,16 +504,18 @@ static void remove_common(struct virtio_balloon *vb) vb->vdev->config->del_vqs(vb->vdev); } -static void __devexit virtballoon_remove(struct virtio_device *vdev) +static void virtballoon_remove(struct virtio_device *vdev) { struct virtio_balloon *vb = vdev->priv; kthread_stop(vb->thread); remove_common(vb); + balloon_mapping_free(vb->vb_dev_info->mapping); + balloon_devinfo_free(vb->vb_dev_info); kfree(vb); } -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP static int virtballoon_freeze(struct virtio_device *vdev) { struct virtio_balloon *vb = vdev->priv; @@ -440,26 +556,15 @@ static struct virtio_driver virtio_balloon_driver = { .driver.owner = THIS_MODULE, .id_table = id_table, .probe = virtballoon_probe, - .remove = __devexit_p(virtballoon_remove), + .remove = virtballoon_remove, .config_changed = virtballoon_changed, -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP .freeze = virtballoon_freeze, .restore = virtballoon_restore, #endif }; -static int __init init(void) -{ - return register_virtio_driver(&virtio_balloon_driver); -} - -static void __exit fini(void) -{ - unregister_virtio_driver(&virtio_balloon_driver); -} -module_init(init); -module_exit(fini); - +module_virtio_driver(virtio_balloon_driver); MODULE_DEVICE_TABLE(virtio, id_table); MODULE_DESCRIPTION("Virtio balloon driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c index 6b1b7e18493..c600ccfd692 100644 --- a/drivers/virtio/virtio_mmio.c +++ b/drivers/virtio/virtio_mmio.c @@ -75,7 +75,7 @@ * * 0x050 W QueueNotify Queue notifier * 0x060 R InterruptStatus Interrupt status register - * 0x060 W InterruptACK Interrupt acknowledge register + * 0x064 W InterruptACK Interrupt acknowledge register * 0x070 RW Status Device status register * * 0x100+ RW Device-specific configuration space @@ -219,13 +219,14 @@ static void vm_reset(struct virtio_device *vdev) /* Transport interface */ /* the notify function used when creating a virt queue */ -static void vm_notify(struct virtqueue *vq) +static bool vm_notify(struct virtqueue *vq) { struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vq->vdev); /* We write the queue's selector into the notification register to * signal the other end */ - writel(virtqueue_get_queue_index(vq), vm_dev->base + VIRTIO_MMIO_QUEUE_NOTIFY); + writel(vq->index, vm_dev->base + VIRTIO_MMIO_QUEUE_NOTIFY); + return true; } /* Notify all virtqueues on an interrupt. 
*/ @@ -266,7 +267,7 @@ static void vm_del_vq(struct virtqueue *vq) struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vq->vdev); struct virtio_mmio_vq_info *info = vq->priv; unsigned long flags, size; - unsigned int index = virtqueue_get_queue_index(vq); + unsigned int index = vq->index; spin_lock_irqsave(&vm_dev->lock, flags); list_del(&info->node); @@ -423,7 +424,7 @@ static const char *vm_bus_name(struct virtio_device *vdev) return vm_dev->pdev->name; } -static struct virtio_config_ops virtio_mmio_config_ops = { +static const struct virtio_config_ops virtio_mmio_config_ops = { .get = vm_get, .set = vm_set, .get_status = vm_get_status, @@ -440,7 +441,7 @@ static struct virtio_config_ops virtio_mmio_config_ops = { /* Platform device */ -static int __devinit virtio_mmio_probe(struct platform_device *pdev) +static int virtio_mmio_probe(struct platform_device *pdev) { struct virtio_mmio_device *vm_dev; struct resource *mem; @@ -470,7 +471,7 @@ static int __devinit virtio_mmio_probe(struct platform_device *pdev) /* Check magic value */ magic = readl(vm_dev->base + VIRTIO_MMIO_MAGIC_VALUE); - if (memcmp(&magic, "virt", 4) != 0) { + if (magic != ('v' | 'i' << 8 | 'r' << 16 | 't' << 24)) { dev_warn(&pdev->dev, "Wrong magic value 0x%08lx!\n", magic); return -ENODEV; } @@ -493,7 +494,7 @@ static int __devinit virtio_mmio_probe(struct platform_device *pdev) return register_virtio_device(&vm_dev->vdev); } -static int __devexit virtio_mmio_remove(struct platform_device *pdev) +static int virtio_mmio_remove(struct platform_device *pdev) { struct virtio_mmio_device *vm_dev = platform_get_drvdata(pdev); @@ -521,25 +522,33 @@ static int vm_cmdline_set(const char *device, int err; struct resource resources[2] = {}; char *str; - long long int base; + long long int base, size; + unsigned int irq; int processed, consumed = 0; struct platform_device *pdev; - resources[0].flags = IORESOURCE_MEM; - resources[1].flags = IORESOURCE_IRQ; - - resources[0].end = memparse(device, &str) - 1; + /* Consume "size" part of the command line parameter */ + size = memparse(device, &str); + /* Get "@<base>:<irq>[:<id>]" chunks */ processed = sscanf(str, "@%lli:%u%n:%d%n", - &base, &resources[1].start, &consumed, + &base, &irq, &consumed, &vm_cmdline_id, &consumed); - if (processed < 2 || processed > 3 || str[consumed]) + /* + * sscanf() must processes at least 2 chunks; also there + * must be no extra characters after the last chunk, so + * str[consumed] must be '\0' + */ + if (processed < 2 || str[consumed]) return -EINVAL; + resources[0].flags = IORESOURCE_MEM; resources[0].start = base; - resources[0].end += base; - resources[1].end = resources[1].start; + resources[0].end = base + size - 1; + + resources[1].flags = IORESOURCE_IRQ; + resources[1].start = resources[1].end = irq; if (!vm_cmdline_parent_registered) { err = device_register(&vm_cmdline_parent); @@ -630,7 +639,7 @@ MODULE_DEVICE_TABLE(of, virtio_mmio_match); static struct platform_driver virtio_mmio_driver = { .probe = virtio_mmio_probe, - .remove = __devexit_p(virtio_mmio_remove), + .remove = virtio_mmio_remove, .driver = { .name = "virtio-mmio", .owner = THIS_MODULE, diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c index c33aea36598..101db3faf5d 100644 --- a/drivers/virtio/virtio_pci.c +++ b/drivers/virtio/virtio_pci.c @@ -91,9 +91,9 @@ struct virtio_pci_vq_info }; /* Qumranet donated their vendor ID for devices 0x1000 thru 0x10FF. 
*/ -static struct pci_device_id virtio_pci_id_table[] = { - { 0x1af4, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, - { 0 }, +static DEFINE_PCI_DEVICE_TABLE(virtio_pci_id_table) = { + { PCI_DEVICE(0x1af4, PCI_ANY_ID) }, + { 0 } }; MODULE_DEVICE_TABLE(pci, virtio_pci_id_table); @@ -197,14 +197,14 @@ static void vp_reset(struct virtio_device *vdev) } /* the notify function used when creating a virt queue */ -static void vp_notify(struct virtqueue *vq) +static bool vp_notify(struct virtqueue *vq) { struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev); /* we write the queue's selector into the notification register to * signal the other end */ - iowrite16(virtqueue_get_queue_index(vq), - vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NOTIFY); + iowrite16(vq->index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NOTIFY); + return true; } /* Handle a configuration change: Tell driver if it wants to know. */ @@ -290,9 +290,9 @@ static void vp_free_vectors(struct virtio_device *vdev) pci_disable_msix(vp_dev->pci_dev); vp_dev->msix_enabled = 0; - vp_dev->msix_vectors = 0; } + vp_dev->msix_vectors = 0; vp_dev->msix_used_vectors = 0; kfree(vp_dev->msix_names); vp_dev->msix_names = NULL; @@ -310,6 +310,8 @@ static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors, unsigned i, v; int err = -ENOMEM; + vp_dev->msix_vectors = nvectors; + vp_dev->msix_entries = kmalloc(nvectors * sizeof *vp_dev->msix_entries, GFP_KERNEL); if (!vp_dev->msix_entries) @@ -331,13 +333,10 @@ static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors, for (i = 0; i < nvectors; ++i) vp_dev->msix_entries[i].entry = i; - /* pci_enable_msix returns positive if we can't get this many. */ - err = pci_enable_msix(vp_dev->pci_dev, vp_dev->msix_entries, nvectors); - if (err > 0) - err = -ENOSPC; + err = pci_enable_msix_exact(vp_dev->pci_dev, + vp_dev->msix_entries, nvectors); if (err) goto error; - vp_dev->msix_vectors = nvectors; vp_dev->msix_enabled = 1; /* Set the vector used for configuration */ @@ -479,8 +478,7 @@ static void vp_del_vq(struct virtqueue *vq) list_del(&info->node); spin_unlock_irqrestore(&vp_dev->lock, flags); - iowrite16(virtqueue_get_queue_index(vq), - vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL); + iowrite16(vq->index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL); if (vp_dev->msix_enabled) { iowrite16(VIRTIO_MSI_NO_VECTOR, @@ -654,7 +652,7 @@ static int vp_set_vq_affinity(struct virtqueue *vq, int cpu) return 0; } -static struct virtio_config_ops virtio_pci_config_ops = { +static const struct virtio_config_ops virtio_pci_config_ops = { .get = vp_get, .set = vp_set, .get_status = vp_get_status, @@ -678,8 +676,8 @@ static void virtio_pci_release_dev(struct device *_d) } /* the PCI probing function */ -static int __devinit virtio_pci_probe(struct pci_dev *pci_dev, - const struct pci_device_id *id) +static int virtio_pci_probe(struct pci_dev *pci_dev, + const struct pci_device_id *id) { struct virtio_pci_device *vp_dev; int err; @@ -742,7 +740,6 @@ static int __devinit virtio_pci_probe(struct pci_dev *pci_dev, return 0; out_set_drvdata: - pci_set_drvdata(pci_dev, NULL); pci_iounmap(pci_dev, vp_dev->ioaddr); out_req_regions: pci_release_regions(pci_dev); @@ -753,21 +750,20 @@ out: return err; } -static void __devexit virtio_pci_remove(struct pci_dev *pci_dev) +static void virtio_pci_remove(struct pci_dev *pci_dev) { struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev); unregister_virtio_device(&vp_dev->vdev); vp_del_vqs(&vp_dev->vdev); - pci_set_drvdata(pci_dev, NULL); pci_iounmap(pci_dev, vp_dev->ioaddr); 
pci_release_regions(pci_dev); pci_disable_device(pci_dev); kfree(vp_dev); } -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP static int virtio_pci_freeze(struct device *dev) { struct pci_dev *pci_dev = to_pci_dev(dev); @@ -824,22 +820,10 @@ static struct pci_driver virtio_pci_driver = { .name = "virtio-pci", .id_table = virtio_pci_id_table, .probe = virtio_pci_probe, - .remove = __devexit_p(virtio_pci_remove), -#ifdef CONFIG_PM + .remove = virtio_pci_remove, +#ifdef CONFIG_PM_SLEEP .driver.pm = &virtio_pci_pm_ops, #endif }; -static int __init virtio_pci_init(void) -{ - return pci_register_driver(&virtio_pci_driver); -} - -module_init(virtio_pci_init); - -static void __exit virtio_pci_exit(void) -{ - pci_unregister_driver(&virtio_pci_driver); -} - -module_exit(virtio_pci_exit); +module_pci_driver(virtio_pci_driver); diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c index e639584b2db..4d08f45a9c2 100644 --- a/drivers/virtio/virtio_ring.c +++ b/drivers/virtio/virtio_ring.c @@ -23,27 +23,7 @@ #include <linux/slab.h> #include <linux/module.h> #include <linux/hrtimer.h> - -/* virtio guest is communicating with a virtual "device" that actually runs on - * a host processor. Memory barriers are used to control SMP effects. */ -#ifdef CONFIG_SMP -/* Where possible, use SMP barriers which are more lightweight than mandatory - * barriers, because mandatory barriers control MMIO effects on accesses - * through relaxed memory I/O windows (which virtio-pci does not use). */ -#define virtio_mb(vq) \ - do { if ((vq)->weak_barriers) smp_mb(); else mb(); } while(0) -#define virtio_rmb(vq) \ - do { if ((vq)->weak_barriers) smp_rmb(); else rmb(); } while(0) -#define virtio_wmb(vq) \ - do { if ((vq)->weak_barriers) smp_wmb(); else wmb(); } while(0) -#else -/* We must force memory ordering even if guest is UP since host could be - * running on another CPU, but SMP barriers are defined to barrier() in that - * configuration. So fall back to mandatory barriers instead. */ -#define virtio_mb(vq) mb() -#define virtio_rmb(vq) rmb() -#define virtio_wmb(vq) wmb() -#endif +#include <linux/kmemleak.h> #ifdef DEBUG /* For development, we want to crash whenever the ring is screwed. */ @@ -93,8 +73,6 @@ struct vring_virtqueue /* Host publishes avail event idx */ bool event; - /* Number of free buffers */ - unsigned int num_free; /* Head of free buffer list. */ unsigned int free_head; /* Number we've added since last sync. */ @@ -104,10 +82,7 @@ struct vring_virtqueue u16 last_used_idx; /* How to notify other side. FIXME: commonalize hcalls! */ - void (*notify)(struct virtqueue *vq); - - /* Index of the queue */ - int queue_index; + bool (*notify)(struct virtqueue *vq); #ifdef DEBUG /* They're supposed to lock for us. */ @@ -124,48 +99,83 @@ struct vring_virtqueue #define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq) +static inline struct scatterlist *sg_next_chained(struct scatterlist *sg, + unsigned int *count) +{ + return sg_next(sg); +} + +static inline struct scatterlist *sg_next_arr(struct scatterlist *sg, + unsigned int *count) +{ + if (--(*count) == 0) + return NULL; + return sg + 1; +} + /* Set up an indirect table of descriptors and add it to the queue. 
*/ -static int vring_add_indirect(struct vring_virtqueue *vq, - struct scatterlist sg[], - unsigned int out, - unsigned int in, - gfp_t gfp) +static inline int vring_add_indirect(struct vring_virtqueue *vq, + struct scatterlist *sgs[], + struct scatterlist *(*next) + (struct scatterlist *, unsigned int *), + unsigned int total_sg, + unsigned int total_out, + unsigned int total_in, + unsigned int out_sgs, + unsigned int in_sgs, + gfp_t gfp) { struct vring_desc *desc; unsigned head; - int i; + struct scatterlist *sg; + int i, n; - desc = kmalloc((out + in) * sizeof(struct vring_desc), gfp); + /* + * We require lowmem mappings for the descriptors because + * otherwise virt_to_phys will give us bogus addresses in the + * virtqueue. + */ + gfp &= ~(__GFP_HIGHMEM | __GFP_HIGH); + + desc = kmalloc(total_sg * sizeof(struct vring_desc), gfp); if (!desc) return -ENOMEM; - /* Transfer entries from the sg list into the indirect page */ - for (i = 0; i < out; i++) { - desc[i].flags = VRING_DESC_F_NEXT; - desc[i].addr = sg_phys(sg); - desc[i].len = sg->length; - desc[i].next = i+1; - sg++; + /* Transfer entries from the sg lists into the indirect page */ + i = 0; + for (n = 0; n < out_sgs; n++) { + for (sg = sgs[n]; sg; sg = next(sg, &total_out)) { + desc[i].flags = VRING_DESC_F_NEXT; + desc[i].addr = sg_phys(sg); + desc[i].len = sg->length; + desc[i].next = i+1; + i++; + } } - for (; i < (out + in); i++) { - desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE; - desc[i].addr = sg_phys(sg); - desc[i].len = sg->length; - desc[i].next = i+1; - sg++; + for (; n < (out_sgs + in_sgs); n++) { + for (sg = sgs[n]; sg; sg = next(sg, &total_in)) { + desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE; + desc[i].addr = sg_phys(sg); + desc[i].len = sg->length; + desc[i].next = i+1; + i++; + } } + BUG_ON(i != total_sg); /* Last one doesn't continue. */ desc[i-1].flags &= ~VRING_DESC_F_NEXT; desc[i-1].next = 0; /* We're about to use a buffer */ - vq->num_free--; + vq->vq.num_free--; /* Use a single buffer which doesn't continue */ head = vq->free_head; vq->vring.desc[head].flags = VRING_DESC_F_INDIRECT; vq->vring.desc[head].addr = virt_to_phys(desc); + /* kmemleak gives a false positive, as it's hidden by virt_to_phys */ + kmemleak_ignore(desc); vq->vring.desc[head].len = i * sizeof(struct vring_desc); /* Update free pointer */ @@ -174,45 +184,31 @@ static int vring_add_indirect(struct vring_virtqueue *vq, return head; } -int virtqueue_get_queue_index(struct virtqueue *_vq) -{ - struct vring_virtqueue *vq = to_vvq(_vq); - return vq->queue_index; -} -EXPORT_SYMBOL_GPL(virtqueue_get_queue_index); - -/** - * virtqueue_add_buf - expose buffer to other end - * @vq: the struct virtqueue we're talking about. - * @sg: the description of the buffer(s). - * @out_num: the number of sg readable by other side - * @in_num: the number of sg which are writable (after readable ones) - * @data: the token identifying the buffer. - * @gfp: how to do memory allocations (if necessary). - * - * Caller must ensure we don't call this with other virtqueue operations - * at the same time (except where noted). - * - * Returns remaining capacity of queue or a negative error - * (ie. ENOSPC). Note that it only really makes sense to treat all - * positive return values as "available": indirect buffers mean that - * we can put an entire sg[] array inside a single queue entry. 
- */ -int virtqueue_add_buf(struct virtqueue *_vq, - struct scatterlist sg[], - unsigned int out, - unsigned int in, - void *data, - gfp_t gfp) +static inline int virtqueue_add(struct virtqueue *_vq, + struct scatterlist *sgs[], + struct scatterlist *(*next) + (struct scatterlist *, unsigned int *), + unsigned int total_out, + unsigned int total_in, + unsigned int out_sgs, + unsigned int in_sgs, + void *data, + gfp_t gfp) { struct vring_virtqueue *vq = to_vvq(_vq); - unsigned int i, avail, uninitialized_var(prev); + struct scatterlist *sg; + unsigned int i, n, avail, uninitialized_var(prev), total_sg; int head; START_USE(vq); BUG_ON(data == NULL); + if (unlikely(vq->broken)) { + END_USE(vq); + return -EIO; + } + #ifdef DEBUG { ktime_t now = ktime_get(); @@ -226,46 +222,54 @@ int virtqueue_add_buf(struct virtqueue *_vq, } #endif + total_sg = total_in + total_out; + /* If the host supports indirect descriptor tables, and we have multiple * buffers, then go indirect. FIXME: tune this threshold */ - if (vq->indirect && (out + in) > 1 && vq->num_free) { - head = vring_add_indirect(vq, sg, out, in, gfp); + if (vq->indirect && total_sg > 1 && vq->vq.num_free) { + head = vring_add_indirect(vq, sgs, next, total_sg, total_out, + total_in, + out_sgs, in_sgs, gfp); if (likely(head >= 0)) goto add_head; } - BUG_ON(out + in > vq->vring.num); - BUG_ON(out + in == 0); + BUG_ON(total_sg > vq->vring.num); + BUG_ON(total_sg == 0); - if (vq->num_free < out + in) { + if (vq->vq.num_free < total_sg) { pr_debug("Can't add buf len %i - avail = %i\n", - out + in, vq->num_free); + total_sg, vq->vq.num_free); /* FIXME: for historical reasons, we force a notify here if * there are outgoing parts to the buffer. Presumably the * host should service the ring ASAP. */ - if (out) + if (out_sgs) vq->notify(&vq->vq); END_USE(vq); return -ENOSPC; } /* We're about to use some buffers from the free list. */ - vq->num_free -= out + in; - - head = vq->free_head; - for (i = vq->free_head; out; i = vq->vring.desc[i].next, out--) { - vq->vring.desc[i].flags = VRING_DESC_F_NEXT; - vq->vring.desc[i].addr = sg_phys(sg); - vq->vring.desc[i].len = sg->length; - prev = i; - sg++; + vq->vq.num_free -= total_sg; + + head = i = vq->free_head; + for (n = 0; n < out_sgs; n++) { + for (sg = sgs[n]; sg; sg = next(sg, &total_out)) { + vq->vring.desc[i].flags = VRING_DESC_F_NEXT; + vq->vring.desc[i].addr = sg_phys(sg); + vq->vring.desc[i].len = sg->length; + prev = i; + i = vq->vring.desc[i].next; + } } - for (; in; i = vq->vring.desc[i].next, in--) { - vq->vring.desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE; - vq->vring.desc[i].addr = sg_phys(sg); - vq->vring.desc[i].len = sg->length; - prev = i; - sg++; + for (; n < (out_sgs + in_sgs); n++) { + for (sg = sgs[n]; sg; sg = next(sg, &total_in)) { + vq->vring.desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE; + vq->vring.desc[i].addr = sg_phys(sg); + vq->vring.desc[i].len = sg->length; + prev = i; + i = vq->vring.desc[i].next; + } } /* Last one doesn't continue. */ vq->vring.desc[prev].flags &= ~VRING_DESC_F_NEXT; @@ -284,7 +288,7 @@ add_head: /* Descriptors and available array need to be set before we expose the * new available array entries. */ - virtio_wmb(vq); + virtio_wmb(vq->weak_barriers); vq->vring.avail->idx++; vq->num_added++; @@ -296,9 +300,91 @@ add_head: pr_debug("Added buffer head %i to %p\n", head, vq); END_USE(vq); - return vq->num_free; + return 0; +} + +/** + * virtqueue_add_sgs - expose buffers to other end + * @vq: the struct virtqueue we're talking about. 
+ * @sgs: array of terminated scatterlists. + * @out_num: the number of scatterlists readable by other side + * @in_num: the number of scatterlists which are writable (after readable ones) + * @data: the token identifying the buffer. + * @gfp: how to do memory allocations (if necessary). + * + * Caller must ensure we don't call this with other virtqueue operations + * at the same time (except where noted). + * + * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO). + */ +int virtqueue_add_sgs(struct virtqueue *_vq, + struct scatterlist *sgs[], + unsigned int out_sgs, + unsigned int in_sgs, + void *data, + gfp_t gfp) +{ + unsigned int i, total_out, total_in; + + /* Count them first. */ + for (i = total_out = total_in = 0; i < out_sgs; i++) { + struct scatterlist *sg; + for (sg = sgs[i]; sg; sg = sg_next(sg)) + total_out++; + } + for (; i < out_sgs + in_sgs; i++) { + struct scatterlist *sg; + for (sg = sgs[i]; sg; sg = sg_next(sg)) + total_in++; + } + return virtqueue_add(_vq, sgs, sg_next_chained, + total_out, total_in, out_sgs, in_sgs, data, gfp); +} +EXPORT_SYMBOL_GPL(virtqueue_add_sgs); + +/** + * virtqueue_add_outbuf - expose output buffers to other end + * @vq: the struct virtqueue we're talking about. + * @sgs: array of scatterlists (need not be terminated!) + * @num: the number of scatterlists readable by other side + * @data: the token identifying the buffer. + * @gfp: how to do memory allocations (if necessary). + * + * Caller must ensure we don't call this with other virtqueue operations + * at the same time (except where noted). + * + * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO). + */ +int virtqueue_add_outbuf(struct virtqueue *vq, + struct scatterlist sg[], unsigned int num, + void *data, + gfp_t gfp) +{ + return virtqueue_add(vq, &sg, sg_next_arr, num, 0, 1, 0, data, gfp); +} +EXPORT_SYMBOL_GPL(virtqueue_add_outbuf); + +/** + * virtqueue_add_inbuf - expose input buffers to other end + * @vq: the struct virtqueue we're talking about. + * @sgs: array of scatterlists (need not be terminated!) + * @num: the number of scatterlists writable by other side + * @data: the token identifying the buffer. + * @gfp: how to do memory allocations (if necessary). + * + * Caller must ensure we don't call this with other virtqueue operations + * at the same time (except where noted). + * + * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO). + */ +int virtqueue_add_inbuf(struct virtqueue *vq, + struct scatterlist sg[], unsigned int num, + void *data, + gfp_t gfp) +{ + return virtqueue_add(vq, &sg, sg_next_arr, 0, num, 0, 1, data, gfp); } -EXPORT_SYMBOL_GPL(virtqueue_add_buf); +EXPORT_SYMBOL_GPL(virtqueue_add_inbuf); /** * virtqueue_kick_prepare - first half of split virtqueue_kick call. @@ -320,7 +406,7 @@ bool virtqueue_kick_prepare(struct virtqueue *_vq) START_USE(vq); /* We need to expose available array entries before checking avail * event. */ - virtio_mb(vq); + virtio_mb(vq->weak_barriers); old = vq->vring.avail->idx - vq->num_added; new = vq->vring.avail->idx; @@ -350,13 +436,22 @@ EXPORT_SYMBOL_GPL(virtqueue_kick_prepare); * @vq: the struct virtqueue * * This does not need to be serialized. + * + * Returns false if host notify failed or queue is broken, otherwise true. */ -void virtqueue_notify(struct virtqueue *_vq) +bool virtqueue_notify(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); + if (unlikely(vq->broken)) + return false; + /* Prod other side to tell it about changes. 
*/ - vq->notify(_vq); + if (!vq->notify(_vq)) { + vq->broken = true; + return false; + } + return true; } EXPORT_SYMBOL_GPL(virtqueue_notify); @@ -364,16 +459,19 @@ EXPORT_SYMBOL_GPL(virtqueue_notify); * virtqueue_kick - update after add_buf * @vq: the struct virtqueue * - * After one or more virtqueue_add_buf calls, invoke this to kick + * After one or more virtqueue_add_* calls, invoke this to kick * the other side. * * Caller must ensure we don't call this with other virtqueue * operations at the same time (except where noted). + * + * Returns false if kick failed, otherwise true. */ -void virtqueue_kick(struct virtqueue *vq) +bool virtqueue_kick(struct virtqueue *vq) { if (virtqueue_kick_prepare(vq)) - virtqueue_notify(vq); + return virtqueue_notify(vq); + return true; } EXPORT_SYMBOL_GPL(virtqueue_kick); @@ -393,13 +491,13 @@ static void detach_buf(struct vring_virtqueue *vq, unsigned int head) while (vq->vring.desc[i].flags & VRING_DESC_F_NEXT) { i = vq->vring.desc[i].next; - vq->num_free++; + vq->vq.num_free++; } vq->vring.desc[i].next = vq->free_head; vq->free_head = head; /* Plus final descriptor */ - vq->num_free++; + vq->vq.num_free++; } static inline bool more_used(const struct vring_virtqueue *vq) @@ -421,7 +519,7 @@ static inline bool more_used(const struct vring_virtqueue *vq) * operations at the same time (except where noted). * * Returns NULL if there are no used buffers, or the "data" token - * handed to virtqueue_add_buf(). + * handed to virtqueue_add_*(). */ void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len) { @@ -444,7 +542,7 @@ void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len) } /* Only get used array entries after they have been exposed by host. */ - virtio_rmb(vq); + virtio_rmb(vq->weak_barriers); last_used = (vq->last_used_idx & (vq->vring.num - 1)); i = vq->vring.used->ring[last_used].id; @@ -468,7 +566,7 @@ void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len) * the read in the next get_buf call. */ if (!(vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT)) { vring_used_event(&vq->vring) = vq->last_used_idx; - virtio_mb(vq); + virtio_mb(vq->weak_barriers); } #ifdef DEBUG @@ -498,19 +596,21 @@ void virtqueue_disable_cb(struct virtqueue *_vq) EXPORT_SYMBOL_GPL(virtqueue_disable_cb); /** - * virtqueue_enable_cb - restart callbacks after disable_cb. + * virtqueue_enable_cb_prepare - restart callbacks after disable_cb * @vq: the struct virtqueue we're talking about. * - * This re-enables callbacks; it returns "false" if there are pending - * buffers in the queue, to detect a possible race between the driver - * checking for more work, and enabling callbacks. + * This re-enables callbacks; it returns current queue state + * in an opaque unsigned value. This value should be later tested by + * virtqueue_poll, to detect a possible race between the driver checking for + * more work, and enabling callbacks. * * Caller must ensure we don't call this with other virtqueue * operations at the same time (except where noted). */ -bool virtqueue_enable_cb(struct virtqueue *_vq) +unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); + u16 last_used_idx; START_USE(vq); @@ -520,15 +620,45 @@ bool virtqueue_enable_cb(struct virtqueue *_vq) * either clear the flags bit or point the event index at the next * entry. Always do both to keep code simple. 
*/ vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT; - vring_used_event(&vq->vring) = vq->last_used_idx; - virtio_mb(vq); - if (unlikely(more_used(vq))) { - END_USE(vq); - return false; - } - + vring_used_event(&vq->vring) = last_used_idx = vq->last_used_idx; END_USE(vq); - return true; + return last_used_idx; +} +EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare); + +/** + * virtqueue_poll - query pending used buffers + * @vq: the struct virtqueue we're talking about. + * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare). + * + * Returns "true" if there are pending used buffers in the queue. + * + * This does not need to be serialized. + */ +bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx) +{ + struct vring_virtqueue *vq = to_vvq(_vq); + + virtio_mb(vq->weak_barriers); + return (u16)last_used_idx != vq->vring.used->idx; +} +EXPORT_SYMBOL_GPL(virtqueue_poll); + +/** + * virtqueue_enable_cb - restart callbacks after disable_cb. + * @vq: the struct virtqueue we're talking about. + * + * This re-enables callbacks; it returns "false" if there are pending + * buffers in the queue, to detect a possible race between the driver + * checking for more work, and enabling callbacks. + * + * Caller must ensure we don't call this with other virtqueue + * operations at the same time (except where noted). + */ +bool virtqueue_enable_cb(struct virtqueue *_vq) +{ + unsigned last_used_idx = virtqueue_enable_cb_prepare(_vq); + return !virtqueue_poll(_vq, last_used_idx); } EXPORT_SYMBOL_GPL(virtqueue_enable_cb); @@ -561,7 +691,7 @@ bool virtqueue_enable_cb_delayed(struct virtqueue *_vq) /* TODO: tune this threshold */ bufs = (u16)(vq->vring.avail->idx - vq->last_used_idx) * 3 / 4; vring_used_event(&vq->vring) = vq->last_used_idx + bufs; - virtio_mb(vq); + virtio_mb(vq->weak_barriers); if (unlikely((u16)(vq->vring.used->idx - vq->last_used_idx) > bufs)) { END_USE(vq); return false; @@ -576,7 +706,7 @@ EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed); * virtqueue_detach_unused_buf - detach first unused buffer * @vq: the struct virtqueue we're talking about. * - * Returns NULL or the "data" token handed to virtqueue_add_buf(). + * Returns NULL or the "data" token handed to virtqueue_add_*(). * This is not valid on an active queue; it is useful only for device * shutdown. */ @@ -599,7 +729,7 @@ void *virtqueue_detach_unused_buf(struct virtqueue *_vq) return buf; } /* That should have freed everything. */ - BUG_ON(vq->num_free != vq->vring.num); + BUG_ON(vq->vq.num_free != vq->vring.num); END_USE(vq); return NULL; @@ -632,7 +762,7 @@ struct virtqueue *vring_new_virtqueue(unsigned int index, struct virtio_device *vdev, bool weak_barriers, void *pages, - void (*notify)(struct virtqueue *), + bool (*notify)(struct virtqueue *), void (*callback)(struct virtqueue *), const char *name) { @@ -653,12 +783,13 @@ struct virtqueue *vring_new_virtqueue(unsigned int index, vq->vq.callback = callback; vq->vq.vdev = vdev; vq->vq.name = name; + vq->vq.num_free = num; + vq->vq.index = index; vq->notify = notify; vq->weak_barriers = weak_barriers; vq->broken = false; vq->last_used_idx = 0; vq->num_added = 0; - vq->queue_index = index; list_add_tail(&vq->vq.list, &vdev->vqs); #ifdef DEBUG vq->in_use = false; @@ -673,7 +804,6 @@ struct virtqueue *vring_new_virtqueue(unsigned int index, vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT; /* Put everything in free lists. 
*/ - vq->num_free = num; vq->free_head = 0; for (i = 0; i < num-1; i++) { vq->vring.desc[i].next = i+1; @@ -727,4 +857,27 @@ unsigned int virtqueue_get_vring_size(struct virtqueue *_vq) } EXPORT_SYMBOL_GPL(virtqueue_get_vring_size); +bool virtqueue_is_broken(struct virtqueue *_vq) +{ + struct vring_virtqueue *vq = to_vvq(_vq); + + return vq->broken; +} +EXPORT_SYMBOL_GPL(virtqueue_is_broken); + +/* + * This should prevent the device from being used, allowing drivers to + * recover. You may need to grab appropriate locks to flush. + */ +void virtio_break_device(struct virtio_device *dev) +{ + struct virtqueue *_vq; + + list_for_each_entry(_vq, &dev->vqs, list) { + struct vring_virtqueue *vq = to_vvq(_vq); + vq->broken = true; + } +} +EXPORT_SYMBOL_GPL(virtio_break_device); + MODULE_LICENSE("GPL");
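
A few of the interfaces introduced above, sketched in isolation under stated assumptions. virtqueue_add_sgs() extends the add_outbuf/add_inbuf convention to several scatterlists, all device-readable lists first and device-writable ones after, which is how request/response pairs are queued by callers such as virtio_scsi. The example_req layout and names here are hypothetical:

#include <linux/scatterlist.h>
#include <linux/virtio.h>

/* Hypothetical request: not a real device ABI. */
struct example_req {
	__le32 cmd;	/* read by the device */
	u8 status;	/* written back by the device */
};

static int example_queue_req(struct virtqueue *vq, struct example_req *req)
{
	struct scatterlist cmd, status;
	struct scatterlist *sgs[] = { &cmd, &status };

	sg_init_one(&cmd, &req->cmd, sizeof(req->cmd));
	sg_init_one(&status, &req->status, sizeof(req->status));

	/* one out_sgs entry (device reads), then one in_sgs entry */
	return virtqueue_add_sgs(vq, sgs, 1, 1, req, GFP_ATOMIC);
}
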
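The virtqueue_enable_cb_prepare()/virtqueue_poll() split lets a driver re-enable callbacks and then detect buffers that raced in before the enable took effect, the pattern virtio_net's NAPI poll relies on. A sketch, with process_buf() standing in for a hypothetical completion handler:

#include <linux/virtio.h>

static void process_buf(void *buf, unsigned int len);	/* driver-specific */

/* Illustrative drain loop, not from the patch. */
static void example_drain(struct virtqueue *vq)
{
	unsigned int len, opaque;
	void *buf;

	do {
		virtqueue_disable_cb(vq);
		while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
			process_buf(buf, len);
		opaque = virtqueue_enable_cb_prepare(vq);
		/* true if a buffer arrived before callbacks were re-enabled */
	} while (virtqueue_poll(vq, opaque));
}
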
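The virtio.c sysfs change is the stock dev_attrs to dev_groups migration: each show routine gets a DEVICE_ATTR_RO() definition, the attributes are collected in a NULL-terminated array, and ATTRIBUTE_GROUPS() emits the *_groups pointer the bus structure now takes. The shape of the pattern, with made-up demo_* names:

#include <linux/device.h>

/* Illustrative bus code, not from the patch. */
static ssize_t value_show(struct device *d, struct device_attribute *attr,
			  char *buf)
{
	return sprintf(buf, "42\n");
}
static DEVICE_ATTR_RO(value);		/* defines dev_attr_value */

static struct attribute *demo_dev_attrs[] = {
	&dev_attr_value.attr,
	NULL,
};
ATTRIBUTE_GROUPS(demo_dev);		/* defines demo_dev_groups */

static struct bus_type demo_bus = {
	.name       = "demo",
	.dev_groups = demo_dev_groups,
};
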
