Diffstat (limited to 'drivers/virtio')
-rw-r--r--  drivers/virtio/Kconfig          |   8
-rw-r--r--  drivers/virtio/virtio.c         |  27
-rw-r--r--  drivers/virtio/virtio_balloon.c |  55
-rw-r--r--  drivers/virtio/virtio_mmio.c    |   9
-rw-r--r--  drivers/virtio/virtio_pci.c     |  28
-rw-r--r--  drivers/virtio/virtio_ring.c    | 385
6 files changed, 338 insertions(+), 174 deletions(-)
diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig
index 8d5bddb56cb..c6683f2e396 100644
--- a/drivers/virtio/Kconfig
+++ b/drivers/virtio/Kconfig
@@ -8,8 +8,8 @@ config VIRTIO
menu "Virtio drivers"
config VIRTIO_PCI
- tristate "PCI driver for virtio devices (EXPERIMENTAL)"
- depends on PCI && EXPERIMENTAL
+ tristate "PCI driver for virtio devices"
+ depends on PCI
select VIRTIO
---help---
This driver provides support for virtio-based paravirtual device
@@ -32,8 +32,8 @@ config VIRTIO_BALLOON
If unsure, say M.
config VIRTIO_MMIO
- tristate "Platform bus driver for memory mapped virtio devices (EXPERIMENTAL)"
- depends on HAS_IOMEM && EXPERIMENTAL
+ tristate "Platform bus driver for memory mapped virtio devices"
+ depends on HAS_IOMEM
select VIRTIO
---help---
This driver provides support for memory-mapped virtio
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
index ee59b74768d..fed0ce198ae 100644
--- a/drivers/virtio/virtio.c
+++ b/drivers/virtio/virtio.c
@@ -13,18 +13,24 @@ static ssize_t device_show(struct device *_d,
struct virtio_device *dev = dev_to_virtio(_d);
return sprintf(buf, "0x%04x\n", dev->id.device);
}
+static DEVICE_ATTR_RO(device);
+
static ssize_t vendor_show(struct device *_d,
struct device_attribute *attr, char *buf)
{
struct virtio_device *dev = dev_to_virtio(_d);
return sprintf(buf, "0x%04x\n", dev->id.vendor);
}
+static DEVICE_ATTR_RO(vendor);
+
static ssize_t status_show(struct device *_d,
struct device_attribute *attr, char *buf)
{
struct virtio_device *dev = dev_to_virtio(_d);
return sprintf(buf, "0x%08x\n", dev->config->get_status(dev));
}
+static DEVICE_ATTR_RO(status);
+
static ssize_t modalias_show(struct device *_d,
struct device_attribute *attr, char *buf)
{
@@ -32,6 +38,8 @@ static ssize_t modalias_show(struct device *_d,
return sprintf(buf, "virtio:d%08Xv%08X\n",
dev->id.device, dev->id.vendor);
}
+static DEVICE_ATTR_RO(modalias);
+
static ssize_t features_show(struct device *_d,
struct device_attribute *attr, char *buf)
{
@@ -47,14 +55,17 @@ static ssize_t features_show(struct device *_d,
len += sprintf(buf+len, "\n");
return len;
}
-static struct device_attribute virtio_dev_attrs[] = {
- __ATTR_RO(device),
- __ATTR_RO(vendor),
- __ATTR_RO(status),
- __ATTR_RO(modalias),
- __ATTR_RO(features),
- __ATTR_NULL
+static DEVICE_ATTR_RO(features);
+
+static struct attribute *virtio_dev_attrs[] = {
+ &dev_attr_device.attr,
+ &dev_attr_vendor.attr,
+ &dev_attr_status.attr,
+ &dev_attr_modalias.attr,
+ &dev_attr_features.attr,
+ NULL,
};
+ATTRIBUTE_GROUPS(virtio_dev);
static inline int virtio_id_match(const struct virtio_device *dev,
const struct virtio_device_id *id)
@@ -165,7 +176,7 @@ static int virtio_dev_remove(struct device *_d)
static struct bus_type virtio_bus = {
.name = "virtio",
.match = virtio_dev_match,
- .dev_attrs = virtio_dev_attrs,
+ .dev_groups = virtio_dev_groups,
.uevent = virtio_uevent,
.probe = virtio_dev_probe,
.remove = virtio_dev_remove,
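
For reference, a rough sketch of what the two helper macros used above expand to (per include/linux/device.h and include/linux/sysfs.h of this era; exact definitions may differ slightly by version):

/* DEVICE_ATTR_RO(device) is approximately: */
static struct device_attribute dev_attr_device = {
	.attr = { .name = "device", .mode = 0444 },
	.show = device_show,
};

/* ATTRIBUTE_GROUPS(virtio_dev) is approximately: */
static const struct attribute_group virtio_dev_group = {
	.attrs = virtio_dev_attrs,
};
static const struct attribute_group *virtio_dev_groups[] = {
	&virtio_dev_group,
	NULL,
};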
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 797e1c79a10..25ebe8eecdb 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -108,8 +108,7 @@ static void tell_host(struct virtio_balloon *vb, struct virtqueue *vq)
sg_init_one(&sg, vb->pfns, sizeof(vb->pfns[0]) * vb->num_pfns);
/* We should always be able to add one buffer to an empty queue. */
- if (virtqueue_add_buf(vq, &sg, 1, 0, vb, GFP_KERNEL) < 0)
- BUG();
+ virtqueue_add_outbuf(vq, &sg, 1, vb, GFP_KERNEL);
virtqueue_kick(vq);
/* When host has read buffer, this completes via balloon_ack */
@@ -148,7 +147,7 @@ static void fill_balloon(struct virtio_balloon *vb, size_t num)
}
set_page_pfns(vb->pfns + vb->num_pfns, page);
vb->num_pages += VIRTIO_BALLOON_PAGES_PER_PAGE;
- totalram_pages--;
+ adjust_managed_page_count(page, -1);
}
/* Did we get any? */
@@ -163,8 +162,9 @@ static void release_pages_by_pfn(const u32 pfns[], unsigned int num)
/* Find pfns pointing at start of each page, get pages and free them. */
for (i = 0; i < num; i += VIRTIO_BALLOON_PAGES_PER_PAGE) {
- balloon_page_free(balloon_pfn_to_page(pfns[i]));
- totalram_pages++;
+ struct page *page = balloon_pfn_to_page(pfns[i]);
+ balloon_page_free(page);
+ adjust_managed_page_count(page, 1);
}
}
@@ -191,7 +191,8 @@ static void leak_balloon(struct virtio_balloon *vb, size_t num)
* virtio_has_feature(vdev, VIRTIO_BALLOON_F_MUST_TELL_HOST);
* is true, we *have* to do it in this order
*/
- tell_host(vb, vb->deflate_vq);
+ if (vb->num_pfns != 0)
+ tell_host(vb, vb->deflate_vq);
mutex_unlock(&vb->balloon_lock);
release_pages_by_pfn(vb->pfns, vb->num_pfns);
}
@@ -256,8 +257,7 @@ static void stats_handle_request(struct virtio_balloon *vb)
if (!virtqueue_get_buf(vq, &len))
return;
sg_init_one(&sg, vb->stats, sizeof(vb->stats));
- if (virtqueue_add_buf(vq, &sg, 1, 0, vb, GFP_KERNEL) < 0)
- BUG();
+ virtqueue_add_outbuf(vq, &sg, 1, vb, GFP_KERNEL);
virtqueue_kick(vq);
}
@@ -273,9 +273,8 @@ static inline s64 towards_target(struct virtio_balloon *vb)
__le32 v;
s64 target;
- vb->vdev->config->get(vb->vdev,
- offsetof(struct virtio_balloon_config, num_pages),
- &v, sizeof(v));
+ virtio_cread(vb->vdev, struct virtio_balloon_config, num_pages, &v);
+
target = le32_to_cpu(v);
return target - vb->num_pages;
}
@@ -284,9 +283,8 @@ static void update_balloon_size(struct virtio_balloon *vb)
{
__le32 actual = cpu_to_le32(vb->num_pages);
- vb->vdev->config->set(vb->vdev,
- offsetof(struct virtio_balloon_config, actual),
- &actual, sizeof(actual));
+ virtio_cwrite(vb->vdev, struct virtio_balloon_config, actual,
+ &actual);
}
static int balloon(void *_vballoon)
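
The virtio_cread()/virtio_cwrite() helpers replace the open-coded offsetof() plus ->get()/->set() calls with type-checked config-space accessors. A simplified sketch of the read side, assuming the helpers from include/linux/virtio_config.h and ignoring the per-width endian conversion the real macro also performs:

/* Simplified sketch only -- the real macro dispatches on sizeof(*(ptr))
 * and byte-swaps the value as needed. */
#define virtio_cread(vdev, structname, member, ptr)			\
	do {								\
		/* Won't compile if ptr mismatches the member's type. */ \
		typecheck(typeof(((structname *)0)->member), *(ptr));	\
		(vdev)->config->get((vdev),				\
				    offsetof(structname, member),	\
				    (ptr), sizeof(*(ptr)));		\
	} while (0)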
@@ -310,6 +308,12 @@ static int balloon(void *_vballoon)
else if (diff < 0)
leak_balloon(vb, -diff);
update_balloon_size(vb);
+
+ /*
+ * For large balloon changes, we could spend a lot of time
+ * and always have work to do. Be nice to the rest of the
+ * system even when preemption is disabled.
+ */
+ cond_resched();
}
return 0;
}
@@ -338,10 +342,10 @@ static int init_vqs(struct virtio_balloon *vb)
/*
* Prime this virtqueue with one buffer so the hypervisor can
- * use it to signal us later.
+ * use it to signal us later (it can't be broken yet!).
*/
sg_init_one(&sg, vb->stats, sizeof vb->stats);
- if (virtqueue_add_buf(vb->stats_vq, &sg, 1, 0, vb, GFP_KERNEL)
+ if (virtqueue_add_outbuf(vb->stats_vq, &sg, 1, vb, GFP_KERNEL)
< 0)
BUG();
virtqueue_kick(vb->stats_vq);
@@ -369,7 +373,7 @@ static const struct address_space_operations virtio_balloon_aops;
 * This function performs the balloon page migration task.
* Called through balloon_mapping->a_ops->migratepage
*/
-int virtballoon_migratepage(struct address_space *mapping,
+static int virtballoon_migratepage(struct address_space *mapping,
struct page *newpage, struct page *page, enum migrate_mode mode)
{
struct balloon_dev_info *vb_dev_info = balloon_page_device(page);
@@ -511,7 +515,7 @@ static void virtballoon_remove(struct virtio_device *vdev)
kfree(vb);
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int virtballoon_freeze(struct virtio_device *vdev)
{
struct virtio_balloon *vb = vdev->priv;
@@ -554,24 +558,13 @@ static struct virtio_driver virtio_balloon_driver = {
.probe = virtballoon_probe,
.remove = virtballoon_remove,
.config_changed = virtballoon_changed,
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
.freeze = virtballoon_freeze,
.restore = virtballoon_restore,
#endif
};
-static int __init init(void)
-{
- return register_virtio_driver(&virtio_balloon_driver);
-}
-
-static void __exit fini(void)
-{
- unregister_virtio_driver(&virtio_balloon_driver);
-}
-module_init(init);
-module_exit(fini);
-
+module_virtio_driver(virtio_balloon_driver);
MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio balloon driver");
MODULE_LICENSE("GPL");
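
module_virtio_driver() replaces the init/exit boilerplate removed above. Roughly, it builds on module_driver() from include/linux/device.h:

/* Approximate definition (see include/linux/virtio.h): */
#define module_virtio_driver(__virtio_driver) \
	module_driver(__virtio_driver, register_virtio_driver, \
			unregister_virtio_driver)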
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
index 31f966f4d27..c600ccfd692 100644
--- a/drivers/virtio/virtio_mmio.c
+++ b/drivers/virtio/virtio_mmio.c
@@ -75,7 +75,7 @@
*
* 0x050 W QueueNotify Queue notifier
* 0x060 R InterruptStatus Interrupt status register
- * 0x060 W InterruptACK Interrupt acknowledge register
+ * 0x064 W InterruptACK Interrupt acknowledge register
* 0x070 RW Status Device status register
*
* 0x100+ RW Device-specific configuration space
@@ -219,13 +219,14 @@ static void vm_reset(struct virtio_device *vdev)
/* Transport interface */
/* the notify function used when creating a virt queue */
-static void vm_notify(struct virtqueue *vq)
+static bool vm_notify(struct virtqueue *vq)
{
struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vq->vdev);
/* We write the queue's selector into the notification register to
* signal the other end */
writel(vq->index, vm_dev->base + VIRTIO_MMIO_QUEUE_NOTIFY);
+ return true;
}
/* Notify all virtqueues on an interrupt. */
@@ -423,7 +424,7 @@ static const char *vm_bus_name(struct virtio_device *vdev)
return vm_dev->pdev->name;
}
-static struct virtio_config_ops virtio_mmio_config_ops = {
+static const struct virtio_config_ops virtio_mmio_config_ops = {
.get = vm_get,
.set = vm_set,
.get_status = vm_get_status,
@@ -470,7 +471,7 @@ static int virtio_mmio_probe(struct platform_device *pdev)
/* Check magic value */
magic = readl(vm_dev->base + VIRTIO_MMIO_MAGIC_VALUE);
- if (memcmp(&magic, "virt", 4) != 0) {
+ if (magic != ('v' | 'i' << 8 | 'r' << 16 | 't' << 24)) {
dev_warn(&pdev->dev, "Wrong magic value 0x%08lx!\n", magic);
return -ENODEV;
}
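
The memcmp() against the bytes of a readl() result was endian-unsafe: readl() already converts the little-endian register to CPU order, so on a big-endian guest the in-memory bytes of the result no longer spell "virt". Comparing integers sidesteps that. Worked out:

/* 'v' = 0x76, 'i' = 0x69, 'r' = 0x72, 't' = 0x74, so:
 *   'v' | 'i' << 8 | 'r' << 16 | 't' << 24
 *     = 0x76 | 0x6900 | 0x720000 | 0x74000000
 *     = 0x74727669
 * which is exactly what readl() yields for the device's "virt" magic,
 * regardless of host endianness. */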
diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c
index 0c142892c10..101db3faf5d 100644
--- a/drivers/virtio/virtio_pci.c
+++ b/drivers/virtio/virtio_pci.c
@@ -91,9 +91,9 @@ struct virtio_pci_vq_info
};
/* Qumranet donated their vendor ID for devices 0x1000 thru 0x10FF. */
-static struct pci_device_id virtio_pci_id_table[] = {
- { 0x1af4, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
- { 0 },
+static DEFINE_PCI_DEVICE_TABLE(virtio_pci_id_table) = {
+ { PCI_DEVICE(0x1af4, PCI_ANY_ID) },
+ { 0 }
};
MODULE_DEVICE_TABLE(pci, virtio_pci_id_table);
@@ -197,13 +197,14 @@ static void vp_reset(struct virtio_device *vdev)
}
/* the notify function used when creating a virt queue */
-static void vp_notify(struct virtqueue *vq)
+static bool vp_notify(struct virtqueue *vq)
{
struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
/* we write the queue's selector into the notification register to
* signal the other end */
iowrite16(vq->index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NOTIFY);
+ return true;
}
/* Handle a configuration change: Tell driver if it wants to know. */
@@ -289,9 +290,9 @@ static void vp_free_vectors(struct virtio_device *vdev)
pci_disable_msix(vp_dev->pci_dev);
vp_dev->msix_enabled = 0;
- vp_dev->msix_vectors = 0;
}
+ vp_dev->msix_vectors = 0;
vp_dev->msix_used_vectors = 0;
kfree(vp_dev->msix_names);
vp_dev->msix_names = NULL;
@@ -309,6 +310,8 @@ static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
unsigned i, v;
int err = -ENOMEM;
+ vp_dev->msix_vectors = nvectors;
+
vp_dev->msix_entries = kmalloc(nvectors * sizeof *vp_dev->msix_entries,
GFP_KERNEL);
if (!vp_dev->msix_entries)
@@ -330,13 +333,10 @@ static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
for (i = 0; i < nvectors; ++i)
vp_dev->msix_entries[i].entry = i;
- /* pci_enable_msix returns positive if we can't get this many. */
- err = pci_enable_msix(vp_dev->pci_dev, vp_dev->msix_entries, nvectors);
- if (err > 0)
- err = -ENOSPC;
+ err = pci_enable_msix_exact(vp_dev->pci_dev,
+ vp_dev->msix_entries, nvectors);
if (err)
goto error;
- vp_dev->msix_vectors = nvectors;
vp_dev->msix_enabled = 1;
/* Set the vector used for configuration */
@@ -652,7 +652,7 @@ static int vp_set_vq_affinity(struct virtqueue *vq, int cpu)
return 0;
}
-static struct virtio_config_ops virtio_pci_config_ops = {
+static const struct virtio_config_ops virtio_pci_config_ops = {
.get = vp_get,
.set = vp_set,
.get_status = vp_get_status,
@@ -740,7 +740,6 @@ static int virtio_pci_probe(struct pci_dev *pci_dev,
return 0;
out_set_drvdata:
- pci_set_drvdata(pci_dev, NULL);
pci_iounmap(pci_dev, vp_dev->ioaddr);
out_req_regions:
pci_release_regions(pci_dev);
@@ -758,14 +757,13 @@ static void virtio_pci_remove(struct pci_dev *pci_dev)
unregister_virtio_device(&vp_dev->vdev);
vp_del_vqs(&vp_dev->vdev);
- pci_set_drvdata(pci_dev, NULL);
pci_iounmap(pci_dev, vp_dev->ioaddr);
pci_release_regions(pci_dev);
pci_disable_device(pci_dev);
kfree(vp_dev);
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int virtio_pci_freeze(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
@@ -823,7 +821,7 @@ static struct pci_driver virtio_pci_driver = {
.id_table = virtio_pci_id_table,
.probe = virtio_pci_probe,
.remove = virtio_pci_remove,
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
.driver.pm = &virtio_pci_pm_ops,
#endif
};
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index ffd7e7da5d3..4d08f45a9c2 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -23,27 +23,7 @@
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hrtimer.h>
-
-/* virtio guest is communicating with a virtual "device" that actually runs on
- * a host processor. Memory barriers are used to control SMP effects. */
-#ifdef CONFIG_SMP
-/* Where possible, use SMP barriers which are more lightweight than mandatory
- * barriers, because mandatory barriers control MMIO effects on accesses
- * through relaxed memory I/O windows (which virtio-pci does not use). */
-#define virtio_mb(vq) \
- do { if ((vq)->weak_barriers) smp_mb(); else mb(); } while(0)
-#define virtio_rmb(vq) \
- do { if ((vq)->weak_barriers) smp_rmb(); else rmb(); } while(0)
-#define virtio_wmb(vq) \
- do { if ((vq)->weak_barriers) smp_wmb(); else wmb(); } while(0)
-#else
-/* We must force memory ordering even if guest is UP since host could be
- * running on another CPU, but SMP barriers are defined to barrier() in that
- * configuration. So fall back to mandatory barriers instead. */
-#define virtio_mb(vq) mb()
-#define virtio_rmb(vq) rmb()
-#define virtio_wmb(vq) wmb()
-#endif
+#include <linux/kmemleak.h>
#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
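
The open-coded barrier macros deleted above do not disappear; they move to a shared header and take the weak_barriers flag directly, which is why the call sites below become virtio_mb(vq->weak_barriers) and friends. A sketch, assuming the inline helpers land in include/linux/virtio_ring.h:

/* Sketch of the relocated helper; virtio_rmb()/virtio_wmb() follow the
 * same pattern with smp_rmb()/rmb() and smp_wmb()/wmb(). On UP kernels
 * the smp_*() variants compile down to barrier(), which is not enough
 * against a host running on another CPU, hence the mandatory mb()
 * fallback. */
static inline void virtio_mb(bool weak_barriers)
{
#ifdef CONFIG_SMP
	if (weak_barriers)
		smp_mb();
	else
#endif
		mb();
}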
@@ -102,7 +82,7 @@ struct vring_virtqueue
u16 last_used_idx;
/* How to notify other side. FIXME: commonalize hcalls! */
- void (*notify)(struct virtqueue *vq);
+ bool (*notify)(struct virtqueue *vq);
#ifdef DEBUG
/* They're supposed to lock for us. */
@@ -119,16 +99,36 @@ struct vring_virtqueue
#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)
+static inline struct scatterlist *sg_next_chained(struct scatterlist *sg,
+ unsigned int *count)
+{
+ return sg_next(sg);
+}
+
+static inline struct scatterlist *sg_next_arr(struct scatterlist *sg,
+ unsigned int *count)
+{
+ if (--(*count) == 0)
+ return NULL;
+ return sg + 1;
+}
+
/* Set up an indirect table of descriptors and add it to the queue. */
-static int vring_add_indirect(struct vring_virtqueue *vq,
- struct scatterlist sg[],
- unsigned int out,
- unsigned int in,
- gfp_t gfp)
+static inline int vring_add_indirect(struct vring_virtqueue *vq,
+ struct scatterlist *sgs[],
+ struct scatterlist *(*next)
+ (struct scatterlist *, unsigned int *),
+ unsigned int total_sg,
+ unsigned int total_out,
+ unsigned int total_in,
+ unsigned int out_sgs,
+ unsigned int in_sgs,
+ gfp_t gfp)
{
struct vring_desc *desc;
unsigned head;
- int i;
+ struct scatterlist *sg;
+ int i, n;
/*
* We require lowmem mappings for the descriptors because
@@ -137,25 +137,31 @@ static int vring_add_indirect(struct vring_virtqueue *vq,
*/
gfp &= ~(__GFP_HIGHMEM | __GFP_HIGH);
- desc = kmalloc((out + in) * sizeof(struct vring_desc), gfp);
+ desc = kmalloc(total_sg * sizeof(struct vring_desc), gfp);
if (!desc)
return -ENOMEM;
- /* Transfer entries from the sg list into the indirect page */
- for (i = 0; i < out; i++) {
- desc[i].flags = VRING_DESC_F_NEXT;
- desc[i].addr = sg_phys(sg);
- desc[i].len = sg->length;
- desc[i].next = i+1;
- sg++;
+ /* Transfer entries from the sg lists into the indirect page */
+ i = 0;
+ for (n = 0; n < out_sgs; n++) {
+ for (sg = sgs[n]; sg; sg = next(sg, &total_out)) {
+ desc[i].flags = VRING_DESC_F_NEXT;
+ desc[i].addr = sg_phys(sg);
+ desc[i].len = sg->length;
+ desc[i].next = i+1;
+ i++;
+ }
}
- for (; i < (out + in); i++) {
- desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE;
- desc[i].addr = sg_phys(sg);
- desc[i].len = sg->length;
- desc[i].next = i+1;
- sg++;
+ for (; n < (out_sgs + in_sgs); n++) {
+ for (sg = sgs[n]; sg; sg = next(sg, &total_in)) {
+ desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE;
+ desc[i].addr = sg_phys(sg);
+ desc[i].len = sg->length;
+ desc[i].next = i+1;
+ i++;
+ }
}
+ BUG_ON(i != total_sg);
/* Last one doesn't continue. */
desc[i-1].flags &= ~VRING_DESC_F_NEXT;
@@ -168,6 +174,8 @@ static int vring_add_indirect(struct vring_virtqueue *vq,
head = vq->free_head;
vq->vring.desc[head].flags = VRING_DESC_F_INDIRECT;
vq->vring.desc[head].addr = virt_to_phys(desc);
+ /* kmemleak gives a false positive, as it's hidden by virt_to_phys */
+ kmemleak_ignore(desc);
vq->vring.desc[head].len = i * sizeof(struct vring_desc);
/* Update free pointer */
@@ -176,35 +184,31 @@ static int vring_add_indirect(struct vring_virtqueue *vq,
return head;
}
-/**
- * virtqueue_add_buf - expose buffer to other end
- * @vq: the struct virtqueue we're talking about.
- * @sg: the description of the buffer(s).
- * @out_num: the number of sg readable by other side
- * @in_num: the number of sg which are writable (after readable ones)
- * @data: the token identifying the buffer.
- * @gfp: how to do memory allocations (if necessary).
- *
- * Caller must ensure we don't call this with other virtqueue operations
- * at the same time (except where noted).
- *
- * Returns zero or a negative error (ie. ENOSPC, ENOMEM).
- */
-int virtqueue_add_buf(struct virtqueue *_vq,
- struct scatterlist sg[],
- unsigned int out,
- unsigned int in,
- void *data,
- gfp_t gfp)
+static inline int virtqueue_add(struct virtqueue *_vq,
+ struct scatterlist *sgs[],
+ struct scatterlist *(*next)
+ (struct scatterlist *, unsigned int *),
+ unsigned int total_out,
+ unsigned int total_in,
+ unsigned int out_sgs,
+ unsigned int in_sgs,
+ void *data,
+ gfp_t gfp)
{
struct vring_virtqueue *vq = to_vvq(_vq);
- unsigned int i, avail, uninitialized_var(prev);
+ struct scatterlist *sg;
+ unsigned int i, n, avail, uninitialized_var(prev), total_sg;
int head;
START_USE(vq);
BUG_ON(data == NULL);
+ if (unlikely(vq->broken)) {
+ END_USE(vq);
+ return -EIO;
+ }
+
#ifdef DEBUG
{
ktime_t now = ktime_get();
@@ -218,46 +222,54 @@ int virtqueue_add_buf(struct virtqueue *_vq,
}
#endif
+ total_sg = total_in + total_out;
+
/* If the host supports indirect descriptor tables, and we have multiple
* buffers, then go indirect. FIXME: tune this threshold */
- if (vq->indirect && (out + in) > 1 && vq->vq.num_free) {
- head = vring_add_indirect(vq, sg, out, in, gfp);
+ if (vq->indirect && total_sg > 1 && vq->vq.num_free) {
+ head = vring_add_indirect(vq, sgs, next, total_sg, total_out,
+ total_in,
+ out_sgs, in_sgs, gfp);
if (likely(head >= 0))
goto add_head;
}
- BUG_ON(out + in > vq->vring.num);
- BUG_ON(out + in == 0);
+ BUG_ON(total_sg > vq->vring.num);
+ BUG_ON(total_sg == 0);
- if (vq->vq.num_free < out + in) {
+ if (vq->vq.num_free < total_sg) {
pr_debug("Can't add buf len %i - avail = %i\n",
- out + in, vq->vq.num_free);
+ total_sg, vq->vq.num_free);
/* FIXME: for historical reasons, we force a notify here if
* there are outgoing parts to the buffer. Presumably the
* host should service the ring ASAP. */
- if (out)
+ if (out_sgs)
vq->notify(&vq->vq);
END_USE(vq);
return -ENOSPC;
}
/* We're about to use some buffers from the free list. */
- vq->vq.num_free -= out + in;
-
- head = vq->free_head;
- for (i = vq->free_head; out; i = vq->vring.desc[i].next, out--) {
- vq->vring.desc[i].flags = VRING_DESC_F_NEXT;
- vq->vring.desc[i].addr = sg_phys(sg);
- vq->vring.desc[i].len = sg->length;
- prev = i;
- sg++;
+ vq->vq.num_free -= total_sg;
+
+ head = i = vq->free_head;
+ for (n = 0; n < out_sgs; n++) {
+ for (sg = sgs[n]; sg; sg = next(sg, &total_out)) {
+ vq->vring.desc[i].flags = VRING_DESC_F_NEXT;
+ vq->vring.desc[i].addr = sg_phys(sg);
+ vq->vring.desc[i].len = sg->length;
+ prev = i;
+ i = vq->vring.desc[i].next;
+ }
}
- for (; in; i = vq->vring.desc[i].next, in--) {
- vq->vring.desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE;
- vq->vring.desc[i].addr = sg_phys(sg);
- vq->vring.desc[i].len = sg->length;
- prev = i;
- sg++;
+ for (; n < (out_sgs + in_sgs); n++) {
+ for (sg = sgs[n]; sg; sg = next(sg, &total_in)) {
+ vq->vring.desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE;
+ vq->vring.desc[i].addr = sg_phys(sg);
+ vq->vring.desc[i].len = sg->length;
+ prev = i;
+ i = vq->vring.desc[i].next;
+ }
}
/* Last one doesn't continue. */
vq->vring.desc[prev].flags &= ~VRING_DESC_F_NEXT;
@@ -276,7 +288,7 @@ add_head:
/* Descriptors and available array need to be set before we expose the
* new available array entries. */
- virtio_wmb(vq);
+ virtio_wmb(vq->weak_barriers);
vq->vring.avail->idx++;
vq->num_added++;
@@ -290,7 +302,89 @@ add_head:
return 0;
}
-EXPORT_SYMBOL_GPL(virtqueue_add_buf);
+
+/**
+ * virtqueue_add_sgs - expose buffers to other end
+ * @vq: the struct virtqueue we're talking about.
+ * @sgs: array of terminated scatterlists.
+ * @out_num: the number of scatterlists readable by other side
+ * @in_num: the number of scatterlists which are writable (after readable ones)
+ * @data: the token identifying the buffer.
+ * @gfp: how to do memory allocations (if necessary).
+ *
+ * Caller must ensure we don't call this with other virtqueue operations
+ * at the same time (except where noted).
+ *
+ * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
+ */
+int virtqueue_add_sgs(struct virtqueue *_vq,
+ struct scatterlist *sgs[],
+ unsigned int out_sgs,
+ unsigned int in_sgs,
+ void *data,
+ gfp_t gfp)
+{
+ unsigned int i, total_out, total_in;
+
+ /* Count them first. */
+ for (i = total_out = total_in = 0; i < out_sgs; i++) {
+ struct scatterlist *sg;
+ for (sg = sgs[i]; sg; sg = sg_next(sg))
+ total_out++;
+ }
+ for (; i < out_sgs + in_sgs; i++) {
+ struct scatterlist *sg;
+ for (sg = sgs[i]; sg; sg = sg_next(sg))
+ total_in++;
+ }
+ return virtqueue_add(_vq, sgs, sg_next_chained,
+ total_out, total_in, out_sgs, in_sgs, data, gfp);
+}
+EXPORT_SYMBOL_GPL(virtqueue_add_sgs);
+
+/**
+ * virtqueue_add_outbuf - expose output buffers to other end
+ * @vq: the struct virtqueue we're talking about.
+ * @sg: scatterlist (need not be terminated!)
+ * @num: the number of entries in @sg readable by other side
+ * @data: the token identifying the buffer.
+ * @gfp: how to do memory allocations (if necessary).
+ *
+ * Caller must ensure we don't call this with other virtqueue operations
+ * at the same time (except where noted).
+ *
+ * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
+ */
+int virtqueue_add_outbuf(struct virtqueue *vq,
+ struct scatterlist sg[], unsigned int num,
+ void *data,
+ gfp_t gfp)
+{
+ return virtqueue_add(vq, &sg, sg_next_arr, num, 0, 1, 0, data, gfp);
+}
+EXPORT_SYMBOL_GPL(virtqueue_add_outbuf);
+
+/**
+ * virtqueue_add_inbuf - expose input buffers to other end
+ * @vq: the struct virtqueue we're talking about.
+ * @sg: scatterlist (need not be terminated!)
+ * @num: the number of entries in @sg writable by other side
+ * @data: the token identifying the buffer.
+ * @gfp: how to do memory allocations (if necessary).
+ *
+ * Caller must ensure we don't call this with other virtqueue operations
+ * at the same time (except where noted).
+ *
+ * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
+ */
+int virtqueue_add_inbuf(struct virtqueue *vq,
+ struct scatterlist sg[], unsigned int num,
+ void *data,
+ gfp_t gfp)
+{
+ return virtqueue_add(vq, &sg, sg_next_arr, 0, num, 0, 1, data, gfp);
+}
+EXPORT_SYMBOL_GPL(virtqueue_add_inbuf);
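
All three entry points funnel into virtqueue_add(); they differ only in how the scatterlists are walked (chained via sg_next() for virtqueue_add_sgs(), plain arrays via sg_next_arr() for the in/out helpers). A hypothetical caller (req, vq and the field names are invented for illustration), queuing one device-readable and one device-writable buffer and checking the now-bool kick:

struct scatterlist hdr_sg, status_sg, *sgs[2];

sg_init_one(&hdr_sg, &req->hdr, sizeof(req->hdr));
sg_init_one(&status_sg, &req->status, sizeof(req->status));
sgs[0] = &hdr_sg;	/* out: read by the device */
sgs[1] = &status_sg;	/* in: written by the device */

if (virtqueue_add_sgs(vq, sgs, 1, 1, req, GFP_ATOMIC) < 0)
	return -ENOSPC;		/* ring full (or -EIO if broken) */
if (!virtqueue_kick(vq))
	return -EIO;		/* transport reported a broken device */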
/**
* virtqueue_kick_prepare - first half of split virtqueue_kick call.
@@ -312,7 +406,7 @@ bool virtqueue_kick_prepare(struct virtqueue *_vq)
START_USE(vq);
/* We need to expose available array entries before checking avail
* event. */
- virtio_mb(vq);
+ virtio_mb(vq->weak_barriers);
old = vq->vring.avail->idx - vq->num_added;
new = vq->vring.avail->idx;
@@ -342,13 +436,22 @@ EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);
* @vq: the struct virtqueue
*
* This does not need to be serialized.
+ *
+ * Returns false if host notify failed or queue is broken, otherwise true.
*/
-void virtqueue_notify(struct virtqueue *_vq)
+bool virtqueue_notify(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
+ if (unlikely(vq->broken))
+ return false;
+
/* Prod other side to tell it about changes. */
- vq->notify(_vq);
+ if (!vq->notify(_vq)) {
+ vq->broken = true;
+ return false;
+ }
+ return true;
}
EXPORT_SYMBOL_GPL(virtqueue_notify);
@@ -356,16 +459,19 @@ EXPORT_SYMBOL_GPL(virtqueue_notify);
* virtqueue_kick - update after add_buf
* @vq: the struct virtqueue
*
- * After one or more virtqueue_add_buf calls, invoke this to kick
+ * After one or more virtqueue_add_* calls, invoke this to kick
* the other side.
*
* Caller must ensure we don't call this with other virtqueue
* operations at the same time (except where noted).
+ *
+ * Returns false if kick failed, otherwise true.
*/
-void virtqueue_kick(struct virtqueue *vq)
+bool virtqueue_kick(struct virtqueue *vq)
{
if (virtqueue_kick_prepare(vq))
- virtqueue_notify(vq);
+ return virtqueue_notify(vq);
+ return true;
}
EXPORT_SYMBOL_GPL(virtqueue_kick);
@@ -413,7 +519,7 @@ static inline bool more_used(const struct vring_virtqueue *vq)
* operations at the same time (except where noted).
*
* Returns NULL if there are no used buffers, or the "data" token
- * handed to virtqueue_add_buf().
+ * handed to virtqueue_add_*().
*/
void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
{
@@ -436,7 +542,7 @@ void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
}
/* Only get used array entries after they have been exposed by host. */
- virtio_rmb(vq);
+ virtio_rmb(vq->weak_barriers);
last_used = (vq->last_used_idx & (vq->vring.num - 1));
i = vq->vring.used->ring[last_used].id;
@@ -460,7 +566,7 @@ void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
* the read in the next get_buf call. */
if (!(vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT)) {
vring_used_event(&vq->vring) = vq->last_used_idx;
- virtio_mb(vq);
+ virtio_mb(vq->weak_barriers);
}
#ifdef DEBUG
@@ -490,19 +596,21 @@ void virtqueue_disable_cb(struct virtqueue *_vq)
EXPORT_SYMBOL_GPL(virtqueue_disable_cb);
/**
- * virtqueue_enable_cb - restart callbacks after disable_cb.
+ * virtqueue_enable_cb_prepare - restart callbacks after disable_cb
* @vq: the struct virtqueue we're talking about.
*
- * This re-enables callbacks; it returns "false" if there are pending
- * buffers in the queue, to detect a possible race between the driver
- * checking for more work, and enabling callbacks.
+ * This re-enables callbacks; it returns current queue state
+ * in an opaque unsigned value. This value should be later tested by
+ * virtqueue_poll, to detect a possible race between the driver checking for
+ * more work, and enabling callbacks.
*
* Caller must ensure we don't call this with other virtqueue
* operations at the same time (except where noted).
*/
-bool virtqueue_enable_cb(struct virtqueue *_vq)
+unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
+ u16 last_used_idx;
START_USE(vq);
@@ -512,15 +620,45 @@ bool virtqueue_enable_cb(struct virtqueue *_vq)
* either clear the flags bit or point the event index at the next
* entry. Always do both to keep code simple. */
vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
- vring_used_event(&vq->vring) = vq->last_used_idx;
- virtio_mb(vq);
- if (unlikely(more_used(vq))) {
- END_USE(vq);
- return false;
- }
-
+ vring_used_event(&vq->vring) = last_used_idx = vq->last_used_idx;
END_USE(vq);
- return true;
+ return last_used_idx;
+}
+EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);
+
+/**
+ * virtqueue_poll - query pending used buffers
+ * @vq: the struct virtqueue we're talking about.
+ * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare).
+ *
+ * Returns "true" if there are pending used buffers in the queue.
+ *
+ * This does not need to be serialized.
+ */
+bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
+{
+ struct vring_virtqueue *vq = to_vvq(_vq);
+
+ virtio_mb(vq->weak_barriers);
+ return (u16)last_used_idx != vq->vring.used->idx;
+}
+EXPORT_SYMBOL_GPL(virtqueue_poll);
+
+/**
+ * virtqueue_enable_cb - restart callbacks after disable_cb.
+ * @vq: the struct virtqueue we're talking about.
+ *
+ * This re-enables callbacks; it returns "false" if there are pending
+ * buffers in the queue, to detect a possible race between the driver
+ * checking for more work, and enabling callbacks.
+ *
+ * Caller must ensure we don't call this with other virtqueue
+ * operations at the same time (except where noted).
+ */
+bool virtqueue_enable_cb(struct virtqueue *_vq)
+{
+ unsigned last_used_idx = virtqueue_enable_cb_prepare(_vq);
+ return !virtqueue_poll(_vq, last_used_idx);
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
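
The prepare/poll split exists so a driver can re-enable callbacks and then separately re-check for buffers that raced in, instead of the enable path deciding for it. A hypothetical drain loop (process_buf() is invented for illustration, modelled on how a NAPI-style consumer would use these):

unsigned int len, idx;
void *buf;

virtqueue_disable_cb(vq);
for (;;) {
	while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
		process_buf(buf, len);

	idx = virtqueue_enable_cb_prepare(vq);
	if (!virtqueue_poll(vq, idx))
		break;			/* callbacks armed, nothing pending */
	virtqueue_disable_cb(vq);	/* raced with new buffers, go again */
}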
@@ -553,7 +691,7 @@ bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
/* TODO: tune this threshold */
bufs = (u16)(vq->vring.avail->idx - vq->last_used_idx) * 3 / 4;
vring_used_event(&vq->vring) = vq->last_used_idx + bufs;
- virtio_mb(vq);
+ virtio_mb(vq->weak_barriers);
if (unlikely((u16)(vq->vring.used->idx - vq->last_used_idx) > bufs)) {
END_USE(vq);
return false;
@@ -568,7 +706,7 @@ EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);
* virtqueue_detach_unused_buf - detach first unused buffer
* @vq: the struct virtqueue we're talking about.
*
- * Returns NULL or the "data" token handed to virtqueue_add_buf().
+ * Returns NULL or the "data" token handed to virtqueue_add_*().
* This is not valid on an active queue; it is useful only for device
* shutdown.
*/
@@ -624,7 +762,7 @@ struct virtqueue *vring_new_virtqueue(unsigned int index,
struct virtio_device *vdev,
bool weak_barriers,
void *pages,
- void (*notify)(struct virtqueue *),
+ bool (*notify)(struct virtqueue *),
void (*callback)(struct virtqueue *),
const char *name)
{
@@ -719,4 +857,27 @@ unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);
+bool virtqueue_is_broken(struct virtqueue *_vq)
+{
+ struct vring_virtqueue *vq = to_vvq(_vq);
+
+ return vq->broken;
+}
+EXPORT_SYMBOL_GPL(virtqueue_is_broken);
+
+/*
+ * This should prevent the device from being used, allowing drivers to
+ * recover. You may need to grab appropriate locks to flush.
+ */
+void virtio_break_device(struct virtio_device *dev)
+{
+ struct virtqueue *_vq;
+
+ list_for_each_entry(_vq, &dev->vqs, list) {
+ struct vring_virtqueue *vq = to_vvq(_vq);
+ vq->broken = true;
+ }
+}
+EXPORT_SYMBOL_GPL(virtio_break_device);
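
A sketch of the intended use, e.g. from a hypothetical surprise-removal path (vp_dev is invented for illustration): mark every queue broken first, so concurrent virtqueue_add_*() callers see -EIO and kicks become no-ops, then flush and tear down under whatever locks the driver already uses:

virtio_break_device(&vp_dev->vdev);
/* ...flush in-flight users under the driver's locks, then clean up. */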
+
MODULE_LICENSE("GPL");