Diffstat (limited to 'drivers/virtio')
-rw-r--r--  drivers/virtio/virtio.c          |  27
-rw-r--r--  drivers/virtio/virtio_balloon.c  |  40
-rw-r--r--  drivers/virtio/virtio_mmio.c     |   5
-rw-r--r--  drivers/virtio/virtio_pci.c      |  20
-rw-r--r--  drivers/virtio/virtio_ring.c     | 154
5 files changed, 152 insertions(+), 94 deletions(-)
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
index ee59b74768d..fed0ce198ae 100644
--- a/drivers/virtio/virtio.c
+++ b/drivers/virtio/virtio.c
@@ -13,18 +13,24 @@ static ssize_t device_show(struct device *_d,
struct virtio_device *dev = dev_to_virtio(_d);
return sprintf(buf, "0x%04x\n", dev->id.device);
}
+static DEVICE_ATTR_RO(device);
+
static ssize_t vendor_show(struct device *_d,
struct device_attribute *attr, char *buf)
{
struct virtio_device *dev = dev_to_virtio(_d);
return sprintf(buf, "0x%04x\n", dev->id.vendor);
}
+static DEVICE_ATTR_RO(vendor);
+
static ssize_t status_show(struct device *_d,
struct device_attribute *attr, char *buf)
{
struct virtio_device *dev = dev_to_virtio(_d);
return sprintf(buf, "0x%08x\n", dev->config->get_status(dev));
}
+static DEVICE_ATTR_RO(status);
+
static ssize_t modalias_show(struct device *_d,
struct device_attribute *attr, char *buf)
{
@@ -32,6 +38,8 @@ static ssize_t modalias_show(struct device *_d,
return sprintf(buf, "virtio:d%08Xv%08X\n",
dev->id.device, dev->id.vendor);
}
+static DEVICE_ATTR_RO(modalias);
+
static ssize_t features_show(struct device *_d,
struct device_attribute *attr, char *buf)
{
@@ -47,14 +55,17 @@ static ssize_t features_show(struct device *_d,
len += sprintf(buf+len, "\n");
return len;
}
-static struct device_attribute virtio_dev_attrs[] = {
- __ATTR_RO(device),
- __ATTR_RO(vendor),
- __ATTR_RO(status),
- __ATTR_RO(modalias),
- __ATTR_RO(features),
- __ATTR_NULL
+static DEVICE_ATTR_RO(features);
+
+static struct attribute *virtio_dev_attrs[] = {
+ &dev_attr_device.attr,
+ &dev_attr_vendor.attr,
+ &dev_attr_status.attr,
+ &dev_attr_modalias.attr,
+ &dev_attr_features.attr,
+ NULL,
};
+ATTRIBUTE_GROUPS(virtio_dev);
static inline int virtio_id_match(const struct virtio_device *dev,
const struct virtio_device_id *id)
@@ -165,7 +176,7 @@ static int virtio_dev_remove(struct device *_d)
static struct bus_type virtio_bus = {
.name = "virtio",
.match = virtio_dev_match,
- .dev_attrs = virtio_dev_attrs,
+ .dev_groups = virtio_dev_groups,
.uevent = virtio_uevent,
.probe = virtio_dev_probe,
.remove = virtio_dev_remove,
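
The virtio.c hunks above move the bus from the removed .dev_attrs field to attribute groups: each DEVICE_ATTR_RO(name) expands to a dev_attr_name whose show routine is name_show, and ATTRIBUTE_GROUPS(virtio_dev) generates the virtio_dev_groups[] array the bus now points at. A minimal sketch of the same pattern, using a hypothetical "foo" bus:

#include <linux/device.h>

static ssize_t version_show(struct device *d,
                            struct device_attribute *attr, char *buf)
{
        return sprintf(buf, "1\n");
}
static DEVICE_ATTR_RO(version);         /* defines dev_attr_version */

static struct attribute *foo_dev_attrs[] = {
        &dev_attr_version.attr,
        NULL,
};
ATTRIBUTE_GROUPS(foo_dev);              /* defines foo_dev_groups[] */

static struct bus_type foo_bus = {
        .name       = "foo",
        .dev_groups = foo_dev_groups,   /* replaces the old .dev_attrs list */
};
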
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index bd3ae324a1a..25ebe8eecdb 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -108,8 +108,7 @@ static void tell_host(struct virtio_balloon *vb, struct virtqueue *vq)
sg_init_one(&sg, vb->pfns, sizeof(vb->pfns[0]) * vb->num_pfns);
/* We should always be able to add one buffer to an empty queue. */
- if (virtqueue_add_outbuf(vq, &sg, 1, vb, GFP_KERNEL) < 0)
- BUG();
+ virtqueue_add_outbuf(vq, &sg, 1, vb, GFP_KERNEL);
virtqueue_kick(vq);
/* When host has read buffer, this completes via balloon_ack */
@@ -148,7 +147,7 @@ static void fill_balloon(struct virtio_balloon *vb, size_t num)
}
set_page_pfns(vb->pfns + vb->num_pfns, page);
vb->num_pages += VIRTIO_BALLOON_PAGES_PER_PAGE;
- totalram_pages--;
+ adjust_managed_page_count(page, -1);
}
/* Did we get any? */
@@ -163,8 +162,9 @@ static void release_pages_by_pfn(const u32 pfns[], unsigned int num)
/* Find pfns pointing at start of each page, get pages and free them. */
for (i = 0; i < num; i += VIRTIO_BALLOON_PAGES_PER_PAGE) {
- balloon_page_free(balloon_pfn_to_page(pfns[i]));
- totalram_pages++;
+ struct page *page = balloon_pfn_to_page(pfns[i]);
+ balloon_page_free(page);
+ adjust_managed_page_count(page, 1);
}
}
@@ -191,7 +191,8 @@ static void leak_balloon(struct virtio_balloon *vb, size_t num)
* virtio_has_feature(vdev, VIRTIO_BALLOON_F_MUST_TELL_HOST);
* is true, we *have* to do it in this order
*/
- tell_host(vb, vb->deflate_vq);
+ if (vb->num_pfns != 0)
+ tell_host(vb, vb->deflate_vq);
mutex_unlock(&vb->balloon_lock);
release_pages_by_pfn(vb->pfns, vb->num_pfns);
}
@@ -256,8 +257,7 @@ static void stats_handle_request(struct virtio_balloon *vb)
if (!virtqueue_get_buf(vq, &len))
return;
sg_init_one(&sg, vb->stats, sizeof(vb->stats));
- if (virtqueue_add_outbuf(vq, &sg, 1, vb, GFP_KERNEL) < 0)
- BUG();
+ virtqueue_add_outbuf(vq, &sg, 1, vb, GFP_KERNEL);
virtqueue_kick(vq);
}
@@ -273,9 +273,8 @@ static inline s64 towards_target(struct virtio_balloon *vb)
__le32 v;
s64 target;
- vb->vdev->config->get(vb->vdev,
- offsetof(struct virtio_balloon_config, num_pages),
- &v, sizeof(v));
+ virtio_cread(vb->vdev, struct virtio_balloon_config, num_pages, &v);
+
target = le32_to_cpu(v);
return target - vb->num_pages;
}
@@ -284,9 +283,8 @@ static void update_balloon_size(struct virtio_balloon *vb)
{
__le32 actual = cpu_to_le32(vb->num_pages);
- vb->vdev->config->set(vb->vdev,
- offsetof(struct virtio_balloon_config, actual),
- &actual, sizeof(actual));
+ virtio_cwrite(vb->vdev, struct virtio_balloon_config, actual,
+ &actual);
}
static int balloon(void *_vballoon)
@@ -310,6 +308,12 @@ static int balloon(void *_vballoon)
else if (diff < 0)
leak_balloon(vb, -diff);
update_balloon_size(vb);
+
+ /*
+ * For large balloon changes, we could spend a lot of time
+ * and always have work to do. Be nice if preempt disabled.
+ */
+ cond_resched();
}
return 0;
}
@@ -338,7 +342,7 @@ static int init_vqs(struct virtio_balloon *vb)
/*
* Prime this virtqueue with one buffer so the hypervisor can
- * use it to signal us later.
+ * use it to signal us later (it can't be broken yet!).
*/
sg_init_one(&sg, vb->stats, sizeof vb->stats);
if (virtqueue_add_outbuf(vb->stats_vq, &sg, 1, vb, GFP_KERNEL)
@@ -369,7 +373,7 @@ static const struct address_space_operations virtio_balloon_aops;
* This function performs the balloon page migration task.
* Called through balloon_mapping->a_ops->migratepage
*/
-int virtballoon_migratepage(struct address_space *mapping,
+static int virtballoon_migratepage(struct address_space *mapping,
struct page *newpage, struct page *page, enum migrate_mode mode)
{
struct balloon_dev_info *vb_dev_info = balloon_page_device(page);
@@ -511,7 +515,7 @@ static void virtballoon_remove(struct virtio_device *vdev)
kfree(vb);
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int virtballoon_freeze(struct virtio_device *vdev)
{
struct virtio_balloon *vb = vdev->priv;
@@ -554,7 +558,7 @@ static struct virtio_driver virtio_balloon_driver = {
.probe = virtballoon_probe,
.remove = virtballoon_remove,
.config_changed = virtballoon_changed,
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
.freeze = virtballoon_freeze,
.restore = virtballoon_restore,
#endif
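
The config-space hunks above replace open-coded vdev->config->get()/->set() calls at explicit offsetof() positions with the virtio_cread()/virtio_cwrite() helpers, which take the field by name and derive its offset and size from the structure definition. A hedged sketch of the read side, mirroring towards_target(); balloon_target() is a hypothetical wrapper:

#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_balloon.h>

static s64 balloon_target(struct virtio_device *vdev, u32 current_pages)
{
        __le32 v;

        /* offset and size come from virtio_balloon_config's num_pages */
        virtio_cread(vdev, struct virtio_balloon_config, num_pages, &v);
        return (s64)le32_to_cpu(v) - current_pages;
}
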
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
index 1ba0d683101..c600ccfd692 100644
--- a/drivers/virtio/virtio_mmio.c
+++ b/drivers/virtio/virtio_mmio.c
@@ -219,13 +219,14 @@ static void vm_reset(struct virtio_device *vdev)
/* Transport interface */
/* the notify function used when creating a virt queue */
-static void vm_notify(struct virtqueue *vq)
+static bool vm_notify(struct virtqueue *vq)
{
struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vq->vdev);
/* We write the queue's selector into the notification register to
* signal the other end */
writel(vq->index, vm_dev->base + VIRTIO_MMIO_QUEUE_NOTIFY);
+ return true;
}
/* Notify all virtqueues on an interrupt. */
@@ -470,7 +471,7 @@ static int virtio_mmio_probe(struct platform_device *pdev)
/* Check magic value */
magic = readl(vm_dev->base + VIRTIO_MMIO_MAGIC_VALUE);
- if (memcmp(&magic, "virt", 4) != 0) {
+ if (magic != ('v' | 'i' << 8 | 'r' << 16 | 't' << 24)) {
dev_warn(&pdev->dev, "Wrong magic value 0x%08lx!\n", magic);
return -ENODEV;
}
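
The magic-value hunk swaps a memcmp() against the string "virt" for an integer comparison. readl() already converts the register from little-endian to CPU order, so on a big-endian machine the bytes of magic no longer line up with the string in memory; the shifted-character constant compares correctly on either endianness. A stand-alone userspace check of the constant:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t magic = 'v' | 'i' << 8 | 'r' << 16 | 't' << 24;

        /* prints 0x74726976, the virtio-mmio magic value */
        printf("0x%08x\n", magic);
        return 0;
}
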
diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c
index a7ce73029f5..101db3faf5d 100644
--- a/drivers/virtio/virtio_pci.c
+++ b/drivers/virtio/virtio_pci.c
@@ -197,13 +197,14 @@ static void vp_reset(struct virtio_device *vdev)
}
/* the notify function used when creating a virt queue */
-static void vp_notify(struct virtqueue *vq)
+static bool vp_notify(struct virtqueue *vq)
{
struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
/* we write the queue's selector into the notification register to
* signal the other end */
iowrite16(vq->index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NOTIFY);
+ return true;
}
/* Handle a configuration change: Tell driver if it wants to know. */
@@ -289,9 +290,9 @@ static void vp_free_vectors(struct virtio_device *vdev)
pci_disable_msix(vp_dev->pci_dev);
vp_dev->msix_enabled = 0;
- vp_dev->msix_vectors = 0;
}
+ vp_dev->msix_vectors = 0;
vp_dev->msix_used_vectors = 0;
kfree(vp_dev->msix_names);
vp_dev->msix_names = NULL;
@@ -309,6 +310,8 @@ static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
unsigned i, v;
int err = -ENOMEM;
+ vp_dev->msix_vectors = nvectors;
+
vp_dev->msix_entries = kmalloc(nvectors * sizeof *vp_dev->msix_entries,
GFP_KERNEL);
if (!vp_dev->msix_entries)
@@ -330,13 +333,10 @@ static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
for (i = 0; i < nvectors; ++i)
vp_dev->msix_entries[i].entry = i;
- /* pci_enable_msix returns positive if we can't get this many. */
- err = pci_enable_msix(vp_dev->pci_dev, vp_dev->msix_entries, nvectors);
- if (err > 0)
- err = -ENOSPC;
+ err = pci_enable_msix_exact(vp_dev->pci_dev,
+ vp_dev->msix_entries, nvectors);
if (err)
goto error;
- vp_dev->msix_vectors = nvectors;
vp_dev->msix_enabled = 1;
/* Set the vector used for configuration */
@@ -740,7 +740,6 @@ static int virtio_pci_probe(struct pci_dev *pci_dev,
return 0;
out_set_drvdata:
- pci_set_drvdata(pci_dev, NULL);
pci_iounmap(pci_dev, vp_dev->ioaddr);
out_req_regions:
pci_release_regions(pci_dev);
@@ -758,14 +757,13 @@ static void virtio_pci_remove(struct pci_dev *pci_dev)
unregister_virtio_device(&vp_dev->vdev);
vp_del_vqs(&vp_dev->vdev);
- pci_set_drvdata(pci_dev, NULL);
pci_iounmap(pci_dev, vp_dev->ioaddr);
pci_release_regions(pci_dev);
pci_disable_device(pci_dev);
kfree(vp_dev);
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int virtio_pci_freeze(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
@@ -823,7 +821,7 @@ static struct pci_driver virtio_pci_driver = {
.id_table = virtio_pci_id_table,
.probe = virtio_pci_probe,
.remove = virtio_pci_remove,
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
.driver.pm = &virtio_pci_pm_ops,
#endif
};
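
The MSI-X hunks above rely on pci_enable_msix_exact(), which succeeds only if every requested vector is allocated and otherwise returns a negative errno, so the old "positive return means fewer vectors available" case of pci_enable_msix() no longer has to be translated into -ENOSPC by hand. A minimal sketch of that contract; foo_setup_msix() and its callers are hypothetical:

#include <linux/pci.h>

static int foo_setup_msix(struct pci_dev *pdev,
                          struct msix_entry *entries, int nvec)
{
        int i, err;

        for (i = 0; i < nvec; i++)
                entries[i].entry = i;

        /* 0 on success, negative errno on failure -- never a positive count */
        err = pci_enable_msix_exact(pdev, entries, nvec);
        if (err)
                return err;

        return 0;
}
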
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 5217baf5528..4d08f45a9c2 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -23,6 +23,7 @@
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hrtimer.h>
+#include <linux/kmemleak.h>
#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
@@ -81,7 +82,7 @@ struct vring_virtqueue
u16 last_used_idx;
/* How to notify other side. FIXME: commonalize hcalls! */
- void (*notify)(struct virtqueue *vq);
+ bool (*notify)(struct virtqueue *vq);
#ifdef DEBUG
/* They're supposed to lock for us. */
@@ -173,6 +174,8 @@ static inline int vring_add_indirect(struct vring_virtqueue *vq,
head = vq->free_head;
vq->vring.desc[head].flags = VRING_DESC_F_INDIRECT;
vq->vring.desc[head].addr = virt_to_phys(desc);
+ /* kmemleak gives a false positive, as it's hidden by virt_to_phys */
+ kmemleak_ignore(desc);
vq->vring.desc[head].len = i * sizeof(struct vring_desc);
/* Update free pointer */
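
The kmemleak_ignore() added above silences a false positive: once the indirect descriptor table is referenced only through the physical address written into the ring, kmemleak's pointer scan cannot find it and would flag a leak. The pattern in isolation (the names here are illustrative):

        struct vring_desc *desc;

        desc = kmalloc(total_sg * sizeof(*desc), gfp);
        if (!desc)
                return -ENOMEM;
        /* only a virt_to_phys() value will point here; skip leak scanning */
        kmemleak_ignore(desc);
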
@@ -201,6 +204,11 @@ static inline int virtqueue_add(struct virtqueue *_vq,
BUG_ON(data == NULL);
+ if (unlikely(vq->broken)) {
+ END_USE(vq);
+ return -EIO;
+ }
+
#ifdef DEBUG
{
ktime_t now = ktime_get();
@@ -296,37 +304,6 @@ add_head:
}
/**
- * virtqueue_add_buf - expose buffer to other end
- * @vq: the struct virtqueue we're talking about.
- * @sg: the description of the buffer(s).
- * @out_num: the number of sg readable by other side
- * @in_num: the number of sg which are writable (after readable ones)
- * @data: the token identifying the buffer.
- * @gfp: how to do memory allocations (if necessary).
- *
- * Caller must ensure we don't call this with other virtqueue operations
- * at the same time (except where noted).
- *
- * Returns zero or a negative error (ie. ENOSPC, ENOMEM).
- */
-int virtqueue_add_buf(struct virtqueue *_vq,
- struct scatterlist sg[],
- unsigned int out,
- unsigned int in,
- void *data,
- gfp_t gfp)
-{
- struct scatterlist *sgs[2];
-
- sgs[0] = sg;
- sgs[1] = sg + out;
-
- return virtqueue_add(_vq, sgs, sg_next_arr,
- out, in, out ? 1 : 0, in ? 1 : 0, data, gfp);
-}
-EXPORT_SYMBOL_GPL(virtqueue_add_buf);
-
-/**
* virtqueue_add_sgs - expose buffers to other end
* @vq: the struct virtqueue we're talking about.
* @sgs: array of terminated scatterlists.
@@ -338,7 +315,7 @@ EXPORT_SYMBOL_GPL(virtqueue_add_buf);
* Caller must ensure we don't call this with other virtqueue operations
* at the same time (except where noted).
*
- * Returns zero or a negative error (ie. ENOSPC, ENOMEM).
+ * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
*/
int virtqueue_add_sgs(struct virtqueue *_vq,
struct scatterlist *sgs[],
@@ -376,7 +353,7 @@ EXPORT_SYMBOL_GPL(virtqueue_add_sgs);
* Caller must ensure we don't call this with other virtqueue operations
* at the same time (except where noted).
*
- * Returns zero or a negative error (ie. ENOSPC, ENOMEM).
+ * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
*/
int virtqueue_add_outbuf(struct virtqueue *vq,
struct scatterlist sg[], unsigned int num,
@@ -398,7 +375,7 @@ EXPORT_SYMBOL_GPL(virtqueue_add_outbuf);
* Caller must ensure we don't call this with other virtqueue operations
* at the same time (except where noted).
*
- * Returns zero or a negative error (ie. ENOSPC, ENOMEM).
+ * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
*/
int virtqueue_add_inbuf(struct virtqueue *vq,
struct scatterlist sg[], unsigned int num,
@@ -459,13 +436,22 @@ EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);
* @vq: the struct virtqueue
*
* This does not need to be serialized.
+ *
+ * Returns false if host notify failed or queue is broken, otherwise true.
*/
-void virtqueue_notify(struct virtqueue *_vq)
+bool virtqueue_notify(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
+ if (unlikely(vq->broken))
+ return false;
+
/* Prod other side to tell it about changes. */
- vq->notify(_vq);
+ if (!vq->notify(_vq)) {
+ vq->broken = true;
+ return false;
+ }
+ return true;
}
EXPORT_SYMBOL_GPL(virtqueue_notify);
@@ -473,16 +459,19 @@ EXPORT_SYMBOL_GPL(virtqueue_notify);
* virtqueue_kick - update after add_buf
* @vq: the struct virtqueue
*
- * After one or more virtqueue_add_buf calls, invoke this to kick
+ * After one or more virtqueue_add_* calls, invoke this to kick
* the other side.
*
* Caller must ensure we don't call this with other virtqueue
* operations at the same time (except where noted).
+ *
+ * Returns false if kick failed, otherwise true.
*/
-void virtqueue_kick(struct virtqueue *vq)
+bool virtqueue_kick(struct virtqueue *vq)
{
if (virtqueue_kick_prepare(vq))
- virtqueue_notify(vq);
+ return virtqueue_notify(vq);
+ return true;
}
EXPORT_SYMBOL_GPL(virtqueue_kick);
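
With notify and kick now returning bool, a transport notify failure marks the ring broken and callers can stop retrying a dead device; virtqueue_add_* likewise returns -EIO once broken. A hedged caller-side sketch (foo_send() and its token are hypothetical):

static int foo_send(struct virtqueue *vq, struct scatterlist *sg, void *token)
{
        int err = virtqueue_add_outbuf(vq, sg, 1, token, GFP_ATOMIC);

        if (err)                   /* may now be -EIO for a broken ring */
                return err;
        if (!virtqueue_kick(vq))   /* false: notify failed, ring marked broken */
                return -EIO;
        return 0;
}
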
@@ -530,7 +519,7 @@ static inline bool more_used(const struct vring_virtqueue *vq)
* operations at the same time (except where noted).
*
* Returns NULL if there are no used buffers, or the "data" token
- * handed to virtqueue_add_buf().
+ * handed to virtqueue_add_*().
*/
void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
{
@@ -607,19 +596,21 @@ void virtqueue_disable_cb(struct virtqueue *_vq)
EXPORT_SYMBOL_GPL(virtqueue_disable_cb);
/**
- * virtqueue_enable_cb - restart callbacks after disable_cb.
+ * virtqueue_enable_cb_prepare - restart callbacks after disable_cb
* @vq: the struct virtqueue we're talking about.
*
- * This re-enables callbacks; it returns "false" if there are pending
- * buffers in the queue, to detect a possible race between the driver
- * checking for more work, and enabling callbacks.
+ * This re-enables callbacks; it returns current queue state
+ * in an opaque unsigned value. This value should be later tested by
+ * virtqueue_poll, to detect a possible race between the driver checking for
+ * more work, and enabling callbacks.
*
* Caller must ensure we don't call this with other virtqueue
* operations at the same time (except where noted).
*/
-bool virtqueue_enable_cb(struct virtqueue *_vq)
+unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
+ u16 last_used_idx;
START_USE(vq);
@@ -629,15 +620,45 @@ bool virtqueue_enable_cb(struct virtqueue *_vq)
* either clear the flags bit or point the event index at the next
* entry. Always do both to keep code simple. */
vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
- vring_used_event(&vq->vring) = vq->last_used_idx;
+ vring_used_event(&vq->vring) = last_used_idx = vq->last_used_idx;
+ END_USE(vq);
+ return last_used_idx;
+}
+EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);
+
+/**
+ * virtqueue_poll - query pending used buffers
+ * @vq: the struct virtqueue we're talking about.
+ * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare).
+ *
+ * Returns "true" if there are pending used buffers in the queue.
+ *
+ * This does not need to be serialized.
+ */
+bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
+{
+ struct vring_virtqueue *vq = to_vvq(_vq);
+
virtio_mb(vq->weak_barriers);
- if (unlikely(more_used(vq))) {
- END_USE(vq);
- return false;
- }
+ return (u16)last_used_idx != vq->vring.used->idx;
+}
+EXPORT_SYMBOL_GPL(virtqueue_poll);
- END_USE(vq);
- return true;
+/**
+ * virtqueue_enable_cb - restart callbacks after disable_cb.
+ * @vq: the struct virtqueue we're talking about.
+ *
+ * This re-enables callbacks; it returns "false" if there are pending
+ * buffers in the queue, to detect a possible race between the driver
+ * checking for more work, and enabling callbacks.
+ *
+ * Caller must ensure we don't call this with other virtqueue
+ * operations at the same time (except where noted).
+ */
+bool virtqueue_enable_cb(struct virtqueue *_vq)
+{
+ unsigned last_used_idx = virtqueue_enable_cb_prepare(_vq);
+ return !virtqueue_poll(_vq, last_used_idx);
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
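
Splitting virtqueue_enable_cb() into virtqueue_enable_cb_prepare() plus virtqueue_poll() lets a driver re-arm callbacks under its lock and then check the captured ring state lock-free, e.g. from a busy-poll loop. A sketch of that usage; foo_try_enable_cb() is hypothetical:

static bool foo_try_enable_cb(struct virtqueue *vq)
{
        unsigned last = virtqueue_enable_cb_prepare(vq);

        if (!virtqueue_poll(vq, last))
                return true;            /* armed, nothing pending */

        virtqueue_disable_cb(vq);       /* raced: buffers arrived after re-arm */
        return false;                   /* caller should process and retry */
}
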
@@ -685,7 +706,7 @@ EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);
* virtqueue_detach_unused_buf - detach first unused buffer
* @vq: the struct virtqueue we're talking about.
*
- * Returns NULL or the "data" token handed to virtqueue_add_buf().
+ * Returns NULL or the "data" token handed to virtqueue_add_*().
* This is not valid on an active queue; it is useful only for device
* shutdown.
*/
@@ -741,7 +762,7 @@ struct virtqueue *vring_new_virtqueue(unsigned int index,
struct virtio_device *vdev,
bool weak_barriers,
void *pages,
- void (*notify)(struct virtqueue *),
+ bool (*notify)(struct virtqueue *),
void (*callback)(struct virtqueue *),
const char *name)
{
@@ -836,4 +857,27 @@ unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);
+bool virtqueue_is_broken(struct virtqueue *_vq)
+{
+ struct vring_virtqueue *vq = to_vvq(_vq);
+
+ return vq->broken;
+}
+EXPORT_SYMBOL_GPL(virtqueue_is_broken);
+
+/*
+ * This should prevent the device from being used, allowing drivers to
+ * recover. You may need to grab appropriate locks to flush.
+ */
+void virtio_break_device(struct virtio_device *dev)
+{
+ struct virtqueue *_vq;
+
+ list_for_each_entry(_vq, &dev->vqs, list) {
+ struct vring_virtqueue *vq = to_vvq(_vq);
+ vq->broken = true;
+ }
+}
+EXPORT_SYMBOL_GPL(virtio_break_device);
+
MODULE_LICENSE("GPL");
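
virtqueue_is_broken() and virtio_break_device() give drivers a way out of otherwise unbounded waits on a dead or surprise-removed device. A hedged sketch of a wait loop built on it; foo_wait_for_buf() is hypothetical:

static void *foo_wait_for_buf(struct virtqueue *vq, unsigned int *len)
{
        void *buf;

        while (!(buf = virtqueue_get_buf(vq, len))) {
                if (virtqueue_is_broken(vq))
                        return NULL;    /* device is gone; stop spinning */
                cpu_relax();
        }
        return buf;
}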