Diffstat (limited to 'drivers/vfio')
-rw-r--r--  drivers/vfio/Kconfig                  7
-rw-r--r--  drivers/vfio/Makefile                 1
-rw-r--r--  drivers/vfio/pci/Kconfig             10
-rw-r--r--  drivers/vfio/pci/vfio_pci.c         443
-rw-r--r--  drivers/vfio/pci/vfio_pci_config.c  251
-rw-r--r--  drivers/vfio/pci/vfio_pci_intrs.c    98
-rw-r--r--  drivers/vfio/pci/vfio_pci_private.h  20
-rw-r--r--  drivers/vfio/pci/vfio_pci_rdwr.c    281
-rw-r--r--  drivers/vfio/vfio.c                 359
-rw-r--r--  drivers/vfio/vfio_iommu_spapr_tce.c 377
-rw-r--r--  drivers/vfio/vfio_iommu_type1.c     855
11 files changed, 1921 insertions, 781 deletions
diff --git a/drivers/vfio/Kconfig b/drivers/vfio/Kconfig
index 7cd5dec0abd..af7b204b921 100644
--- a/drivers/vfio/Kconfig
+++ b/drivers/vfio/Kconfig
@@ -3,10 +3,17 @@ config VFIO_IOMMU_TYPE1
depends on VFIO
default n
+config VFIO_IOMMU_SPAPR_TCE
+ tristate
+ depends on VFIO && SPAPR_TCE_IOMMU
+ default n
+
menuconfig VFIO
tristate "VFIO Non-Privileged userspace driver framework"
depends on IOMMU_API
select VFIO_IOMMU_TYPE1 if X86
+ select VFIO_IOMMU_SPAPR_TCE if (PPC_POWERNV || PPC_PSERIES)
+ select ANON_INODES
help
VFIO provides a framework for secure userspace device drivers.
See Documentation/vfio.txt for more details.
diff --git a/drivers/vfio/Makefile b/drivers/vfio/Makefile
index 2398d4a0e38..72bfabc8629 100644
--- a/drivers/vfio/Makefile
+++ b/drivers/vfio/Makefile
@@ -1,3 +1,4 @@
obj-$(CONFIG_VFIO) += vfio.o
obj-$(CONFIG_VFIO_IOMMU_TYPE1) += vfio_iommu_type1.o
+obj-$(CONFIG_VFIO_IOMMU_SPAPR_TCE) += vfio_iommu_spapr_tce.o
obj-$(CONFIG_VFIO_PCI) += pci/
diff --git a/drivers/vfio/pci/Kconfig b/drivers/vfio/pci/Kconfig
index 5980758563e..c41b01e2b69 100644
--- a/drivers/vfio/pci/Kconfig
+++ b/drivers/vfio/pci/Kconfig
@@ -6,3 +6,13 @@ config VFIO_PCI
use of PCI drivers using the VFIO framework.
If you don't know what to do here, say N.
+
+config VFIO_PCI_VGA
+ bool "VFIO PCI support for VGA devices"
+ depends on VFIO_PCI && X86 && VGA_ARB
+ help
+ Support for VGA extension to VFIO PCI. This exposes an additional
+ region on VGA devices for accessing legacy VGA addresses used by
+ BIOS and generic video drivers.
+
+ If you don't know what to do here, say N.
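
For orientation, a hypothetical kernel configuration fragment (not part of this patch) that would enable the new options on an x86 build; the IOMMU backend symbols are selected automatically by the VFIO menuconfig entry, so only the user-visible options are listed.

# Hypothetical .config fragment, assuming an x86 host with VGA arbitration
# (VGA_ARB) available; on POWER the SPAPR TCE backend is selected instead.
CONFIG_VFIO=m
CONFIG_VFIO_PCI=m
CONFIG_VFIO_PCI_VGA=y
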
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index b28e66c4376..010e0f8b8e4 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -13,6 +13,7 @@
#include <linux/device.h>
#include <linux/eventfd.h>
+#include <linux/file.h>
#include <linux/interrupt.h>
#include <linux/iommu.h>
#include <linux/module.h>
@@ -56,7 +57,8 @@ static int vfio_pci_enable(struct vfio_pci_device *vdev)
ret = vfio_config_init(vdev);
if (ret) {
- pci_load_and_free_saved_state(pdev, &vdev->pci_saved_state);
+ kfree(vdev->pci_saved_state);
+ vdev->pci_saved_state = NULL;
pci_disable_device(pdev);
return ret;
}
@@ -70,7 +72,7 @@ static int vfio_pci_enable(struct vfio_pci_device *vdev)
pci_write_config_word(pdev, PCI_COMMAND, cmd);
}
- msix_pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
+ msix_pos = pdev->msix_cap;
if (msix_pos) {
u16 flags;
u32 table;
@@ -78,12 +80,17 @@ static int vfio_pci_enable(struct vfio_pci_device *vdev)
pci_read_config_word(pdev, msix_pos + PCI_MSIX_FLAGS, &flags);
pci_read_config_dword(pdev, msix_pos + PCI_MSIX_TABLE, &table);
- vdev->msix_bar = table & PCI_MSIX_FLAGS_BIRMASK;
- vdev->msix_offset = table & ~PCI_MSIX_FLAGS_BIRMASK;
+ vdev->msix_bar = table & PCI_MSIX_TABLE_BIR;
+ vdev->msix_offset = table & PCI_MSIX_TABLE_OFFSET;
vdev->msix_size = ((flags & PCI_MSIX_FLAGS_QSIZE) + 1) * 16;
} else
vdev->msix_bar = 0xFF;
+#ifdef CONFIG_VFIO_PCI_VGA
+ if ((pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
+ vdev->has_vga = true;
+#endif
+
return 0;
}
@@ -132,8 +139,16 @@ static void vfio_pci_disable(struct vfio_pci_device *vdev)
*/
pci_write_config_word(pdev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
- if (vdev->reset_works)
- __pci_reset_function(pdev);
+ /*
+ * Try to reset the device. The success of this is dependent on
+ * being able to lock the device, which is not always possible.
+ */
+ if (vdev->reset_works) {
+ int ret = pci_try_reset_function(pdev);
+ if (ret)
+ pr_warn("%s: Failed to reset device %s (%d)\n",
+ __func__, dev_name(&pdev->dev), ret);
+ }
pci_restore_state(pdev);
}
@@ -178,29 +193,134 @@ static int vfio_pci_get_irq_count(struct vfio_pci_device *vdev, int irq_type)
u8 pos;
u16 flags;
- pos = pci_find_capability(vdev->pdev, PCI_CAP_ID_MSI);
+ pos = vdev->pdev->msi_cap;
if (pos) {
pci_read_config_word(vdev->pdev,
pos + PCI_MSI_FLAGS, &flags);
-
- return 1 << (flags & PCI_MSI_FLAGS_QMASK);
+ return 1 << ((flags & PCI_MSI_FLAGS_QMASK) >> 1);
}
} else if (irq_type == VFIO_PCI_MSIX_IRQ_INDEX) {
u8 pos;
u16 flags;
- pos = pci_find_capability(vdev->pdev, PCI_CAP_ID_MSIX);
+ pos = vdev->pdev->msix_cap;
if (pos) {
pci_read_config_word(vdev->pdev,
pos + PCI_MSIX_FLAGS, &flags);
return (flags & PCI_MSIX_FLAGS_QSIZE) + 1;
}
- }
+ } else if (irq_type == VFIO_PCI_ERR_IRQ_INDEX)
+ if (pci_is_pcie(vdev->pdev))
+ return 1;
+
+ return 0;
+}
+static int vfio_pci_count_devs(struct pci_dev *pdev, void *data)
+{
+ (*(int *)data)++;
return 0;
}
+struct vfio_pci_fill_info {
+ int max;
+ int cur;
+ struct vfio_pci_dependent_device *devices;
+};
+
+static int vfio_pci_fill_devs(struct pci_dev *pdev, void *data)
+{
+ struct vfio_pci_fill_info *fill = data;
+ struct iommu_group *iommu_group;
+
+ if (fill->cur == fill->max)
+ return -EAGAIN; /* Something changed, try again */
+
+ iommu_group = iommu_group_get(&pdev->dev);
+ if (!iommu_group)
+ return -EPERM; /* Cannot reset non-isolated devices */
+
+ fill->devices[fill->cur].group_id = iommu_group_id(iommu_group);
+ fill->devices[fill->cur].segment = pci_domain_nr(pdev->bus);
+ fill->devices[fill->cur].bus = pdev->bus->number;
+ fill->devices[fill->cur].devfn = pdev->devfn;
+ fill->cur++;
+ iommu_group_put(iommu_group);
+ return 0;
+}
+
+struct vfio_pci_group_entry {
+ struct vfio_group *group;
+ int id;
+};
+
+struct vfio_pci_group_info {
+ int count;
+ struct vfio_pci_group_entry *groups;
+};
+
+static int vfio_pci_validate_devs(struct pci_dev *pdev, void *data)
+{
+ struct vfio_pci_group_info *info = data;
+ struct iommu_group *group;
+ int id, i;
+
+ group = iommu_group_get(&pdev->dev);
+ if (!group)
+ return -EPERM;
+
+ id = iommu_group_id(group);
+
+ for (i = 0; i < info->count; i++)
+ if (info->groups[i].id == id)
+ break;
+
+ iommu_group_put(group);
+
+ return (i == info->count) ? -EINVAL : 0;
+}
+
+static bool vfio_pci_dev_below_slot(struct pci_dev *pdev, struct pci_slot *slot)
+{
+ for (; pdev; pdev = pdev->bus->self)
+ if (pdev->bus == slot->bus)
+ return (pdev->slot == slot);
+ return false;
+}
+
+struct vfio_pci_walk_info {
+ int (*fn)(struct pci_dev *, void *data);
+ void *data;
+ struct pci_dev *pdev;
+ bool slot;
+ int ret;
+};
+
+static int vfio_pci_walk_wrapper(struct pci_dev *pdev, void *data)
+{
+ struct vfio_pci_walk_info *walk = data;
+
+ if (!walk->slot || vfio_pci_dev_below_slot(pdev, walk->pdev->slot))
+ walk->ret = walk->fn(pdev, walk->data);
+
+ return walk->ret;
+}
+
+static int vfio_pci_for_each_slot_or_bus(struct pci_dev *pdev,
+ int (*fn)(struct pci_dev *,
+ void *data), void *data,
+ bool slot)
+{
+ struct vfio_pci_walk_info walk = {
+ .fn = fn, .data = data, .pdev = pdev, .slot = slot, .ret = 0,
+ };
+
+ pci_walk_bus(pdev->bus, vfio_pci_walk_wrapper, &walk);
+
+ return walk.ret;
+}
+
static long vfio_pci_ioctl(void *device_data,
unsigned int cmd, unsigned long arg)
{
@@ -285,6 +405,16 @@ static long vfio_pci_ioctl(void *device_data,
info.flags = VFIO_REGION_INFO_FLAG_READ;
break;
}
+ case VFIO_PCI_VGA_REGION_INDEX:
+ if (!vdev->has_vga)
+ return -EINVAL;
+
+ info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
+ info.size = 0xc0000;
+ info.flags = VFIO_REGION_INFO_FLAG_READ |
+ VFIO_REGION_INFO_FLAG_WRITE;
+
+ break;
default:
return -EINVAL;
}
@@ -302,6 +432,17 @@ static long vfio_pci_ioctl(void *device_data,
if (info.argsz < minsz || info.index >= VFIO_PCI_NUM_IRQS)
return -EINVAL;
+ switch (info.index) {
+ case VFIO_PCI_INTX_IRQ_INDEX ... VFIO_PCI_MSIX_IRQ_INDEX:
+ break;
+ case VFIO_PCI_ERR_IRQ_INDEX:
+ if (pci_is_pcie(vdev->pdev))
+ break;
+ /* pass thru to return error */
+ default:
+ return -EINVAL;
+ }
+
info.flags = VFIO_IRQ_INFO_EVENTFD;
info.count = vfio_pci_get_irq_count(vdev, info.index);
@@ -331,6 +472,7 @@ static long vfio_pci_ioctl(void *device_data,
if (!(hdr.flags & VFIO_IRQ_SET_DATA_NONE)) {
size_t size;
+ int max = vfio_pci_get_irq_count(vdev, hdr.index);
if (hdr.flags & VFIO_IRQ_SET_DATA_BOOL)
size = sizeof(uint8_t);
@@ -340,7 +482,7 @@ static long vfio_pci_ioctl(void *device_data,
return -EINVAL;
if (hdr.argsz - minsz < hdr.count * size ||
- hdr.count > vfio_pci_get_irq_count(vdev, hdr.index))
+ hdr.start >= max || hdr.start + hdr.count > max)
return -EINVAL;
data = memdup_user((void __user *)(arg + minsz),
@@ -359,59 +501,236 @@ static long vfio_pci_ioctl(void *device_data,
return ret;
- } else if (cmd == VFIO_DEVICE_RESET)
+ } else if (cmd == VFIO_DEVICE_RESET) {
return vdev->reset_works ?
- pci_reset_function(vdev->pdev) : -EINVAL;
+ pci_try_reset_function(vdev->pdev) : -EINVAL;
+
+ } else if (cmd == VFIO_DEVICE_GET_PCI_HOT_RESET_INFO) {
+ struct vfio_pci_hot_reset_info hdr;
+ struct vfio_pci_fill_info fill = { 0 };
+ struct vfio_pci_dependent_device *devices = NULL;
+ bool slot = false;
+ int ret = 0;
+
+ minsz = offsetofend(struct vfio_pci_hot_reset_info, count);
+
+ if (copy_from_user(&hdr, (void __user *)arg, minsz))
+ return -EFAULT;
+
+ if (hdr.argsz < minsz)
+ return -EINVAL;
+
+ hdr.flags = 0;
+
+ /* Can we do a slot or bus reset or neither? */
+ if (!pci_probe_reset_slot(vdev->pdev->slot))
+ slot = true;
+ else if (pci_probe_reset_bus(vdev->pdev->bus))
+ return -ENODEV;
+
+ /* How many devices are affected? */
+ ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
+ vfio_pci_count_devs,
+ &fill.max, slot);
+ if (ret)
+ return ret;
+
+ WARN_ON(!fill.max); /* Should always be at least one */
+
+ /*
+ * If there's enough space, fill it now, otherwise return
+ * -ENOSPC and the number of devices affected.
+ */
+ if (hdr.argsz < sizeof(hdr) + (fill.max * sizeof(*devices))) {
+ ret = -ENOSPC;
+ hdr.count = fill.max;
+ goto reset_info_exit;
+ }
+
+ devices = kcalloc(fill.max, sizeof(*devices), GFP_KERNEL);
+ if (!devices)
+ return -ENOMEM;
+
+ fill.devices = devices;
+
+ ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
+ vfio_pci_fill_devs,
+ &fill, slot);
+
+ /*
+ * If a device was removed between counting and filling,
+ * we may come up short of fill.max. If a device was
+ * added, we'll have a return of -EAGAIN above.
+ */
+ if (!ret)
+ hdr.count = fill.cur;
+
+reset_info_exit:
+ if (copy_to_user((void __user *)arg, &hdr, minsz))
+ ret = -EFAULT;
+
+ if (!ret) {
+ if (copy_to_user((void __user *)(arg + minsz), devices,
+ hdr.count * sizeof(*devices)))
+ ret = -EFAULT;
+ }
+
+ kfree(devices);
+ return ret;
+
+ } else if (cmd == VFIO_DEVICE_PCI_HOT_RESET) {
+ struct vfio_pci_hot_reset hdr;
+ int32_t *group_fds;
+ struct vfio_pci_group_entry *groups;
+ struct vfio_pci_group_info info;
+ bool slot = false;
+ int i, count = 0, ret = 0;
+
+ minsz = offsetofend(struct vfio_pci_hot_reset, count);
+
+ if (copy_from_user(&hdr, (void __user *)arg, minsz))
+ return -EFAULT;
+
+ if (hdr.argsz < minsz || hdr.flags)
+ return -EINVAL;
+
+ /* Can we do a slot or bus reset or neither? */
+ if (!pci_probe_reset_slot(vdev->pdev->slot))
+ slot = true;
+ else if (pci_probe_reset_bus(vdev->pdev->bus))
+ return -ENODEV;
+
+ /*
+ * We can't let userspace give us an arbitrarily large
+ * buffer to copy, so verify how many we think there
+ * could be. Note groups can have multiple devices so
+ * one group per device is the max.
+ */
+ ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
+ vfio_pci_count_devs,
+ &count, slot);
+ if (ret)
+ return ret;
+
+ /* Somewhere between 1 and count is OK */
+ if (!hdr.count || hdr.count > count)
+ return -EINVAL;
+
+ group_fds = kcalloc(hdr.count, sizeof(*group_fds), GFP_KERNEL);
+ groups = kcalloc(hdr.count, sizeof(*groups), GFP_KERNEL);
+ if (!group_fds || !groups) {
+ kfree(group_fds);
+ kfree(groups);
+ return -ENOMEM;
+ }
+
+ if (copy_from_user(group_fds, (void __user *)(arg + minsz),
+ hdr.count * sizeof(*group_fds))) {
+ kfree(group_fds);
+ kfree(groups);
+ return -EFAULT;
+ }
+
+ /*
+ * For each group_fd, get the group through the vfio external
+ * user interface and store the group and iommu ID. This
+ * ensures the group is held across the reset.
+ */
+ for (i = 0; i < hdr.count; i++) {
+ struct vfio_group *group;
+ struct fd f = fdget(group_fds[i]);
+ if (!f.file) {
+ ret = -EBADF;
+ break;
+ }
+
+ group = vfio_group_get_external_user(f.file);
+ fdput(f);
+ if (IS_ERR(group)) {
+ ret = PTR_ERR(group);
+ break;
+ }
+
+ groups[i].group = group;
+ groups[i].id = vfio_external_user_iommu_id(group);
+ }
+
+ kfree(group_fds);
+
+ /* release reference to groups on error */
+ if (ret)
+ goto hot_reset_release;
+
+ info.count = hdr.count;
+ info.groups = groups;
+
+ /*
+ * Test whether all the affected devices are contained
+ * by the set of groups provided by the user.
+ */
+ ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
+ vfio_pci_validate_devs,
+ &info, slot);
+ if (!ret)
+ /* User has access, do the reset */
+ ret = slot ? pci_try_reset_slot(vdev->pdev->slot) :
+ pci_try_reset_bus(vdev->pdev->bus);
+
+hot_reset_release:
+ for (i--; i >= 0; i--)
+ vfio_group_put_external_user(groups[i].group);
+
+ kfree(groups);
+ return ret;
+ }
return -ENOTTY;
}
-static ssize_t vfio_pci_read(void *device_data, char __user *buf,
- size_t count, loff_t *ppos)
+static ssize_t vfio_pci_rw(void *device_data, char __user *buf,
+ size_t count, loff_t *ppos, bool iswrite)
{
unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
struct vfio_pci_device *vdev = device_data;
- struct pci_dev *pdev = vdev->pdev;
if (index >= VFIO_PCI_NUM_REGIONS)
return -EINVAL;
- if (index == VFIO_PCI_CONFIG_REGION_INDEX)
- return vfio_pci_config_readwrite(vdev, buf, count, ppos, false);
- else if (index == VFIO_PCI_ROM_REGION_INDEX)
- return vfio_pci_mem_readwrite(vdev, buf, count, ppos, false);
- else if (pci_resource_flags(pdev, index) & IORESOURCE_IO)
- return vfio_pci_io_readwrite(vdev, buf, count, ppos, false);
- else if (pci_resource_flags(pdev, index) & IORESOURCE_MEM)
- return vfio_pci_mem_readwrite(vdev, buf, count, ppos, false);
+ switch (index) {
+ case VFIO_PCI_CONFIG_REGION_INDEX:
+ return vfio_pci_config_rw(vdev, buf, count, ppos, iswrite);
+
+ case VFIO_PCI_ROM_REGION_INDEX:
+ if (iswrite)
+ return -EINVAL;
+ return vfio_pci_bar_rw(vdev, buf, count, ppos, false);
+
+ case VFIO_PCI_BAR0_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
+ return vfio_pci_bar_rw(vdev, buf, count, ppos, iswrite);
+
+ case VFIO_PCI_VGA_REGION_INDEX:
+ return vfio_pci_vga_rw(vdev, buf, count, ppos, iswrite);
+ }
return -EINVAL;
}
-static ssize_t vfio_pci_write(void *device_data, const char __user *buf,
- size_t count, loff_t *ppos)
+static ssize_t vfio_pci_read(void *device_data, char __user *buf,
+ size_t count, loff_t *ppos)
{
- unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
- struct vfio_pci_device *vdev = device_data;
- struct pci_dev *pdev = vdev->pdev;
+ if (!count)
+ return 0;
- if (index >= VFIO_PCI_NUM_REGIONS)
- return -EINVAL;
+ return vfio_pci_rw(device_data, buf, count, ppos, false);
+}
- if (index == VFIO_PCI_CONFIG_REGION_INDEX)
- return vfio_pci_config_readwrite(vdev, (char __user *)buf,
- count, ppos, true);
- else if (index == VFIO_PCI_ROM_REGION_INDEX)
- return -EINVAL;
- else if (pci_resource_flags(pdev, index) & IORESOURCE_IO)
- return vfio_pci_io_readwrite(vdev, (char __user *)buf,
- count, ppos, true);
- else if (pci_resource_flags(pdev, index) & IORESOURCE_MEM) {
- return vfio_pci_mem_readwrite(vdev, (char __user *)buf,
- count, ppos, true);
- }
+static ssize_t vfio_pci_write(void *device_data, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ if (!count)
+ return 0;
- return -EINVAL;
+ return vfio_pci_rw(device_data, (char __user *)buf, count, ppos, true);
}
static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma)
@@ -472,7 +791,6 @@ static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma)
}
vma->vm_private_data = vdev;
- vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma->vm_pgoff = (pci_resource_start(pdev, index) >> PAGE_SHIFT) + pgoff;
@@ -538,11 +856,44 @@ static void vfio_pci_remove(struct pci_dev *pdev)
kfree(vdev);
}
+static pci_ers_result_t vfio_pci_aer_err_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct vfio_pci_device *vdev;
+ struct vfio_device *device;
+
+ device = vfio_device_get_from_dev(&pdev->dev);
+ if (device == NULL)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ vdev = vfio_device_data(device);
+ if (vdev == NULL) {
+ vfio_device_put(device);
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ mutex_lock(&vdev->igate);
+
+ if (vdev->err_trigger)
+ eventfd_signal(vdev->err_trigger, 1);
+
+ mutex_unlock(&vdev->igate);
+
+ vfio_device_put(device);
+
+ return PCI_ERS_RESULT_CAN_RECOVER;
+}
+
+static struct pci_error_handlers vfio_err_handlers = {
+ .error_detected = vfio_pci_aer_err_detected,
+};
+
static struct pci_driver vfio_pci_driver = {
.name = "vfio-pci",
.id_table = NULL, /* only dynamic ids */
.probe = vfio_pci_probe,
.remove = vfio_pci_remove,
+ .err_handler = &vfio_err_handlers,
};
static void __exit vfio_pci_cleanup(void)
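
For context, a rough userspace sketch (not part of the patch) of how the two hot reset ioctls added above might be driven. It assumes "device" is a device fd obtained through VFIO_GROUP_GET_DEVICE_FD and "group_fd" is the already-open fd of a group containing every device affected by the slot or bus reset; the field layout follows the uapi additions made elsewhere in this series, and error handling plus the second, larger GET_PCI_HOT_RESET_INFO call that retrieves the dependent-device list are elided.

#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

static int try_hot_reset(int device, int group_fd)
{
	struct vfio_pci_hot_reset_info info = { .argsz = sizeof(info) };
	struct vfio_pci_hot_reset *reset;
	size_t sz = sizeof(*reset) + sizeof(__s32);
	int ret;

	/* Expected to fail with -ENOSPC, filling info.count with the
	 * number of devices affected by a slot or bus reset. */
	ioctl(device, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, &info);

	/* Hand the kernel a single group fd covering all affected devices */
	reset = calloc(1, sz);
	reset->argsz = sz;
	reset->count = 1;
	reset->group_fds[0] = group_fd;

	ret = ioctl(device, VFIO_DEVICE_PCI_HOT_RESET, reset);
	free(reset);
	return ret;
}
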
diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c
index 8b8f7d11e10..e50790e91f7 100644
--- a/drivers/vfio/pci/vfio_pci_config.c
+++ b/drivers/vfio/pci/vfio_pci_config.c
@@ -27,6 +27,7 @@
#include <linux/pci.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>
+#include <linux/slab.h>
#include "vfio_pci_private.h"
@@ -273,9 +274,10 @@ static int vfio_direct_config_read(struct vfio_pci_device *vdev, int pos,
return count;
}
-static int vfio_direct_config_write(struct vfio_pci_device *vdev, int pos,
- int count, struct perm_bits *perm,
- int offset, __le32 val)
+/* Raw access skips any kind of virtualization */
+static int vfio_raw_config_write(struct vfio_pci_device *vdev, int pos,
+ int count, struct perm_bits *perm,
+ int offset, __le32 val)
{
int ret;
@@ -286,13 +288,36 @@ static int vfio_direct_config_write(struct vfio_pci_device *vdev, int pos,
return count;
}
-/* Default all regions to read-only, no-virtualization */
+static int vfio_raw_config_read(struct vfio_pci_device *vdev, int pos,
+ int count, struct perm_bits *perm,
+ int offset, __le32 *val)
+{
+ int ret;
+
+ ret = vfio_user_config_read(vdev->pdev, pos, val, count);
+ if (ret)
+ return pcibios_err_to_errno(ret);
+
+ return count;
+}
+
+/* Default capability regions to read-only, no-virtualization */
static struct perm_bits cap_perms[PCI_CAP_ID_MAX + 1] = {
[0 ... PCI_CAP_ID_MAX] = { .readfn = vfio_direct_config_read }
};
static struct perm_bits ecap_perms[PCI_EXT_CAP_ID_MAX + 1] = {
[0 ... PCI_EXT_CAP_ID_MAX] = { .readfn = vfio_direct_config_read }
};
+/*
+ * Default unassigned regions to raw read-write access. Some devices
+ * require this to function as they hide registers between the gaps in
+ * config space (be2net). Like MMIO and I/O port registers, we have
+ * to trust the hardware isolation.
+ */
+static struct perm_bits unassigned_perms = {
+ .readfn = vfio_raw_config_read,
+ .writefn = vfio_raw_config_write
+};
static void free_perm_bits(struct perm_bits *perm)
{
@@ -587,12 +612,46 @@ static int __init init_pci_cap_basic_perm(struct perm_bits *perm)
return 0;
}
+static int vfio_pm_config_write(struct vfio_pci_device *vdev, int pos,
+ int count, struct perm_bits *perm,
+ int offset, __le32 val)
+{
+ count = vfio_default_config_write(vdev, pos, count, perm, offset, val);
+ if (count < 0)
+ return count;
+
+ if (offset == PCI_PM_CTRL) {
+ pci_power_t state;
+
+ switch (le32_to_cpu(val) & PCI_PM_CTRL_STATE_MASK) {
+ case 0:
+ state = PCI_D0;
+ break;
+ case 1:
+ state = PCI_D1;
+ break;
+ case 2:
+ state = PCI_D2;
+ break;
+ case 3:
+ state = PCI_D3hot;
+ break;
+ }
+
+ pci_set_power_state(vdev->pdev, state);
+ }
+
+ return count;
+}
+
/* Permissions for the Power Management capability */
static int __init init_pci_cap_pm_perm(struct perm_bits *perm)
{
if (alloc_perm_bits(perm, pci_cap_length[PCI_CAP_ID_PM]))
return -ENOMEM;
+ perm->writefn = vfio_pm_config_write;
+
/*
* We always virtualize the next field so we can remove
* capabilities from the chain if we want to.
@@ -600,10 +659,11 @@ static int __init init_pci_cap_pm_perm(struct perm_bits *perm)
p_setb(perm, PCI_CAP_LIST_NEXT, (u8)ALL_VIRT, NO_WRITE);
/*
- * Power management is defined *per function*,
- * so we let the user write this
+ * Power management is defined *per function*, so we can let
+ * the user change power state, but we trap and initiate the
+ * change ourselves, so the state bits are read-only.
*/
- p_setd(perm, PCI_PM_CTRL, NO_VIRT, ALL_WRITE);
+ p_setd(perm, PCI_PM_CTRL, NO_VIRT, ~PCI_PM_CTRL_STATE_MASK);
return 0;
}
@@ -743,16 +803,16 @@ int __init vfio_pci_init_perm_bits(void)
/* Capabilities */
ret |= init_pci_cap_pm_perm(&cap_perms[PCI_CAP_ID_PM]);
- cap_perms[PCI_CAP_ID_VPD].writefn = vfio_direct_config_write;
+ cap_perms[PCI_CAP_ID_VPD].writefn = vfio_raw_config_write;
ret |= init_pci_cap_pcix_perm(&cap_perms[PCI_CAP_ID_PCIX]);
- cap_perms[PCI_CAP_ID_VNDR].writefn = vfio_direct_config_write;
+ cap_perms[PCI_CAP_ID_VNDR].writefn = vfio_raw_config_write;
ret |= init_pci_cap_exp_perm(&cap_perms[PCI_CAP_ID_EXP]);
ret |= init_pci_cap_af_perm(&cap_perms[PCI_CAP_ID_AF]);
/* Extended capabilities */
ret |= init_pci_ext_cap_err_perm(&ecap_perms[PCI_EXT_CAP_ID_ERR]);
ret |= init_pci_ext_cap_pwr_perm(&ecap_perms[PCI_EXT_CAP_ID_PWR]);
- ecap_perms[PCI_EXT_CAP_ID_VNDR].writefn = vfio_direct_config_write;
+ ecap_perms[PCI_EXT_CAP_ID_VNDR].writefn = vfio_raw_config_write;
if (ret)
vfio_pci_uninit_perm_bits();
@@ -765,9 +825,6 @@ static int vfio_find_cap_start(struct vfio_pci_device *vdev, int pos)
u8 cap;
int base = (pos >= PCI_CFG_SPACE_SIZE) ? PCI_CFG_SPACE_SIZE :
PCI_STD_HEADER_SIZEOF;
- base /= 4;
- pos /= 4;
-
cap = vdev->pci_config_map[pos];
if (cap == PCI_CAP_ID_BASIC)
@@ -777,7 +834,7 @@ static int vfio_find_cap_start(struct vfio_pci_device *vdev, int pos)
while (pos - 1 >= base && vdev->pci_config_map[pos - 1] == cap)
pos--;
- return pos * 4;
+ return pos;
}
static int vfio_msi_config_read(struct vfio_pci_device *vdev, int pos,
@@ -918,20 +975,20 @@ static int vfio_vc_cap_len(struct vfio_pci_device *vdev, u16 pos)
int ret, evcc, phases, vc_arb;
int len = PCI_CAP_VC_BASE_SIZEOF;
- ret = pci_read_config_dword(pdev, pos + PCI_VC_PORT_REG1, &tmp);
+ ret = pci_read_config_dword(pdev, pos + PCI_VC_PORT_CAP1, &tmp);
if (ret)
return pcibios_err_to_errno(ret);
- evcc = tmp & PCI_VC_REG1_EVCC; /* extended vc count */
- ret = pci_read_config_dword(pdev, pos + PCI_VC_PORT_REG2, &tmp);
+ evcc = tmp & PCI_VC_CAP1_EVCC; /* extended vc count */
+ ret = pci_read_config_dword(pdev, pos + PCI_VC_PORT_CAP2, &tmp);
if (ret)
return pcibios_err_to_errno(ret);
- if (tmp & PCI_VC_REG2_128_PHASE)
+ if (tmp & PCI_VC_CAP2_128_PHASE)
phases = 128;
- else if (tmp & PCI_VC_REG2_64_PHASE)
+ else if (tmp & PCI_VC_CAP2_64_PHASE)
phases = 64;
- else if (tmp & PCI_VC_REG2_32_PHASE)
+ else if (tmp & PCI_VC_CAP2_32_PHASE)
phases = 32;
else
phases = 0;
@@ -955,6 +1012,7 @@ static int vfio_vc_cap_len(struct vfio_pci_device *vdev, u16 pos)
static int vfio_cap_len(struct vfio_pci_device *vdev, u8 cap, u8 pos)
{
struct pci_dev *pdev = vdev->pdev;
+ u32 dword;
u16 word;
u8 byte;
int ret;
@@ -968,7 +1026,9 @@ static int vfio_cap_len(struct vfio_pci_device *vdev, u8 cap, u8 pos)
return pcibios_err_to_errno(ret);
if (PCI_X_CMD_VERSION(word)) {
- vdev->extended_caps = true;
+ /* Test for extended capabilities */
+ pci_read_config_dword(pdev, PCI_CFG_SPACE_SIZE, &dword);
+ vdev->extended_caps = (dword != 0);
return PCI_CAP_PCIX_SIZEOF_V2;
} else
return PCI_CAP_PCIX_SIZEOF_V0;
@@ -980,17 +1040,15 @@ static int vfio_cap_len(struct vfio_pci_device *vdev, u8 cap, u8 pos)
return byte;
case PCI_CAP_ID_EXP:
- /* length based on version */
- ret = pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &word);
- if (ret)
- return pcibios_err_to_errno(ret);
+ /* Test for extended capabilities */
+ pci_read_config_dword(pdev, PCI_CFG_SPACE_SIZE, &dword);
+ vdev->extended_caps = (dword != 0);
- if ((word & PCI_EXP_FLAGS_VERS) == 1)
+ /* length based on version */
+ if ((pcie_caps_reg(pdev) & PCI_EXP_FLAGS_VERS) == 1)
return PCI_CAP_EXP_ENDPOINT_SIZEOF_V1;
- else {
- vdev->extended_caps = true;
+ else
return PCI_CAP_EXP_ENDPOINT_SIZEOF_V2;
- }
case PCI_CAP_ID_HT:
ret = pci_read_config_byte(pdev, pos + 3, &byte);
if (ret)
@@ -1068,8 +1126,7 @@ static int vfio_ext_cap_len(struct vfio_pci_device *vdev, u16 ecap, u16 epos)
return pcibios_err_to_errno(ret);
byte &= PCI_DPA_CAP_SUBSTATE_MASK;
- byte = round_up(byte + 1, 4);
- return PCI_DPA_BASE_SIZEOF + byte;
+ return PCI_DPA_BASE_SIZEOF + byte + 1;
case PCI_EXT_CAP_ID_TPH:
ret = pci_read_config_dword(pdev, epos + PCI_TPH_CAP, &dword);
if (ret)
@@ -1078,9 +1135,9 @@ static int vfio_ext_cap_len(struct vfio_pci_device *vdev, u16 ecap, u16 epos)
if ((dword & PCI_TPH_CAP_LOC_MASK) == PCI_TPH_LOC_CAP) {
int sts;
- sts = byte & PCI_TPH_CAP_ST_MASK;
+ sts = dword & PCI_TPH_CAP_ST_MASK;
sts >>= PCI_TPH_CAP_ST_SHIFT;
- return PCI_TPH_BASE_SIZEOF + round_up(sts * 2, 4);
+ return PCI_TPH_BASE_SIZEOF + (sts * 2) + 2;
}
return PCI_TPH_BASE_SIZEOF;
default:
@@ -1194,8 +1251,8 @@ static int vfio_cap_init(struct vfio_pci_device *vdev)
}
/* Sanity check, do we overlap other capabilities? */
- for (i = 0; i < len; i += 4) {
- if (likely(map[(pos + i) / 4] == PCI_CAP_ID_INVALID))
+ for (i = 0; i < len; i++) {
+ if (likely(map[pos + i] == PCI_CAP_ID_INVALID))
continue;
pr_warn("%s: %s pci config conflict @0x%x, was cap 0x%x now cap 0x%x\n",
@@ -1203,7 +1260,7 @@ static int vfio_cap_init(struct vfio_pci_device *vdev)
pos + i, map[pos + i], cap);
}
- memset(map + (pos / 4), cap, len / 4);
+ memset(map + pos, cap, len);
ret = vfio_fill_vconfig_bytes(vdev, pos, len);
if (ret)
return ret;
@@ -1278,8 +1335,8 @@ static int vfio_ecap_init(struct vfio_pci_device *vdev)
hidden = true;
}
- for (i = 0; i < len; i += 4) {
- if (likely(map[(epos + i) / 4] == PCI_CAP_ID_INVALID))
+ for (i = 0; i < len; i++) {
+ if (likely(map[epos + i] == PCI_CAP_ID_INVALID))
continue;
pr_warn("%s: %s pci config conflict @0x%x, was ecap 0x%x now ecap 0x%x\n",
@@ -1294,7 +1351,7 @@ static int vfio_ecap_init(struct vfio_pci_device *vdev)
*/
BUILD_BUG_ON(PCI_EXT_CAP_ID_MAX >= PCI_CAP_ID_INVALID);
- memset(map + (epos / 4), ecap, len / 4);
+ memset(map + epos, ecap, len);
ret = vfio_fill_vconfig_bytes(vdev, epos, len);
if (ret)
return ret;
@@ -1341,10 +1398,12 @@ int vfio_config_init(struct vfio_pci_device *vdev)
int ret;
/*
- * Config space, caps and ecaps are all dword aligned, so we can
- * use one byte per dword to record the type.
+ * Config space, caps and ecaps are all dword aligned, so we could
+ * use one byte per dword to record the type. However, there are
+ * no requirements on the length of a capability, so the gap between
+ * capabilities needs byte granularity.
*/
- map = kmalloc(pdev->cfg_size / 4, GFP_KERNEL);
+ map = kmalloc(pdev->cfg_size, GFP_KERNEL);
if (!map)
return -ENOMEM;
@@ -1357,9 +1416,9 @@ int vfio_config_init(struct vfio_pci_device *vdev)
vdev->pci_config_map = map;
vdev->vconfig = vconfig;
- memset(map, PCI_CAP_ID_BASIC, PCI_STD_HEADER_SIZEOF / 4);
- memset(map + (PCI_STD_HEADER_SIZEOF / 4), PCI_CAP_ID_INVALID,
- (pdev->cfg_size - PCI_STD_HEADER_SIZEOF) / 4);
+ memset(map, PCI_CAP_ID_BASIC, PCI_STD_HEADER_SIZEOF);
+ memset(map + PCI_STD_HEADER_SIZEOF, PCI_CAP_ID_INVALID,
+ pdev->cfg_size - PCI_STD_HEADER_SIZEOF);
ret = vfio_fill_vconfig_bytes(vdev, 0, PCI_STD_HEADER_SIZEOF);
if (ret)
@@ -1414,6 +1473,22 @@ void vfio_config_free(struct vfio_pci_device *vdev)
vdev->msi_perm = NULL;
}
+/*
+ * Find the remaining number of bytes in a dword that match the given
+ * position. Stop at either the end of the capability or the dword boundary.
+ */
+static size_t vfio_pci_cap_remaining_dword(struct vfio_pci_device *vdev,
+ loff_t pos)
+{
+ u8 cap = vdev->pci_config_map[pos];
+ size_t i;
+
+ for (i = 1; (pos + i) % 4 && vdev->pci_config_map[pos + i] == cap; i++)
+ /* nop */;
+
+ return i;
+}
+
static ssize_t vfio_config_do_rw(struct vfio_pci_device *vdev, char __user *buf,
size_t count, loff_t *ppos, bool iswrite)
{
@@ -1422,55 +1497,48 @@ static ssize_t vfio_config_do_rw(struct vfio_pci_device *vdev, char __user *buf,
__le32 val = 0;
int cap_start = 0, offset;
u8 cap_id;
- ssize_t ret = count;
+ ssize_t ret;
- if (*ppos < 0 || *ppos + count > pdev->cfg_size)
+ if (*ppos < 0 || *ppos >= pdev->cfg_size ||
+ *ppos + count > pdev->cfg_size)
return -EFAULT;
/*
- * gcc can't seem to figure out we're a static function, only called
- * with count of 1/2/4 and hits copy_from_user_overflow without this.
+ * Chop accesses into aligned chunks containing no more than a
+ * single capability. Caller increments to the next chunk.
*/
- if (count > sizeof(val))
- return -EINVAL;
-
- cap_id = vdev->pci_config_map[*ppos / 4];
-
- if (cap_id == PCI_CAP_ID_INVALID) {
- if (iswrite)
- return ret; /* drop */
-
- /*
- * Per PCI spec 3.0, section 6.1, reads from reserved and
- * unimplemented registers return 0
- */
- if (copy_to_user(buf, &val, count))
- return -EFAULT;
+ count = min(count, vfio_pci_cap_remaining_dword(vdev, *ppos));
+ if (count >= 4 && !(*ppos % 4))
+ count = 4;
+ else if (count >= 2 && !(*ppos % 2))
+ count = 2;
+ else
+ count = 1;
- return ret;
- }
+ ret = count;
- /*
- * All capabilities are minimum 4 bytes and aligned on dword
- * boundaries. Since we don't support unaligned accesses, we're
- * only ever accessing a single capability.
- */
- if (*ppos >= PCI_CFG_SPACE_SIZE) {
- WARN_ON(cap_id > PCI_EXT_CAP_ID_MAX);
-
- perm = &ecap_perms[cap_id];
- cap_start = vfio_find_cap_start(vdev, *ppos);
+ cap_id = vdev->pci_config_map[*ppos];
+ if (cap_id == PCI_CAP_ID_INVALID) {
+ perm = &unassigned_perms;
+ cap_start = *ppos;
} else {
- WARN_ON(cap_id > PCI_CAP_ID_MAX);
+ if (*ppos >= PCI_CFG_SPACE_SIZE) {
+ WARN_ON(cap_id > PCI_EXT_CAP_ID_MAX);
- perm = &cap_perms[cap_id];
+ perm = &ecap_perms[cap_id];
+ cap_start = vfio_find_cap_start(vdev, *ppos);
+ } else {
+ WARN_ON(cap_id > PCI_CAP_ID_MAX);
- if (cap_id == PCI_CAP_ID_MSI)
- perm = vdev->msi_perm;
+ perm = &cap_perms[cap_id];
- if (cap_id > PCI_CAP_ID_BASIC)
- cap_start = vfio_find_cap_start(vdev, *ppos);
+ if (cap_id == PCI_CAP_ID_MSI)
+ perm = vdev->msi_perm;
+
+ if (cap_id > PCI_CAP_ID_BASIC)
+ cap_start = vfio_find_cap_start(vdev, *ppos);
+ }
}
WARN_ON(!cap_start && cap_id != PCI_CAP_ID_BASIC);
@@ -1501,9 +1569,8 @@ static ssize_t vfio_config_do_rw(struct vfio_pci_device *vdev, char __user *buf,
return ret;
}
-ssize_t vfio_pci_config_readwrite(struct vfio_pci_device *vdev,
- char __user *buf, size_t count,
- loff_t *ppos, bool iswrite)
+ssize_t vfio_pci_config_rw(struct vfio_pci_device *vdev, char __user *buf,
+ size_t count, loff_t *ppos, bool iswrite)
{
size_t done = 0;
int ret = 0;
@@ -1511,20 +1578,8 @@ ssize_t vfio_pci_config_readwrite(struct vfio_pci_device *vdev,
pos &= VFIO_PCI_OFFSET_MASK;
- /*
- * We want to both keep the access size the caller users as well as
- * support reading large chunks of config space in a single call.
- * PCI doesn't support unaligned accesses, so we can safely break
- * those apart.
- */
while (count) {
- if (count >= 4 && !(pos % 4))
- ret = vfio_config_do_rw(vdev, buf, 4, &pos, iswrite);
- else if (count >= 2 && !(pos % 2))
- ret = vfio_config_do_rw(vdev, buf, 2, &pos, iswrite);
- else
- ret = vfio_config_do_rw(vdev, buf, 1, &pos, iswrite);
-
+ ret = vfio_config_do_rw(vdev, buf, count, &pos, iswrite);
if (ret < 0)
return ret;
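
As a usage note (not from the patch), the chunking rework above lets a caller issue one large read of the config region and rely on vfio_pci_config_rw() to split it into aligned, per-capability accesses internally. A minimal sketch, assuming "device" is a vfio device fd and with error checking elided:

#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

static ssize_t dump_config(int device, void *buf, size_t len)
{
	struct vfio_region_info reg = {
		.argsz = sizeof(reg),
		.index = VFIO_PCI_CONFIG_REGION_INDEX,
	};

	ioctl(device, VFIO_DEVICE_GET_REGION_INFO, &reg);

	if (len > reg.size)
		len = reg.size;

	/* One pread() may now span capabilities and unassigned gaps */
	return pread(device, buf, len, reg.offset);
}
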
diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c
index 3639371fa69..9dd49c9839a 100644
--- a/drivers/vfio/pci/vfio_pci_intrs.c
+++ b/drivers/vfio/pci/vfio_pci_intrs.c
@@ -22,6 +22,7 @@
#include <linux/vfio.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
+#include <linux/slab.h>
#include "vfio_pci_private.h"
@@ -129,8 +130,8 @@ static int virqfd_enable(struct vfio_pci_device *vdev,
void (*thread)(struct vfio_pci_device *, void *),
void *data, struct virqfd **pvirqfd, int fd)
{
- struct file *file = NULL;
- struct eventfd_ctx *ctx = NULL;
+ struct fd irqfd;
+ struct eventfd_ctx *ctx;
struct virqfd *virqfd;
int ret = 0;
unsigned int events;
@@ -148,16 +149,16 @@ static int virqfd_enable(struct vfio_pci_device *vdev,
INIT_WORK(&virqfd->shutdown, virqfd_shutdown);
INIT_WORK(&virqfd->inject, virqfd_inject);
- file = eventfd_fget(fd);
- if (IS_ERR(file)) {
- ret = PTR_ERR(file);
- goto fail;
+ irqfd = fdget(fd);
+ if (!irqfd.file) {
+ ret = -EBADF;
+ goto err_fd;
}
- ctx = eventfd_ctx_fileget(file);
+ ctx = eventfd_ctx_fileget(irqfd.file);
if (IS_ERR(ctx)) {
ret = PTR_ERR(ctx);
- goto fail;
+ goto err_ctx;
}
virqfd->eventfd = ctx;
@@ -173,7 +174,7 @@ static int virqfd_enable(struct vfio_pci_device *vdev,
if (*pvirqfd) {
spin_unlock_irq(&vdev->irqlock);
ret = -EBUSY;
- goto fail;
+ goto err_busy;
}
*pvirqfd = virqfd;
@@ -186,7 +187,7 @@ static int virqfd_enable(struct vfio_pci_device *vdev,
init_waitqueue_func_entry(&virqfd->wait, virqfd_wakeup);
init_poll_funcptr(&virqfd->pt, virqfd_ptable_queue_proc);
- events = file->f_op->poll(file, &virqfd->pt);
+ events = irqfd.file->f_op->poll(irqfd.file, &virqfd->pt);
/*
* Check if there was an event already pending on the eventfd
@@ -201,17 +202,14 @@ static int virqfd_enable(struct vfio_pci_device *vdev,
* Do not drop the file until the irqfd is fully initialized,
* otherwise we might race against the POLLHUP.
*/
- fput(file);
+ fdput(irqfd);
return 0;
-
-fail:
- if (ctx && !IS_ERR(ctx))
- eventfd_ctx_put(ctx);
-
- if (file && !IS_ERR(file))
- fput(file);
-
+err_busy:
+ eventfd_ctx_put(ctx);
+err_ctx:
+ fdput(irqfd);
+err_fd:
kfree(virqfd);
return ret;
@@ -286,7 +284,8 @@ void vfio_pci_intx_mask(struct vfio_pci_device *vdev)
* a signal is necessary, which can then be handled via a work queue
* or directly depending on the caller.
*/
-int vfio_pci_intx_unmask_handler(struct vfio_pci_device *vdev, void *unused)
+static int vfio_pci_intx_unmask_handler(struct vfio_pci_device *vdev,
+ void *unused)
{
struct pci_dev *pdev = vdev->pdev;
unsigned long flags;
@@ -483,15 +482,19 @@ static int vfio_msi_enable(struct vfio_pci_device *vdev, int nvec, bool msix)
for (i = 0; i < nvec; i++)
vdev->msix[i].entry = i;
- ret = pci_enable_msix(pdev, vdev->msix, nvec);
- if (ret) {
+ ret = pci_enable_msix_range(pdev, vdev->msix, 1, nvec);
+ if (ret < nvec) {
+ if (ret > 0)
+ pci_disable_msix(pdev);
kfree(vdev->msix);
kfree(vdev->ctx);
return ret;
}
} else {
- ret = pci_enable_msi_block(pdev, nvec);
- if (ret) {
+ ret = pci_enable_msi_range(pdev, 1, nvec);
+ if (ret < nvec) {
+ if (ret > 0)
+ pci_disable_msi(pdev);
kfree(vdev->ctx);
return ret;
}
@@ -745,6 +748,46 @@ static int vfio_pci_set_msi_trigger(struct vfio_pci_device *vdev,
return 0;
}
+static int vfio_pci_set_err_trigger(struct vfio_pci_device *vdev,
+ unsigned index, unsigned start,
+ unsigned count, uint32_t flags, void *data)
+{
+ int32_t fd = *(int32_t *)data;
+
+ if ((index != VFIO_PCI_ERR_IRQ_INDEX) ||
+ !(flags & VFIO_IRQ_SET_DATA_TYPE_MASK))
+ return -EINVAL;
+
+ /* DATA_NONE/DATA_BOOL enables loopback testing */
+ if (flags & VFIO_IRQ_SET_DATA_NONE) {
+ if (vdev->err_trigger)
+ eventfd_signal(vdev->err_trigger, 1);
+ return 0;
+ } else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
+ uint8_t trigger = *(uint8_t *)data;
+ if (trigger && vdev->err_trigger)
+ eventfd_signal(vdev->err_trigger, 1);
+ return 0;
+ }
+
+ /* Handle SET_DATA_EVENTFD */
+ if (fd == -1) {
+ if (vdev->err_trigger)
+ eventfd_ctx_put(vdev->err_trigger);
+ vdev->err_trigger = NULL;
+ return 0;
+ } else if (fd >= 0) {
+ struct eventfd_ctx *efdctx;
+ efdctx = eventfd_ctx_fdget(fd);
+ if (IS_ERR(efdctx))
+ return PTR_ERR(efdctx);
+ if (vdev->err_trigger)
+ eventfd_ctx_put(vdev->err_trigger);
+ vdev->err_trigger = efdctx;
+ return 0;
+ } else
+ return -EINVAL;
+}
int vfio_pci_set_irqs_ioctl(struct vfio_pci_device *vdev, uint32_t flags,
unsigned index, unsigned start, unsigned count,
void *data)
@@ -779,6 +822,13 @@ int vfio_pci_set_irqs_ioctl(struct vfio_pci_device *vdev, uint32_t flags,
break;
}
break;
+ case VFIO_PCI_ERR_IRQ_INDEX:
+ switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
+ case VFIO_IRQ_SET_ACTION_TRIGGER:
+ if (pci_is_pcie(vdev->pdev))
+ func = vfio_pci_set_err_trigger;
+ break;
+ }
}
if (!func)
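
A hedged userspace sketch (not part of the patch) of wiring an eventfd to the new error IRQ index so that AER errors signal userspace. It assumes "device" is a vfio device fd for a PCIe device; error checks are elided.

#include <stdlib.h>
#include <string.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

static int arm_err_irq(int device)
{
	struct vfio_irq_set *set;
	size_t sz = sizeof(*set) + sizeof(int32_t);
	int efd = eventfd(0, EFD_CLOEXEC);

	set = malloc(sz);
	set->argsz = sz;
	set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
	set->index = VFIO_PCI_ERR_IRQ_INDEX;
	set->start = 0;
	set->count = 1;
	memcpy(set->data, &efd, sizeof(efd));

	ioctl(device, VFIO_DEVICE_SET_IRQS, set);
	free(set);

	return efd;	/* poll/read this fd to observe error notifications */
}
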
diff --git a/drivers/vfio/pci/vfio_pci_private.h b/drivers/vfio/pci/vfio_pci_private.h
index 611827cba8c..9c6d5d0f3b0 100644
--- a/drivers/vfio/pci/vfio_pci_private.h
+++ b/drivers/vfio/pci/vfio_pci_private.h
@@ -53,8 +53,10 @@ struct vfio_pci_device {
bool reset_works;
bool extended_caps;
bool bardirty;
+ bool has_vga;
struct pci_saved_state *pci_saved_state;
atomic_t refcnt;
+ struct eventfd_ctx *err_trigger;
};
#define is_intx(vdev) (vdev->irq_type == VFIO_PCI_INTX_IRQ_INDEX)
@@ -70,15 +72,15 @@ extern int vfio_pci_set_irqs_ioctl(struct vfio_pci_device *vdev,
uint32_t flags, unsigned index,
unsigned start, unsigned count, void *data);
-extern ssize_t vfio_pci_config_readwrite(struct vfio_pci_device *vdev,
- char __user *buf, size_t count,
- loff_t *ppos, bool iswrite);
-extern ssize_t vfio_pci_mem_readwrite(struct vfio_pci_device *vdev,
- char __user *buf, size_t count,
- loff_t *ppos, bool iswrite);
-extern ssize_t vfio_pci_io_readwrite(struct vfio_pci_device *vdev,
- char __user *buf, size_t count,
- loff_t *ppos, bool iswrite);
+extern ssize_t vfio_pci_config_rw(struct vfio_pci_device *vdev,
+ char __user *buf, size_t count,
+ loff_t *ppos, bool iswrite);
+
+extern ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf,
+ size_t count, loff_t *ppos, bool iswrite);
+
+extern ssize_t vfio_pci_vga_rw(struct vfio_pci_device *vdev, char __user *buf,
+ size_t count, loff_t *ppos, bool iswrite);
extern int vfio_pci_init_perm_bits(void);
extern void vfio_pci_uninit_perm_bits(void);
diff --git a/drivers/vfio/pci/vfio_pci_rdwr.c b/drivers/vfio/pci/vfio_pci_rdwr.c
index f72323ef618..210db24d220 100644
--- a/drivers/vfio/pci/vfio_pci_rdwr.c
+++ b/drivers/vfio/pci/vfio_pci_rdwr.c
@@ -17,253 +17,222 @@
#include <linux/pci.h>
#include <linux/uaccess.h>
#include <linux/io.h>
+#include <linux/vgaarb.h>
#include "vfio_pci_private.h"
-/* I/O Port BAR access */
-ssize_t vfio_pci_io_readwrite(struct vfio_pci_device *vdev, char __user *buf,
- size_t count, loff_t *ppos, bool iswrite)
+/*
+ * Read or write from an __iomem region (MMIO or I/O port) with an excluded
+ * range which is inaccessible. The excluded range drops writes and fills
+ * reads with -1. This is intended for handling MSI-X vector tables and
+ * leftover space for ROM BARs.
+ */
+static ssize_t do_io_rw(void __iomem *io, char __user *buf,
+ loff_t off, size_t count, size_t x_start,
+ size_t x_end, bool iswrite)
{
- struct pci_dev *pdev = vdev->pdev;
- loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
- int bar = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
- void __iomem *io;
- size_t done = 0;
-
- if (!pci_resource_start(pdev, bar))
- return -EINVAL;
-
- if (pos + count > pci_resource_len(pdev, bar))
- return -EINVAL;
-
- if (!vdev->barmap[bar]) {
- int ret;
-
- ret = pci_request_selected_regions(pdev, 1 << bar, "vfio");
- if (ret)
- return ret;
-
- vdev->barmap[bar] = pci_iomap(pdev, bar, 0);
-
- if (!vdev->barmap[bar]) {
- pci_release_selected_regions(pdev, 1 << bar);
- return -EINVAL;
- }
- }
-
- io = vdev->barmap[bar];
+ ssize_t done = 0;
while (count) {
- int filled;
+ size_t fillable, filled;
+
+ if (off < x_start)
+ fillable = min(count, (size_t)(x_start - off));
+ else if (off >= x_end)
+ fillable = count;
+ else
+ fillable = 0;
- if (count >= 3 && !(pos % 4)) {
+ if (fillable >= 4 && !(off % 4)) {
__le32 val;
if (iswrite) {
if (copy_from_user(&val, buf, 4))
return -EFAULT;
- iowrite32(le32_to_cpu(val), io + pos);
+ iowrite32(le32_to_cpu(val), io + off);
} else {
- val = cpu_to_le32(ioread32(io + pos));
+ val = cpu_to_le32(ioread32(io + off));
if (copy_to_user(buf, &val, 4))
return -EFAULT;
}
filled = 4;
-
- } else if ((pos % 2) == 0 && count >= 2) {
+ } else if (fillable >= 2 && !(off % 2)) {
__le16 val;
if (iswrite) {
if (copy_from_user(&val, buf, 2))
return -EFAULT;
- iowrite16(le16_to_cpu(val), io + pos);
+ iowrite16(le16_to_cpu(val), io + off);
} else {
- val = cpu_to_le16(ioread16(io + pos));
+ val = cpu_to_le16(ioread16(io + off));
if (copy_to_user(buf, &val, 2))
return -EFAULT;
}
filled = 2;
- } else {
+ } else if (fillable) {
u8 val;
if (iswrite) {
if (copy_from_user(&val, buf, 1))
return -EFAULT;
- iowrite8(val, io + pos);
+ iowrite8(val, io + off);
} else {
- val = ioread8(io + pos);
+ val = ioread8(io + off);
if (copy_to_user(buf, &val, 1))
return -EFAULT;
}
filled = 1;
+ } else {
+ /* Fill reads with -1, drop writes */
+ filled = min(count, (size_t)(x_end - off));
+ if (!iswrite) {
+ u8 val = 0xFF;
+ size_t i;
+
+ for (i = 0; i < filled; i++)
+ if (copy_to_user(buf + i, &val, 1))
+ return -EFAULT;
+ }
}
count -= filled;
done += filled;
+ off += filled;
buf += filled;
- pos += filled;
}
- *ppos += done;
-
return done;
}
-/*
- * MMIO BAR access
- * We handle two excluded ranges here as well, if the user tries to read
- * the ROM beyond what PCI tells us is available or the MSI-X table region,
- * we return 0xFF and writes are dropped.
- */
-ssize_t vfio_pci_mem_readwrite(struct vfio_pci_device *vdev, char __user *buf,
- size_t count, loff_t *ppos, bool iswrite)
+ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf,
+ size_t count, loff_t *ppos, bool iswrite)
{
struct pci_dev *pdev = vdev->pdev;
loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
int bar = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
- void __iomem *io;
+ size_t x_start = 0, x_end = 0;
resource_size_t end;
- size_t done = 0;
- size_t x_start = 0, x_end = 0; /* excluded range */
+ void __iomem *io;
+ ssize_t done;
if (!pci_resource_start(pdev, bar))
return -EINVAL;
end = pci_resource_len(pdev, bar);
- if (pos > end)
+ if (pos >= end)
return -EINVAL;
- if (pos == end)
- return 0;
-
- if (pos + count > end)
- count = end - pos;
+ count = min(count, (size_t)(end - pos));
if (bar == PCI_ROM_RESOURCE) {
+ /*
+ * The ROM can fill less space than the BAR, so we start the
+ * excluded range at the end of the actual ROM. This makes
+ * filling large ROM BARs much faster.
+ */
io = pci_map_rom(pdev, &x_start);
+ if (!io)
+ return -ENOMEM;
x_end = end;
- } else {
- if (!vdev->barmap[bar]) {
- int ret;
-
- ret = pci_request_selected_regions(pdev, 1 << bar,
- "vfio");
- if (ret)
- return ret;
+ } else if (!vdev->barmap[bar]) {
+ int ret;
- vdev->barmap[bar] = pci_iomap(pdev, bar, 0);
+ ret = pci_request_selected_regions(pdev, 1 << bar, "vfio");
+ if (ret)
+ return ret;
- if (!vdev->barmap[bar]) {
- pci_release_selected_regions(pdev, 1 << bar);
- return -EINVAL;
- }
+ io = pci_iomap(pdev, bar, 0);
+ if (!io) {
+ pci_release_selected_regions(pdev, 1 << bar);
+ return -ENOMEM;
}
+ vdev->barmap[bar] = io;
+ } else
io = vdev->barmap[bar];
- if (bar == vdev->msix_bar) {
- x_start = vdev->msix_offset;
- x_end = vdev->msix_offset + vdev->msix_size;
- }
+ if (bar == vdev->msix_bar) {
+ x_start = vdev->msix_offset;
+ x_end = vdev->msix_offset + vdev->msix_size;
}
- if (!io)
- return -EINVAL;
-
- while (count) {
- size_t fillable, filled;
-
- if (pos < x_start)
- fillable = x_start - pos;
- else if (pos >= x_end)
- fillable = end - pos;
- else
- fillable = 0;
-
- if (fillable >= 4 && !(pos % 4) && (count >= 4)) {
- __le32 val;
-
- if (iswrite) {
- if (copy_from_user(&val, buf, 4))
- goto out;
-
- iowrite32(le32_to_cpu(val), io + pos);
- } else {
- val = cpu_to_le32(ioread32(io + pos));
+ done = do_io_rw(io, buf, pos, count, x_start, x_end, iswrite);
- if (copy_to_user(buf, &val, 4))
- goto out;
- }
+ if (done >= 0)
+ *ppos += done;
- filled = 4;
- } else if (fillable >= 2 && !(pos % 2) && (count >= 2)) {
- __le16 val;
-
- if (iswrite) {
- if (copy_from_user(&val, buf, 2))
- goto out;
-
- iowrite16(le16_to_cpu(val), io + pos);
- } else {
- val = cpu_to_le16(ioread16(io + pos));
-
- if (copy_to_user(buf, &val, 2))
- goto out;
- }
-
- filled = 2;
- } else if (fillable) {
- u8 val;
+ if (bar == PCI_ROM_RESOURCE)
+ pci_unmap_rom(pdev, io);
- if (iswrite) {
- if (copy_from_user(&val, buf, 1))
- goto out;
+ return done;
+}
- iowrite8(val, io + pos);
- } else {
- val = ioread8(io + pos);
+ssize_t vfio_pci_vga_rw(struct vfio_pci_device *vdev, char __user *buf,
+ size_t count, loff_t *ppos, bool iswrite)
+{
+ int ret;
+ loff_t off, pos = *ppos & VFIO_PCI_OFFSET_MASK;
+ void __iomem *iomem = NULL;
+ unsigned int rsrc;
+ bool is_ioport;
+ ssize_t done;
+
+ if (!vdev->has_vga)
+ return -EINVAL;
- if (copy_to_user(buf, &val, 1))
- goto out;
- }
+ switch (pos) {
+ case 0xa0000 ... 0xbffff:
+ count = min(count, (size_t)(0xc0000 - pos));
+ iomem = ioremap_nocache(0xa0000, 0xbffff - 0xa0000 + 1);
+ off = pos - 0xa0000;
+ rsrc = VGA_RSRC_LEGACY_MEM;
+ is_ioport = false;
+ break;
+ case 0x3b0 ... 0x3bb:
+ count = min(count, (size_t)(0x3bc - pos));
+ iomem = ioport_map(0x3b0, 0x3bb - 0x3b0 + 1);
+ off = pos - 0x3b0;
+ rsrc = VGA_RSRC_LEGACY_IO;
+ is_ioport = true;
+ break;
+ case 0x3c0 ... 0x3df:
+ count = min(count, (size_t)(0x3e0 - pos));
+ iomem = ioport_map(0x3c0, 0x3df - 0x3c0 + 1);
+ off = pos - 0x3c0;
+ rsrc = VGA_RSRC_LEGACY_IO;
+ is_ioport = true;
+ break;
+ default:
+ return -EINVAL;
+ }
- filled = 1;
- } else {
- /* Drop writes, fill reads with FF */
- filled = min((size_t)(x_end - pos), count);
- if (!iswrite) {
- char val = 0xFF;
- size_t i;
+ if (!iomem)
+ return -ENOMEM;
- for (i = 0; i < filled; i++) {
- if (put_user(val, buf + i))
- goto out;
- }
- }
+ ret = vga_get_interruptible(vdev->pdev, rsrc);
+ if (ret) {
+ is_ioport ? ioport_unmap(iomem) : iounmap(iomem);
+ return ret;
+ }
- }
+ done = do_io_rw(iomem, buf, off, count, 0, 0, iswrite);
- count -= filled;
- done += filled;
- buf += filled;
- pos += filled;
- }
+ vga_put(vdev->pdev, rsrc);
- *ppos += done;
+ is_ioport ? ioport_unmap(iomem) : iounmap(iomem);
-out:
- if (bar == PCI_ROM_RESOURCE)
- pci_unmap_rom(pdev, io);
+ if (done >= 0)
+ *ppos += done;
- return count ? -EFAULT : done;
+ return done;
}
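
Finally, a hedged userspace sketch (not from the patch) of reading a legacy VGA register through the new VGA region. Offsets within the region mirror the legacy addresses handled above (the 0xa0000 window and the 0x3b0/0x3c0 port ranges); "device" is assumed to be a vfio device fd and error checks are elided.

#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

static int read_vga_misc(int device, unsigned char *val)
{
	struct vfio_region_info reg = {
		.argsz = sizeof(reg),
		.index = VFIO_PCI_VGA_REGION_INDEX,
	};

	ioctl(device, VFIO_DEVICE_GET_REGION_INFO, &reg);

	/* 0x3cc is the read side of the VGA miscellaneous output register */
	return pread(device, val, 1, reg.offset + 0x3cc) == 1 ? 0 : -1;
}
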
diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
index 12c264d3b05..f018d8d0f97 100644
--- a/drivers/vfio/vfio.c
+++ b/drivers/vfio/vfio.c
@@ -22,10 +22,13 @@
#include <linux/idr.h>
#include <linux/iommu.h>
#include <linux/list.h>
+#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
+#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/slab.h>
+#include <linux/stat.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>
@@ -43,9 +46,7 @@ static struct vfio {
struct idr group_idr;
struct mutex group_lock;
struct cdev group_cdev;
- struct device *dev;
- dev_t devt;
- struct cdev cdev;
+ dev_t group_devt;
wait_queue_head_t release_q;
} vfio;
@@ -57,7 +58,7 @@ struct vfio_iommu_driver {
struct vfio_container {
struct kref kref;
struct list_head group_list;
- struct mutex group_lock;
+ struct rw_semaphore group_lock;
struct vfio_iommu_driver *iommu_driver;
void *iommu_data;
};
@@ -74,6 +75,7 @@ struct vfio_group {
struct notifier_block nb;
struct list_head vfio_next;
struct list_head container_next;
+ atomic_t opened;
};
struct vfio_device {
@@ -139,23 +141,7 @@ EXPORT_SYMBOL_GPL(vfio_unregister_iommu_driver);
*/
static int vfio_alloc_group_minor(struct vfio_group *group)
{
- int ret, minor;
-
-again:
- if (unlikely(idr_pre_get(&vfio.group_idr, GFP_KERNEL) == 0))
- return -ENOMEM;
-
- /* index 0 is used by /dev/vfio/vfio */
- ret = idr_get_new_above(&vfio.group_idr, group, 1, &minor);
- if (ret == -EAGAIN)
- goto again;
- if (ret || minor > MINORMASK) {
- if (minor > MINORMASK)
- idr_remove(&vfio.group_idr, minor);
- return -ENOSPC;
- }
-
- return minor;
+ return idr_alloc(&vfio.group_idr, group, 0, MINORMASK + 1, GFP_KERNEL);
}
static void vfio_free_group_minor(int minor)
@@ -219,6 +205,7 @@ static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group)
INIT_LIST_HEAD(&group->device_list);
mutex_init(&group->device_lock);
atomic_set(&group->container_users, 0);
+ atomic_set(&group->opened, 0);
group->iommu_group = iommu_group;
group->nb.notifier_call = vfio_iommu_group_notifier;
@@ -254,7 +241,8 @@ static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group)
}
}
- dev = device_create(vfio.class, NULL, MKDEV(MAJOR(vfio.devt), minor),
+ dev = device_create(vfio.class, NULL,
+ MKDEV(MAJOR(vfio.group_devt), minor),
group, "%d", iommu_group_id(iommu_group));
if (IS_ERR(dev)) {
vfio_free_group_minor(minor);
@@ -279,7 +267,7 @@ static void vfio_group_release(struct kref *kref)
WARN_ON(!list_empty(&group->device_list));
- device_destroy(vfio.class, MKDEV(MAJOR(vfio.devt), group->minor));
+ device_destroy(vfio.class, MKDEV(MAJOR(vfio.group_devt), group->minor));
list_del(&group->vfio_next);
vfio_free_group_minor(group->minor);
vfio_group_unlock_and_free(group);
@@ -361,7 +349,6 @@ struct vfio_device *vfio_group_create_device(struct vfio_group *group,
void *device_data)
{
struct vfio_device *device;
- int ret;
device = kzalloc(sizeof(*device), GFP_KERNEL);
if (!device)
@@ -372,12 +359,7 @@ struct vfio_device *vfio_group_create_device(struct vfio_group *group,
device->group = group;
device->ops = ops;
device->device_data = device_data;
-
- ret = dev_set_drvdata(dev, device);
- if (ret) {
- kfree(device);
- return ERR_PTR(ret);
- }
+ dev_set_drvdata(dev, device);
/* No need to get group_lock, caller has group reference */
vfio_group_get(group);
@@ -407,12 +389,13 @@ static void vfio_device_release(struct kref *kref)
}
/* Device reference always implies a group reference */
-static void vfio_device_put(struct vfio_device *device)
+void vfio_device_put(struct vfio_device *device)
{
struct vfio_group *group = device->group;
kref_put_mutex(&device->kref, vfio_device_release, &group->device_lock);
vfio_group_put(group);
}
+EXPORT_SYMBOL_GPL(vfio_device_put);
static void vfio_device_get(struct vfio_device *device)
{
@@ -442,7 +425,7 @@ static struct vfio_device *vfio_group_get_device(struct vfio_group *group,
* a device. It's not always practical to leave a device within a group
* driverless as it could get re-bound to something unsafe.
*/
-static const char * const vfio_driver_whitelist[] = { "pci-stub" };
+static const char * const vfio_driver_whitelist[] = { "pci-stub", "pcieport" };
static bool vfio_whitelisted_driver(struct device_driver *drv)
{
@@ -504,27 +487,6 @@ static int vfio_group_nb_add_dev(struct vfio_group *group, struct device *dev)
return 0;
}
-static int vfio_group_nb_del_dev(struct vfio_group *group, struct device *dev)
-{
- struct vfio_device *device;
-
- /*
- * Expect to fall out here. If a device was in use, it would
- * have been bound to a vfio sub-driver, which would have blocked
- * in .remove at vfio_del_group_dev. Sanity check that we no
- * longer track the device, so it's safe to remove.
- */
- device = vfio_group_get_device(group, dev);
- if (likely(!device))
- return 0;
-
- WARN("Device %s removed from live group %d!\n", dev_name(dev),
- iommu_group_id(group->iommu_group));
-
- vfio_device_put(device);
- return 0;
-}
-
static int vfio_group_nb_verify(struct vfio_group *group, struct device *dev)
{
/* We don't care what happens when the group isn't in use */
@@ -541,13 +503,11 @@ static int vfio_iommu_group_notifier(struct notifier_block *nb,
struct device *dev = data;
/*
- * Need to go through a group_lock lookup to get a reference or
- * we risk racing a group being removed. Leave a WARN_ON for
- * debuging, but if the group no longer exists, a spurious notify
- * is harmless.
+ * Need to go through a group_lock lookup to get a reference or we
+ * risk racing a group being removed. Ignore spurious notifies.
*/
group = vfio_group_try_get(group);
- if (WARN_ON(!group))
+ if (!group)
return NOTIFY_OK;
switch (action) {
@@ -555,7 +515,13 @@ static int vfio_iommu_group_notifier(struct notifier_block *nb,
vfio_group_nb_add_dev(group, dev);
break;
case IOMMU_GROUP_NOTIFY_DEL_DEVICE:
- vfio_group_nb_del_dev(group, dev);
+ /*
+ * Nothing to do here. If the device is in use, then the
+ * vfio sub-driver should block the remove callback until
+ * it is unused. If the device is unused or attached to a
+ * stub driver, then it should be released and we don't
+ * care that it will be going away.
+ */
break;
case IOMMU_GROUP_NOTIFY_BIND_DRIVER:
pr_debug("%s: Device %s, group %d binding to driver\n",
@@ -642,33 +608,43 @@ int vfio_add_group_dev(struct device *dev,
}
EXPORT_SYMBOL_GPL(vfio_add_group_dev);
-/* Test whether a struct device is present in our tracking */
-static bool vfio_dev_present(struct device *dev)
+/**
+ * Get a reference to the vfio_device for a device that is known to
+ * be bound to a vfio driver. The driver implicitly holds a
+ * vfio_device reference between vfio_add_group_dev and
+ * vfio_del_group_dev. We can therefore use drvdata to increment
+ * that reference from the struct device. This additional
+ * reference must be released by calling vfio_device_put.
+ */
+struct vfio_device *vfio_device_get_from_dev(struct device *dev)
{
- struct iommu_group *iommu_group;
- struct vfio_group *group;
- struct vfio_device *device;
+ struct vfio_device *device = dev_get_drvdata(dev);
- iommu_group = iommu_group_get(dev);
- if (!iommu_group)
- return false;
+ vfio_device_get(device);
- group = vfio_group_get_from_iommu(iommu_group);
- if (!group) {
- iommu_group_put(iommu_group);
- return false;
- }
+ return device;
+}
+EXPORT_SYMBOL_GPL(vfio_device_get_from_dev);
+
+/*
+ * Caller must hold a reference to the vfio_device
+ */
+void *vfio_device_data(struct vfio_device *device)
+{
+ return device->device_data;
+}
+EXPORT_SYMBOL_GPL(vfio_device_data);
+
+/* Given a referenced group, check if it contains the device */
+static bool vfio_dev_present(struct vfio_group *group, struct device *dev)
+{
+ struct vfio_device *device;
device = vfio_group_get_device(group, dev);
- if (!device) {
- vfio_group_put(group);
- iommu_group_put(iommu_group);
+ if (!device)
return false;
- }
vfio_device_put(device);
- vfio_group_put(group);
- iommu_group_put(iommu_group);
return true;
}
@@ -682,10 +658,18 @@ void *vfio_del_group_dev(struct device *dev)
struct iommu_group *iommu_group = group->iommu_group;
void *device_data = device->device_data;
+ /*
+ * The group exists so long as we have a device reference. Get
+ * a group reference and use it to scan for the device going away.
+ */
+ vfio_group_get(group);
+
vfio_device_put(device);
/* TODO send a signal to encourage this to be released */
- wait_event(vfio.release_q, !vfio_dev_present(dev));
+ wait_event(vfio.release_q, !vfio_dev_present(group, dev));
+
+ vfio_group_put(group);
iommu_group_put(iommu_group);
@@ -699,9 +683,13 @@ EXPORT_SYMBOL_GPL(vfio_del_group_dev);
static long vfio_ioctl_check_extension(struct vfio_container *container,
unsigned long arg)
{
- struct vfio_iommu_driver *driver = container->iommu_driver;
+ struct vfio_iommu_driver *driver;
long ret = 0;
+ down_read(&container->group_lock);
+
+ driver = container->iommu_driver;
+
switch (arg) {
/* No base extensions yet */
default:
@@ -731,10 +719,12 @@ static long vfio_ioctl_check_extension(struct vfio_container *container,
VFIO_CHECK_EXTENSION, arg);
}
+ up_read(&container->group_lock);
+
return ret;
}
-/* hold container->group_lock */
+/* hold write lock on container->group_lock */
static int __vfio_container_attach_groups(struct vfio_container *container,
struct vfio_iommu_driver *driver,
void *data)
@@ -765,7 +755,7 @@ static long vfio_ioctl_set_iommu(struct vfio_container *container,
struct vfio_iommu_driver *driver;
long ret = -ENODEV;
- mutex_lock(&container->group_lock);
+ down_write(&container->group_lock);
/*
* The container is designed to be an unprivileged interface while
@@ -776,7 +766,7 @@ static long vfio_ioctl_set_iommu(struct vfio_container *container,
* the container is deprivileged and returns to an unset state.
*/
if (list_empty(&container->group_list) || container->iommu_driver) {
- mutex_unlock(&container->group_lock);
+ up_write(&container->group_lock);
return -EINVAL;
}
@@ -823,7 +813,7 @@ static long vfio_ioctl_set_iommu(struct vfio_container *container,
mutex_unlock(&vfio.iommu_drivers_lock);
skip_drivers_unlock:
- mutex_unlock(&container->group_lock);
+ up_write(&container->group_lock);
return ret;
}
@@ -839,9 +829,6 @@ static long vfio_fops_unl_ioctl(struct file *filep,
if (!container)
return ret;
- driver = container->iommu_driver;
- data = container->iommu_data;
-
switch (cmd) {
case VFIO_GET_API_VERSION:
ret = VFIO_API_VERSION;
@@ -853,8 +840,15 @@ static long vfio_fops_unl_ioctl(struct file *filep,
ret = vfio_ioctl_set_iommu(container, arg);
break;
default:
+ down_read(&container->group_lock);
+
+ driver = container->iommu_driver;
+ data = container->iommu_data;
+
if (driver) /* passthrough all unrecognized ioctls */
ret = driver->ops->ioctl(data, cmd, arg);
+
+ up_read(&container->group_lock);
}
return ret;
@@ -878,7 +872,7 @@ static int vfio_fops_open(struct inode *inode, struct file *filep)
return -ENOMEM;
INIT_LIST_HEAD(&container->group_list);
- mutex_init(&container->group_lock);
+ init_rwsem(&container->group_lock);
kref_init(&container->kref);
filep->private_data = container;
@@ -905,35 +899,55 @@ static ssize_t vfio_fops_read(struct file *filep, char __user *buf,
size_t count, loff_t *ppos)
{
struct vfio_container *container = filep->private_data;
- struct vfio_iommu_driver *driver = container->iommu_driver;
+ struct vfio_iommu_driver *driver;
+ ssize_t ret = -EINVAL;
- if (unlikely(!driver || !driver->ops->read))
- return -EINVAL;
+ down_read(&container->group_lock);
+
+ driver = container->iommu_driver;
+ if (likely(driver && driver->ops->read))
+ ret = driver->ops->read(container->iommu_data,
+ buf, count, ppos);
+
+ up_read(&container->group_lock);
- return driver->ops->read(container->iommu_data, buf, count, ppos);
+ return ret;
}
static ssize_t vfio_fops_write(struct file *filep, const char __user *buf,
size_t count, loff_t *ppos)
{
struct vfio_container *container = filep->private_data;
- struct vfio_iommu_driver *driver = container->iommu_driver;
+ struct vfio_iommu_driver *driver;
+ ssize_t ret = -EINVAL;
- if (unlikely(!driver || !driver->ops->write))
- return -EINVAL;
+ down_read(&container->group_lock);
- return driver->ops->write(container->iommu_data, buf, count, ppos);
+ driver = container->iommu_driver;
+ if (likely(driver && driver->ops->write))
+ ret = driver->ops->write(container->iommu_data,
+ buf, count, ppos);
+
+ up_read(&container->group_lock);
+
+ return ret;
}
static int vfio_fops_mmap(struct file *filep, struct vm_area_struct *vma)
{
struct vfio_container *container = filep->private_data;
- struct vfio_iommu_driver *driver = container->iommu_driver;
+ struct vfio_iommu_driver *driver;
+ int ret = -EINVAL;
- if (unlikely(!driver || !driver->ops->mmap))
- return -EINVAL;
+ down_read(&container->group_lock);
+
+ driver = container->iommu_driver;
+ if (likely(driver && driver->ops->mmap))
+ ret = driver->ops->mmap(container->iommu_data, vma);
+
+ up_read(&container->group_lock);
- return driver->ops->mmap(container->iommu_data, vma);
+ return ret;
}
static const struct file_operations vfio_fops = {
@@ -957,7 +971,7 @@ static void __vfio_group_unset_container(struct vfio_group *group)
struct vfio_container *container = group->container;
struct vfio_iommu_driver *driver;
- mutex_lock(&container->group_lock);
+ down_write(&container->group_lock);
driver = container->iommu_driver;
if (driver)
@@ -975,7 +989,7 @@ static void __vfio_group_unset_container(struct vfio_group *group)
container->iommu_data = NULL;
}
- mutex_unlock(&container->group_lock);
+ up_write(&container->group_lock);
vfio_container_put(container);
}
@@ -1035,7 +1049,7 @@ static int vfio_group_set_container(struct vfio_group *group, int container_fd)
container = f.file->private_data;
WARN_ON(!container); /* fget ensures we don't race vfio_release */
- mutex_lock(&container->group_lock);
+ down_write(&container->group_lock);
driver = container->iommu_driver;
if (driver) {
@@ -1053,7 +1067,7 @@ static int vfio_group_set_container(struct vfio_group *group, int container_fd)
atomic_inc(&group->container_users);
unlock_out:
- mutex_unlock(&container->group_lock);
+ up_write(&container->group_lock);
fdput(f);
return ret;
}
@@ -1088,7 +1102,7 @@ static int vfio_group_get_device_fd(struct vfio_group *group, char *buf)
* We can't use anon_inode_getfd() because we need to modify
* the f_mode flags directly to allow more than just ioctls
*/
- ret = get_unused_fd();
+ ret = get_unused_fd_flags(O_CLOEXEC);
if (ret < 0) {
device->ops->release(device->device_data);
break;
@@ -1200,12 +1214,22 @@ static long vfio_group_fops_compat_ioctl(struct file *filep,
static int vfio_group_fops_open(struct inode *inode, struct file *filep)
{
struct vfio_group *group;
+ int opened;
group = vfio_group_get_from_minor(iminor(inode));
if (!group)
return -ENODEV;
+ /* Do we need multiple instances of the group open? Seems not. */
+ opened = atomic_cmpxchg(&group->opened, 0, 1);
+ if (opened) {
+ vfio_group_put(group);
+ return -EBUSY;
+ }
+
+ /* Is something still in use from a previous open? */
if (group->container) {
+ atomic_dec(&group->opened);
vfio_group_put(group);
return -EBUSY;
}
@@ -1223,6 +1247,8 @@ static int vfio_group_fops_release(struct inode *inode, struct file *filep)
vfio_group_try_dissolve_container(group);
+ atomic_dec(&group->opened);
+
vfio_group_put(group);
return 0;
@@ -1320,6 +1346,74 @@ static const struct file_operations vfio_device_fops = {
};
/**
+ * External user API, exported as symbols to be linked dynamically.
+ *
+ * The protocol includes:
+ * 1. do normal VFIO init operation:
+ * - opening a new container;
+ * - attaching group(s) to it;
+ * - setting an IOMMU driver for a container.
+ * When IOMMU is set for a container, all groups in it are
+ * considered ready to use by an external user.
+ *
+ * 2. User space passes a group fd to an external user.
+ * The external user calls vfio_group_get_external_user()
+ * to verify that:
+ * - the group is initialized;
+ * - IOMMU is set for it.
+ * If both checks pass, vfio_group_get_external_user()
+ * increments the container user counter to prevent the
+ * VFIO group from being disposed of before KVM exits.
+ *
+ * 3. The external user calls vfio_external_user_iommu_id()
+ * to know an IOMMU ID.
+ *
+ * 4. When the external KVM finishes, it calls
+ * vfio_group_put_external_user() to release the VFIO group.
+ * This call decrements the container user counter.
+ */
+struct vfio_group *vfio_group_get_external_user(struct file *filep)
+{
+ struct vfio_group *group = filep->private_data;
+
+ if (filep->f_op != &vfio_group_fops)
+ return ERR_PTR(-EINVAL);
+
+ if (!atomic_inc_not_zero(&group->container_users))
+ return ERR_PTR(-EINVAL);
+
+ if (!group->container->iommu_driver ||
+ !vfio_group_viable(group)) {
+ atomic_dec(&group->container_users);
+ return ERR_PTR(-EINVAL);
+ }
+
+ vfio_group_get(group);
+
+ return group;
+}
+EXPORT_SYMBOL_GPL(vfio_group_get_external_user);
+
+void vfio_group_put_external_user(struct vfio_group *group)
+{
+ vfio_group_try_dissolve_container(group);
+ vfio_group_put(group);
+}
+EXPORT_SYMBOL_GPL(vfio_group_put_external_user);
+
+int vfio_external_user_iommu_id(struct vfio_group *group)
+{
+ return iommu_group_id(group->iommu_group);
+}
+EXPORT_SYMBOL_GPL(vfio_external_user_iommu_id);
+
+long vfio_external_check_extension(struct vfio_group *group, unsigned long arg)
+{
+ return vfio_ioctl_check_extension(group->container, arg);
+}
+EXPORT_SYMBOL_GPL(vfio_external_check_extension);
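
As a rough, non-authoritative sketch of the protocol described above, an in-kernel external user (KVM is the expected consumer) might drive these exports roughly as follows; the function name and surrounding error handling are illustrative only:

#include <linux/err.h>
#include <linux/file.h>
#include <linux/vfio.h>

/* Hypothetical consumer: assumes the caller already holds a reference
 * to the VFIO group file (filp), e.g. obtained from a userspace fd. */
static int example_use_external_group(struct file *filp)
{
	struct vfio_group *group;
	int iommu_id;

	/* Step 2: validate the group and pin its container */
	group = vfio_group_get_external_user(filp);
	if (IS_ERR(group))
		return PTR_ERR(group);

	/* Step 3: learn the IOMMU group ID for arch-specific setup */
	iommu_id = vfio_external_user_iommu_id(group);
	(void)iommu_id;

	/* ... use the group ... */

	/* Step 4: drop the reference when finished */
	vfio_group_put_external_user(group);
	return 0;
}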
+
+/**
* Module/class support
*/
static char *vfio_devnode(struct device *dev, umode_t *mode)
@@ -1327,6 +1421,14 @@ static char *vfio_devnode(struct device *dev, umode_t *mode)
return kasprintf(GFP_KERNEL, "vfio/%s", dev_name(dev));
}
+static struct miscdevice vfio_dev = {
+ .minor = VFIO_MINOR,
+ .name = "vfio",
+ .fops = &vfio_fops,
+ .nodename = "vfio/vfio",
+ .mode = S_IRUGO | S_IWUGO,
+};
+
static int __init vfio_init(void)
{
int ret;
@@ -1338,6 +1440,13 @@ static int __init vfio_init(void)
INIT_LIST_HEAD(&vfio.iommu_drivers_list);
init_waitqueue_head(&vfio.release_q);
+ ret = misc_register(&vfio_dev);
+ if (ret) {
+ pr_err("vfio: misc device register failed\n");
+ return ret;
+ }
+
+ /* /dev/vfio/$GROUP */
vfio.class = class_create(THIS_MODULE, "vfio");
if (IS_ERR(vfio.class)) {
ret = PTR_ERR(vfio.class);
@@ -1346,27 +1455,14 @@ static int __init vfio_init(void)
vfio.class->devnode = vfio_devnode;
- ret = alloc_chrdev_region(&vfio.devt, 0, MINORMASK, "vfio");
- if (ret)
- goto err_base_chrdev;
-
- cdev_init(&vfio.cdev, &vfio_fops);
- ret = cdev_add(&vfio.cdev, vfio.devt, 1);
+ ret = alloc_chrdev_region(&vfio.group_devt, 0, MINORMASK, "vfio");
if (ret)
- goto err_base_cdev;
-
- vfio.dev = device_create(vfio.class, NULL, vfio.devt, NULL, "vfio");
- if (IS_ERR(vfio.dev)) {
- ret = PTR_ERR(vfio.dev);
- goto err_base_dev;
- }
+ goto err_alloc_chrdev;
- /* /dev/vfio/$GROUP */
cdev_init(&vfio.group_cdev, &vfio_group_fops);
- ret = cdev_add(&vfio.group_cdev,
- MKDEV(MAJOR(vfio.devt), 1), MINORMASK - 1);
+ ret = cdev_add(&vfio.group_cdev, vfio.group_devt, MINORMASK);
if (ret)
- goto err_groups_cdev;
+ goto err_cdev_add;
pr_info(DRIVER_DESC " version: " DRIVER_VERSION "\n");
@@ -1376,19 +1472,17 @@ static int __init vfio_init(void)
* drivers.
*/
request_module_nowait("vfio_iommu_type1");
+ request_module_nowait("vfio_iommu_spapr_tce");
return 0;
-err_groups_cdev:
- device_destroy(vfio.class, vfio.devt);
-err_base_dev:
- cdev_del(&vfio.cdev);
-err_base_cdev:
- unregister_chrdev_region(vfio.devt, MINORMASK);
-err_base_chrdev:
+err_cdev_add:
+ unregister_chrdev_region(vfio.group_devt, MINORMASK);
+err_alloc_chrdev:
class_destroy(vfio.class);
vfio.class = NULL;
err_class:
+ misc_deregister(&vfio_dev);
return ret;
}
@@ -1398,11 +1492,10 @@ static void __exit vfio_cleanup(void)
idr_destroy(&vfio.group_idr);
cdev_del(&vfio.group_cdev);
- device_destroy(vfio.class, vfio.devt);
- cdev_del(&vfio.cdev);
- unregister_chrdev_region(vfio.devt, MINORMASK);
+ unregister_chrdev_region(vfio.group_devt, MINORMASK);
class_destroy(vfio.class);
vfio.class = NULL;
+ misc_deregister(&vfio_dev);
}
module_init(vfio_init);
@@ -1412,3 +1505,5 @@ MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_ALIAS_MISCDEV(VFIO_MINOR);
+MODULE_ALIAS("devname:vfio/vfio");
diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
new file mode 100644
index 00000000000..a84788ba662
--- /dev/null
+++ b/drivers/vfio/vfio_iommu_spapr_tce.c
@@ -0,0 +1,377 @@
+/*
+ * VFIO: IOMMU DMA mapping support for TCE on POWER
+ *
+ * Copyright (C) 2013 IBM Corp. All rights reserved.
+ * Author: Alexey Kardashevskiy <aik@ozlabs.ru>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Derived from original vfio_iommu_type1.c:
+ * Copyright (C) 2012 Red Hat, Inc. All rights reserved.
+ * Author: Alex Williamson <alex.williamson@redhat.com>
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/err.h>
+#include <linux/vfio.h>
+#include <asm/iommu.h>
+#include <asm/tce.h>
+
+#define DRIVER_VERSION "0.1"
+#define DRIVER_AUTHOR "aik@ozlabs.ru"
+#define DRIVER_DESC "VFIO IOMMU SPAPR TCE"
+
+static void tce_iommu_detach_group(void *iommu_data,
+ struct iommu_group *iommu_group);
+
+/*
+ * VFIO IOMMU fd for SPAPR_TCE IOMMU implementation
+ *
+ * This code handles mapping and unmapping of user data buffers
+ * into DMA'ble space using the IOMMU
+ */
+
+/*
+ * The container descriptor supports only a single group per container.
+ * This is required by the API, as the container is not supplied with an
+ * IOMMU group at the moment of initialization.
+ */
+struct tce_container {
+ struct mutex lock;
+ struct iommu_table *tbl;
+ bool enabled;
+};
+
+static int tce_iommu_enable(struct tce_container *container)
+{
+ int ret = 0;
+ unsigned long locked, lock_limit, npages;
+ struct iommu_table *tbl = container->tbl;
+
+ if (!container->tbl)
+ return -ENXIO;
+
+ if (!current->mm)
+ return -ESRCH; /* process exited */
+
+ if (container->enabled)
+ return -EBUSY;
+
+ /*
+ * When userspace pages are mapped into the IOMMU, they are effectively
+ * locked memory, so, theoretically, we need to update the accounting
+ * of locked pages on each map and unmap. For powerpc, the map/unmap
+ * paths can be very hot, though, and the accounting would kill
+ * performance, especially since it would be difficult or impossible
+ * to handle the accounting in real mode only.
+ *
+ * To address that, rather than precisely accounting every page, we
+ * instead account for a worst case on locked memory when the iommu is
+ * enabled and disabled. The worst case upper bound on locked memory
+ * is the size of the whole iommu window, which is usually relatively
+ * small (compared to total memory sizes) on POWER hardware.
+ *
+ * Also, we don't have a nice way to fail on H_PUT_TCE due to ulimits;
+ * failing there would effectively kill the guest at random points, so it
+ * is much better to enforce the limit based on the maximum the guest can map.
+ */
+ down_write(&current->mm->mmap_sem);
+ npages = (tbl->it_size << IOMMU_PAGE_SHIFT_4K) >> PAGE_SHIFT;
+ locked = current->mm->locked_vm + npages;
+ lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+ if (locked > lock_limit && !capable(CAP_IPC_LOCK)) {
+ pr_warn("RLIMIT_MEMLOCK (%ld) exceeded\n",
+ rlimit(RLIMIT_MEMLOCK));
+ ret = -ENOMEM;
+ } else {
+ current->mm->locked_vm += npages;
+ container->enabled = true;
+ }
+ up_write(&current->mm->mmap_sem);
+
+ return ret;
+}
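
To make the worst-case accounting concrete with illustrative numbers: a 1 GB DMA window backed by 4K IOMMU pages has tbl->it_size = 0x40000 entries, so on a kernel with 64K system pages npages = (0x40000 << 12) >> 16 = 0x4000, and the full 1 GB is charged against RLIMIT_MEMLOCK at enable time, independent of how many TCEs the guest actually maps.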
+
+static void tce_iommu_disable(struct tce_container *container)
+{
+ if (!container->enabled)
+ return;
+
+ container->enabled = false;
+
+ if (!container->tbl || !current->mm)
+ return;
+
+ down_write(&current->mm->mmap_sem);
+ current->mm->locked_vm -= (container->tbl->it_size <<
+ IOMMU_PAGE_SHIFT_4K) >> PAGE_SHIFT;
+ up_write(&current->mm->mmap_sem);
+}
+
+static void *tce_iommu_open(unsigned long arg)
+{
+ struct tce_container *container;
+
+ if (arg != VFIO_SPAPR_TCE_IOMMU) {
+ pr_err("tce_vfio: Wrong IOMMU type\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ container = kzalloc(sizeof(*container), GFP_KERNEL);
+ if (!container)
+ return ERR_PTR(-ENOMEM);
+
+ mutex_init(&container->lock);
+
+ return container;
+}
+
+static void tce_iommu_release(void *iommu_data)
+{
+ struct tce_container *container = iommu_data;
+
+ WARN_ON(container->tbl && !container->tbl->it_group);
+ tce_iommu_disable(container);
+
+ if (container->tbl && container->tbl->it_group)
+ tce_iommu_detach_group(iommu_data, container->tbl->it_group);
+
+ mutex_destroy(&container->lock);
+
+ kfree(container);
+}
+
+static long tce_iommu_ioctl(void *iommu_data,
+ unsigned int cmd, unsigned long arg)
+{
+ struct tce_container *container = iommu_data;
+ unsigned long minsz;
+ long ret;
+
+ switch (cmd) {
+ case VFIO_CHECK_EXTENSION:
+ return (arg == VFIO_SPAPR_TCE_IOMMU) ? 1 : 0;
+
+ case VFIO_IOMMU_SPAPR_TCE_GET_INFO: {
+ struct vfio_iommu_spapr_tce_info info;
+ struct iommu_table *tbl = container->tbl;
+
+ if (WARN_ON(!tbl))
+ return -ENXIO;
+
+ minsz = offsetofend(struct vfio_iommu_spapr_tce_info,
+ dma32_window_size);
+
+ if (copy_from_user(&info, (void __user *)arg, minsz))
+ return -EFAULT;
+
+ if (info.argsz < minsz)
+ return -EINVAL;
+
+ info.dma32_window_start = tbl->it_offset << IOMMU_PAGE_SHIFT_4K;
+ info.dma32_window_size = tbl->it_size << IOMMU_PAGE_SHIFT_4K;
+ info.flags = 0;
+
+ if (copy_to_user((void __user *)arg, &info, minsz))
+ return -EFAULT;
+
+ return 0;
+ }
+ case VFIO_IOMMU_MAP_DMA: {
+ struct vfio_iommu_type1_dma_map param;
+ struct iommu_table *tbl = container->tbl;
+ unsigned long tce, i;
+
+ if (!tbl)
+ return -ENXIO;
+
+ BUG_ON(!tbl->it_group);
+
+ minsz = offsetofend(struct vfio_iommu_type1_dma_map, size);
+
+ if (copy_from_user(&param, (void __user *)arg, minsz))
+ return -EFAULT;
+
+ if (param.argsz < minsz)
+ return -EINVAL;
+
+ if (param.flags & ~(VFIO_DMA_MAP_FLAG_READ |
+ VFIO_DMA_MAP_FLAG_WRITE))
+ return -EINVAL;
+
+ if ((param.size & ~IOMMU_PAGE_MASK_4K) ||
+ (param.vaddr & ~IOMMU_PAGE_MASK_4K))
+ return -EINVAL;
+
+ /* iova is checked by the IOMMU API */
+ tce = param.vaddr;
+ if (param.flags & VFIO_DMA_MAP_FLAG_READ)
+ tce |= TCE_PCI_READ;
+ if (param.flags & VFIO_DMA_MAP_FLAG_WRITE)
+ tce |= TCE_PCI_WRITE;
+
+ ret = iommu_tce_put_param_check(tbl, param.iova, tce);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < (param.size >> IOMMU_PAGE_SHIFT_4K); ++i) {
+ ret = iommu_put_tce_user_mode(tbl,
+ (param.iova >> IOMMU_PAGE_SHIFT_4K) + i,
+ tce);
+ if (ret)
+ break;
+ tce += IOMMU_PAGE_SIZE_4K;
+ }
+ if (ret)
+ iommu_clear_tces_and_put_pages(tbl,
+ param.iova >> IOMMU_PAGE_SHIFT_4K, i);
+
+ iommu_flush_tce(tbl);
+
+ return ret;
+ }
+ case VFIO_IOMMU_UNMAP_DMA: {
+ struct vfio_iommu_type1_dma_unmap param;
+ struct iommu_table *tbl = container->tbl;
+
+ if (WARN_ON(!tbl))
+ return -ENXIO;
+
+ minsz = offsetofend(struct vfio_iommu_type1_dma_unmap,
+ size);
+
+ if (copy_from_user(&param, (void __user *)arg, minsz))
+ return -EFAULT;
+
+ if (param.argsz < minsz)
+ return -EINVAL;
+
+ /* No flag is supported now */
+ if (param.flags)
+ return -EINVAL;
+
+ if (param.size & ~IOMMU_PAGE_MASK_4K)
+ return -EINVAL;
+
+ ret = iommu_tce_clear_param_check(tbl, param.iova, 0,
+ param.size >> IOMMU_PAGE_SHIFT_4K);
+ if (ret)
+ return ret;
+
+ ret = iommu_clear_tces_and_put_pages(tbl,
+ param.iova >> IOMMU_PAGE_SHIFT_4K,
+ param.size >> IOMMU_PAGE_SHIFT_4K);
+ iommu_flush_tce(tbl);
+
+ return ret;
+ }
+ case VFIO_IOMMU_ENABLE:
+ mutex_lock(&container->lock);
+ ret = tce_iommu_enable(container);
+ mutex_unlock(&container->lock);
+ return ret;
+
+ case VFIO_IOMMU_DISABLE:
+ mutex_lock(&container->lock);
+ tce_iommu_disable(container);
+ mutex_unlock(&container->lock);
+ return 0;
+ }
+
+ return -ENOTTY;
+}
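
A hedged userspace sketch of driving this backend once a group is attached; it assumes "container" is a /dev/vfio/vfio fd on which VFIO_SPAPR_TCE_IOMMU was selected and "buf" is a 4K-aligned user buffer, with the size value purely illustrative:

#include <sys/ioctl.h>
#include <linux/vfio.h>

static int example_spapr_map(int container, void *buf)
{
	struct vfio_iommu_spapr_tce_info info = { .argsz = sizeof(info) };
	struct vfio_iommu_type1_dma_map map = { .argsz = sizeof(map) };

	/* Discover the 32-bit DMA window offered by this group's table */
	ioctl(container, VFIO_IOMMU_SPAPR_TCE_GET_INFO, &info);

	/* Locked-memory accounting happens up front, at enable time */
	ioctl(container, VFIO_IOMMU_ENABLE);

	/* Map 64K of the buffer at the start of the DMA window */
	map.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE;
	map.vaddr = (__u64)(unsigned long)buf;
	map.size  = 0x10000;
	map.iova  = info.dma32_window_start;
	return ioctl(container, VFIO_IOMMU_MAP_DMA, &map);
}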
+
+static int tce_iommu_attach_group(void *iommu_data,
+ struct iommu_group *iommu_group)
+{
+ int ret;
+ struct tce_container *container = iommu_data;
+ struct iommu_table *tbl = iommu_group_get_iommudata(iommu_group);
+
+ BUG_ON(!tbl);
+ mutex_lock(&container->lock);
+
+ /* pr_debug("tce_vfio: Attaching group #%u to iommu %p\n",
+ iommu_group_id(iommu_group), iommu_group); */
+ if (container->tbl) {
+ pr_warn("tce_vfio: Only one group per IOMMU container is allowed, existing id=%d, attaching id=%d\n",
+ iommu_group_id(container->tbl->it_group),
+ iommu_group_id(iommu_group));
+ ret = -EBUSY;
+ } else if (container->enabled) {
+ pr_err("tce_vfio: attaching group #%u to enabled container\n",
+ iommu_group_id(iommu_group));
+ ret = -EBUSY;
+ } else {
+ ret = iommu_take_ownership(tbl);
+ if (!ret)
+ container->tbl = tbl;
+ }
+
+ mutex_unlock(&container->lock);
+
+ return ret;
+}
+
+static void tce_iommu_detach_group(void *iommu_data,
+ struct iommu_group *iommu_group)
+{
+ struct tce_container *container = iommu_data;
+ struct iommu_table *tbl = iommu_group_get_iommudata(iommu_group);
+
+ BUG_ON(!tbl);
+ mutex_lock(&container->lock);
+ if (tbl != container->tbl) {
+ pr_warn("tce_vfio: detaching group #%u, expected group is #%u\n",
+ iommu_group_id(iommu_group),
+ iommu_group_id(tbl->it_group));
+ } else {
+ if (container->enabled) {
+ pr_warn("tce_vfio: detaching group #%u from enabled container, forcing disable\n",
+ iommu_group_id(tbl->it_group));
+ tce_iommu_disable(container);
+ }
+
+ /* pr_debug("tce_vfio: detaching group #%u from iommu %p\n",
+ iommu_group_id(iommu_group), iommu_group); */
+ container->tbl = NULL;
+ iommu_release_ownership(tbl);
+ }
+ mutex_unlock(&container->lock);
+}
+
+const struct vfio_iommu_driver_ops tce_iommu_driver_ops = {
+ .name = "iommu-vfio-powerpc",
+ .owner = THIS_MODULE,
+ .open = tce_iommu_open,
+ .release = tce_iommu_release,
+ .ioctl = tce_iommu_ioctl,
+ .attach_group = tce_iommu_attach_group,
+ .detach_group = tce_iommu_detach_group,
+};
+
+static int __init tce_iommu_init(void)
+{
+ return vfio_register_iommu_driver(&tce_iommu_driver_ops);
+}
+
+static void __exit tce_iommu_cleanup(void)
+{
+ vfio_unregister_iommu_driver(&tce_iommu_driver_ops);
+}
+
+module_init(tce_iommu_init);
+module_exit(tce_iommu_cleanup);
+
+MODULE_VERSION(DRIVER_VERSION);
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 6f3fbc48a6c..0734fbe5b65 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -30,7 +30,7 @@
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/mm.h>
-#include <linux/pci.h> /* pci_bus_type */
+#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
@@ -47,19 +47,31 @@ module_param_named(allow_unsafe_interrupts,
MODULE_PARM_DESC(allow_unsafe_interrupts,
"Enable VFIO IOMMU support for on platforms without interrupt remapping support.");
+static bool disable_hugepages;
+module_param_named(disable_hugepages,
+ disable_hugepages, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(disable_hugepages,
+ "Disable VFIO IOMMU support for IOMMU hugepages.");
+
struct vfio_iommu {
- struct iommu_domain *domain;
+ struct list_head domain_list;
struct mutex lock;
- struct list_head dma_list;
+ struct rb_root dma_list;
+ bool v2;
+};
+
+struct vfio_domain {
+ struct iommu_domain *domain;
+ struct list_head next;
struct list_head group_list;
- bool cache;
+ int prot; /* IOMMU_CACHE */
};
struct vfio_dma {
- struct list_head next;
+ struct rb_node node;
dma_addr_t iova; /* Device address */
unsigned long vaddr; /* Process virtual addr */
- long npage; /* Number of pages */
+ size_t size; /* Map size (bytes) */
int prot; /* IOMMU_READ/WRITE */
};
@@ -73,7 +85,48 @@ struct vfio_group {
* into DMA'ble space using the IOMMU
*/
-#define NPAGE_TO_SIZE(npage) ((size_t)(npage) << PAGE_SHIFT)
+static struct vfio_dma *vfio_find_dma(struct vfio_iommu *iommu,
+ dma_addr_t start, size_t size)
+{
+ struct rb_node *node = iommu->dma_list.rb_node;
+
+ while (node) {
+ struct vfio_dma *dma = rb_entry(node, struct vfio_dma, node);
+
+ if (start + size <= dma->iova)
+ node = node->rb_left;
+ else if (start >= dma->iova + dma->size)
+ node = node->rb_right;
+ else
+ return dma;
+ }
+
+ return NULL;
+}
+
+static void vfio_link_dma(struct vfio_iommu *iommu, struct vfio_dma *new)
+{
+ struct rb_node **link = &iommu->dma_list.rb_node, *parent = NULL;
+ struct vfio_dma *dma;
+
+ while (*link) {
+ parent = *link;
+ dma = rb_entry(parent, struct vfio_dma, node);
+
+ if (new->iova + new->size <= dma->iova)
+ link = &(*link)->rb_left;
+ else
+ link = &(*link)->rb_right;
+ }
+
+ rb_link_node(&new->node, parent, link);
+ rb_insert_color(&new->node, &iommu->dma_list);
+}
+
+static void vfio_unlink_dma(struct vfio_iommu *iommu, struct vfio_dma *old)
+{
+ rb_erase(&old->node, &iommu->dma_list);
+}
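
The rb-tree lookup above treats each vfio_dma as a half-open interval [iova, iova + size); as a small illustrative helper (not part of the patch), the equivalent overlap test is:

#include <linux/types.h>

/* Two ranges overlap iff neither one ends at or before the other starts,
 * which is exactly the pair of comparisons vfio_find_dma() walks the tree with. */
static bool example_ranges_overlap(dma_addr_t a, size_t alen,
				   dma_addr_t b, size_t blen)
{
	return !(a + alen <= b || b + blen <= a);
}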
struct vwork {
struct mm_struct *mm;
@@ -100,8 +153,8 @@ static void vfio_lock_acct(long npage)
struct vwork *vwork;
struct mm_struct *mm;
- if (!current->mm)
- return; /* process exited */
+ if (!current->mm || !npage)
+ return; /* process exited or nothing to do */
if (down_write_trylock(&current->mm->mmap_sem)) {
current->mm->locked_vm += npage;
@@ -138,12 +191,12 @@ static bool is_invalid_reserved_pfn(unsigned long pfn)
if (pfn_valid(pfn)) {
bool reserved;
struct page *tail = pfn_to_page(pfn);
- struct page *head = compound_trans_head(tail);
+ struct page *head = compound_head(tail);
reserved = !!(PageReserved(head));
if (head != tail) {
/*
* "head" is not a dangling pointer
- * (compound_trans_head takes care of that)
+ * (compound_head takes care of that)
* but the hugepage may have been split
* from under us (and we may not hold a
* reference count on the head page so it can
@@ -173,33 +226,6 @@ static int put_pfn(unsigned long pfn, int prot)
return 0;
}
-/* Unmap DMA region */
-static long __vfio_dma_do_unmap(struct vfio_iommu *iommu, dma_addr_t iova,
- long npage, int prot)
-{
- long i, unlocked = 0;
-
- for (i = 0; i < npage; i++, iova += PAGE_SIZE) {
- unsigned long pfn;
-
- pfn = iommu_iova_to_phys(iommu->domain, iova) >> PAGE_SHIFT;
- if (pfn) {
- iommu_unmap(iommu->domain, iova, PAGE_SIZE);
- unlocked += put_pfn(pfn, prot);
- }
- }
- return unlocked;
-}
-
-static void vfio_dma_unmap(struct vfio_iommu *iommu, dma_addr_t iova,
- long npage, int prot)
-{
- long unlocked;
-
- unlocked = __vfio_dma_do_unmap(iommu, iova, npage, prot);
- vfio_lock_acct(-unlocked);
-}
-
static int vaddr_get_pfn(unsigned long vaddr, int prot, unsigned long *pfn)
{
struct page *page[1];
@@ -226,200 +252,294 @@ static int vaddr_get_pfn(unsigned long vaddr, int prot, unsigned long *pfn)
return ret;
}
-/* Map DMA region */
-static int __vfio_dma_map(struct vfio_iommu *iommu, dma_addr_t iova,
- unsigned long vaddr, long npage, int prot)
+/*
+ * Attempt to pin pages. We really don't want to track all the pfns and
+ * the iommu can only map chunks of consecutive pfns anyway, so get the
+ * first page and all consecutive pages with the same locking.
+ */
+static long vfio_pin_pages(unsigned long vaddr, long npage,
+ int prot, unsigned long *pfn_base)
{
- dma_addr_t start = iova;
- long i, locked = 0;
- int ret;
+ unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+ bool lock_cap = capable(CAP_IPC_LOCK);
+ long ret, i;
- /* Verify that pages are not already mapped */
- for (i = 0; i < npage; i++, iova += PAGE_SIZE)
- if (iommu_iova_to_phys(iommu->domain, iova))
- return -EBUSY;
+ if (!current->mm)
+ return -ENODEV;
- iova = start;
+ ret = vaddr_get_pfn(vaddr, prot, pfn_base);
+ if (ret)
+ return ret;
- if (iommu->cache)
- prot |= IOMMU_CACHE;
+ if (is_invalid_reserved_pfn(*pfn_base))
+ return 1;
- /*
- * XXX We break mappings into pages and use get_user_pages_fast to
- * pin the pages in memory. It's been suggested that mlock might
- * provide a more efficient mechanism, but nothing prevents the
- * user from munlocking the pages, which could then allow the user
- * access to random host memory. We also have no guarantee from the
- * IOMMU API that the iommu driver can unmap sub-pages of previous
- * mappings. This means we might lose an entire range if a single
- * page within it is unmapped. Single page mappings are inefficient,
- * but provide the most flexibility for now.
- */
- for (i = 0; i < npage; i++, iova += PAGE_SIZE, vaddr += PAGE_SIZE) {
+ if (!lock_cap && current->mm->locked_vm + 1 > limit) {
+ put_pfn(*pfn_base, prot);
+ pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n", __func__,
+ limit << PAGE_SHIFT);
+ return -ENOMEM;
+ }
+
+ if (unlikely(disable_hugepages)) {
+ vfio_lock_acct(1);
+ return 1;
+ }
+
+ /* Lock all the consecutive pages from pfn_base */
+ for (i = 1, vaddr += PAGE_SIZE; i < npage; i++, vaddr += PAGE_SIZE) {
unsigned long pfn = 0;
ret = vaddr_get_pfn(vaddr, prot, &pfn);
- if (ret) {
- __vfio_dma_do_unmap(iommu, start, i, prot);
- return ret;
+ if (ret)
+ break;
+
+ if (pfn != *pfn_base + i || is_invalid_reserved_pfn(pfn)) {
+ put_pfn(pfn, prot);
+ break;
}
- /*
- * Only add actual locked pages to accounting
- * XXX We're effectively marking a page locked for every
- * IOVA page even though it's possible the user could be
- * backing multiple IOVAs with the same vaddr. This over-
- * penalizes the user process, but we currently have no
- * easy way to do this properly.
- */
- if (!is_invalid_reserved_pfn(pfn))
- locked++;
-
- ret = iommu_map(iommu->domain, iova,
- (phys_addr_t)pfn << PAGE_SHIFT,
- PAGE_SIZE, prot);
- if (ret) {
- /* Back out mappings on error */
+ if (!lock_cap && current->mm->locked_vm + i + 1 > limit) {
put_pfn(pfn, prot);
- __vfio_dma_do_unmap(iommu, start, i, prot);
- return ret;
+ pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n",
+ __func__, limit << PAGE_SHIFT);
+ break;
}
}
- vfio_lock_acct(locked);
- return 0;
-}
-static inline bool ranges_overlap(dma_addr_t start1, size_t size1,
- dma_addr_t start2, size_t size2)
-{
- if (start1 < start2)
- return (start2 - start1 < size1);
- else if (start2 < start1)
- return (start1 - start2 < size2);
- return (size1 > 0 && size2 > 0);
+ vfio_lock_acct(i);
+
+ return i;
}
-static struct vfio_dma *vfio_find_dma(struct vfio_iommu *iommu,
- dma_addr_t start, size_t size)
+static long vfio_unpin_pages(unsigned long pfn, long npage,
+ int prot, bool do_accounting)
{
- struct vfio_dma *dma;
+ unsigned long unlocked = 0;
+ long i;
- list_for_each_entry(dma, &iommu->dma_list, next) {
- if (ranges_overlap(dma->iova, NPAGE_TO_SIZE(dma->npage),
- start, size))
- return dma;
- }
- return NULL;
+ for (i = 0; i < npage; i++)
+ unlocked += put_pfn(pfn++, prot);
+
+ if (do_accounting)
+ vfio_lock_acct(-unlocked);
+
+ return unlocked;
}
-static long vfio_remove_dma_overlap(struct vfio_iommu *iommu, dma_addr_t start,
- size_t size, struct vfio_dma *dma)
+static void vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma)
{
- struct vfio_dma *split;
- long npage_lo, npage_hi;
-
- /* Existing dma region is completely covered, unmap all */
- if (start <= dma->iova &&
- start + size >= dma->iova + NPAGE_TO_SIZE(dma->npage)) {
- vfio_dma_unmap(iommu, dma->iova, dma->npage, dma->prot);
- list_del(&dma->next);
- npage_lo = dma->npage;
- kfree(dma);
- return npage_lo;
- }
+ dma_addr_t iova = dma->iova, end = dma->iova + dma->size;
+ struct vfio_domain *domain, *d;
+ long unlocked = 0;
- /* Overlap low address of existing range */
- if (start <= dma->iova) {
- size_t overlap;
+ if (!dma->size)
+ return;
+ /*
+ * We use the IOMMU to track the physical addresses, otherwise we'd
+ * need a much more complicated tracking system. Unfortunately that
+ * means we need to use one of the iommu domains to figure out the
+ * pfns to unpin. The rest need to be unmapped in advance so we have
+ * no iommu translations remaining when the pages are unpinned.
+ */
+ domain = d = list_first_entry(&iommu->domain_list,
+ struct vfio_domain, next);
- overlap = start + size - dma->iova;
- npage_lo = overlap >> PAGE_SHIFT;
+ list_for_each_entry_continue(d, &iommu->domain_list, next)
+ iommu_unmap(d->domain, dma->iova, dma->size);
- vfio_dma_unmap(iommu, dma->iova, npage_lo, dma->prot);
- dma->iova += overlap;
- dma->vaddr += overlap;
- dma->npage -= npage_lo;
- return npage_lo;
- }
+ while (iova < end) {
+ size_t unmapped;
+ phys_addr_t phys;
- /* Overlap high address of existing range */
- if (start + size >= dma->iova + NPAGE_TO_SIZE(dma->npage)) {
- size_t overlap;
+ phys = iommu_iova_to_phys(domain->domain, iova);
+ if (WARN_ON(!phys)) {
+ iova += PAGE_SIZE;
+ continue;
+ }
- overlap = dma->iova + NPAGE_TO_SIZE(dma->npage) - start;
- npage_hi = overlap >> PAGE_SHIFT;
+ unmapped = iommu_unmap(domain->domain, iova, PAGE_SIZE);
+ if (WARN_ON(!unmapped))
+ break;
- vfio_dma_unmap(iommu, start, npage_hi, dma->prot);
- dma->npage -= npage_hi;
- return npage_hi;
+ unlocked += vfio_unpin_pages(phys >> PAGE_SHIFT,
+ unmapped >> PAGE_SHIFT,
+ dma->prot, false);
+ iova += unmapped;
}
- /* Split existing */
- npage_lo = (start - dma->iova) >> PAGE_SHIFT;
- npage_hi = dma->npage - (size >> PAGE_SHIFT) - npage_lo;
+ vfio_lock_acct(-unlocked);
+}
- split = kzalloc(sizeof *split, GFP_KERNEL);
- if (!split)
- return -ENOMEM;
+static void vfio_remove_dma(struct vfio_iommu *iommu, struct vfio_dma *dma)
+{
+ vfio_unmap_unpin(iommu, dma);
+ vfio_unlink_dma(iommu, dma);
+ kfree(dma);
+}
- vfio_dma_unmap(iommu, start, size >> PAGE_SHIFT, dma->prot);
+static unsigned long vfio_pgsize_bitmap(struct vfio_iommu *iommu)
+{
+ struct vfio_domain *domain;
+ unsigned long bitmap = PAGE_MASK;
- dma->npage = npage_lo;
+ mutex_lock(&iommu->lock);
+ list_for_each_entry(domain, &iommu->domain_list, next)
+ bitmap &= domain->domain->ops->pgsize_bitmap;
+ mutex_unlock(&iommu->lock);
- split->npage = npage_hi;
- split->iova = start + size;
- split->vaddr = dma->vaddr + NPAGE_TO_SIZE(npage_lo) + size;
- split->prot = dma->prot;
- list_add(&split->next, &iommu->dma_list);
- return size >> PAGE_SHIFT;
+ return bitmap;
}
static int vfio_dma_do_unmap(struct vfio_iommu *iommu,
struct vfio_iommu_type1_dma_unmap *unmap)
{
- long ret = 0, npage = unmap->size >> PAGE_SHIFT;
- struct vfio_dma *dma, *tmp;
uint64_t mask;
+ struct vfio_dma *dma;
+ size_t unmapped = 0;
+ int ret = 0;
- mask = ((uint64_t)1 << __ffs(iommu->domain->ops->pgsize_bitmap)) - 1;
+ mask = ((uint64_t)1 << __ffs(vfio_pgsize_bitmap(iommu))) - 1;
if (unmap->iova & mask)
return -EINVAL;
- if (unmap->size & mask)
+ if (!unmap->size || unmap->size & mask)
return -EINVAL;
- /* XXX We still break these down into PAGE_SIZE */
WARN_ON(mask & PAGE_MASK);
mutex_lock(&iommu->lock);
- list_for_each_entry_safe(dma, tmp, &iommu->dma_list, next) {
- if (ranges_overlap(dma->iova, NPAGE_TO_SIZE(dma->npage),
- unmap->iova, unmap->size)) {
- ret = vfio_remove_dma_overlap(iommu, unmap->iova,
- unmap->size, dma);
- if (ret > 0)
- npage -= ret;
- if (ret < 0 || npage == 0)
- break;
+ /*
+ * vfio-iommu-type1 (v1) - User mappings were coalesced together to
+ * avoid tracking individual mappings. This means that the granularity
+ * of the original mapping was lost and the user was allowed to attempt
+ * to unmap any range. Depending on the contiguousness of physical
+ * memory and page sizes supported by the IOMMU, arbitrary unmaps may
+ * or may not have worked. We only guaranteed unmap granularity
+ * matching the original mapping; even though it was untracked here,
+ * the original mappings are reflected in IOMMU mappings. This
+ * resulted in a couple unusual behaviors. First, if a range is not
+ * able to be unmapped, ex. a set of 4k pages that was mapped as a
+ * 2M hugepage into the IOMMU, the unmap ioctl returns success but with
+ * a zero sized unmap. Also, if an unmap request overlaps the first
+ * address of a hugepage, the IOMMU will unmap the entire hugepage.
+ * This also returns success and the returned unmap size reflects the
+ * actual size unmapped.
+ *
+ * We attempt to maintain compatibility with this "v1" interface, but
+ * we take control out of the hands of the IOMMU. Therefore, an unmap
+ * request offset from the beginning of the original mapping will
+ * return success with zero sized unmap. And an unmap request covering
+ * the first iova of mapping will unmap the entire range.
+ *
+ * The v2 version of this interface intends to be more deterministic.
+ * Unmap requests must fully cover previous mappings. Multiple
+ * mappings may still be unmapped by specifying large ranges, but there
+ * must not be any previous mappings bisected by the range. An error
+ * will be returned if these conditions are not met. The v2 interface
+ * will only return success and a size of zero if there were no
+ * mappings within the range.
+ */
+ if (iommu->v2) {
+ dma = vfio_find_dma(iommu, unmap->iova, 0);
+ if (dma && dma->iova != unmap->iova) {
+ ret = -EINVAL;
+ goto unlock;
}
+ dma = vfio_find_dma(iommu, unmap->iova + unmap->size - 1, 0);
+ if (dma && dma->iova + dma->size != unmap->iova + unmap->size) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+ }
+
+ while ((dma = vfio_find_dma(iommu, unmap->iova, unmap->size))) {
+ if (!iommu->v2 && unmap->iova > dma->iova)
+ break;
+ unmapped += dma->size;
+ vfio_remove_dma(iommu, dma);
}
+
+unlock:
mutex_unlock(&iommu->lock);
- return ret > 0 ? 0 : (int)ret;
+
+ /* Report how much was unmapped */
+ unmap->size = unmapped;
+
+ return ret;
+}
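
A hedged userspace sketch of the v2 semantics spelled out in the comment above: the unmap range must not bisect an existing mapping, and on success the kernel writes back how much was actually unmapped (the iova/size values are illustrative):

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

static void example_v2_unmap(int container)
{
	struct vfio_iommu_type1_dma_unmap unmap = {
		.argsz = sizeof(unmap),
		.iova  = 0x100000,	/* must start at an existing mapping */
		.size  = 0x100000,	/* must end on a mapping boundary */
	};

	if (ioctl(container, VFIO_IOMMU_UNMAP_DMA, &unmap) == 0)
		printf("unmapped %llu bytes\n",
		       (unsigned long long)unmap.size);
}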
+
+/*
+ * Turns out AMD IOMMU has a page table bug where it won't map large pages
+ * to a region that previously mapped smaller pages. This should be fixed
+ * soon, so this is just a temporary workaround to break mappings down into
+ * PAGE_SIZE. Better to map smaller pages than nothing.
+ */
+static int map_try_harder(struct vfio_domain *domain, dma_addr_t iova,
+ unsigned long pfn, long npage, int prot)
+{
+ long i;
+ int ret;
+
+ for (i = 0; i < npage; i++, pfn++, iova += PAGE_SIZE) {
+ ret = iommu_map(domain->domain, iova,
+ (phys_addr_t)pfn << PAGE_SHIFT,
+ PAGE_SIZE, prot | domain->prot);
+ if (ret)
+ break;
+ }
+
+ for (; i < npage && i > 0; i--, iova -= PAGE_SIZE)
+ iommu_unmap(domain->domain, iova, PAGE_SIZE);
+
+ return ret;
+}
+
+static int vfio_iommu_map(struct vfio_iommu *iommu, dma_addr_t iova,
+ unsigned long pfn, long npage, int prot)
+{
+ struct vfio_domain *d;
+ int ret;
+
+ list_for_each_entry(d, &iommu->domain_list, next) {
+ ret = iommu_map(d->domain, iova, (phys_addr_t)pfn << PAGE_SHIFT,
+ npage << PAGE_SHIFT, prot | d->prot);
+ if (ret) {
+ if (ret != -EBUSY ||
+ map_try_harder(d, iova, pfn, npage, prot))
+ goto unwind;
+ }
+ }
+
+ return 0;
+
+unwind:
+ list_for_each_entry_continue_reverse(d, &iommu->domain_list, next)
+ iommu_unmap(d->domain, iova, npage << PAGE_SHIFT);
+
+ return ret;
}
static int vfio_dma_do_map(struct vfio_iommu *iommu,
struct vfio_iommu_type1_dma_map *map)
{
- struct vfio_dma *dma, *pdma = NULL;
dma_addr_t iova = map->iova;
- unsigned long locked, lock_limit, vaddr = map->vaddr;
+ unsigned long vaddr = map->vaddr;
size_t size = map->size;
+ long npage;
int ret = 0, prot = 0;
uint64_t mask;
- long npage;
+ struct vfio_dma *dma;
+ unsigned long pfn;
- mask = ((uint64_t)1 << __ffs(iommu->domain->ops->pgsize_bitmap)) - 1;
+ /* Verify that none of our __u64 fields overflow */
+ if (map->size != size || map->vaddr != vaddr || map->iova != iova)
+ return -EINVAL;
+
+ mask = ((uint64_t)1 << __ffs(vfio_pgsize_bitmap(iommu))) - 1;
+
+ WARN_ON(mask & PAGE_MASK);
/* READ/WRITE from device perspective */
if (map->flags & VFIO_DMA_MAP_FLAG_WRITE)
@@ -427,170 +547,271 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
if (map->flags & VFIO_DMA_MAP_FLAG_READ)
prot |= IOMMU_READ;
- if (!prot)
- return -EINVAL; /* No READ/WRITE? */
-
- if (vaddr & mask)
- return -EINVAL;
- if (iova & mask)
- return -EINVAL;
- if (size & mask)
+ if (!prot || !size || (size | iova | vaddr) & mask)
return -EINVAL;
- /* XXX We still break these down into PAGE_SIZE */
- WARN_ON(mask & PAGE_MASK);
-
- /* Don't allow IOVA wrap */
- if (iova + size && iova + size < iova)
- return -EINVAL;
-
- /* Don't allow virtual address wrap */
- if (vaddr + size && vaddr + size < vaddr)
- return -EINVAL;
-
- npage = size >> PAGE_SHIFT;
- if (!npage)
+ /* Don't allow IOVA or virtual address wrap */
+ if (iova + size - 1 < iova || vaddr + size - 1 < vaddr)
return -EINVAL;
mutex_lock(&iommu->lock);
if (vfio_find_dma(iommu, iova, size)) {
- ret = -EBUSY;
- goto out_lock;
+ mutex_unlock(&iommu->lock);
+ return -EEXIST;
}
- /* account for locked pages */
- locked = current->mm->locked_vm + npage;
- lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
- if (locked > lock_limit && !capable(CAP_IPC_LOCK)) {
- pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n",
- __func__, rlimit(RLIMIT_MEMLOCK));
- ret = -ENOMEM;
- goto out_lock;
+ dma = kzalloc(sizeof(*dma), GFP_KERNEL);
+ if (!dma) {
+ mutex_unlock(&iommu->lock);
+ return -ENOMEM;
}
- ret = __vfio_dma_map(iommu, iova, vaddr, npage, prot);
- if (ret)
- goto out_lock;
+ dma->iova = iova;
+ dma->vaddr = vaddr;
+ dma->prot = prot;
- /* Check if we abut a region below - nothing below 0 */
- if (iova) {
- dma = vfio_find_dma(iommu, iova - 1, 1);
- if (dma && dma->prot == prot &&
- dma->vaddr + NPAGE_TO_SIZE(dma->npage) == vaddr) {
+ /* Insert zero-sized and grow as we map chunks of it */
+ vfio_link_dma(iommu, dma);
- dma->npage += npage;
- iova = dma->iova;
- vaddr = dma->vaddr;
- npage = dma->npage;
- size = NPAGE_TO_SIZE(npage);
+ while (size) {
+ /* Pin a contiguous chunk of memory */
+ npage = vfio_pin_pages(vaddr + dma->size,
+ size >> PAGE_SHIFT, prot, &pfn);
+ if (npage <= 0) {
+ WARN_ON(!npage);
+ ret = (int)npage;
+ break;
+ }
- pdma = dma;
+ /* Map it! */
+ ret = vfio_iommu_map(iommu, iova + dma->size, pfn, npage, prot);
+ if (ret) {
+ vfio_unpin_pages(pfn, npage, prot, true);
+ break;
}
+
+ size -= npage << PAGE_SHIFT;
+ dma->size += npage << PAGE_SHIFT;
}
- /* Check if we abut a region above - nothing above ~0 + 1 */
- if (iova + size) {
- dma = vfio_find_dma(iommu, iova + size, 1);
- if (dma && dma->prot == prot &&
- dma->vaddr == vaddr + size) {
+ if (ret)
+ vfio_remove_dma(iommu, dma);
+
+ mutex_unlock(&iommu->lock);
+ return ret;
+}
- dma->npage += npage;
- dma->iova = iova;
- dma->vaddr = vaddr;
+static int vfio_bus_type(struct device *dev, void *data)
+{
+ struct bus_type **bus = data;
- /*
- * If merged above and below, remove previously
- * merged entry. New entry covers it.
- */
- if (pdma) {
- list_del(&pdma->next);
- kfree(pdma);
+ if (*bus && *bus != dev->bus)
+ return -EINVAL;
+
+ *bus = dev->bus;
+
+ return 0;
+}
+
+static int vfio_iommu_replay(struct vfio_iommu *iommu,
+ struct vfio_domain *domain)
+{
+ struct vfio_domain *d;
+ struct rb_node *n;
+ int ret;
+
+ /* Arbitrarily pick the first domain in the list for lookups */
+ d = list_first_entry(&iommu->domain_list, struct vfio_domain, next);
+ n = rb_first(&iommu->dma_list);
+
+ /* If there's not a domain, there better not be any mappings */
+ if (WARN_ON(n && !d))
+ return -EINVAL;
+
+ for (; n; n = rb_next(n)) {
+ struct vfio_dma *dma;
+ dma_addr_t iova;
+
+ dma = rb_entry(n, struct vfio_dma, node);
+ iova = dma->iova;
+
+ while (iova < dma->iova + dma->size) {
+ phys_addr_t phys = iommu_iova_to_phys(d->domain, iova);
+ size_t size;
+
+ if (WARN_ON(!phys)) {
+ iova += PAGE_SIZE;
+ continue;
}
- pdma = dma;
- }
- }
- /* Isolated, new region */
- if (!pdma) {
- dma = kzalloc(sizeof *dma, GFP_KERNEL);
- if (!dma) {
- ret = -ENOMEM;
- vfio_dma_unmap(iommu, iova, npage, prot);
- goto out_lock;
- }
+ size = PAGE_SIZE;
+
+ while (iova + size < dma->iova + dma->size &&
+ phys + size == iommu_iova_to_phys(d->domain,
+ iova + size))
+ size += PAGE_SIZE;
- dma->npage = npage;
- dma->iova = iova;
- dma->vaddr = vaddr;
- dma->prot = prot;
- list_add(&dma->next, &iommu->dma_list);
+ ret = iommu_map(domain->domain, iova, phys,
+ size, dma->prot | domain->prot);
+ if (ret)
+ return ret;
+
+ iova += size;
+ }
}
-out_lock:
- mutex_unlock(&iommu->lock);
- return ret;
+ return 0;
}
static int vfio_iommu_type1_attach_group(void *iommu_data,
struct iommu_group *iommu_group)
{
struct vfio_iommu *iommu = iommu_data;
- struct vfio_group *group, *tmp;
+ struct vfio_group *group, *g;
+ struct vfio_domain *domain, *d;
+ struct bus_type *bus = NULL;
int ret;
- group = kzalloc(sizeof(*group), GFP_KERNEL);
- if (!group)
- return -ENOMEM;
-
mutex_lock(&iommu->lock);
- list_for_each_entry(tmp, &iommu->group_list, next) {
- if (tmp->iommu_group == iommu_group) {
+ list_for_each_entry(d, &iommu->domain_list, next) {
+ list_for_each_entry(g, &d->group_list, next) {
+ if (g->iommu_group != iommu_group)
+ continue;
+
mutex_unlock(&iommu->lock);
- kfree(group);
return -EINVAL;
}
}
+ group = kzalloc(sizeof(*group), GFP_KERNEL);
+ domain = kzalloc(sizeof(*domain), GFP_KERNEL);
+ if (!group || !domain) {
+ ret = -ENOMEM;
+ goto out_free;
+ }
+
+ group->iommu_group = iommu_group;
+
+ /* Determine bus_type in order to allocate a domain */
+ ret = iommu_group_for_each_dev(iommu_group, &bus, vfio_bus_type);
+ if (ret)
+ goto out_free;
+
+ domain->domain = iommu_domain_alloc(bus);
+ if (!domain->domain) {
+ ret = -EIO;
+ goto out_free;
+ }
+
+ ret = iommu_attach_group(domain->domain, iommu_group);
+ if (ret)
+ goto out_domain;
+
+ INIT_LIST_HEAD(&domain->group_list);
+ list_add(&group->next, &domain->group_list);
+
+ if (!allow_unsafe_interrupts &&
+ !iommu_domain_has_cap(domain->domain, IOMMU_CAP_INTR_REMAP)) {
+ pr_warn("%s: No interrupt remapping support. Use the module param \"allow_unsafe_interrupts\" to enable VFIO IOMMU support on this platform\n",
+ __func__);
+ ret = -EPERM;
+ goto out_detach;
+ }
+
+ if (iommu_domain_has_cap(domain->domain, IOMMU_CAP_CACHE_COHERENCY))
+ domain->prot |= IOMMU_CACHE;
+
/*
- * TODO: Domain have capabilities that might change as we add
- * groups (see iommu->cache, currently never set). Check for
- * them and potentially disallow groups to be attached when it
- * would change capabilities (ugh).
+ * Try to match an existing compatible domain. We don't want to
+ * preclude an IOMMU driver supporting multiple bus_types and being
+ * able to include different bus_types in the same IOMMU domain, so
+ * we test whether the domains use the same iommu_ops rather than
+ * testing if they're on the same bus_type.
*/
- ret = iommu_attach_group(iommu->domain, iommu_group);
- if (ret) {
- mutex_unlock(&iommu->lock);
- kfree(group);
- return ret;
+ list_for_each_entry(d, &iommu->domain_list, next) {
+ if (d->domain->ops == domain->domain->ops &&
+ d->prot == domain->prot) {
+ iommu_detach_group(domain->domain, iommu_group);
+ if (!iommu_attach_group(d->domain, iommu_group)) {
+ list_add(&group->next, &d->group_list);
+ iommu_domain_free(domain->domain);
+ kfree(domain);
+ mutex_unlock(&iommu->lock);
+ return 0;
+ }
+
+ ret = iommu_attach_group(domain->domain, iommu_group);
+ if (ret)
+ goto out_domain;
+ }
}
- group->iommu_group = iommu_group;
- list_add(&group->next, &iommu->group_list);
+ /* replay mappings on new domains */
+ ret = vfio_iommu_replay(iommu, domain);
+ if (ret)
+ goto out_detach;
+
+ list_add(&domain->next, &iommu->domain_list);
mutex_unlock(&iommu->lock);
return 0;
+
+out_detach:
+ iommu_detach_group(domain->domain, iommu_group);
+out_domain:
+ iommu_domain_free(domain->domain);
+out_free:
+ kfree(domain);
+ kfree(group);
+ mutex_unlock(&iommu->lock);
+ return ret;
+}
+
+static void vfio_iommu_unmap_unpin_all(struct vfio_iommu *iommu)
+{
+ struct rb_node *node;
+
+ while ((node = rb_first(&iommu->dma_list)))
+ vfio_remove_dma(iommu, rb_entry(node, struct vfio_dma, node));
}
static void vfio_iommu_type1_detach_group(void *iommu_data,
struct iommu_group *iommu_group)
{
struct vfio_iommu *iommu = iommu_data;
+ struct vfio_domain *domain;
struct vfio_group *group;
mutex_lock(&iommu->lock);
- list_for_each_entry(group, &iommu->group_list, next) {
- if (group->iommu_group == iommu_group) {
- iommu_detach_group(iommu->domain, iommu_group);
+ list_for_each_entry(domain, &iommu->domain_list, next) {
+ list_for_each_entry(group, &domain->group_list, next) {
+ if (group->iommu_group != iommu_group)
+ continue;
+
+ iommu_detach_group(domain->domain, iommu_group);
list_del(&group->next);
kfree(group);
- break;
+ /*
+ * Group ownership provides privilege; if the group
+ * list is empty, the domain goes away. If it's the
+ * last domain, then all the mappings go away too.
+ */
+ if (list_empty(&domain->group_list)) {
+ if (list_is_singular(&iommu->domain_list))
+ vfio_iommu_unmap_unpin_all(iommu);
+ iommu_domain_free(domain->domain);
+ list_del(&domain->next);
+ kfree(domain);
+ }
+ goto done;
}
}
+done:
mutex_unlock(&iommu->lock);
}
@@ -598,40 +819,17 @@ static void *vfio_iommu_type1_open(unsigned long arg)
{
struct vfio_iommu *iommu;
- if (arg != VFIO_TYPE1_IOMMU)
+ if (arg != VFIO_TYPE1_IOMMU && arg != VFIO_TYPE1v2_IOMMU)
return ERR_PTR(-EINVAL);
iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
if (!iommu)
return ERR_PTR(-ENOMEM);
- INIT_LIST_HEAD(&iommu->group_list);
- INIT_LIST_HEAD(&iommu->dma_list);
+ INIT_LIST_HEAD(&iommu->domain_list);
+ iommu->dma_list = RB_ROOT;
mutex_init(&iommu->lock);
-
- /*
- * Wish we didn't have to know about bus_type here.
- */
- iommu->domain = iommu_domain_alloc(&pci_bus_type);
- if (!iommu->domain) {
- kfree(iommu);
- return ERR_PTR(-EIO);
- }
-
- /*
- * Wish we could specify required capabilities rather than create
- * a domain, see what comes out and hope it doesn't change along
- * the way. Fortunately we know interrupt remapping is global for
- * our iommus.
- */
- if (!allow_unsafe_interrupts &&
- !iommu_domain_has_cap(iommu->domain, IOMMU_CAP_INTR_REMAP)) {
- pr_warn("%s: No interrupt remapping support. Use the module param \"allow_unsafe_interrupts\" to enable VFIO IOMMU support on this platform\n",
- __func__);
- iommu_domain_free(iommu->domain);
- kfree(iommu);
- return ERR_PTR(-EPERM);
- }
+ iommu->v2 = (arg == VFIO_TYPE1v2_IOMMU);
return iommu;
}
@@ -639,26 +837,44 @@ static void *vfio_iommu_type1_open(unsigned long arg)
static void vfio_iommu_type1_release(void *iommu_data)
{
struct vfio_iommu *iommu = iommu_data;
+ struct vfio_domain *domain, *domain_tmp;
struct vfio_group *group, *group_tmp;
- struct vfio_dma *dma, *dma_tmp;
- list_for_each_entry_safe(group, group_tmp, &iommu->group_list, next) {
- iommu_detach_group(iommu->domain, group->iommu_group);
- list_del(&group->next);
- kfree(group);
- }
+ vfio_iommu_unmap_unpin_all(iommu);
- list_for_each_entry_safe(dma, dma_tmp, &iommu->dma_list, next) {
- vfio_dma_unmap(iommu, dma->iova, dma->npage, dma->prot);
- list_del(&dma->next);
- kfree(dma);
+ list_for_each_entry_safe(domain, domain_tmp,
+ &iommu->domain_list, next) {
+ list_for_each_entry_safe(group, group_tmp,
+ &domain->group_list, next) {
+ iommu_detach_group(domain->domain, group->iommu_group);
+ list_del(&group->next);
+ kfree(group);
+ }
+ iommu_domain_free(domain->domain);
+ list_del(&domain->next);
+ kfree(domain);
}
- iommu_domain_free(iommu->domain);
- iommu->domain = NULL;
kfree(iommu);
}
+static int vfio_domains_have_iommu_cache(struct vfio_iommu *iommu)
+{
+ struct vfio_domain *domain;
+ int ret = 1;
+
+ mutex_lock(&iommu->lock);
+ list_for_each_entry(domain, &iommu->domain_list, next) {
+ if (!(domain->prot & IOMMU_CACHE)) {
+ ret = 0;
+ break;
+ }
+ }
+ mutex_unlock(&iommu->lock);
+
+ return ret;
+}
+
static long vfio_iommu_type1_ioctl(void *iommu_data,
unsigned int cmd, unsigned long arg)
{
@@ -668,7 +884,12 @@ static long vfio_iommu_type1_ioctl(void *iommu_data,
if (cmd == VFIO_CHECK_EXTENSION) {
switch (arg) {
case VFIO_TYPE1_IOMMU:
+ case VFIO_TYPE1v2_IOMMU:
return 1;
+ case VFIO_DMA_CC_IOMMU:
+ if (!iommu)
+ return 0;
+ return vfio_domains_have_iommu_cache(iommu);
default:
return 0;
}
@@ -685,7 +906,7 @@ static long vfio_iommu_type1_ioctl(void *iommu_data,
info.flags = 0;
- info.iova_pgsizes = iommu->domain->ops->pgsize_bitmap;
+ info.iova_pgsizes = vfio_pgsize_bitmap(iommu);
return copy_to_user((void __user *)arg, &info, minsz);
@@ -706,6 +927,7 @@ static long vfio_iommu_type1_ioctl(void *iommu_data,
} else if (cmd == VFIO_IOMMU_UNMAP_DMA) {
struct vfio_iommu_type1_dma_unmap unmap;
+ long ret;
minsz = offsetofend(struct vfio_iommu_type1_dma_unmap, size);
@@ -715,7 +937,11 @@ static long vfio_iommu_type1_ioctl(void *iommu_data,
if (unmap.argsz < minsz || unmap.flags)
return -EINVAL;
- return vfio_dma_do_unmap(iommu, &unmap);
+ ret = vfio_dma_do_unmap(iommu, &unmap);
+ if (ret)
+ return ret;
+
+ return copy_to_user((void __user *)arg, &unmap, minsz);
}
return -ENOTTY;
@@ -733,9 +959,6 @@ static const struct vfio_iommu_driver_ops vfio_iommu_driver_ops_type1 = {
static int __init vfio_iommu_type1_init(void)
{
- if (!iommu_present(&pci_bus_type))
- return -ENODEV;
-
return vfio_register_iommu_driver(&vfio_iommu_driver_ops_type1);
}