Diffstat (limited to 'drivers/vfio')
-rw-r--r--   drivers/vfio/Kconfig                 |   1
-rw-r--r--   drivers/vfio/pci/vfio_pci.c          | 321
-rw-r--r--   drivers/vfio/pci/vfio_pci_config.c   |  30
-rw-r--r--   drivers/vfio/pci/vfio_pci_intrs.c    |  64
-rw-r--r--   drivers/vfio/vfio.c                  | 148
-rw-r--r--   drivers/vfio/vfio_iommu_spapr_tce.c  |  28
-rw-r--r--   drivers/vfio/vfio_iommu_type1.c      | 697
7 files changed, 830 insertions, 459 deletions
diff --git a/drivers/vfio/Kconfig b/drivers/vfio/Kconfig
index 26b3d9d1409..af7b204b921 100644
--- a/drivers/vfio/Kconfig
+++ b/drivers/vfio/Kconfig
@@ -13,6 +13,7 @@ menuconfig VFIO
 	depends on IOMMU_API
 	select VFIO_IOMMU_TYPE1 if X86
 	select VFIO_IOMMU_SPAPR_TCE if (PPC_POWERNV || PPC_PSERIES)
+	select ANON_INODES
 	help
 	  VFIO provides a framework for secure userspace device drivers.
 	  See Documentation/vfio.txt for more details.
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index cef6002acbd..010e0f8b8e4 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -13,6 +13,7 @@
 #include <linux/device.h>
 #include <linux/eventfd.h>
+#include <linux/file.h>
 #include <linux/interrupt.h>
 #include <linux/iommu.h>
 #include <linux/module.h>
@@ -56,7 +57,8 @@ static int vfio_pci_enable(struct vfio_pci_device *vdev)
 
 	ret = vfio_config_init(vdev);
 	if (ret) {
-		pci_load_and_free_saved_state(pdev, &vdev->pci_saved_state);
+		kfree(vdev->pci_saved_state);
+		vdev->pci_saved_state = NULL;
 		pci_disable_device(pdev);
 		return ret;
 	}
@@ -138,25 +140,14 @@ static void vfio_pci_disable(struct vfio_pci_device *vdev)
 	pci_write_config_word(pdev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
 
 	/*
-	 * Careful, device_lock may already be held.  This is the case if
-	 * a driver unbind is blocked.  Try to get the locks ourselves to
-	 * prevent a deadlock.
+	 * Try to reset the device.  The success of this is dependent on
+	 * being able to lock the device, which is not always possible.
 	 */
 	if (vdev->reset_works) {
-		bool reset_done = false;
-
-		if (pci_cfg_access_trylock(pdev)) {
-			if (device_trylock(&pdev->dev)) {
-				__pci_reset_function_locked(pdev);
-				reset_done = true;
-				device_unlock(&pdev->dev);
-			}
-			pci_cfg_access_unlock(pdev);
-		}
-
-		if (!reset_done)
-			pr_warn("%s: Unable to acquire locks for reset of %s\n",
-				__func__, dev_name(&pdev->dev));
+		int ret = pci_try_reset_function(pdev);
+		if (ret)
+			pr_warn("%s: Failed to reset device %s (%d)\n",
+				__func__, dev_name(&pdev->dev), ret);
 	}
 
 	pci_restore_state(pdev);
@@ -206,8 +197,7 @@ static int vfio_pci_get_irq_count(struct vfio_pci_device *vdev, int irq_type)
 		if (pos) {
 			pci_read_config_word(vdev->pdev,
 					     pos + PCI_MSI_FLAGS, &flags);
-
-			return 1 << (flags & PCI_MSI_FLAGS_QMASK);
+			return 1 << ((flags & PCI_MSI_FLAGS_QMASK) >> 1);
 		}
 	} else if (irq_type == VFIO_PCI_MSIX_IRQ_INDEX) {
 		u8 pos;
@@ -227,6 +217,110 @@ static int vfio_pci_get_irq_count(struct vfio_pci_device *vdev, int irq_type)
 	return 0;
 }
 
+static int vfio_pci_count_devs(struct pci_dev *pdev, void *data)
+{
+	(*(int *)data)++;
+	return 0;
+}
+
+struct vfio_pci_fill_info {
+	int max;
+	int cur;
+	struct vfio_pci_dependent_device *devices;
+};
+
+static int vfio_pci_fill_devs(struct pci_dev *pdev, void *data)
+{
+	struct vfio_pci_fill_info *fill = data;
+	struct iommu_group *iommu_group;
+
+	if (fill->cur == fill->max)
+		return -EAGAIN; /* Something changed, try again */
+
+	iommu_group = iommu_group_get(&pdev->dev);
+	if (!iommu_group)
+		return -EPERM; /* Cannot reset non-isolated devices */
+
+	fill->devices[fill->cur].group_id = iommu_group_id(iommu_group);
+	fill->devices[fill->cur].segment = pci_domain_nr(pdev->bus);
+	fill->devices[fill->cur].bus = pdev->bus->number;
+	fill->devices[fill->cur].devfn = pdev->devfn;
+	fill->cur++;
+	iommu_group_put(iommu_group);
+	return 0;
+}
+
+struct vfio_pci_group_entry {
+	struct vfio_group *group;
+	int id;
+};
+
+struct vfio_pci_group_info {
+	int count;
+	struct vfio_pci_group_entry *groups;
+};
+
+static int vfio_pci_validate_devs(struct pci_dev *pdev, void *data)
+{
+	struct vfio_pci_group_info *info = data;
+	struct iommu_group *group;
+	int id, i;
+
+	group = iommu_group_get(&pdev->dev);
+	if (!group)
+		return -EPERM;
+
+	id = iommu_group_id(group);
+
+	for (i = 0; i < info->count; i++)
+		if (info->groups[i].id == id)
+			break;
+
+	iommu_group_put(group);
+
+	return (i == info->count) ? -EINVAL : 0;
+}
+
+static bool vfio_pci_dev_below_slot(struct pci_dev *pdev, struct pci_slot *slot)
+{
+	for (; pdev; pdev = pdev->bus->self)
+		if (pdev->bus == slot->bus)
+			return (pdev->slot == slot);
+	return false;
+}
+
+struct vfio_pci_walk_info {
+	int (*fn)(struct pci_dev *, void *data);
+	void *data;
+	struct pci_dev *pdev;
+	bool slot;
+	int ret;
+};
+
+static int vfio_pci_walk_wrapper(struct pci_dev *pdev, void *data)
+{
+	struct vfio_pci_walk_info *walk = data;
+
+	if (!walk->slot || vfio_pci_dev_below_slot(pdev, walk->pdev->slot))
+		walk->ret = walk->fn(pdev, walk->data);
+
+	return walk->ret;
+}
+
+static int vfio_pci_for_each_slot_or_bus(struct pci_dev *pdev,
+					 int (*fn)(struct pci_dev *,
+						   void *data), void *data,
+					 bool slot)
+{
+	struct vfio_pci_walk_info walk = {
+		.fn = fn, .data = data, .pdev = pdev, .slot = slot, .ret = 0,
+	};
+
+	pci_walk_bus(pdev->bus, vfio_pci_walk_wrapper, &walk);
+
+	return walk.ret;
+}
+
 static long vfio_pci_ioctl(void *device_data,
 			   unsigned int cmd, unsigned long arg)
 {
@@ -407,9 +501,188 @@ static long vfio_pci_ioctl(void *device_data,
 
 		return ret;
 
-	} else if (cmd == VFIO_DEVICE_RESET)
+	} else if (cmd == VFIO_DEVICE_RESET) {
 		return vdev->reset_works ?
-			pci_reset_function(vdev->pdev) : -EINVAL;
+			pci_try_reset_function(vdev->pdev) : -EINVAL;
+
+	} else if (cmd == VFIO_DEVICE_GET_PCI_HOT_RESET_INFO) {
+		struct vfio_pci_hot_reset_info hdr;
+		struct vfio_pci_fill_info fill = { 0 };
+		struct vfio_pci_dependent_device *devices = NULL;
+		bool slot = false;
+		int ret = 0;
+
+		minsz = offsetofend(struct vfio_pci_hot_reset_info, count);
+
+		if (copy_from_user(&hdr, (void __user *)arg, minsz))
+			return -EFAULT;
+
+		if (hdr.argsz < minsz)
+			return -EINVAL;
+
+		hdr.flags = 0;
+
+		/* Can we do a slot or bus reset or neither? */
+		if (!pci_probe_reset_slot(vdev->pdev->slot))
+			slot = true;
+		else if (pci_probe_reset_bus(vdev->pdev->bus))
+			return -ENODEV;
+
+		/* How many devices are affected? */
+		ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
+						    vfio_pci_count_devs,
+						    &fill.max, slot);
+		if (ret)
+			return ret;
+
+		WARN_ON(!fill.max); /* Should always be at least one */
+
+		/*
+		 * If there's enough space, fill it now, otherwise return
+		 * -ENOSPC and the number of devices affected.
+		 */
+		if (hdr.argsz < sizeof(hdr) + (fill.max * sizeof(*devices))) {
+			ret = -ENOSPC;
+			hdr.count = fill.max;
+			goto reset_info_exit;
+		}
+
+		devices = kcalloc(fill.max, sizeof(*devices), GFP_KERNEL);
+		if (!devices)
+			return -ENOMEM;
+
+		fill.devices = devices;
+
+		ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
+						    vfio_pci_fill_devs,
+						    &fill, slot);
+
+		/*
+		 * If a device was removed between counting and filling,
+		 * we may come up short of fill.max.  If a device was
+		 * added, we'll have a return of -EAGAIN above.
+		 */
+		if (!ret)
+			hdr.count = fill.cur;
+
+reset_info_exit:
+		if (copy_to_user((void __user *)arg, &hdr, minsz))
+			ret = -EFAULT;
+
+		if (!ret) {
+			if (copy_to_user((void __user *)(arg + minsz), devices,
+					 hdr.count * sizeof(*devices)))
+				ret = -EFAULT;
+		}
+
+		kfree(devices);
+		return ret;
+
+	} else if (cmd == VFIO_DEVICE_PCI_HOT_RESET) {
+		struct vfio_pci_hot_reset hdr;
+		int32_t *group_fds;
+		struct vfio_pci_group_entry *groups;
+		struct vfio_pci_group_info info;
+		bool slot = false;
+		int i, count = 0, ret = 0;
+
+		minsz = offsetofend(struct vfio_pci_hot_reset, count);
+
+		if (copy_from_user(&hdr, (void __user *)arg, minsz))
+			return -EFAULT;
+
+		if (hdr.argsz < minsz || hdr.flags)
+			return -EINVAL;
+
+		/* Can we do a slot or bus reset or neither? */
+		if (!pci_probe_reset_slot(vdev->pdev->slot))
+			slot = true;
+		else if (pci_probe_reset_bus(vdev->pdev->bus))
+			return -ENODEV;
+
+		/*
+		 * We can't let userspace give us an arbitrarily large
+		 * buffer to copy, so verify how many we think there
+		 * could be.  Note groups can have multiple devices so
+		 * one group per device is the max.
+		 */
+		ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
+						    vfio_pci_count_devs,
+						    &count, slot);
+		if (ret)
+			return ret;
+
+		/* Somewhere between 1 and count is OK */
+		if (!hdr.count || hdr.count > count)
+			return -EINVAL;
+
+		group_fds = kcalloc(hdr.count, sizeof(*group_fds), GFP_KERNEL);
+		groups = kcalloc(hdr.count, sizeof(*groups), GFP_KERNEL);
+		if (!group_fds || !groups) {
+			kfree(group_fds);
+			kfree(groups);
+			return -ENOMEM;
+		}
+
+		if (copy_from_user(group_fds, (void __user *)(arg + minsz),
+				   hdr.count * sizeof(*group_fds))) {
+			kfree(group_fds);
+			kfree(groups);
+			return -EFAULT;
+		}
+
+		/*
+		 * For each group_fd, get the group through the vfio external
+		 * user interface and store the group and iommu ID.  This
+		 * ensures the group is held across the reset.
+		 */
+		for (i = 0; i < hdr.count; i++) {
+			struct vfio_group *group;
+			struct fd f = fdget(group_fds[i]);
+			if (!f.file) {
+				ret = -EBADF;
+				break;
+			}
+
+			group = vfio_group_get_external_user(f.file);
+			fdput(f);
+			if (IS_ERR(group)) {
+				ret = PTR_ERR(group);
+				break;
+			}
+
+			groups[i].group = group;
+			groups[i].id = vfio_external_user_iommu_id(group);
+		}
+
+		kfree(group_fds);
+
+		/* release reference to groups on error */
+		if (ret)
+			goto hot_reset_release;
+
+		info.count = hdr.count;
+		info.groups = groups;
+
+		/*
+		 * Test whether all the affected devices are contained
+		 * by the set of groups provided by the user.
+		 */
+		ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
+						    vfio_pci_validate_devs,
+						    &info, slot);
+		if (!ret)
+			/* User has access, do the reset */
+			ret = slot ? pci_try_reset_slot(vdev->pdev->slot) :
+				     pci_try_reset_bus(vdev->pdev->bus);
+
+hot_reset_release:
+		for (i--; i >= 0; i--)
+			vfio_group_put_external_user(groups[i].group);
+
+		kfree(groups);
+		return ret;
+	}
 
 	return -ENOTTY;
 }
@@ -599,9 +872,13 @@ static pci_ers_result_t vfio_pci_aer_err_detected(struct pci_dev *pdev,
 		return PCI_ERS_RESULT_DISCONNECT;
 	}
 
+	mutex_lock(&vdev->igate);
+
 	if (vdev->err_trigger)
 		eventfd_signal(vdev->err_trigger, 1);
 
+	mutex_unlock(&vdev->igate);
+
 	vfio_device_put(device);
 
 	return PCI_ERS_RESULT_CAN_RECOVER;
diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c
index affa34745be..e50790e91f7 100644
--- a/drivers/vfio/pci/vfio_pci_config.c
+++ b/drivers/vfio/pci/vfio_pci_config.c
@@ -975,20 +975,20 @@ static int vfio_vc_cap_len(struct vfio_pci_device *vdev, u16 pos)
 	int ret, evcc, phases, vc_arb;
 	int len = PCI_CAP_VC_BASE_SIZEOF;
 
-	ret = pci_read_config_dword(pdev, pos + PCI_VC_PORT_REG1, &tmp);
+	ret = pci_read_config_dword(pdev, pos + PCI_VC_PORT_CAP1, &tmp);
 	if (ret)
 		return pcibios_err_to_errno(ret);
 
-	evcc = tmp & PCI_VC_REG1_EVCC;	/* extended vc count */
-	ret = pci_read_config_dword(pdev, pos + PCI_VC_PORT_REG2, &tmp);
+	evcc = tmp & PCI_VC_CAP1_EVCC;	/* extended vc count */
+	ret = pci_read_config_dword(pdev, pos + PCI_VC_PORT_CAP2, &tmp);
 	if (ret)
 		return pcibios_err_to_errno(ret);
 
-	if (tmp & PCI_VC_REG2_128_PHASE)
+	if (tmp & PCI_VC_CAP2_128_PHASE)
 		phases = 128;
-	else if (tmp & PCI_VC_REG2_64_PHASE)
+	else if (tmp & PCI_VC_CAP2_64_PHASE)
 		phases = 64;
-	else if (tmp & PCI_VC_REG2_32_PHASE)
+	else if (tmp & PCI_VC_CAP2_32_PHASE)
 		phases = 32;
 	else
 		phases = 0;
@@ -1012,6 +1012,7 @@ static int vfio_vc_cap_len(struct vfio_pci_device *vdev, u16 pos)
 static int vfio_cap_len(struct vfio_pci_device *vdev, u8 cap, u8 pos)
 {
 	struct pci_dev *pdev = vdev->pdev;
+	u32 dword;
 	u16 word;
 	u8 byte;
 	int ret;
@@ -1025,7 +1026,9 @@ static int vfio_cap_len(struct vfio_pci_device *vdev, u8 cap, u8 pos)
 			return pcibios_err_to_errno(ret);
 
 		if (PCI_X_CMD_VERSION(word)) {
-			vdev->extended_caps = true;
+			/* Test for extended capabilities */
+			pci_read_config_dword(pdev, PCI_CFG_SPACE_SIZE, &dword);
+			vdev->extended_caps = (dword != 0);
 			return PCI_CAP_PCIX_SIZEOF_V2;
 		} else
 			return PCI_CAP_PCIX_SIZEOF_V0;
@@ -1037,9 +1040,11 @@ static int vfio_cap_len(struct vfio_pci_device *vdev, u8 cap, u8 pos)
 		return byte;
 	case PCI_CAP_ID_EXP:
-		/* length based on version */
-		vdev->extended_caps = true;
+		/* Test for extended capabilities */
+		pci_read_config_dword(pdev, PCI_CFG_SPACE_SIZE, &dword);
+		vdev->extended_caps = (dword != 0);
 
+		/* length based on version */
 		if ((pcie_caps_reg(pdev) & PCI_EXP_FLAGS_VERS) == 1)
 			return PCI_CAP_EXP_ENDPOINT_SIZEOF_V1;
 		else
@@ -1121,8 +1126,7 @@ static int vfio_ext_cap_len(struct vfio_pci_device *vdev, u16 ecap, u16 epos)
 			return pcibios_err_to_errno(ret);
 
 		byte &= PCI_DPA_CAP_SUBSTATE_MASK;
-		byte = round_up(byte + 1, 4);
-		return PCI_DPA_BASE_SIZEOF + byte;
+		return PCI_DPA_BASE_SIZEOF + byte + 1;
 	case PCI_EXT_CAP_ID_TPH:
 		ret = pci_read_config_dword(pdev, epos + PCI_TPH_CAP, &dword);
 		if (ret)
@@ -1131,9 +1135,9 @@ static int vfio_ext_cap_len(struct vfio_pci_device *vdev, u16 ecap, u16 epos)
 		if ((dword & PCI_TPH_CAP_LOC_MASK) == PCI_TPH_LOC_CAP) {
 			int sts;
 
-			sts = byte & PCI_TPH_CAP_ST_MASK;
+			sts = dword & PCI_TPH_CAP_ST_MASK;
 			sts >>= PCI_TPH_CAP_ST_SHIFT;
-			return PCI_TPH_BASE_SIZEOF + round_up(sts * 2, 4);
+			return PCI_TPH_BASE_SIZEOF + (sts * 2) + 2;
 		}
 		return PCI_TPH_BASE_SIZEOF;
 	default:
diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c
index 4bc704e1b7c..9dd49c9839a 100644
--- a/drivers/vfio/pci/vfio_pci_intrs.c
+++ b/drivers/vfio/pci/vfio_pci_intrs.c
@@ -130,8 +130,8 @@ static int virqfd_enable(struct vfio_pci_device *vdev,
 			 void (*thread)(struct vfio_pci_device *, void *),
 			 void *data, struct virqfd **pvirqfd, int fd)
 {
-	struct file *file = NULL;
-	struct eventfd_ctx *ctx = NULL;
+	struct fd irqfd;
+	struct eventfd_ctx *ctx;
 	struct virqfd *virqfd;
 	int ret = 0;
 	unsigned int events;
@@ -149,16 +149,16 @@ static int virqfd_enable(struct vfio_pci_device *vdev,
 	INIT_WORK(&virqfd->shutdown, virqfd_shutdown);
 	INIT_WORK(&virqfd->inject, virqfd_inject);
 
-	file = eventfd_fget(fd);
-	if (IS_ERR(file)) {
-		ret = PTR_ERR(file);
-		goto fail;
+	irqfd = fdget(fd);
+	if (!irqfd.file) {
+		ret = -EBADF;
+		goto err_fd;
 	}
 
-	ctx = eventfd_ctx_fileget(file);
+	ctx = eventfd_ctx_fileget(irqfd.file);
 	if (IS_ERR(ctx)) {
 		ret = PTR_ERR(ctx);
-		goto fail;
+		goto err_ctx;
 	}
 
 	virqfd->eventfd = ctx;
@@ -174,7 +174,7 @@ static int virqfd_enable(struct vfio_pci_device *vdev,
 	if (*pvirqfd) {
 		spin_unlock_irq(&vdev->irqlock);
 		ret = -EBUSY;
-		goto fail;
+		goto err_busy;
 	}
 	*pvirqfd = virqfd;
@@ -187,7 +187,7 @@ static int virqfd_enable(struct vfio_pci_device *vdev,
 	init_waitqueue_func_entry(&virqfd->wait, virqfd_wakeup);
 	init_poll_funcptr(&virqfd->pt, virqfd_ptable_queue_proc);
 
-	events = file->f_op->poll(file, &virqfd->pt);
+	events = irqfd.file->f_op->poll(irqfd.file, &virqfd->pt);
 
 	/*
 	 * Check if there was an event already pending on the eventfd
@@ -202,17 +202,14 @@ static int virqfd_enable(struct vfio_pci_device *vdev,
 	 * Do not drop the file until the irqfd is fully initialized,
 	 * otherwise we might race against the POLLHUP.
 	 */
-	fput(file);
+	fdput(irqfd);
 
 	return 0;
-
-fail:
-	if (ctx && !IS_ERR(ctx))
-		eventfd_ctx_put(ctx);
-
-	if (file && !IS_ERR(file))
-		fput(file);
-
+err_busy:
+	eventfd_ctx_put(ctx);
+err_ctx:
+	fdput(irqfd);
+err_fd:
 	kfree(virqfd);
 
 	return ret;
@@ -485,15 +482,19 @@ static int vfio_msi_enable(struct vfio_pci_device *vdev, int nvec, bool msix)
 		for (i = 0; i < nvec; i++)
 			vdev->msix[i].entry = i;
 
-		ret = pci_enable_msix(pdev, vdev->msix, nvec);
-		if (ret) {
+		ret = pci_enable_msix_range(pdev, vdev->msix, 1, nvec);
+		if (ret < nvec) {
+			if (ret > 0)
+				pci_disable_msix(pdev);
 			kfree(vdev->msix);
 			kfree(vdev->ctx);
 			return ret;
 		}
 	} else {
-		ret = pci_enable_msi_block(pdev, nvec);
-		if (ret) {
+		ret = pci_enable_msi_range(pdev, 1, nvec);
+		if (ret < nvec) {
+			if (ret > 0)
+				pci_disable_msi(pdev);
 			kfree(vdev->ctx);
 			return ret;
 		}
@@ -752,54 +753,37 @@ static int vfio_pci_set_err_trigger(struct vfio_pci_device *vdev,
 				    unsigned count, uint32_t flags, void *data)
 {
 	int32_t fd = *(int32_t *)data;
-	struct pci_dev *pdev = vdev->pdev;
 
 	if ((index != VFIO_PCI_ERR_IRQ_INDEX) ||
 	    !(flags & VFIO_IRQ_SET_DATA_TYPE_MASK))
 		return -EINVAL;
 
-	/*
-	 * device_lock synchronizes setting and checking of
-	 * err_trigger.  The vfio_pci_aer_err_detected() is also
-	 * called with device_lock held.
-	 */
-
 	/* DATA_NONE/DATA_BOOL enables loopback testing */
 	if (flags & VFIO_IRQ_SET_DATA_NONE) {
-		device_lock(&pdev->dev);
 		if (vdev->err_trigger)
 			eventfd_signal(vdev->err_trigger, 1);
-		device_unlock(&pdev->dev);
 		return 0;
 	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
 		uint8_t trigger = *(uint8_t *)data;
-		device_lock(&pdev->dev);
 		if (trigger && vdev->err_trigger)
 			eventfd_signal(vdev->err_trigger, 1);
-		device_unlock(&pdev->dev);
 		return 0;
 	}
 
 	/* Handle SET_DATA_EVENTFD */
 	if (fd == -1) {
-		device_lock(&pdev->dev);
 		if (vdev->err_trigger)
 			eventfd_ctx_put(vdev->err_trigger);
 		vdev->err_trigger = NULL;
-		device_unlock(&pdev->dev);
 		return 0;
 	} else if (fd >= 0) {
 		struct eventfd_ctx *efdctx;
 		efdctx = eventfd_ctx_fdget(fd);
 		if (IS_ERR(efdctx))
 			return PTR_ERR(efdctx);
-		device_lock(&pdev->dev);
 		if (vdev->err_trigger)
 			eventfd_ctx_put(vdev->err_trigger);
 		vdev->err_trigger = efdctx;
-		device_unlock(&pdev->dev);
 		return 0;
 	} else
 		return -EINVAL;
diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
index 842f4507883..f018d8d0f97 100644
--- a/drivers/vfio/vfio.c
+++ b/drivers/vfio/vfio.c
@@ -22,6 +22,7 @@
 #include <linux/idr.h>
 #include <linux/iommu.h>
 #include <linux/list.h>
+#include <linux/miscdevice.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
 #include <linux/rwsem.h>
@@ -45,9 +46,7 @@ static struct vfio {
 	struct idr		group_idr;
 	struct mutex		group_lock;
 	struct cdev		group_cdev;
-	struct device		*dev;
-	dev_t			devt;
-	struct cdev		cdev;
+	dev_t			group_devt;
 	wait_queue_head_t	release_q;
 } vfio;
@@ -142,8 +141,7 @@ EXPORT_SYMBOL_GPL(vfio_unregister_iommu_driver);
  */
 static int vfio_alloc_group_minor(struct vfio_group *group)
 {
-	/* index 0 is used by /dev/vfio/vfio */
-	return idr_alloc(&vfio.group_idr, group, 1, MINORMASK + 1, GFP_KERNEL);
+	return idr_alloc(&vfio.group_idr, group, 0, MINORMASK + 1, GFP_KERNEL);
 }
 
 static void vfio_free_group_minor(int minor)
@@ -243,7 +241,8 @@ static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group)
 		}
 	}
 
-	dev = device_create(vfio.class, NULL, MKDEV(MAJOR(vfio.devt), minor),
+	dev = device_create(vfio.class, NULL,
+			    MKDEV(MAJOR(vfio.group_devt), minor),
 			    group, "%d", iommu_group_id(iommu_group));
 	if (IS_ERR(dev)) {
 		vfio_free_group_minor(minor);
@@ -268,7 +267,7 @@ static void vfio_group_release(struct kref *kref)
 
 	WARN_ON(!list_empty(&group->device_list));
 
-	device_destroy(vfio.class, MKDEV(MAJOR(vfio.devt), group->minor));
+	device_destroy(vfio.class, MKDEV(MAJOR(vfio.group_devt), group->minor));
 	list_del(&group->vfio_next);
 	vfio_free_group_minor(group->minor);
 	vfio_group_unlock_and_free(group);
@@ -350,7 +349,6 @@ struct vfio_device *vfio_group_create_device(struct vfio_group *group,
 					     void *device_data)
 {
 	struct vfio_device *device;
-	int ret;
 
 	device = kzalloc(sizeof(*device), GFP_KERNEL);
 	if (!device)
@@ -361,12 +359,7 @@ struct vfio_device *vfio_group_create_device(struct vfio_group *group,
 	device->group = group;
 	device->ops = ops;
 	device->device_data = device_data;
-
-	ret = dev_set_drvdata(dev, device);
-	if (ret) {
-		kfree(device);
-		return ERR_PTR(ret);
-	}
+	dev_set_drvdata(dev, device);
 
 	/* No need to get group_lock, caller has group reference */
 	vfio_group_get(group);
@@ -1109,7 +1102,7 @@ static int vfio_group_get_device_fd(struct vfio_group *group, char *buf)
 		 * We can't use anon_inode_getfd() because we need to modify
 		 * the f_mode flags directly to allow more than just ioctls
 		 */
-		ret = get_unused_fd();
+		ret = get_unused_fd_flags(O_CLOEXEC);
 		if (ret < 0) {
 			device->ops->release(device->device_data);
 			break;
@@ -1353,16 +1346,89 @@ static const struct file_operations vfio_device_fops = {
 };
 
 /**
+ * External user API, exported by symbols to be linked dynamically.
+ *
+ * The protocol includes:
+ *  1. do normal VFIO init operation:
+ *	- opening a new container;
+ *	- attaching group(s) to it;
+ *	- setting an IOMMU driver for a container.
+ * When IOMMU is set for a container, all groups in it are
+ * considered ready to use by an external user.
+ *
+ * 2. User space passes a group fd to an external user.
+ * The external user calls vfio_group_get_external_user()
+ * to verify that:
+ *	- the group is initialized;
+ *	- IOMMU is set for it.
+ * If both checks passed, vfio_group_get_external_user()
+ * increments the container user counter to prevent
+ * the VFIO group from disposal before KVM exits.
+ *
+ * 3. The external user calls vfio_external_user_iommu_id()
+ * to know an IOMMU ID.
+ *
+ * 4. When the external KVM finishes, it calls
+ * vfio_group_put_external_user() to release the VFIO group.
+ * This call decrements the container user counter.
+ */
+struct vfio_group *vfio_group_get_external_user(struct file *filep)
+{
+	struct vfio_group *group = filep->private_data;
+
+	if (filep->f_op != &vfio_group_fops)
+		return ERR_PTR(-EINVAL);
+
+	if (!atomic_inc_not_zero(&group->container_users))
+		return ERR_PTR(-EINVAL);
+
+	if (!group->container->iommu_driver ||
+	    !vfio_group_viable(group)) {
+		atomic_dec(&group->container_users);
+		return ERR_PTR(-EINVAL);
+	}
+
+	vfio_group_get(group);
+
+	return group;
+}
+EXPORT_SYMBOL_GPL(vfio_group_get_external_user);
+
+void vfio_group_put_external_user(struct vfio_group *group)
+{
+	vfio_group_put(group);
+	vfio_group_try_dissolve_container(group);
+}
+EXPORT_SYMBOL_GPL(vfio_group_put_external_user);
+
+int vfio_external_user_iommu_id(struct vfio_group *group)
+{
+	return iommu_group_id(group->iommu_group);
+}
+EXPORT_SYMBOL_GPL(vfio_external_user_iommu_id);
+
+long vfio_external_check_extension(struct vfio_group *group, unsigned long arg)
+{
+	return vfio_ioctl_check_extension(group->container, arg);
+}
+EXPORT_SYMBOL_GPL(vfio_external_check_extension);
+
+/**
  * Module/class support
  */
 static char *vfio_devnode(struct device *dev, umode_t *mode)
 {
-	if (mode && (MINOR(dev->devt) == 0))
-		*mode = S_IRUGO | S_IWUGO;
-
 	return kasprintf(GFP_KERNEL, "vfio/%s", dev_name(dev));
 }
 
+static struct miscdevice vfio_dev = {
+	.minor = VFIO_MINOR,
+	.name = "vfio",
+	.fops = &vfio_fops,
+	.nodename = "vfio/vfio",
+	.mode = S_IRUGO | S_IWUGO,
+};
+
 static int __init vfio_init(void)
 {
 	int ret;
@@ -1374,6 +1440,13 @@ static int __init vfio_init(void)
 	INIT_LIST_HEAD(&vfio.iommu_drivers_list);
 	init_waitqueue_head(&vfio.release_q);
 
+	ret = misc_register(&vfio_dev);
+	if (ret) {
+		pr_err("vfio: misc device register failed\n");
+		return ret;
+	}
+
+	/* /dev/vfio/$GROUP */
 	vfio.class = class_create(THIS_MODULE, "vfio");
 	if (IS_ERR(vfio.class)) {
 		ret = PTR_ERR(vfio.class);
@@ -1382,27 +1455,14 @@ static int __init vfio_init(void)
 
 	vfio.class->devnode = vfio_devnode;
 
-	ret = alloc_chrdev_region(&vfio.devt, 0, MINORMASK, "vfio");
-	if (ret)
-		goto err_base_chrdev;
-
-	cdev_init(&vfio.cdev, &vfio_fops);
-	ret = cdev_add(&vfio.cdev, vfio.devt, 1);
+	ret = alloc_chrdev_region(&vfio.group_devt, 0, MINORMASK, "vfio");
 	if (ret)
-		goto err_base_cdev;
+		goto err_alloc_chrdev;
 
-	vfio.dev = device_create(vfio.class, NULL, vfio.devt, NULL, "vfio");
-	if (IS_ERR(vfio.dev)) {
-		ret = PTR_ERR(vfio.dev);
-		goto err_base_dev;
-	}
-
-	/* /dev/vfio/$GROUP */
 	cdev_init(&vfio.group_cdev, &vfio_group_fops);
-	ret = cdev_add(&vfio.group_cdev,
-		       MKDEV(MAJOR(vfio.devt), 1), MINORMASK - 1);
+	ret = cdev_add(&vfio.group_cdev, vfio.group_devt, MINORMASK);
 	if (ret)
-		goto err_groups_cdev;
+		goto err_cdev_add;
 
 	pr_info(DRIVER_DESC " version: " DRIVER_VERSION "\n");
@@ -1416,16 +1476,13 @@ static int __init vfio_init(void)
 
 	return 0;
 
-err_groups_cdev:
-	device_destroy(vfio.class, vfio.devt);
-err_base_dev:
-	cdev_del(&vfio.cdev);
-err_base_cdev:
-	unregister_chrdev_region(vfio.devt, MINORMASK);
-err_base_chrdev:
+err_cdev_add:
+	unregister_chrdev_region(vfio.group_devt, MINORMASK);
+err_alloc_chrdev:
 	class_destroy(vfio.class);
 	vfio.class = NULL;
 err_class:
+	misc_deregister(&vfio_dev);
 	return ret;
 }
@@ -1435,11 +1492,10 @@ static void __exit vfio_cleanup(void)
 
 	idr_destroy(&vfio.group_idr);
 	cdev_del(&vfio.group_cdev);
-	device_destroy(vfio.class, vfio.devt);
-	cdev_del(&vfio.cdev);
-	unregister_chrdev_region(vfio.devt, MINORMASK);
+	unregister_chrdev_region(vfio.group_devt, MINORMASK);
 	class_destroy(vfio.class);
 	vfio.class = NULL;
+	misc_deregister(&vfio_dev);
 }
 
 module_init(vfio_init);
@@ -1449,3 +1505,5 @@ MODULE_VERSION(DRIVER_VERSION);
 MODULE_LICENSE("GPL v2");
 MODULE_AUTHOR(DRIVER_AUTHOR);
 MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_ALIAS_MISCDEV(VFIO_MINOR);
+MODULE_ALIAS("devname:vfio/vfio");
diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
index bdae7a04af7..a84788ba662 100644
--- a/drivers/vfio/vfio_iommu_spapr_tce.c
+++ b/drivers/vfio/vfio_iommu_spapr_tce.c
@@ -81,7 +81,7 @@ static int tce_iommu_enable(struct tce_container *container)
 	 * enforcing the limit based on the max that the guest can map.
 	 */
 	down_write(&current->mm->mmap_sem);
-	npages = (tbl->it_size << IOMMU_PAGE_SHIFT) >> PAGE_SHIFT;
+	npages = (tbl->it_size << IOMMU_PAGE_SHIFT_4K) >> PAGE_SHIFT;
 	locked = current->mm->locked_vm + npages;
 	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
 	if (locked > lock_limit && !capable(CAP_IPC_LOCK)) {
@@ -110,7 +110,7 @@ static void tce_iommu_disable(struct tce_container *container)
 
 	down_write(&current->mm->mmap_sem);
 	current->mm->locked_vm -= (container->tbl->it_size <<
-			IOMMU_PAGE_SHIFT) >> PAGE_SHIFT;
+			IOMMU_PAGE_SHIFT_4K) >> PAGE_SHIFT;
 	up_write(&current->mm->mmap_sem);
 }
@@ -174,8 +174,8 @@ static long tce_iommu_ioctl(void *iommu_data,
 		if (info.argsz < minsz)
 			return -EINVAL;
 
-		info.dma32_window_start = tbl->it_offset << IOMMU_PAGE_SHIFT;
-		info.dma32_window_size = tbl->it_size << IOMMU_PAGE_SHIFT;
+		info.dma32_window_start = tbl->it_offset << IOMMU_PAGE_SHIFT_4K;
+		info.dma32_window_size = tbl->it_size << IOMMU_PAGE_SHIFT_4K;
 		info.flags = 0;
 
 		if (copy_to_user((void __user *)arg, &info, minsz))
@@ -205,8 +205,8 @@ static long tce_iommu_ioctl(void *iommu_data,
 				VFIO_DMA_MAP_FLAG_WRITE))
 			return -EINVAL;
 
-		if ((param.size & ~IOMMU_PAGE_MASK) ||
-		    (param.vaddr & ~IOMMU_PAGE_MASK))
+		if ((param.size & ~IOMMU_PAGE_MASK_4K) ||
+		    (param.vaddr & ~IOMMU_PAGE_MASK_4K))
 			return -EINVAL;
 
 		/* iova is checked by the IOMMU API */
@@ -220,17 +220,17 @@ static long tce_iommu_ioctl(void *iommu_data,
 		if (ret)
 			return ret;
 
-		for (i = 0; i < (param.size >> IOMMU_PAGE_SHIFT); ++i) {
+		for (i = 0; i < (param.size >> IOMMU_PAGE_SHIFT_4K); ++i) {
 			ret = iommu_put_tce_user_mode(tbl,
-					(param.iova >> IOMMU_PAGE_SHIFT) + i,
+					(param.iova >> IOMMU_PAGE_SHIFT_4K) + i,
 					tce);
 			if (ret)
 				break;
-			tce += IOMMU_PAGE_SIZE;
+			tce += IOMMU_PAGE_SIZE_4K;
 		}
 		if (ret)
 			iommu_clear_tces_and_put_pages(tbl,
-					param.iova >> IOMMU_PAGE_SHIFT, i);
+					param.iova >> IOMMU_PAGE_SHIFT_4K, i);
 
 		iommu_flush_tce(tbl);
@@ -256,17 +256,17 @@ static long tce_iommu_ioctl(void *iommu_data,
 		if (param.flags)
 			return -EINVAL;
 
-		if (param.size & ~IOMMU_PAGE_MASK)
+		if (param.size & ~IOMMU_PAGE_MASK_4K)
 			return -EINVAL;
 
 		ret = iommu_tce_clear_param_check(tbl, param.iova, 0,
-				param.size >> IOMMU_PAGE_SHIFT);
+				param.size >> IOMMU_PAGE_SHIFT_4K);
 		if (ret)
 			return ret;
 
 		ret = iommu_clear_tces_and_put_pages(tbl,
-				param.iova >> IOMMU_PAGE_SHIFT,
-				param.size >> IOMMU_PAGE_SHIFT);
+				param.iova >> IOMMU_PAGE_SHIFT_4K,
+				param.size >> IOMMU_PAGE_SHIFT_4K);
 		iommu_flush_tce(tbl);
 
 		return ret;
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index a9807dea388..0734fbe5b65 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -30,7 +30,6 @@
 #include <linux/iommu.h>
 #include <linux/module.h>
 #include <linux/mm.h>
-#include <linux/pci.h>		/* pci_bus_type */
 #include <linux/rbtree.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
@@ -55,11 +54,17 @@ MODULE_PARM_DESC(disable_hugepages,
 		 "Disable VFIO IOMMU support for IOMMU hugepages.");
 
 struct vfio_iommu {
-	struct iommu_domain	*domain;
+	struct list_head	domain_list;
 	struct mutex		lock;
 	struct rb_root		dma_list;
+	bool			v2;
+};
+
+struct vfio_domain {
+	struct iommu_domain	*domain;
+	struct list_head	next;
 	struct list_head	group_list;
-	bool			cache;
+	int			prot;		/* IOMMU_CACHE */
 };
 
 struct vfio_dma {
@@ -99,7 +104,7 @@ static struct vfio_dma *vfio_find_dma(struct vfio_iommu *iommu,
 	return NULL;
 }
 
-static void vfio_insert_dma(struct vfio_iommu *iommu, struct vfio_dma *new)
+static void vfio_link_dma(struct vfio_iommu *iommu, struct vfio_dma *new)
 {
 	struct rb_node **link = &iommu->dma_list.rb_node, *parent = NULL;
 	struct vfio_dma *dma;
@@ -118,7 +123,7 @@ static void vfio_link_dma(struct vfio_iommu *iommu, struct vfio_dma *new)
 	rb_insert_color(&new->node, &iommu->dma_list);
 }
 
-static void vfio_remove_dma(struct vfio_iommu *iommu, struct vfio_dma *old)
+static void vfio_unlink_dma(struct vfio_iommu *iommu, struct vfio_dma *old)
 {
 	rb_erase(&old->node, &iommu->dma_list);
 }
@@ -186,12 +191,12 @@ static bool is_invalid_reserved_pfn(unsigned long pfn)
 	if (pfn_valid(pfn)) {
 		bool reserved;
 		struct page *tail = pfn_to_page(pfn);
-		struct page *head = compound_trans_head(tail);
+		struct page *head = compound_head(tail);
 		reserved = !!(PageReserved(head));
 		if (head != tail) {
 			/*
 			 * "head" is not a dangling pointer
-			 * (compound_trans_head takes care of that)
+			 * (compound_head takes care of that)
 			 * but the hugepage may have been split
 			 * from under us (and we may not hold a
 			 * reference count on the head page so it can
@@ -322,32 +327,39 @@ static long vfio_unpin_pages(unsigned long pfn, long npage,
 	return unlocked;
 }
 
-static int vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma,
-			    dma_addr_t iova, size_t *size)
+static void vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma)
 {
-	dma_addr_t start = iova, end = iova + *size;
+	dma_addr_t iova = dma->iova, end = dma->iova + dma->size;
+	struct vfio_domain *domain, *d;
 	long unlocked = 0;
 
+	if (!dma->size)
+		return;
+	/*
+	 * We use the IOMMU to track the physical addresses, otherwise we'd
+	 * need a much more complicated tracking system.  Unfortunately that
+	 * means we need to use one of the iommu domains to figure out the
+	 * pfns to unpin.  The rest need to be unmapped in advance so we have
+	 * no iommu translations remaining when the pages are unpinned.
+	 */
+	domain = d = list_first_entry(&iommu->domain_list,
+				      struct vfio_domain, next);
+
+	list_for_each_entry_continue(d, &iommu->domain_list, next)
+		iommu_unmap(d->domain, dma->iova, dma->size);
+
 	while (iova < end) {
 		size_t unmapped;
 		phys_addr_t phys;
 
-		/*
-		 * We use the IOMMU to track the physical address.  This
-		 * saves us from having a lot more entries in our mapping
-		 * tree.  The downside is that we don't track the size
-		 * used to do the mapping.  We request unmap of a single
-		 * page, but expect IOMMUs that support large pages to
-		 * unmap a larger chunk.
-		 */
-		phys = iommu_iova_to_phys(iommu->domain, iova);
+		phys = iommu_iova_to_phys(domain->domain, iova);
 		if (WARN_ON(!phys)) {
 			iova += PAGE_SIZE;
 			continue;
 		}
 
-		unmapped = iommu_unmap(iommu->domain, iova, PAGE_SIZE);
-		if (!unmapped)
+		unmapped = iommu_unmap(domain->domain, iova, PAGE_SIZE);
+		if (WARN_ON(!unmapped))
 			break;
 
 		unlocked += vfio_unpin_pages(phys >> PAGE_SHIFT,
@@ -357,119 +369,26 @@ static int vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma,
 	}
 
 	vfio_lock_acct(-unlocked);
-
-	*size = iova - start;
-
-	return 0;
 }
 
-static int vfio_remove_dma_overlap(struct vfio_iommu *iommu, dma_addr_t start,
-				   size_t *size, struct vfio_dma *dma)
+static void vfio_remove_dma(struct vfio_iommu *iommu, struct vfio_dma *dma)
 {
-	size_t offset, overlap, tmp;
-	struct vfio_dma *split;
-	int ret;
-
-	if (!*size)
-		return 0;
-
-	/*
-	 * Existing dma region is completely covered, unmap all.  This is
-	 * the likely case since userspace tends to map and unmap buffers
-	 * in one shot rather than multiple mappings within a buffer.
-	 */
-	if (likely(start <= dma->iova &&
-		   start + *size >= dma->iova + dma->size)) {
-		*size = dma->size;
-		ret = vfio_unmap_unpin(iommu, dma, dma->iova, size);
-		if (ret)
-			return ret;
-
-		/*
-		 * Did we remove more than we have?  Should never happen
-		 * since a vfio_dma is contiguous in iova and vaddr.
-		 */
-		WARN_ON(*size != dma->size);
-
-		vfio_remove_dma(iommu, dma);
-		kfree(dma);
-		return 0;
-	}
-
-	/* Overlap low address of existing range */
-	if (start <= dma->iova) {
-		overlap = start + *size - dma->iova;
-		ret = vfio_unmap_unpin(iommu, dma, dma->iova, &overlap);
-		if (ret)
-			return ret;
-
-		vfio_remove_dma(iommu, dma);
-
-		/*
-		 * Check, we may have removed to whole vfio_dma.  If not
-		 * fixup and re-insert.
-		 */
-		if (overlap < dma->size) {
-			dma->iova += overlap;
-			dma->vaddr += overlap;
-			dma->size -= overlap;
-			vfio_insert_dma(iommu, dma);
-		} else
-			kfree(dma);
-
-		*size = overlap;
-		return 0;
-	}
-
-	/* Overlap high address of existing range */
-	if (start + *size >= dma->iova + dma->size) {
-		offset = start - dma->iova;
-		overlap = dma->size - offset;
-
-		ret = vfio_unmap_unpin(iommu, dma, start, &overlap);
-		if (ret)
-			return ret;
-
-		dma->size -= overlap;
-		*size = overlap;
-		return 0;
-	}
-
-	/* Split existing */
-
-	/*
-	 * Allocate our tracking structure early even though it may not
-	 * be used.  An Allocation failure later loses track of pages and
-	 * is more difficult to unwind.
-	 */
-	split = kzalloc(sizeof(*split), GFP_KERNEL);
-	if (!split)
-		return -ENOMEM;
-
-	offset = start - dma->iova;
-
-	ret = vfio_unmap_unpin(iommu, dma, start, size);
-	if (ret || !*size) {
-		kfree(split);
-		return ret;
-	}
-
-	tmp = dma->size;
+	vfio_unmap_unpin(iommu, dma);
+	vfio_unlink_dma(iommu, dma);
+	kfree(dma);
+}
 
-	/* Resize the lower vfio_dma in place, before the below insert */
-	dma->size = offset;
+static unsigned long vfio_pgsize_bitmap(struct vfio_iommu *iommu)
+{
+	struct vfio_domain *domain;
+	unsigned long bitmap = PAGE_MASK;
 
-	/* Insert new for remainder, assuming it didn't all get unmapped */
-	if (likely(offset + *size < tmp)) {
-		split->size = tmp - offset - *size;
-		split->iova = dma->iova + offset + *size;
-		split->vaddr = dma->vaddr + offset + *size;
-		split->prot = dma->prot;
-		vfio_insert_dma(iommu, split);
-	} else
-		kfree(split);
+	mutex_lock(&iommu->lock);
+	list_for_each_entry(domain, &iommu->domain_list, next)
+		bitmap &= domain->domain->ops->pgsize_bitmap;
+	mutex_unlock(&iommu->lock);
 
-	return 0;
+	return bitmap;
 }
 
 static int vfio_dma_do_unmap(struct vfio_iommu *iommu,
@@ -477,10 +396,10 @@ static int vfio_dma_do_unmap(struct vfio_iommu *iommu,
 {
 	uint64_t mask;
 	struct vfio_dma *dma;
-	size_t unmapped = 0, size;
+	size_t unmapped = 0;
 	int ret = 0;
 
-	mask = ((uint64_t)1 << __ffs(iommu->domain->ops->pgsize_bitmap)) - 1;
+	mask = ((uint64_t)1 << __ffs(vfio_pgsize_bitmap(iommu))) - 1;
 
 	if (unmap->iova & mask)
 		return -EINVAL;
@@ -491,20 +410,61 @@ static int vfio_dma_do_unmap(struct vfio_iommu *iommu,
 
 	mutex_lock(&iommu->lock);
 
+	/*
+	 * vfio-iommu-type1 (v1) - User mappings were coalesced together to
+	 * avoid tracking individual mappings.  This means that the granularity
+	 * of the original mapping was lost and the user was allowed to attempt
+	 * to unmap any range.  Depending on the contiguousness of physical
+	 * memory and page sizes supported by the IOMMU, arbitrary unmaps may
+	 * or may not have worked.  We only guaranteed unmap granularity
+	 * matching the original mapping; even though it was untracked here,
+	 * the original mappings are reflected in IOMMU mappings.  This
+	 * resulted in a couple unusual behaviors.  First, if a range is not
+	 * able to be unmapped, ex. a set of 4k pages that was mapped as a
+	 * 2M hugepage into the IOMMU, the unmap ioctl returns success but with
+	 * a zero sized unmap.  Also, if an unmap request overlaps the first
+	 * address of a hugepage, the IOMMU will unmap the entire hugepage.
+	 * This also returns success and the returned unmap size reflects the
+	 * actual size unmapped.
+	 *
+	 * We attempt to maintain compatibility with this "v1" interface, but
+	 * we take control out of the hands of the IOMMU.  Therefore, an unmap
+	 * request offset from the beginning of the original mapping will
+	 * return success with zero sized unmap.  And an unmap request covering
+	 * the first iova of mapping will unmap the entire range.
+	 *
+	 * The v2 version of this interface intends to be more deterministic.
+	 * Unmap requests must fully cover previous mappings.  Multiple
+	 * mappings may still be unmaped by specifying large ranges, but there
+	 * must not be any previous mappings bisected by the range.  An error
+	 * will be returned if these conditions are not met.  The v2 interface
+	 * will only return success and a size of zero if there were no
+	 * mappings within the range.
+	 */
+	if (iommu->v2) {
+		dma = vfio_find_dma(iommu, unmap->iova, 0);
+		if (dma && dma->iova != unmap->iova) {
+			ret = -EINVAL;
+			goto unlock;
+		}
+		dma = vfio_find_dma(iommu, unmap->iova + unmap->size - 1, 0);
+		if (dma && dma->iova + dma->size != unmap->iova + unmap->size) {
+			ret = -EINVAL;
+			goto unlock;
+		}
+	}
+
 	while ((dma = vfio_find_dma(iommu, unmap->iova, unmap->size))) {
-		size = unmap->size;
-		ret = vfio_remove_dma_overlap(iommu, unmap->iova, &size, dma);
-		if (ret || !size)
+		if (!iommu->v2 && unmap->iova > dma->iova)
 			break;
-		unmapped += size;
+		unmapped += dma->size;
+		vfio_remove_dma(iommu, dma);
 	}
 
+unlock:
 	mutex_unlock(&iommu->lock);
 
-	/*
-	 * We may unmap more than requested, update the unmap struct so
-	 * userspace can know.
-	 */
+	/* Report how much was unmapped */
 	unmap->size = unmapped;
 
 	return ret;
@@ -516,22 +476,47 @@ static int vfio_dma_do_unmap(struct vfio_iommu *iommu,
  * soon, so this is just a temporary workaround to break mappings down into
  * PAGE_SIZE.  Better to map smaller pages than nothing.
  */
-static int map_try_harder(struct vfio_iommu *iommu, dma_addr_t iova,
+static int map_try_harder(struct vfio_domain *domain, dma_addr_t iova,
 			  unsigned long pfn, long npage, int prot)
 {
 	long i;
 	int ret;
 
 	for (i = 0; i < npage; i++, pfn++, iova += PAGE_SIZE) {
-		ret = iommu_map(iommu->domain, iova,
+		ret = iommu_map(domain->domain, iova,
 				(phys_addr_t)pfn << PAGE_SHIFT,
-				PAGE_SIZE, prot);
+				PAGE_SIZE, prot | domain->prot);
 		if (ret)
 			break;
 	}
 
 	for (; i < npage && i > 0; i--, iova -= PAGE_SIZE)
-		iommu_unmap(iommu->domain, iova, PAGE_SIZE);
+		iommu_unmap(domain->domain, iova, PAGE_SIZE);
+
+	return ret;
+}
+
+static int vfio_iommu_map(struct vfio_iommu *iommu, dma_addr_t iova,
+			  unsigned long pfn, long npage, int prot)
+{
+	struct vfio_domain *d;
+	int ret;
+
+	list_for_each_entry(d, &iommu->domain_list, next) {
+		ret = iommu_map(d->domain, iova, (phys_addr_t)pfn << PAGE_SHIFT,
+				npage << PAGE_SHIFT, prot | d->prot);
+		if (ret) {
+			if (ret != -EBUSY ||
+			    map_try_harder(d, iova, pfn, npage, prot))
+				goto unwind;
+		}
+	}
+
+	return 0;
+
+unwind:
+	list_for_each_entry_continue_reverse(d, &iommu->domain_list, next)
+		iommu_unmap(d->domain, iova, npage << PAGE_SHIFT);
 
 	return ret;
 }
@@ -539,16 +524,22 @@ static int map_try_harder(struct vfio_iommu *iommu, dma_addr_t iova,
 static int vfio_dma_do_map(struct vfio_iommu *iommu,
 			   struct vfio_iommu_type1_dma_map *map)
 {
-	dma_addr_t end, iova;
+	dma_addr_t iova = map->iova;
 	unsigned long vaddr = map->vaddr;
 	size_t size = map->size;
 	long npage;
 	int ret = 0, prot = 0;
 	uint64_t mask;
+	struct vfio_dma *dma;
+	unsigned long pfn;
 
-	end = map->iova + map->size;
+	/* Verify that none of our __u64 fields overflow */
+	if (map->size != size || map->vaddr != vaddr || map->iova != iova)
+		return -EINVAL;
 
-	mask = ((uint64_t)1 << __ffs(iommu->domain->ops->pgsize_bitmap)) - 1;
+	mask = ((uint64_t)1 << __ffs(vfio_pgsize_bitmap(iommu))) - 1;
+
+	WARN_ON(mask & PAGE_MASK);
 
 	/* READ/WRITE from device perspective */
 	if (map->flags & VFIO_DMA_MAP_FLAG_WRITE)
		prot |= IOMMU_WRITE;
@@ -556,210 +547,271 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
 	if (map->flags & VFIO_DMA_MAP_FLAG_READ)
 		prot |= IOMMU_READ;
 
-	if (!prot)
-		return -EINVAL; /* No READ/WRITE? */
-
-	if (iommu->cache)
-		prot |= IOMMU_CACHE;
-
-	if (vaddr & mask)
-		return -EINVAL;
-	if (map->iova & mask)
-		return -EINVAL;
-	if (!map->size || map->size & mask)
+	if (!prot || !size || (size | iova | vaddr) & mask)
 		return -EINVAL;
 
-	WARN_ON(mask & PAGE_MASK);
-
-	/* Don't allow IOVA wrap */
-	if (end && end < map->iova)
-		return -EINVAL;
-
-	/* Don't allow virtual address wrap */
-	if (vaddr + map->size && vaddr + map->size < vaddr)
+	/* Don't allow IOVA or virtual address wrap */
+	if (iova + size - 1 < iova || vaddr + size - 1 < vaddr)
 		return -EINVAL;
 
 	mutex_lock(&iommu->lock);
 
-	if (vfio_find_dma(iommu, map->iova, map->size)) {
+	if (vfio_find_dma(iommu, iova, size)) {
 		mutex_unlock(&iommu->lock);
 		return -EEXIST;
 	}
 
-	for (iova = map->iova; iova < end; iova += size, vaddr += size) {
-		struct vfio_dma *dma = NULL;
-		unsigned long pfn;
-		long i;
+	dma = kzalloc(sizeof(*dma), GFP_KERNEL);
+	if (!dma) {
+		mutex_unlock(&iommu->lock);
+		return -ENOMEM;
+	}
+
+	dma->iova = iova;
+	dma->vaddr = vaddr;
+	dma->prot = prot;
 
+	/* Insert zero-sized and grow as we map chunks of it */
+	vfio_link_dma(iommu, dma);
+
+	while (size) {
 		/* Pin a contiguous chunk of memory */
-		npage = vfio_pin_pages(vaddr, (end - iova) >> PAGE_SHIFT,
-				       prot, &pfn);
+		npage = vfio_pin_pages(vaddr + dma->size,
				       size >> PAGE_SHIFT, prot, &pfn);
 		if (npage <= 0) {
 			WARN_ON(!npage);
 			ret = (int)npage;
 			break;
 		}
 
-		/* Verify pages are not already mapped */
-		for (i = 0; i < npage; i++) {
-			if (iommu_iova_to_phys(iommu->domain,
-					       iova + (i << PAGE_SHIFT))) {
-				vfio_unpin_pages(pfn, npage, prot, true);
-				ret = -EBUSY;
-				break;
-			}
-		}
-
-		ret = iommu_map(iommu->domain, iova,
-				(phys_addr_t)pfn << PAGE_SHIFT,
-				npage << PAGE_SHIFT, prot);
+		/* Map it! */
+		ret = vfio_iommu_map(iommu, iova + dma->size, pfn, npage,
+				     prot);
 		if (ret) {
-			if (ret != -EBUSY ||
-			    map_try_harder(iommu, iova, pfn, npage, prot)) {
-				vfio_unpin_pages(pfn, npage, prot, true);
-				break;
-			}
+			vfio_unpin_pages(pfn, npage, prot, true);
+			break;
 		}
 
-		size = npage << PAGE_SHIFT;
-
-		/*
-		 * Check if we abut a region below - nothing below 0.
-		 * This is the most likely case when mapping chunks of
-		 * physically contiguous regions within a virtual address
-		 * range.  Update the abutting entry in place since iova
-		 * doesn't change.
-		 */
-		if (likely(iova)) {
-			struct vfio_dma *tmp;
-			tmp = vfio_find_dma(iommu, iova - 1, 1);
-			if (tmp && tmp->prot == prot &&
-			    tmp->vaddr + tmp->size == vaddr) {
-				tmp->size += size;
-				iova = tmp->iova;
-				size = tmp->size;
-				vaddr = tmp->vaddr;
-				dma = tmp;
-			}
-		}
+		size -= npage << PAGE_SHIFT;
+		dma->size += npage << PAGE_SHIFT;
+	}
 
-		/*
-		 * Check if we abut a region above - nothing above ~0 + 1.
-		 * If we abut above and below, remove and free.  If only
-		 * abut above, remove, modify, reinsert.
-		 */
-		if (likely(iova + size)) {
-			struct vfio_dma *tmp;
-			tmp = vfio_find_dma(iommu, iova + size, 1);
-			if (tmp && tmp->prot == prot &&
-			    tmp->vaddr == vaddr + size) {
-				vfio_remove_dma(iommu, tmp);
-				if (dma) {
-					dma->size += tmp->size;
-					kfree(tmp);
-				} else {
-					size += tmp->size;
-					tmp->size = size;
-					tmp->iova = iova;
-					tmp->vaddr = vaddr;
-					vfio_insert_dma(iommu, tmp);
-					dma = tmp;
-				}
-			}
-		}
+	if (ret)
+		vfio_remove_dma(iommu, dma);
+
+	mutex_unlock(&iommu->lock);
+	return ret;
+}
+
+static int vfio_bus_type(struct device *dev, void *data)
+{
+	struct bus_type **bus = data;
 
-		if (!dma) {
-			dma = kzalloc(sizeof(*dma), GFP_KERNEL);
-			if (!dma) {
-				iommu_unmap(iommu->domain, iova, size);
-				vfio_unpin_pages(pfn, npage, prot, true);
-				ret = -ENOMEM;
-				break;
+	if (*bus && *bus != dev->bus)
+		return -EINVAL;
+
+	*bus = dev->bus;
+
+	return 0;
+}
+
+static int vfio_iommu_replay(struct vfio_iommu *iommu,
+			     struct vfio_domain *domain)
+{
+	struct vfio_domain *d;
+	struct rb_node *n;
+	int ret;
+
+	/* Arbitrarily pick the first domain in the list for lookups */
+	d = list_first_entry(&iommu->domain_list, struct vfio_domain, next);
+	n = rb_first(&iommu->dma_list);
+
+	/* If there's not a domain, there better not be any mappings */
+	if (WARN_ON(n && !d))
+		return -EINVAL;
+
+	for (; n; n = rb_next(n)) {
+		struct vfio_dma *dma;
+		dma_addr_t iova;
+
+		dma = rb_entry(n, struct vfio_dma, node);
+		iova = dma->iova;
+
+		while (iova < dma->iova + dma->size) {
+			phys_addr_t phys = iommu_iova_to_phys(d->domain, iova);
+			size_t size;
+
+			if (WARN_ON(!phys)) {
+				iova += PAGE_SIZE;
+				continue;
 			}
 
-			dma->size = size;
-			dma->iova = iova;
-			dma->vaddr = vaddr;
-			dma->prot = prot;
-			vfio_insert_dma(iommu, dma);
-		}
-	}
+			size = PAGE_SIZE;
 
-	if (ret) {
-		struct vfio_dma *tmp;
-		iova = map->iova;
-		size = map->size;
-		while ((tmp = vfio_find_dma(iommu, iova, size))) {
-			int r = vfio_remove_dma_overlap(iommu, iova,
-							&size, tmp);
-			if (WARN_ON(r || !size))
-				break;
-		}
-	}
+			while (iova + size < dma->iova + dma->size &&
+			       phys + size == iommu_iova_to_phys(d->domain,
+								 iova + size))
+				size += PAGE_SIZE;
+
+			ret = iommu_map(domain->domain, iova, phys,
+					size, dma->prot | domain->prot);
+			if (ret)
+				return ret;
+
+			iova += size;
+		}
+	}
 
-	mutex_unlock(&iommu->lock);
-	return ret;
+	return 0;
 }
 
 static int vfio_iommu_type1_attach_group(void *iommu_data,
 					 struct iommu_group *iommu_group)
 {
 	struct vfio_iommu *iommu = iommu_data;
-	struct vfio_group *group, *tmp;
+	struct vfio_group *group, *g;
+	struct vfio_domain *domain, *d;
+	struct bus_type *bus = NULL;
 	int ret;
 
-	group = kzalloc(sizeof(*group), GFP_KERNEL);
-	if (!group)
-		return -ENOMEM;
-
 	mutex_lock(&iommu->lock);
 
-	list_for_each_entry(tmp, &iommu->group_list, next) {
-		if (tmp->iommu_group == iommu_group) {
+	list_for_each_entry(d, &iommu->domain_list, next) {
+		list_for_each_entry(g, &d->group_list, next) {
+			if (g->iommu_group != iommu_group)
+				continue;
+
 			mutex_unlock(&iommu->lock);
-			kfree(group);
 			return -EINVAL;
 		}
 	}
 
+	group = kzalloc(sizeof(*group), GFP_KERNEL);
+	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
+	if (!group || !domain) {
+		ret = -ENOMEM;
+		goto out_free;
+	}
+
+	group->iommu_group = iommu_group;
+
+	/* Determine bus_type in order to allocate a domain */
+	ret = iommu_group_for_each_dev(iommu_group, &bus, vfio_bus_type);
+	if (ret)
+		goto out_free;
+
+	domain->domain = iommu_domain_alloc(bus);
+	if (!domain->domain) {
+		ret = -EIO;
+		goto out_free;
+	}
+
+	ret = iommu_attach_group(domain->domain, iommu_group);
+	if (ret)
+		goto out_domain;
+
+	INIT_LIST_HEAD(&domain->group_list);
+	list_add(&group->next, &domain->group_list);
+
+	if (!allow_unsafe_interrupts &&
+	    !iommu_domain_has_cap(domain->domain, IOMMU_CAP_INTR_REMAP)) {
+		pr_warn("%s: No interrupt remapping support.  Use the module param \"allow_unsafe_interrupts\" to enable VFIO IOMMU support on this platform\n",
+		       __func__);
+		ret = -EPERM;
+		goto out_detach;
+	}
+
+	if (iommu_domain_has_cap(domain->domain, IOMMU_CAP_CACHE_COHERENCY))
+		domain->prot |= IOMMU_CACHE;
+
 	/*
-	 * TODO: Domain have capabilities that might change as we add
-	 * groups (see iommu->cache, currently never set).  Check for
-	 * them and potentially disallow groups to be attached when it
-	 * would change capabilities (ugh).
+	 * Try to match an existing compatible domain.  We don't want to
+	 * preclude an IOMMU driver supporting multiple bus_types and being
+	 * able to include different bus_types in the same IOMMU domain, so
+	 * we test whether the domains use the same iommu_ops rather than
+	 * testing if they're on the same bus_type.
 	 */
-	ret = iommu_attach_group(iommu->domain, iommu_group);
-	if (ret) {
-		mutex_unlock(&iommu->lock);
-		kfree(group);
-		return ret;
+	list_for_each_entry(d, &iommu->domain_list, next) {
+		if (d->domain->ops == domain->domain->ops &&
+		    d->prot == domain->prot) {
+			iommu_detach_group(domain->domain, iommu_group);
+			if (!iommu_attach_group(d->domain, iommu_group)) {
+				list_add(&group->next, &d->group_list);
+				iommu_domain_free(domain->domain);
+				kfree(domain);
+				mutex_unlock(&iommu->lock);
+				return 0;
+			}
+
+			ret = iommu_attach_group(domain->domain, iommu_group);
+			if (ret)
+				goto out_domain;
+		}
 	}
 
-	group->iommu_group = iommu_group;
-	list_add(&group->next, &iommu->group_list);
+	/* replay mappings on new domains */
+	ret = vfio_iommu_replay(iommu, domain);
+	if (ret)
+		goto out_detach;
+
+	list_add(&domain->next, &iommu->domain_list);
 
 	mutex_unlock(&iommu->lock);
 
 	return 0;
+
+out_detach:
+	iommu_detach_group(domain->domain, iommu_group);
+out_domain:
+	iommu_domain_free(domain->domain);
+out_free:
+	kfree(domain);
+	kfree(group);
+	mutex_unlock(&iommu->lock);
+	return ret;
+}
+
+static void vfio_iommu_unmap_unpin_all(struct vfio_iommu *iommu)
+{
+	struct rb_node *node;
+
+	while ((node = rb_first(&iommu->dma_list)))
+		vfio_remove_dma(iommu, rb_entry(node, struct vfio_dma, node));
 }
 
 static void vfio_iommu_type1_detach_group(void *iommu_data,
 					  struct iommu_group *iommu_group)
 {
 	struct vfio_iommu *iommu = iommu_data;
+	struct vfio_domain *domain;
 	struct vfio_group *group;
 
 	mutex_lock(&iommu->lock);
 
-	list_for_each_entry(group, &iommu->group_list, next) {
-		if (group->iommu_group == iommu_group) {
-			iommu_detach_group(iommu->domain, iommu_group);
+	list_for_each_entry(domain, &iommu->domain_list, next) {
+		list_for_each_entry(group, &domain->group_list, next) {
+			if (group->iommu_group != iommu_group)
+				continue;
+
+			iommu_detach_group(domain->domain, iommu_group);
 			list_del(&group->next);
 			kfree(group);
-			break;
+			/*
+			 * Group ownership provides privilege, if the group
+			 * list is empty, the domain goes away.  If it's the
+			 * last domain, then all the mappings go away too.
+			 */
+			if (list_empty(&domain->group_list)) {
+				if (list_is_singular(&iommu->domain_list))
+					vfio_iommu_unmap_unpin_all(iommu);
+				iommu_domain_free(domain->domain);
+				list_del(&domain->next);
+				kfree(domain);
+			}
+			goto done;
 		}
 	}
 
+done:
 	mutex_unlock(&iommu->lock);
 }
@@ -767,40 +819,17 @@ static void *vfio_iommu_type1_open(unsigned long arg)
 {
 	struct vfio_iommu *iommu;
 
-	if (arg != VFIO_TYPE1_IOMMU)
+	if (arg != VFIO_TYPE1_IOMMU && arg != VFIO_TYPE1v2_IOMMU)
 		return ERR_PTR(-EINVAL);
 
 	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
 	if (!iommu)
 		return ERR_PTR(-ENOMEM);
 
-	INIT_LIST_HEAD(&iommu->group_list);
+	INIT_LIST_HEAD(&iommu->domain_list);
 	iommu->dma_list = RB_ROOT;
 	mutex_init(&iommu->lock);
-
-	/*
-	 * Wish we didn't have to know about bus_type here.
-	 */
-	iommu->domain = iommu_domain_alloc(&pci_bus_type);
-	if (!iommu->domain) {
-		kfree(iommu);
-		return ERR_PTR(-EIO);
-	}
-
-	/*
-	 * Wish we could specify required capabilities rather than create
-	 * a domain, see what comes out and hope it doesn't change along
-	 * the way.  Fortunately we know interrupt remapping is global for
-	 * our iommus.
-	 */
-	if (!allow_unsafe_interrupts &&
-	    !iommu_domain_has_cap(iommu->domain, IOMMU_CAP_INTR_REMAP)) {
-		pr_warn("%s: No interrupt remapping support.  Use the module param \"allow_unsafe_interrupts\" to enable VFIO IOMMU support on this platform\n",
-		       __func__);
-		iommu_domain_free(iommu->domain);
-		kfree(iommu);
-		return ERR_PTR(-EPERM);
-	}
+	iommu->v2 = (arg == VFIO_TYPE1v2_IOMMU);
 
 	return iommu;
 }
@@ -808,26 +837,42 @@ static void *vfio_iommu_type1_open(unsigned long arg)
 static void vfio_iommu_type1_release(void *iommu_data)
 {
 	struct vfio_iommu *iommu = iommu_data;
+	struct vfio_domain *domain, *domain_tmp;
 	struct vfio_group *group, *group_tmp;
-	struct rb_node *node;
 
-	list_for_each_entry_safe(group, group_tmp, &iommu->group_list, next) {
-		iommu_detach_group(iommu->domain, group->iommu_group);
-		list_del(&group->next);
-		kfree(group);
+	vfio_iommu_unmap_unpin_all(iommu);
+
+	list_for_each_entry_safe(domain, domain_tmp,
+				 &iommu->domain_list, next) {
+		list_for_each_entry_safe(group, group_tmp,
+					 &domain->group_list, next) {
+			iommu_detach_group(domain->domain, group->iommu_group);
+			list_del(&group->next);
+			kfree(group);
+		}
+		iommu_domain_free(domain->domain);
+		list_del(&domain->next);
+		kfree(domain);
 	}
 
-	while ((node = rb_first(&iommu->dma_list))) {
-		struct vfio_dma *dma = rb_entry(node, struct vfio_dma, node);
-		size_t size = dma->size;
-		vfio_remove_dma_overlap(iommu, dma->iova, &size, dma);
-		if (WARN_ON(!size))
+	kfree(iommu);
+}
+
+static int vfio_domains_have_iommu_cache(struct vfio_iommu *iommu)
+{
+	struct vfio_domain *domain;
+	int ret = 1;
+
+	mutex_lock(&iommu->lock);
+	list_for_each_entry(domain, &iommu->domain_list, next) {
+		if (!(domain->prot & IOMMU_CACHE)) {
+			ret = 0;
 			break;
+		}
 	}
+	mutex_unlock(&iommu->lock);
 
-	iommu_domain_free(iommu->domain);
-	iommu->domain = NULL;
-	kfree(iommu);
+	return ret;
 }
 
 static long vfio_iommu_type1_ioctl(void *iommu_data,
@@ -839,7 +884,12 @@ static long vfio_iommu_type1_ioctl(void *iommu_data,
 	if (cmd == VFIO_CHECK_EXTENSION) {
 		switch (arg) {
 		case VFIO_TYPE1_IOMMU:
+		case VFIO_TYPE1v2_IOMMU:
 			return 1;
+		case VFIO_DMA_CC_IOMMU:
+			if (!iommu)
+				return 0;
+			return vfio_domains_have_iommu_cache(iommu);
 		default:
 			return 0;
 		}
@@ -856,7 +906,7 @@ static long vfio_iommu_type1_ioctl(void *iommu_data,
 
 		info.flags = 0;
 
-		info.iova_pgsizes = iommu->domain->ops->pgsize_bitmap;
+		info.iova_pgsizes = vfio_pgsize_bitmap(iommu);
 
 		return copy_to_user((void __user *)arg, &info, minsz);
@@ -909,9 +959,6 @@ static const struct vfio_iommu_driver_ops vfio_iommu_driver_ops_type1 = {
 
 static int __init vfio_iommu_type1_init(void)
 {
-	if (!iommu_present(&pci_bus_type))
-		return -ENODEV;
-
 	return vfio_register_iommu_driver(&vfio_iommu_driver_ops_type1);
 }
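
As context for the hot reset interface added to vfio_pci.c above: from userspace the two new ioctls form a probe-then-commit pair. The sketch below is illustrative only, not part of the diff. It assumes a kernel whose <linux/vfio.h> exports the VFIO_DEVICE_GET_PCI_HOT_RESET_INFO and VFIO_DEVICE_PCI_HOT_RESET structures shown here, and that device_fd and the owned group fds came from a normal (elided) container/group setup.

/*
 * Hedged sketch: probe the dependent-device set for a hot reset,
 * then request the reset while proving ownership of every group.
 */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

static int pci_hot_reset(int device_fd, const int32_t *group_fds,
			 uint32_t ngroups)
{
	struct vfio_pci_hot_reset_info *info, *bigger;
	struct vfio_pci_hot_reset *reset;
	uint32_t i, count;
	int ret;

	/* Deliberately undersized: fails with ENOSPC but fills in count */
	info = calloc(1, sizeof(*info));
	if (!info)
		return -1;
	info->argsz = sizeof(*info);
	if (ioctl(device_fd, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info) &&
	    errno != ENOSPC) {
		free(info);
		return -1;
	}
	count = info->count;

	/* Retry with room for the dependent device array */
	bigger = realloc(info, sizeof(*info) +
			 count * sizeof(info->devices[0]));
	if (!bigger) {
		free(info);
		return -1;
	}
	info = bigger;
	info->argsz = sizeof(*info) + count * sizeof(info->devices[0]);
	if (ioctl(device_fd, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info)) {
		free(info);
		return -1;
	}

	for (i = 0; i < info->count; i++)
		printf("affected: group %u  %04x:%02x:%02x.%x\n",
		       info->devices[i].group_id, info->devices[i].segment,
		       info->devices[i].bus, info->devices[i].devfn >> 3,
		       info->devices[i].devfn & 0x7);
	free(info);

	/* The kernel validates these fds against the affected groups */
	reset = calloc(1, sizeof(*reset) + ngroups * sizeof(int32_t));
	if (!reset)
		return -1;
	reset->argsz = sizeof(*reset) + ngroups * sizeof(int32_t);
	reset->count = ngroups;
	memcpy(reset->group_fds, group_fds, ngroups * sizeof(int32_t));
	ret = ioctl(device_fd, VFIO_DEVICE_PCI_HOT_RESET, reset);
	free(reset);
	return ret;
}

The first call is made too small on purpose so the driver reports the dependent device count via -ENOSPC, mirroring the hdr.count handling in the ioctl implementation above; the second ioctl only succeeds if the supplied group fds cover every device that vfio_pci_validate_devs() finds on the slot or bus.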

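Similarly, the VFIO_TYPE1v2_IOMMU semantics described in the long vfio_iommu_type1.c comment can be exercised with the type1 DMA ioctls. Another hedged sketch under the same caveats: the container fd is assumed to be already bound to a group and configured with VFIO_SET_IOMMU(VFIO_TYPE1v2_IOMMU); error handling is minimal.

/* Hedged sketch of a v2 map/unmap round trip on a configured container */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/vfio.h>

static int map_then_unmap(int container, uint64_t iova, size_t size)
{
	struct vfio_iommu_type1_dma_map map;
	struct vfio_iommu_type1_dma_unmap unmap;
	void *buf;

	buf = mmap(NULL, size, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED)
		return -1;

	memset(&map, 0, sizeof(map));
	map.argsz = sizeof(map);
	map.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE;
	map.vaddr = (uint64_t)(uintptr_t)buf;
	map.iova = iova;
	map.size = size;
	if (ioctl(container, VFIO_IOMMU_MAP_DMA, &map))
		return -1;

	/*
	 * Under v2 this must cover the whole prior mapping; an unmap
	 * starting at iova + 4096 would fail with EINVAL instead of
	 * silently widening or shrinking, per the bisection checks
	 * added to vfio_dma_do_unmap() above.
	 */
	memset(&unmap, 0, sizeof(unmap));
	unmap.argsz = sizeof(unmap);
	unmap.iova = iova;
	unmap.size = size;
	return ioctl(container, VFIO_IOMMU_UNMAP_DMA, &unmap);
}

Under v1 the same unmap request could legitimately report a zero-sized unmap or unmap more than asked; v2 rejects any request that would bisect an existing mapping, which is exactly the behavior change the in-code comment documents.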