Diffstat (limited to 'drivers/iommu/amd_iommu_init.c')
 drivers/iommu/amd_iommu_init.c | 317 ++++++++++++++++++++++++++++++++++++++++++++++++------
 1 file changed, 281 insertions(+), 36 deletions(-)
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index 551768af3fe..0e08545d729 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -26,7 +26,6 @@
 #include <linux/msi.h>
 #include <linux/amd-iommu.h>
 #include <linux/export.h>
-#include <acpi/acpi.h>
 #include <asm/pci-direct.h>
 #include <asm/iommu.h>
 #include <asm/gart.h>
@@ -99,7 +98,7 @@ struct ivhd_header {
         u64 mmio_phys;
         u16 pci_seg;
         u16 info;
-        u32 reserved;
+        u32 efr;
 } __attribute__((packed));
 
 /*
@@ -151,9 +150,10 @@ int amd_iommus_present;
 bool amd_iommu_np_cache __read_mostly;
 bool amd_iommu_iotlb_sup __read_mostly = true;
 
-u32 amd_iommu_max_pasids __read_mostly = ~0;
+u32 amd_iommu_max_pasid __read_mostly = ~0;
 
 bool amd_iommu_v2_present __read_mostly;
+bool amd_iommu_pc_present __read_mostly;
 
 bool amd_iommu_force_isolation __read_mostly;
 
@@ -213,6 +213,14 @@ enum iommu_init_state {
         IOMMU_INIT_ERROR,
 };
 
+/* Early ioapic and hpet maps from kernel command line */
+#define EARLY_MAP_SIZE 4
+static struct devid_map __initdata early_ioapic_map[EARLY_MAP_SIZE];
+static struct devid_map __initdata early_hpet_map[EARLY_MAP_SIZE];
+static int __initdata early_ioapic_map_size;
+static int __initdata early_hpet_map_size;
+static bool __initdata cmdline_maps;
+
 static enum iommu_init_state init_state = IOMMU_START_STATE;
 
 static int amd_iommu_enable_interrupts(void);
@@ -361,23 +369,23 @@ static void iommu_disable(struct amd_iommu *iommu)
  * mapping and unmapping functions for the IOMMU MMIO space. Each AMD IOMMU in
  * the system has one.
  */
-static u8 __iomem * __init iommu_map_mmio_space(u64 address)
+static u8 __iomem * __init iommu_map_mmio_space(u64 address, u64 end)
 {
-        if (!request_mem_region(address, MMIO_REGION_LENGTH, "amd_iommu")) {
-                pr_err("AMD-Vi: Can not reserve memory region %llx for mmio\n",
-                       address);
+        if (!request_mem_region(address, end, "amd_iommu")) {
+                pr_err("AMD-Vi: Can not reserve memory region %llx-%llx for mmio\n",
+                        address, end);
                 pr_err("AMD-Vi: This is a BIOS bug. Please contact your hardware vendor\n");
                 return NULL;
         }
 
-        return (u8 __iomem *)ioremap_nocache(address, MMIO_REGION_LENGTH);
+        return (u8 __iomem *)ioremap_nocache(address, end);
 }
 
 static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu)
 {
         if (iommu->mmio_base)
                 iounmap(iommu->mmio_base);
-        release_mem_region(iommu->mmio_phys, MMIO_REGION_LENGTH);
+        release_mem_region(iommu->mmio_phys, iommu->mmio_phys_end);
 }
 
 /****************************************************************************
@@ -703,31 +711,66 @@ static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu,
         set_iommu_for_device(iommu, devid);
 }
 
-static int add_special_device(u8 type, u8 id, u16 devid)
+static int __init add_special_device(u8 type, u8 id, u16 devid, bool cmd_line)
 {
         struct devid_map *entry;
         struct list_head *list;
 
-        if (type != IVHD_SPECIAL_IOAPIC && type != IVHD_SPECIAL_HPET)
+        if (type == IVHD_SPECIAL_IOAPIC)
+                list = &ioapic_map;
+        else if (type == IVHD_SPECIAL_HPET)
+                list = &hpet_map;
+        else
                 return -EINVAL;
 
+        list_for_each_entry(entry, list, list) {
+                if (!(entry->id == id && entry->cmd_line))
+                        continue;
+
+                pr_info("AMD-Vi: Command-line override present for %s id %d - ignoring\n",
+                        type == IVHD_SPECIAL_IOAPIC ? "IOAPIC" : "HPET", id);
+
+                return 0;
+        }
+
         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
         if (!entry)
                 return -ENOMEM;
 
-        entry->id = id;
-        entry->devid = devid;
-
-        if (type == IVHD_SPECIAL_IOAPIC)
-                list = &ioapic_map;
-        else
-                list = &hpet_map;
+        entry->id = id;
+        entry->devid = devid;
+        entry->cmd_line = cmd_line;
 
         list_add_tail(&entry->list, list);
 
         return 0;
 }
 
+static int __init add_early_maps(void)
+{
+        int i, ret;
+
+        for (i = 0; i < early_ioapic_map_size; ++i) {
+                ret = add_special_device(IVHD_SPECIAL_IOAPIC,
+                                         early_ioapic_map[i].id,
+                                         early_ioapic_map[i].devid,
+                                         early_ioapic_map[i].cmd_line);
+                if (ret)
+                        return ret;
+        }
+
+        for (i = 0; i < early_hpet_map_size; ++i) {
+                ret = add_special_device(IVHD_SPECIAL_HPET,
+                                         early_hpet_map[i].id,
+                                         early_hpet_map[i].devid,
+                                         early_hpet_map[i].cmd_line);
+                if (ret)
+                        return ret;
+        }
+
+        return 0;
+}
+
 /*
  * Reads the device exclusion range from ACPI and initializes the IOMMU with
  * it
@@ -745,7 +788,7 @@ static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m)
                  * per device. But we can enable the exclusion range per
                  * device. This is done here
                  */
-                set_dev_entry_bit(m->devid, DEV_ENTRY_EX);
+                set_dev_entry_bit(devid, DEV_ENTRY_EX);
                 iommu->exclusion_start = m->range_start;
                 iommu->exclusion_length = m->range_length;
         }
@@ -764,6 +807,12 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
         u32 dev_i, ext_flags = 0;
         bool alias = false;
         struct ivhd_entry *e;
+        int ret;
+
+
+        ret = add_early_maps();
+        if (ret)
+                return ret;
 
         /*
          * First save the recommended feature enable bits from ACPI
@@ -929,7 +978,7 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
                                     PCI_FUNC(devid));
 
                         set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
-                        ret = add_special_device(type, handle, devid);
+                        ret = add_special_device(type, handle, devid, false);
                         if (ret)
                                 return ret;
                         break;
@@ -980,7 +1029,7 @@ static void __init free_iommu_all(void)
  * BIOS should disable L2B micellaneous clock gating by setting
  * L2_L2B_CK_GATE_CONTROL[CKGateL2BMiscDisable](D0F2xF4_x90[2]) = 1b
  */
-static void __init amd_iommu_erratum_746_workaround(struct amd_iommu *iommu)
+static void amd_iommu_erratum_746_workaround(struct amd_iommu *iommu)
 {
         u32 value;
 
@@ -1036,7 +1085,18 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
         iommu->cap_ptr = h->cap_ptr;
         iommu->pci_seg = h->pci_seg;
         iommu->mmio_phys = h->mmio_phys;
-        iommu->mmio_base = iommu_map_mmio_space(h->mmio_phys);
+
+        /* Check if IVHD EFR contains proper max banks/counters */
+        if ((h->efr != 0) &&
+            ((h->efr & (0xF << 13)) != 0) &&
+            ((h->efr & (0x3F << 17)) != 0)) {
+                iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
+        } else {
+                iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
+        }
+
+        iommu->mmio_base = iommu_map_mmio_space(iommu->mmio_phys,
+                                                iommu->mmio_phys_end);
         if (!iommu->mmio_base)
                 return -ENOMEM;
 
@@ -1111,6 +1171,33 @@ static int __init init_iommu_all(struct acpi_table_header *table)
         return 0;
 }
 
+
+static void init_iommu_perf_ctr(struct amd_iommu *iommu)
+{
+        u64 val = 0xabcd, val2 = 0;
+
+        if (!iommu_feature(iommu, FEATURE_PC))
+                return;
+
+        amd_iommu_pc_present = true;
+
+        /* Check if the performance counters can be written to */
+        if ((0 != amd_iommu_pc_get_set_reg_val(0, 0, 0, 0, &val, true)) ||
+            (0 != amd_iommu_pc_get_set_reg_val(0, 0, 0, 0, &val2, false)) ||
+            (val != val2)) {
+                pr_err("AMD-Vi: Unable to write to IOMMU perf counter.\n");
+                amd_iommu_pc_present = false;
+                return;
+        }
+
+        pr_info("AMD-Vi: IOMMU performance counters supported\n");
+
+        val = readl(iommu->mmio_base + MMIO_CNTR_CONF_OFFSET);
+        iommu->max_banks = (u8) ((val >> 12) & 0x3f);
+        iommu->max_counters = (u8) ((val >> 7) & 0xf);
+}
+
+
 static int iommu_init_pci(struct amd_iommu *iommu)
 {
         int cap_ptr = iommu->cap_ptr;
@@ -1144,14 +1231,16 @@ static int iommu_init_pci(struct amd_iommu *iommu)
 
         if (iommu_feature(iommu, FEATURE_GT)) {
                 int glxval;
-                u32 pasids;
-                u64 shift;
+                u32 max_pasid;
+                u64 pasmax;
+
+                pasmax = iommu->features & FEATURE_PASID_MASK;
+                pasmax >>= FEATURE_PASID_SHIFT;
+                max_pasid = (1 << (pasmax + 1)) - 1;
 
-                shift = iommu->features & FEATURE_PASID_MASK;
-                shift >>= FEATURE_PASID_SHIFT;
-                pasids = (1 << shift);
+                amd_iommu_max_pasid = min(amd_iommu_max_pasid, max_pasid);
 
-                amd_iommu_max_pasids = min(amd_iommu_max_pasids, pasids);
+                BUG_ON(amd_iommu_max_pasid & ~PASID_MASK);
 
                 glxval = iommu->features & FEATURE_GLXVAL_MASK;
                 glxval >>= FEATURE_GLXVAL_SHIFT;
@@ -1177,6 +1266,8 @@ static int iommu_init_pci(struct amd_iommu *iommu)
         if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE))
                 amd_iommu_np_cache = true;
 
+        init_iommu_perf_ctr(iommu);
+
         if (is_rd890_iommu(iommu->dev)) {
                 int i, j;
 
@@ -1229,7 +1320,7 @@ static void print_iommu_info(void)
                         if (iommu_feature(iommu, (1ULL << i)))
                                 pr_cont(" %s", feat_str[i]);
                         }
-                pr_cont("\n");
+                        pr_cont("\n");
                 }
         }
         if (irq_remapping_enabled)
@@ -1275,7 +1366,7 @@ static int iommu_setup_msi(struct amd_iommu *iommu)
                                    amd_iommu_int_handler,
                                    amd_iommu_int_thread,
                                    0, "AMD-Vi",
-                                   iommu->dev);
+                                   iommu);
 
         if (r) {
                 pci_disable_msi(iommu->dev);
@@ -1294,7 +1385,7 @@ static int iommu_init_msi(struct amd_iommu *iommu)
         if (iommu->int_enabled)
                 goto enable_faults;
 
-        if (pci_find_capability(iommu->dev, PCI_CAP_ID_MSI))
+        if (iommu->dev->msi_cap)
                 ret = iommu_setup_msi(iommu);
         else
                 ret = -ENODEV;
@@ -1638,18 +1729,28 @@ static void __init free_on_init_error(void)
 
 static bool __init check_ioapic_information(void)
 {
+        const char *fw_bug = FW_BUG;
         bool ret, has_sb_ioapic;
         int idx;
 
         has_sb_ioapic = false;
         ret = false;
 
+        /*
+         * If we have map overrides on the kernel command line the
+         * messages in this function might not describe firmware bugs
+         * anymore - so be careful
+         */
+        if (cmdline_maps)
+                fw_bug = "";
+
         for (idx = 0; idx < nr_ioapics; idx++) {
                 int devid, id = mpc_ioapic_id(idx);
 
                 devid = get_ioapic_devid(id);
                 if (devid < 0) {
-                        pr_err(FW_BUG "AMD-Vi: IOAPIC[%d] not in IVRS table\n", id);
+                        pr_err("%sAMD-Vi: IOAPIC[%d] not in IVRS table\n",
+                                fw_bug, id);
                         ret = false;
                 } else if (devid == IOAPIC_SB_DEVID) {
                         has_sb_ioapic = true;
@@ -1666,11 +1767,11 @@ static bool __init check_ioapic_information(void)
                  * when the BIOS is buggy and provides us the wrong
                  * device id for the IOAPIC in the system.
                  */
-                pr_err(FW_BUG "AMD-Vi: No southbridge IOAPIC found in IVRS table\n");
+                pr_err("%sAMD-Vi: No southbridge IOAPIC found\n", fw_bug);
         }
 
         if (!ret)
-                pr_err("AMD-Vi: Disabling interrupt remapping due to BIOS Bug(s)\n");
+                pr_err("AMD-Vi: Disabling interrupt remapping\n");
 
         return ret;
 }
@@ -1801,6 +1902,7 @@ static int __init early_amd_iommu_init(void)
                  * Interrupt remapping enabled, create kmem_cache for the
                  * remapping tables.
                  */
+                ret = -ENOMEM;
                 amd_iommu_irq_cache = kmem_cache_create("irq_remap_cache",
                                 MAX_IRQS_PER_TABLE * sizeof(u32),
                                 IRQ_TABLE_ALIGNMENT,
@@ -2097,8 +2199,70 @@ static int __init parse_amd_iommu_options(char *str)
         return 1;
 }
 
-__setup("amd_iommu_dump", parse_amd_iommu_dump);
-__setup("amd_iommu=", parse_amd_iommu_options);
+static int __init parse_ivrs_ioapic(char *str)
+{
+        unsigned int bus, dev, fn;
+        int ret, id, i;
+        u16 devid;
+
+        ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn);
+
+        if (ret != 4) {
+                pr_err("AMD-Vi: Invalid command line: ivrs_ioapic%s\n", str);
+                return 1;
+        }
+
+        if (early_ioapic_map_size == EARLY_MAP_SIZE) {
+                pr_err("AMD-Vi: Early IOAPIC map overflow - ignoring ivrs_ioapic%s\n",
+                        str);
+                return 1;
+        }
+
+        devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);
+
+        cmdline_maps = true;
+        i = early_ioapic_map_size++;
+        early_ioapic_map[i].id = id;
+        early_ioapic_map[i].devid = devid;
+        early_ioapic_map[i].cmd_line = true;
+
+        return 1;
+}
+
+static int __init parse_ivrs_hpet(char *str)
+{
+        unsigned int bus, dev, fn;
+        int ret, id, i;
+        u16 devid;
+
+        ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn);
+
+        if (ret != 4) {
+                pr_err("AMD-Vi: Invalid command line: ivrs_hpet%s\n", str);
+                return 1;
+        }
+
+        if (early_hpet_map_size == EARLY_MAP_SIZE) {
+                pr_err("AMD-Vi: Early HPET map overflow - ignoring ivrs_hpet%s\n",
+                        str);
+                return 1;
+        }
+
+        devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);
+
+        cmdline_maps = true;
+        i = early_hpet_map_size++;
+        early_hpet_map[i].id = id;
+        early_hpet_map[i].devid = devid;
+        early_hpet_map[i].cmd_line = true;
+
+        return 1;
+}
+
+__setup("amd_iommu_dump", parse_amd_iommu_dump);
+__setup("amd_iommu=", parse_amd_iommu_options);
+__setup("ivrs_ioapic", parse_ivrs_ioapic);
+__setup("ivrs_hpet", parse_ivrs_hpet);
 
 IOMMU_INIT_FINISH(amd_iommu_detect,
                   gart_iommu_hole_init,
@@ -2110,3 +2274,84 @@ bool amd_iommu_v2_supported(void)
         return amd_iommu_v2_present;
 }
 EXPORT_SYMBOL(amd_iommu_v2_supported);
+
+/****************************************************************************
+ *
+ * IOMMU EFR Performance Counter support functionality. This code allows
+ * access to the IOMMU PC functionality.
+ *
+ ****************************************************************************/
+
+u8 amd_iommu_pc_get_max_banks(u16 devid)
+{
+        struct amd_iommu *iommu;
+        u8 ret = 0;
+
+        /* locate the iommu governing the devid */
+        iommu = amd_iommu_rlookup_table[devid];
+        if (iommu)
+                ret = iommu->max_banks;
+
+        return ret;
+}
+EXPORT_SYMBOL(amd_iommu_pc_get_max_banks);
+
+bool amd_iommu_pc_supported(void)
+{
+        return amd_iommu_pc_present;
+}
+EXPORT_SYMBOL(amd_iommu_pc_supported);
+
+u8 amd_iommu_pc_get_max_counters(u16 devid)
+{
+        struct amd_iommu *iommu;
+        u8 ret = 0;
+
+        /* locate the iommu governing the devid */
+        iommu = amd_iommu_rlookup_table[devid];
+        if (iommu)
+                ret = iommu->max_counters;
+
+        return ret;
+}
+EXPORT_SYMBOL(amd_iommu_pc_get_max_counters);
+
+int amd_iommu_pc_get_set_reg_val(u16 devid, u8 bank, u8 cntr, u8 fxn,
+                                 u64 *value, bool is_write)
+{
+        struct amd_iommu *iommu;
+        u32 offset;
+        u32 max_offset_lim;
+
+        /* Make sure the IOMMU PC resource is available */
+        if (!amd_iommu_pc_present)
+                return -ENODEV;
+
+        /* Locate the iommu associated with the device ID */
+        iommu = amd_iommu_rlookup_table[devid];
+
+        /* Check for valid iommu and pc register indexing */
+        if (WARN_ON((iommu == NULL) || (fxn > 0x28) || (fxn & 7)))
+                return -ENODEV;
+
+        offset = (u32)(((0x40|bank) << 12) | (cntr << 8) | fxn);
+
+        /* Limit the offset to the hw defined mmio region aperture */
+        max_offset_lim = (u32)(((0x40|iommu->max_banks) << 12) |
+                                (iommu->max_counters << 8) | 0x28);
+        if ((offset < MMIO_CNTR_REG_OFFSET) ||
+            (offset > max_offset_lim))
+                return -EINVAL;
+
+        if (is_write) {
+                writel((u32)*value, iommu->mmio_base + offset);
+                writel((*value >> 32), iommu->mmio_base + offset + 4);
+        } else {
+                *value = readl(iommu->mmio_base + offset + 4);
+                *value <<= 32;
+                *value = readl(iommu->mmio_base + offset);
+        }
+
+        return 0;
+}
+EXPORT_SYMBOL(amd_iommu_pc_get_set_reg_val);
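A note on the new ivrs_ioapic[ID]=BUS:DEV.FN and ivrs_hpet[ID]=BUS:DEV.FN options added above: the sscanf() format string and the bus/device/function packing below are copied from parse_ivrs_ioapic() in the patch, but the surrounding program is only a userspace sketch for illustration; the helper name and the sample string are made up.

#include <stdio.h>
#include <stdint.h>

/* Mirror of the patch's override parsing: "[ID]=BUS:DEV.FN" -> 16-bit devid */
static int parse_ivrs_override(const char *str, int *id, uint16_t *devid)
{
        unsigned int bus, dev, fn;

        /* Same format string the patch passes to sscanf() */
        if (sscanf(str, "[%d]=%x:%x.%x", id, &bus, &dev, &fn) != 4)
                return -1;

        /* Same packing as the patch: 8-bit bus, 5-bit device, 3-bit function */
        *devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);

        return 0;
}

int main(void)
{
        int id;
        uint16_t devid;

        /* "ivrs_ioapic[9]=00:14.0" reaches the parser as "[9]=00:14.0" */
        if (parse_ivrs_override("[9]=00:14.0", &id, &devid) == 0)
                printf("IOAPIC id %d -> devid 0x%04x\n", id, devid);

        return 0;
}

With the sample value, IOAPIC id 9 maps to devid 0x00a0 (bus 0x00, device 0x14, function 0), the 16-bit requester ID that the early_ioapic_map entries later feed into add_special_device().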

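Similarly, a small standalone sketch of the banked counter addressing that amd_iommu_pc_get_set_reg_val() implements: a register lives at ((0x40 | bank) << 12) | (cntr << 8) | fxn inside the IOMMU MMIO aperture. Only the shift/mask arithmetic is taken from the patch; the 0x40000 lower bound (standing in for MMIO_CNTR_REG_OFFSET) and the 0x3f/0xf maxima are assumed, illustrative values matching the widest fields the init_iommu_perf_ctr() probe can decode.

#include <stdio.h>
#include <stdint.h>

/*
 * Compute the MMIO offset of one perf-counter register, with the same
 * validity checks the patch applies (fxn 8-byte aligned, at most 0x28,
 * offset clamped against the probed bank/counter maxima).
 */
static int pc_reg_offset(uint8_t bank, uint8_t cntr, uint8_t fxn,
                         uint8_t max_banks, uint8_t max_counters,
                         uint32_t *offset)
{
        uint32_t off, lim;

        if (fxn > 0x28 || (fxn & 7))
                return -1;

        off = ((0x40u | bank) << 12) | ((uint32_t)cntr << 8) | fxn;

        /* Aperture limit, as in the patch's max_offset_lim */
        lim = ((0x40u | max_banks) << 12) |
              ((uint32_t)max_counters << 8) | 0x28;

        if (off < 0x40000 || off > lim)   /* 0x40000: assumed counter base */
                return -1;

        *offset = off;
        return 0;
}

int main(void)
{
        uint32_t off;

        /* bank 0, counter 0, register 0 sits at the start of the window */
        if (pc_reg_offset(0, 0, 0, 0x3f, 0xf, &off) == 0)
                printf("offset = 0x%05x\n", off);   /* prints 0x40000 */

        return 0;
}

Assuming that base value, this also shows why init_iommu_one() maps the larger region when the IVHD EFR advertises usable banks and counters: every counter offset from 0x40000 up lies beyond the smaller MMIO_CNTR_CONF_OFFSET aperture that is mapped otherwise.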