#include <linux/interrupt.h>
#include <linux/dmar.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/hpet.h>
#include <linux/pci.h>
#include <linux/irq.h>
#include <asm/io_apic.h>
#include <asm/smp.h>
#include <asm/cpu.h>
#include <linux/intel-iommu.h>
#include <acpi/acpi.h>
#include <asm/irq_remapping.h>
#include <asm/pci-direct.h>
#include <asm/msidef.h>
#include "irq_remapping.h"
struct ioapic_scope {
	struct intel_iommu *iommu;
	unsigned int id;
	unsigned int bus;	/* PCI bus number */
	unsigned int devfn;	/* PCI devfn number */
};

struct hpet_scope {
	struct intel_iommu *iommu;
	u8 id;
	unsigned int bus;
	unsigned int devfn;
};

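/*
 * Bit 11 of the interrupt remap table address register enables extended
 * (x2APIC) interrupt mode.  In legacy xAPIC mode the 8-bit destination
 * APIC ID sits in bits 15:8 of the IRTE destination field, hence the
 * shift when x2apic_mode is off.
 */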
#define IR_X2APIC_MODE(mode)	((mode) ? (1 << 11) : 0)
#define IRTE_DEST(dest)		((x2apic_mode) ? (dest) : (dest) << 8)

static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
static struct hpet_scope ir_hpet[MAX_HPET_TBS];
static int ir_ioapic_num, ir_hpet_num;

static DEFINE_RAW_SPINLOCK(irq_2_ir_lock);
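
/*
 * The remapping state for an IRQ lives in the irq_2_iommu struct embedded
 * in its irq_cfg chip data; returns NULL if the IRQ has no chip data.
 */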
static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
{
	struct irq_cfg *cfg = irq_get_chip_data(irq);

	return cfg ? &cfg->irq_2_iommu : NULL;
}

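/*
 * Copy the interrupt remap table entry backing @irq into @entry, under
 * irq_2_ir_lock so a concurrent update cannot be seen half-written.
 */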
int get_irte(int irq, struct irte *entry)
{
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
	unsigned long flags;
	int index;

	if (!entry || !irq_iommu)
		return -1;

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);

	index = irq_iommu->irte_index + irq_iommu->sub_handle;
	*entry = *(irq_iommu->iommu->ir_table->base + index);

	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
	return 0;
}

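/*
 * Reserve @count contiguous IRTEs (rounded up to a power of two so the
 * block can be addressed as a base index plus sub-handle, with
 * irte_mask = log2(count)) and record the base index in @irq's
 * irq_2_iommu.  Returns the base index, or -1 on failure.
 */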
static int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
{
	struct ir_table *table = iommu->ir_table;
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
	u16 index, start_index;
	unsigned int mask = 0;
	unsigned long flags;
	int i;

	if (!count || !irq_iommu)
		return -1;

	/*
	 * start the IRTE search from index 0.
	 */
	index = start_index = 0;

	if (count > 1) {
		count = __roundup_pow_of_two(count);
		mask = ilog2(count);
	}

	if (mask > ecap_max_handle_mask(iommu->ecap)) {
		printk(KERN_ERR
		       "Requested mask %x exceeds the max invalidation handle"
		       " mask value %Lx\n", mask,
		       ecap_max_handle_mask(iommu->ecap));
		return -1;
	}

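	/*
	 * Walk the table in count-sized strides (which keeps each block
	 * naturally aligned, since count is a power of two) until a run of
	 * count free entries is found; give up once the scan wraps back
	 * around to where it started.
	 */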
	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
	do {
		for (i = index; i < index + count; i++)
			if (table->base[i].present)
				break;
		/* empty index found */
		if (i == index + count)
			break;

		index = (index + count) % INTR_REMAP_TABLE_ENTRIES;

		if (index == start_index) {
			raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
			printk(KERN_ERR "can't allocate an IRTE\n");
			return -1;
		}
	} while (1);

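	/* Found a free block: mark it in use and attach it to this IRQ. */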
	for (i = index; i < index + count; i++)
		table->base[i].present = 1;

	irq_iommu->iommu = iommu;
	irq_iommu->irte_index = index;
	irq_iommu->sub_handle = 0;
	irq_iommu->irte_mask = mask;

	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return index;
}

static int qi_flush_iec(struct intel_iommu