Diffstat (limited to 'include/asm-i386')
-rw-r--r--   include/asm-i386/boot.h                              |   6
-rw-r--r--   include/asm-i386/bootparam.h                         |  85
-rw-r--r--   include/asm-i386/cpufeature.h                        |  26
-rw-r--r--   include/asm-i386/e820.h                              |  14
-rw-r--r--   include/asm-i386/fb.h                                |  17
-rw-r--r--   include/asm-i386/fixmap.h                            |   2
-rw-r--r--   include/asm-i386/ide.h                               |   4
-rw-r--r--   include/asm-i386/io.h                                |   1
-rw-r--r--   include/asm-i386/irq.h                               |   1
-rw-r--r--   include/asm-i386/mach-default/irq_vectors_limits.h   |   2
-rw-r--r--   include/asm-i386/mach-es7000/mach_apic.h             |   4
-rw-r--r--   include/asm-i386/mach-es7000/mach_mpparse.h          |   6
-rw-r--r--   include/asm-i386/mmu_context.h                       |   2
-rw-r--r--   include/asm-i386/page.h                              |   3
-rw-r--r--   include/asm-i386/paravirt.h                          |  24
-rw-r--r--   include/asm-i386/pci.h                               |  37
-rw-r--r--   include/asm-i386/pgalloc.h                           |   6
-rw-r--r--   include/asm-i386/pgtable-2level.h                    |   8
-rw-r--r--   include/asm-i386/pgtable-3level.h                    |  17
-rw-r--r--   include/asm-i386/pgtable.h                           |  40
-rw-r--r--   include/asm-i386/processor.h                         |   5
-rw-r--r--   include/asm-i386/required-features.h                 |  39
-rw-r--r--   include/asm-i386/setup.h                             |  14
-rw-r--r--   include/asm-i386/smp.h                               |   5
-rw-r--r--   include/asm-i386/thread_info.h                       |   5
-rw-r--r--   include/asm-i386/timer.h                             |  32
-rw-r--r--   include/asm-i386/unistd.h                            |   3
-rw-r--r--   include/asm-i386/vmi_time.h                          |   2
-rw-r--r--   include/asm-i386/xen/hypercall.h                     | 413
-rw-r--r--   include/asm-i386/xen/hypervisor.h                    |  73
-rw-r--r--   include/asm-i386/xen/interface.h                     | 188
31 files changed, 938 insertions, 146 deletions
diff --git a/include/asm-i386/boot.h b/include/asm-i386/boot.h
index bd024ab4fe5..ed8affbf96c 100644
--- a/include/asm-i386/boot.h
+++ b/include/asm-i386/boot.h
@@ -1,5 +1,5 @@
-#ifndef _LINUX_BOOT_H
-#define _LINUX_BOOT_H
+#ifndef _ASM_BOOT_H
+#define _ASM_BOOT_H
/* Don't touch these, unless you really know what you're doing. */
#define DEF_INITSEG 0x9000
@@ -17,4 +17,4 @@
+ (CONFIG_PHYSICAL_ALIGN - 1)) \
& ~(CONFIG_PHYSICAL_ALIGN - 1))
-#endif /* _LINUX_BOOT_H */
+#endif /* _ASM_BOOT_H */
diff --git a/include/asm-i386/bootparam.h b/include/asm-i386/bootparam.h
new file mode 100644
index 00000000000..427d8652bfd
--- /dev/null
+++ b/include/asm-i386/bootparam.h
@@ -0,0 +1,85 @@
+#ifndef _ASM_BOOTPARAM_H
+#define _ASM_BOOTPARAM_H
+
+#include <linux/types.h>
+#include <linux/screen_info.h>
+#include <linux/apm_bios.h>
+#include <asm/e820.h>
+#include <linux/edd.h>
+#include <video/edid.h>
+
+struct setup_header {
+ u8 setup_sects;
+ u16 root_flags;
+ u32 syssize;
+ u16 ram_size;
+ u16 vid_mode;
+ u16 root_dev;
+ u16 boot_flag;
+ u16 jump;
+ u32 header;
+ u16 version;
+ u32 realmode_swtch;
+ u16 start_sys;
+ u16 kernel_version;
+ u8 type_of_loader;
+ u8 loadflags;
+#define LOADED_HIGH 0x01
+#define CAN_USE_HEAP 0x80
+ u16 setup_move_size;
+ u32 code32_start;
+ u32 ramdisk_image;
+ u32 ramdisk_size;
+ u32 bootsect_kludge;
+ u16 heap_end_ptr;
+ u16 _pad1;
+ u32 cmd_line_ptr;
+ u32 initrd_addr_max;
+ u32 kernel_alignment;
+ u8 relocatable_kernel;
+} __attribute__((packed));
+
+struct sys_desc_table {
+ u16 length;
+ u8 table[14];
+};
+
+struct efi_info {
+ u32 _pad1;
+ u32 efi_systab;
+ u32 efi_memdesc_size;
+ u32 efi_memdesc_version;
+ u32 efi_memmap;
+ u32 efi_memmap_size;
+ u32 _pad2[2];
+};
+
+/* The so-called "zeropage" */
+struct boot_params {
+ struct screen_info screen_info; /* 0x000 */
+ struct apm_bios_info apm_bios_info; /* 0x040 */
+ u8 _pad2[12]; /* 0x054 */
+ u32 speedstep_info[4]; /* 0x060 */
+ u8 _pad3[16]; /* 0x070 */
+ u8 hd0_info[16]; /* obsolete! */ /* 0x080 */
+ u8 hd1_info[16]; /* obsolete! */ /* 0x090 */
+ struct sys_desc_table sys_desc_table; /* 0x0a0 */
+ u8 _pad4[144]; /* 0x0b0 */
+ struct edid_info edid_info; /* 0x140 */
+ struct efi_info efi_info; /* 0x1c0 */
+ u32 alt_mem_k; /* 0x1e0 */
+ u32 scratch; /* Scratch field! */ /* 0x1e4 */
+ u8 e820_entries; /* 0x1e8 */
+ u8 eddbuf_entries; /* 0x1e9 */
+ u8 edd_mbr_sig_buf_entries; /* 0x1ea */
+ u8 _pad6[6]; /* 0x1eb */
+ struct setup_header hdr; /* setup header */ /* 0x1f1 */
+ u8 _pad7[0x290-0x1f1-sizeof(struct setup_header)];
+ u32 edd_mbr_sig_buffer[EDD_MBR_SIG_MAX]; /* 0x290 */
+ struct e820entry e820_map[E820MAX]; /* 0x2d0 */
+ u8 _pad8[48]; /* 0xcd0 */
+ struct edd_info eddbuf[EDDMAXNR]; /* 0xd00 */
+ u8 _pad9[276]; /* 0xeec */
+} __attribute__((packed));
+
+#endif /* _ASM_BOOTPARAM_H */
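
The struct boot_params above mirrors the real-mode "zeropage" layout; e820_entries counts the valid slots in e820_map[], which is how setup code finds the firmware memory map. A minimal sketch of walking that map, assuming only what the header defines (the helper name and the printk output are illustrative, not part of this patch):

	static void dump_e820_from_zeropage(const struct boot_params *bp)
	{
		int i;

		/* e820_entries holds the number of valid slots in e820_map[] */
		for (i = 0; i < bp->e820_entries && i < E820MAX; i++) {
			const struct e820entry *e = &bp->e820_map[i];

			printk("e820: %016llx - %016llx type %u\n",
			       e->addr, e->addr + e->size, e->type);
		}
	}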
diff --git a/include/asm-i386/cpufeature.h b/include/asm-i386/cpufeature.h
index f514e906643..c961c03cf1e 100644
--- a/include/asm-i386/cpufeature.h
+++ b/include/asm-i386/cpufeature.h
@@ -12,7 +12,7 @@
#endif
#include <asm/required-features.h>
-#define NCAPINTS 7 /* N 32-bit words worth of info */
+#define NCAPINTS 8 /* N 32-bit words worth of info */
/* Intel-defined CPU features, CPUID level 0x00000001 (edx), word 0 */
#define X86_FEATURE_FPU (0*32+ 0) /* Onboard FPU */
@@ -81,6 +81,7 @@
#define X86_FEATURE_BTS (3*32+13) /* Branch Trace Store */
#define X86_FEATURE_LAPIC_TIMER_BROKEN (3*32+ 14) /* lapic timer broken in C1 */
#define X86_FEATURE_SYNC_RDTSC (3*32+15) /* RDTSC synchronizes the CPU */
+#define X86_FEATURE_REP_GOOD (3*32+16) /* rep microcode works well on this CPU */
/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
#define X86_FEATURE_XMM3 (4*32+ 0) /* Streaming SIMD Extensions-3 */
@@ -108,11 +109,24 @@
#define X86_FEATURE_LAHF_LM (6*32+ 0) /* LAHF/SAHF in long mode */
#define X86_FEATURE_CMP_LEGACY (6*32+ 1) /* If yes HyperThreading not valid */
-#define cpu_has(c, bit) \
- ((__builtin_constant_p(bit) && (bit) < 32 && \
- (1UL << (bit)) & REQUIRED_MASK1) ? \
- 1 : \
- test_bit(bit, (c)->x86_capability))
+/*
+ * Auxiliary flags: Linux defined - For features scattered in various
+ * CPUID levels like 0x6, 0xA etc
+ */
+#define X86_FEATURE_IDA (7*32+ 0) /* Intel Dynamic Acceleration */
+
+#define cpu_has(c, bit) \
+ (__builtin_constant_p(bit) && \
+ ( (((bit)>>5)==0 && (1UL<<((bit)&31) & REQUIRED_MASK0)) || \
+ (((bit)>>5)==1 && (1UL<<((bit)&31) & REQUIRED_MASK1)) || \
+ (((bit)>>5)==2 && (1UL<<((bit)&31) & REQUIRED_MASK2)) || \
+ (((bit)>>5)==3 && (1UL<<((bit)&31) & REQUIRED_MASK3)) || \
+ (((bit)>>5)==4 && (1UL<<((bit)&31) & REQUIRED_MASK4)) || \
+ (((bit)>>5)==5 && (1UL<<((bit)&31) & REQUIRED_MASK5)) || \
+ (((bit)>>5)==6 && (1UL<<((bit)&31) & REQUIRED_MASK6)) || \
+ (((bit)>>5)==7 && (1UL<<((bit)&31) & REQUIRED_MASK7)) ) \
+ ? 1 : \
+ test_bit(bit, (c)->x86_capability))
#define boot_cpu_has(bit) cpu_has(&boot_cpu_data, bit)
#define cpu_has_fpu boot_cpu_has(X86_FEATURE_FPU)
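
The reworked cpu_has() splits a feature number into a word index (bit >> 5) and a bit position within that word (bit & 31); for compile-time-constant bits that land inside a REQUIRED_MASKn word the whole expression folds to 1 and the test_bit() call disappears. A userspace-style sketch of the same decomposition, purely illustrative (the names below are made up and only stand in for the kernel's masks):

	#include <stdio.h>

	#define FEATURE_PAE  (0*32 + 6)   /* word 0, bit 6, mirroring X86_FEATURE_PAE */
	#define MASK_WORD0   (1u << 6)    /* stands in for REQUIRED_MASK0 */

	static int has_feature(const unsigned int *caps, int bit)
	{
		/* word index = bit >> 5, bit within word = bit & 31 */
		if ((bit >> 5) == 0 && ((1u << (bit & 31)) & MASK_WORD0))
			return 1;       /* required at build time, always present */
		return (caps[bit >> 5] >> (bit & 31)) & 1;
	}

	int main(void)
	{
		unsigned int caps[8] = { 0 };
		printf("PAE: %d\n", has_feature(caps, FEATURE_PAE)); /* prints 1 */
		return 0;
	}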
diff --git a/include/asm-i386/e820.h b/include/asm-i386/e820.h
index 096a2a8eb1d..c03290ccecb 100644
--- a/include/asm-i386/e820.h
+++ b/include/asm-i386/e820.h
@@ -25,13 +25,15 @@
#ifndef __ASSEMBLY__
+struct e820entry {
+ u64 addr; /* start of memory segment */
+ u64 size; /* size of memory segment */
+ u32 type; /* type of memory segment */
+} __attribute__((packed));
+
struct e820map {
- int nr_map;
- struct e820entry {
- unsigned long long addr; /* start of memory segment */
- unsigned long long size; /* size of memory segment */
- unsigned long type; /* type of memory segment */
- } map[E820MAX];
+ u32 nr_map;
+ struct e820entry map[E820MAX];
};
extern struct e820map e820;
diff --git a/include/asm-i386/fb.h b/include/asm-i386/fb.h
new file mode 100644
index 00000000000..d1c6297d4a6
--- /dev/null
+++ b/include/asm-i386/fb.h
@@ -0,0 +1,17 @@
+#ifndef _ASM_FB_H_
+#define _ASM_FB_H_
+
+#include <linux/fb.h>
+#include <linux/fs.h>
+#include <asm/page.h>
+
+extern int fb_is_primary_device(struct fb_info *info);
+
+static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
+ unsigned long off)
+{
+ if (boot_cpu_data.x86 > 3)
+ pgprot_val(vma->vm_page_prot) |= _PAGE_PCD;
+}
+
+#endif /* _ASM_FB_H_ */
diff --git a/include/asm-i386/fixmap.h b/include/asm-i386/fixmap.h
index 80ea052ee3a..249e753ac80 100644
--- a/include/asm-i386/fixmap.h
+++ b/include/asm-i386/fixmap.h
@@ -54,6 +54,8 @@ extern unsigned long __FIXADDR_TOP;
enum fixed_addresses {
FIX_HOLE,
FIX_VDSO,
+ FIX_DBGP_BASE,
+ FIX_EARLYCON_MEM_BASE,
#ifdef CONFIG_X86_LOCAL_APIC
FIX_APIC_BASE, /* local (CPU) APIC) -- required for SMP or not */
#endif
diff --git a/include/asm-i386/ide.h b/include/asm-i386/ide.h
index 0fc240c80f4..e7817a3d657 100644
--- a/include/asm-i386/ide.h
+++ b/include/asm-i386/ide.h
@@ -40,14 +40,13 @@ static __inline__ int ide_default_irq(unsigned long base)
static __inline__ unsigned long ide_default_io_base(int index)
{
- struct pci_dev *pdev;
/*
* If PCI is present then it is not safe to poke around
* the other legacy IDE ports. Only 0x1f0 and 0x170 are
* defined compatibility mode ports for PCI. A user can
* override this using ide= but we must default safe.
*/
- if ((pdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, NULL)) == NULL) {
+ if (no_pci_devices()) {
switch(index) {
case 2: return 0x1e8;
case 3: return 0x168;
@@ -55,7 +54,6 @@ static __inline__ unsigned long ide_default_io_base(int index)
case 5: return 0x160;
}
}
- pci_dev_put(pdev);
switch (index) {
case 0: return 0x1f0;
case 1: return 0x170;
diff --git a/include/asm-i386/io.h b/include/asm-i386/io.h
index e797586a5bf..7b65b5b0003 100644
--- a/include/asm-i386/io.h
+++ b/include/asm-i386/io.h
@@ -129,6 +129,7 @@ extern void iounmap(volatile void __iomem *addr);
*/
extern void *bt_ioremap(unsigned long offset, unsigned long size);
extern void bt_iounmap(void *addr, unsigned long size);
+extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys);
/* Use early IO mappings for DMI because it's initialized early */
#define dmi_ioremap bt_ioremap
diff --git a/include/asm-i386/irq.h b/include/asm-i386/irq.h
index 9e15ce0006e..36f310632c4 100644
--- a/include/asm-i386/irq.h
+++ b/include/asm-i386/irq.h
@@ -41,6 +41,7 @@ extern int irqbalance_disable(char *str);
extern void fixup_irqs(cpumask_t map);
#endif
+unsigned int do_IRQ(struct pt_regs *regs);
void init_IRQ(void);
void __init native_init_IRQ(void);
diff --git a/include/asm-i386/mach-default/irq_vectors_limits.h b/include/asm-i386/mach-default/irq_vectors_limits.h
index 7f161e760be..a90c7a60109 100644
--- a/include/asm-i386/mach-default/irq_vectors_limits.h
+++ b/include/asm-i386/mach-default/irq_vectors_limits.h
@@ -1,7 +1,7 @@
#ifndef _ASM_IRQ_VECTORS_LIMITS_H
#define _ASM_IRQ_VECTORS_LIMITS_H
-#ifdef CONFIG_X86_IO_APIC
+#if defined(CONFIG_X86_IO_APIC) || defined(CONFIG_PARAVIRT)
#define NR_IRQS 224
# if (224 >= 32 * NR_CPUS)
# define NR_IRQ_VECTORS NR_IRQS
diff --git a/include/asm-i386/mach-es7000/mach_apic.h b/include/asm-i386/mach-es7000/mach_apic.h
index 2d978928a39..caec64be516 100644
--- a/include/asm-i386/mach-es7000/mach_apic.h
+++ b/include/asm-i386/mach-es7000/mach_apic.h
@@ -73,6 +73,10 @@ static inline void init_apic_ldr(void)
apic_write_around(APIC_LDR, val);
}
+#ifndef CONFIG_X86_GENERICARCH
+extern void enable_apic_mode(void);
+#endif
+
extern int apic_version [MAX_APICS];
static inline void setup_apic_routing(void)
{
diff --git a/include/asm-i386/mach-es7000/mach_mpparse.h b/include/asm-i386/mach-es7000/mach_mpparse.h
index b9fb784e1fd..8aa10547b4b 100644
--- a/include/asm-i386/mach-es7000/mach_mpparse.h
+++ b/include/asm-i386/mach-es7000/mach_mpparse.h
@@ -18,6 +18,12 @@ extern int parse_unisys_oem (char *oemptr);
extern int find_unisys_acpi_oem_table(unsigned long *oem_addr);
extern void setup_unisys(void);
+#ifndef CONFIG_X86_GENERICARCH
+extern int acpi_madt_oem_check(char *oem_id, char *oem_table_id);
+extern int mps_oem_check(struct mp_config_table *mpc, char *oem,
+ char *productid);
+#endif
+
#ifdef CONFIG_ACPI
static inline int es7000_check_dsdt(void)
diff --git a/include/asm-i386/mmu_context.h b/include/asm-i386/mmu_context.h
index 8198d1cca1f..7eb0b0b1fb3 100644
--- a/include/asm-i386/mmu_context.h
+++ b/include/asm-i386/mmu_context.h
@@ -32,6 +32,8 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
#endif
}
+void leave_mm(unsigned long cpu);
+
static inline void switch_mm(struct mm_struct *prev,
struct mm_struct *next,
struct task_struct *tsk)
diff --git a/include/asm-i386/page.h b/include/asm-i386/page.h
index 818ac8bf01e..99cf5d3692a 100644
--- a/include/asm-i386/page.h
+++ b/include/asm-i386/page.h
@@ -34,7 +34,8 @@
#define clear_user_page(page, vaddr, pg) clear_page(page)
#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
-#define alloc_zeroed_user_highpage(vma, vaddr) alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, vaddr)
+#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
+ alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
/*
diff --git a/include/asm-i386/paravirt.h b/include/asm-i386/paravirt.h
index d7a0512f88e..7df88be2dd9 100644
--- a/include/asm-i386/paravirt.h
+++ b/include/asm-i386/paravirt.h
@@ -52,6 +52,8 @@ struct paravirt_ops
/* Basic arch-specific setup */
void (*arch_setup)(void);
char *(*memory_setup)(void);
+ void (*post_allocator_init)(void);
+
void (*init_IRQ)(void);
void (*time_init)(void);
@@ -116,7 +118,7 @@ struct paravirt_ops
u64 (*read_tsc)(void);
u64 (*read_pmc)(void);
- u64 (*get_scheduled_cycles)(void);
+ unsigned long long (*sched_clock)(void);
unsigned long (*get_cpu_khz)(void);
/* Segment descriptor handling */
@@ -173,7 +175,7 @@ struct paravirt_ops
unsigned long va);
/* Hooks for allocating/releasing pagetable pages */
- void (*alloc_pt)(u32 pfn);
+ void (*alloc_pt)(struct mm_struct *mm, u32 pfn);
void (*alloc_pd)(u32 pfn);
void (*alloc_pd_clone)(u32 pfn, u32 clonepfn, u32 start, u32 count);
void (*release_pt)(u32 pfn);
@@ -260,6 +262,7 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *site, unsigned len)
unsigned paravirt_patch_insns(void *site, unsigned len,
const char *start, const char *end);
+int paravirt_disable_iospace(void);
/*
* This generates an indirect call based on the operation type number.
@@ -539,7 +542,7 @@ static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high)
val = paravirt_read_msr(msr, &_err); \
} while(0)
-#define wrmsrl(msr,val) ((void)paravirt_write_msr(msr, val, 0))
+#define wrmsrl(msr,val) wrmsr(msr, (u32)((u64)(val)), ((u64)(val))>>32)
#define wrmsr_safe(msr,a,b) paravirt_write_msr(msr, a, b)
/* rdmsr with exception handling */
@@ -563,7 +566,10 @@ static inline u64 paravirt_read_tsc(void)
#define rdtscll(val) (val = paravirt_read_tsc())
-#define get_scheduled_cycles(val) (val = paravirt_ops.get_scheduled_cycles())
+static inline unsigned long long paravirt_sched_clock(void)
+{
+ return PVOP_CALL0(unsigned long long, sched_clock);
+}
#define calculate_cpu_khz() (paravirt_ops.get_cpu_khz())
#define write_tsc(val1,val2) wrmsr(0x10, val1, val2)
@@ -669,6 +675,12 @@ static inline void setup_secondary_clock(void)
}
#endif
+static inline void paravirt_post_allocator_init(void)
+{
+ if (paravirt_ops.post_allocator_init)
+ (*paravirt_ops.post_allocator_init)();
+}
+
static inline void paravirt_pagetable_setup_start(pgd_t *base)
{
if (paravirt_ops.pagetable_setup_start)
@@ -725,9 +737,9 @@ static inline void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
PVOP_VCALL3(flush_tlb_others, &cpumask, mm, va);
}
-static inline void paravirt_alloc_pt(unsigned pfn)
+static inline void paravirt_alloc_pt(struct mm_struct *mm, unsigned pfn)
{
- PVOP_VCALL1(alloc_pt, pfn);
+ PVOP_VCALL2(alloc_pt, mm, pfn);
}
static inline void paravirt_release_pt(unsigned pfn)
{
diff --git a/include/asm-i386/pci.h b/include/asm-i386/pci.h
index 64b6d0baedb..392d3fe5d45 100644
--- a/include/asm-i386/pci.h
+++ b/include/asm-i386/pci.h
@@ -56,48 +56,11 @@ struct pci_dev;
#define pci_unmap_len(PTR, LEN_NAME) (0)
#define pci_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0)
-/* This is always fine. */
-#define pci_dac_dma_supported(pci_dev, mask) (1)
-
-static inline dma64_addr_t
-pci_dac_page_to_dma(struct pci_dev *pdev, struct page *page, unsigned long offset, int direction)
-{
- return ((dma64_addr_t) page_to_phys(page) +
- (dma64_addr_t) offset);
-}
-
-static inline struct page *
-pci_dac_dma_to_page(struct pci_dev *pdev, dma64_addr_t dma_addr)
-{
- return pfn_to_page(dma_addr >> PAGE_SHIFT);
-}
-
-static inline unsigned long
-pci_dac_dma_to_offset(struct pci_dev *pdev, dma64_addr_t dma_addr)
-{
- return (dma_addr & ~PAGE_MASK);
-}
-
-static inline void
-pci_dac_dma_sync_single_for_cpu(struct pci_dev *pdev, dma64_addr_t dma_addr, size_t len, int direction)
-{
-}
-
-static inline void
-pci_dac_dma_sync_single_for_device(struct pci_dev *pdev, dma64_addr_t dma_addr, size_t len, int direction)
-{
- flush_write_buffers();
-}
-
#define HAVE_PCI_MMAP
extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
enum pci_mmap_state mmap_state, int write_combine);
-static inline void pcibios_add_platform_entries(struct pci_dev *dev)
-{
-}
-
#ifdef CONFIG_PCI
static inline void pci_dma_burst_advice(struct pci_dev *pdev,
enum pci_dma_burst_strategy *strat,
diff --git a/include/asm-i386/pgalloc.h b/include/asm-i386/pgalloc.h
index d07b7afc269..f2fc33ceb9f 100644
--- a/include/asm-i386/pgalloc.h
+++ b/include/asm-i386/pgalloc.h
@@ -7,7 +7,7 @@
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
-#define paravirt_alloc_pt(pfn) do { } while (0)
+#define paravirt_alloc_pt(mm, pfn) do { } while (0)
#define paravirt_alloc_pd(pfn) do { } while (0)
#define paravirt_alloc_pd(pfn) do { } while (0)
#define paravirt_alloc_pd_clone(pfn, clonepfn, start, count) do { } while (0)
@@ -17,13 +17,13 @@
#define pmd_populate_kernel(mm, pmd, pte) \
do { \
- paravirt_alloc_pt(__pa(pte) >> PAGE_SHIFT); \
+ paravirt_alloc_pt(mm, __pa(pte) >> PAGE_SHIFT); \
set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(pte))); \
} while (0)
#define pmd_populate(mm, pmd, pte) \
do { \
- paravirt_alloc_pt(page_to_pfn(pte)); \
+ paravirt_alloc_pt(mm, page_to_pfn(pte)); \
set_pmd(pmd, __pmd(_PAGE_TABLE + \
((unsigned long long)page_to_pfn(pte) << \
(unsigned long long) PAGE_SHIFT))); \
diff --git a/include/asm-i386/pgtable-2level.h b/include/asm-i386/pgtable-2level.h
index a50fd1773de..84b03cf56a7 100644
--- a/include/asm-i386/pgtable-2level.h
+++ b/include/asm-i386/pgtable-2level.h
@@ -57,14 +57,6 @@ static inline pte_t native_ptep_get_and_clear(pte_t *xp)
#define pfn_pmd(pfn, prot) __pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
/*
- * All present user pages are user-executable:
- */
-static inline int pte_exec(pte_t pte)
-{
- return pte_user(pte);
-}
-
-/*
* All present pages are kernel-executable:
*/
static inline int pte_exec_kernel(pte_t pte)
diff --git a/include/asm-i386/pgtable-3level.h b/include/asm-i386/pgtable-3level.h
index eb0f1d7e96a..948a3341411 100644
--- a/include/asm-i386/pgtable-3level.h
+++ b/include/asm-i386/pgtable-3level.h
@@ -20,26 +20,11 @@
#define pud_present(pud) 1
/*
- * Is the pte executable?
- */
-static inline int pte_x(pte_t pte)
-{
- return !(pte_val(pte) & _PAGE_NX);
-}
-
-/*
- * All present user-pages with !NX bit are user-executable:
- */
-static inline int pte_exec(pte_t pte)
-{
- return pte_user(pte) && pte_x(pte);
-}
-/*
* All present pages with !NX bit are kernel-executable:
*/
static inline int pte_exec_kernel(pte_t pte)
{
- return pte_x(pte);
+ return !(pte_val(pte) & _PAGE_NX);
}
/* Rules for using set_pte: the pte being assigned *must* be
diff --git a/include/asm-i386/pgtable.h b/include/asm-i386/pgtable.h
index 628fa7747d0..c7fefa6b12f 100644
--- a/include/asm-i386/pgtable.h
+++ b/include/asm-i386/pgtable.h
@@ -79,7 +79,7 @@ void paging_init(void);
* area for the same reason. ;)
*/
#define VMALLOC_OFFSET (8*1024*1024)
-#define VMALLOC_START (((unsigned long) high_memory + vmalloc_earlyreserve + \
+#define VMALLOC_START (((unsigned long) high_memory + \
2*VMALLOC_OFFSET-1) & ~(VMALLOC_OFFSET-1))
#ifdef CONFIG_HIGHMEM
# define VMALLOC_END (PKMAP_BASE-2*PAGE_SIZE)
@@ -218,8 +218,6 @@ extern unsigned long pg0[];
* The following only work if pte_present() is true.
* Undefined behaviour if not..
*/
-static inline int pte_user(pte_t pte) { return (pte).pte_low & _PAGE_USER; }
-static inline int pte_read(pte_t pte) { return (pte).pte_low & _PAGE_USER; }
static inline int pte_dirty(pte_t pte) { return (pte).pte_low & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte) { return (pte).pte_low & _PAGE_ACCESSED; }
static inline int pte_write(pte_t pte) { return (pte).pte_low & _PAGE_RW; }
@@ -230,13 +228,9 @@ static inline int pte_huge(pte_t pte) { return (pte).pte_low & _PAGE_PSE; }
*/
static inline int pte_file(pte_t pte) { return (pte).pte_low & _PAGE_FILE; }
-static inline pte_t pte_rdprotect(pte_t pte) { (pte).pte_low &= ~_PAGE_USER; return pte; }
-static inline pte_t pte_exprotect(pte_t pte) { (pte).pte_low &= ~_PAGE_USER; return pte; }
static inline pte_t pte_mkclean(pte_t pte) { (pte).pte_low &= ~_PAGE_DIRTY; return pte; }
static inline pte_t pte_mkold(pte_t pte) { (pte).pte_low &= ~_PAGE_ACCESSED; return pte; }
static inline pte_t pte_wrprotect(pte_t pte) { (pte).pte_low &= ~_PAGE_RW; return pte; }
-static inline pte_t pte_mkread(pte_t pte) { (pte).pte_low |= _PAGE_USER; return pte; }
-static inline pte_t pte_mkexec(pte_t pte) { (pte).pte_low |= _PAGE_USER; return pte; }
static inline pte_t pte_mkdirty(pte_t pte) { (pte).pte_low |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte) { (pte).pte_low |= _PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkwrite(pte_t pte) { (pte).pte_low |= _PAGE_RW; return pte; }
@@ -295,17 +289,6 @@ static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
__changed; \
})
-#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
-#define ptep_test_and_clear_dirty(vma, addr, ptep) ({ \
- int __ret = 0; \
- if (pte_dirty(*(ptep))) \
- __ret = test_and_clear_bit(_PAGE_BIT_DIRTY, \
- &(ptep)->pte_low); \
- if (__ret) \
- pte_update((vma)->vm_mm, addr, ptep); \
- __ret; \
-})
-
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define ptep_test_and_clear_young(vma, addr, ptep) ({ \
int __ret = 0; \
@@ -317,27 +300,6 @@ static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
__ret; \
})
-/*
- * Rules for using ptep_establish: the pte MUST be a user pte, and
- * must be a present->present transition.
- */
-#define __HAVE_ARCH_PTEP_ESTABLISH
-#define ptep_establish(vma, address, ptep, pteval) \
-do { \
- set_pte_present((vma)->vm_mm, address, ptep, pteval); \
- flush_tlb_page(vma, address); \
-} while (0)
-
-#define __HAVE_ARCH_PTEP_CLEAR_DIRTY_FLUSH
-#define ptep_clear_flush_dirty(vma, address, ptep) \
-({ \
- int __dirty; \
- __dirty = ptep_test_and_clear_dirty((vma), (address), (ptep)); \
- if (__dirty) \
- flush_tlb_page(vma, address); \
- __dirty; \
-})
-
#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
#define ptep_clear_flush_young(vma, address, ptep) \
({ \
diff --git a/include/asm-i386/processor.h b/include/asm-i386/processor.h
index 338668bfb0a..422cffef00c 100644
--- a/include/asm-i386/processor.h
+++ b/include/asm-i386/processor.h
@@ -119,6 +119,7 @@ void __init cpu_detect(struct cpuinfo_x86 *c);
extern void identify_boot_cpu(void);
extern void identify_secondary_cpu(struct cpuinfo_x86 *);
extern void print_cpu_info(struct cpuinfo_x86 *);
+extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c);
extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
extern unsigned short num_cache_leaves;
@@ -227,6 +228,10 @@ extern int bootloader_type;
#define HAVE_ARCH_PICK_MMAP_LAYOUT
+extern void hard_disable_TSC(void);
+extern void disable_TSC(void);
+extern void hard_enable_TSC(void);
+
/*
* Size of io_bitmap.
*/
diff --git a/include/asm-i386/required-features.h b/include/asm-i386/required-features.h
index 9db866c1e64..65848a00705 100644
--- a/include/asm-i386/required-features.h
+++ b/include/asm-i386/required-features.h
@@ -3,32 +3,53 @@
/* Define minimum CPUID feature set for kernel These bits are checked
really early to actually display a visible error message before the
- kernel dies. Only add word 0 bits here
+ kernel dies. Make sure to assign features to the proper mask!
Some requirements that are not in CPUID yet are also in the
- CONFIG_X86_MINIMUM_CPU mode which is checked too.
+ CONFIG_X86_MINIMUM_CPU_FAMILY which is checked too.
The real information is in arch/i386/Kconfig.cpu, this just converts
the CONFIGs into a bitmask */
+#ifndef CONFIG_MATH_EMULATION
+# define NEED_FPU (1<<(X86_FEATURE_FPU & 31))
+#else
+# define NEED_FPU 0
+#endif
+
#ifdef CONFIG_X86_PAE
-#define NEED_PAE (1<<X86_FEATURE_PAE)
+# define NEED_PAE (1<<(X86_FEATURE_PAE & 31))
#else
-#define NEED_PAE 0
+# define NEED_PAE 0
#endif
#ifdef CONFIG_X86_CMOV
-#define NEED_CMOV (1<<X86_FEATURE_CMOV)
+# define NEED_CMOV (1<<(X86_FEATURE_CMOV & 31))
#else
-#define NEED_CMOV 0
+# define NEED_CMOV 0
#endif
#ifdef CONFIG_X86_CMPXCHG64
-#define NEED_CMPXCHG64 (1<<X86_FEATURE_CX8)
+# define NEED_CX8 (1<<(X86_FEATURE_CX8 & 31))
+#else
+# define NEED_CX8 0
+#endif
+
+#define REQUIRED_MASK0 (NEED_FPU|NEED_PAE|NEED_CMOV|NEED_CX8)
+
+#ifdef CONFIG_X86_USE_3DNOW
+# define NEED_3DNOW (1<<(X86_FEATURE_3DNOW & 31))
#else
-#define NEED_CMPXCHG64 0
+# define NEED_3DNOW 0
#endif
-#define REQUIRED_MASK1 (NEED_PAE|NEED_CMOV|NEED_CMPXCHG64)
+#define REQUIRED_MASK1 (NEED_3DNOW)
+
+#define REQUIRED_MASK2 0
+#define REQUIRED_MASK3 0
+#define REQUIRED_MASK4 0
+#define REQUIRED_MASK5 0
+#define REQUIRED_MASK6 0
+#define REQUIRED_MASK7 0
#endif
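
The REQUIRED_MASKn words are meant to be compared against the live CPUID capability words very early in boot, so a missing feature produces a readable error instead of a crash. A hedged sketch of that style of check (the function and message are illustrative, not the actual arch/i386 startup code):

	/* Sketch: compare CPUID leaf 1 EDX (capability word 0) against REQUIRED_MASK0 */
	static void check_required_features(unsigned int cpuid_1_edx)
	{
		if ((cpuid_1_edx & REQUIRED_MASK0) != REQUIRED_MASK0)
			panic("CPU lacks features this kernel was built to require");
	}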
diff --git a/include/asm-i386/setup.h b/include/asm-i386/setup.h
index 0e8077cbfda..7862fe858a9 100644
--- a/include/asm-i386/setup.h
+++ b/include/asm-i386/setup.h
@@ -26,12 +26,15 @@
#define NEW_CL_POINTER 0x228 /* Relative to real mode data */
#ifndef __ASSEMBLY__
+
+#include <asm/bootparam.h>
+
/*
* This is set up by the setup-routine at boot-time
*/
-extern unsigned char boot_params[PARAM_SIZE];
+extern struct boot_params boot_params;
-#define PARAM (boot_params)
+#define PARAM ((char *)&boot_params)
#define SCREEN_INFO (*(struct screen_info *) (PARAM+0))
#define EXT_MEM_K (*(unsigned short *) (PARAM+2))
#define ALT_MEM_K (*(unsigned long *) (PARAM+0x1e0))
@@ -39,8 +42,7 @@ extern unsigned char boot_params[PARAM_SIZE];
#define E820_MAP ((struct e820entry *) (PARAM+E820MAP))
#define APM_BIOS_INFO (*(struct apm_bios_info *) (PARAM+0x40))
#define IST_INFO (*(struct ist_info *) (PARAM+0x60))
-#define DRIVE_INFO (*(struct drive_info_struct *) (PARAM+0x80))
-#define SYS_DESC_TABLE (*(struct sys_desc_table_struct*)(PARAM+0xa0))
+#define SYS_DESC_TABLE (*(struct sys_desc_table *)(PARAM+0xa0))
#define EFI_SYSTAB ((efi_system_table_t *) *((unsigned long *)(PARAM+0x1c4)))
#define EFI_MEMDESC_SIZE (*((unsigned long *) (PARAM+0x1c8)))
#define EFI_MEMDESC_VERSION (*((unsigned long *) (PARAM+0x1cc)))
@@ -79,6 +81,10 @@ void __init add_memory_region(unsigned long long start,
extern unsigned long init_pg_tables_end;
+#ifndef CONFIG_PARAVIRT
+#define paravirt_post_allocator_init() do {} while (0)
+#endif
+
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
diff --git a/include/asm-i386/smp.h b/include/asm-i386/smp.h
index 0c713278706..1f73bde165b 100644
--- a/include/asm-i386/smp.h
+++ b/include/asm-i386/smp.h
@@ -43,9 +43,12 @@ extern u8 x86_cpu_to_apicid[];
#define cpu_physical_id(cpu) x86_cpu_to_apicid[cpu]
+extern void set_cpu_sibling_map(int cpu);
+
#ifdef CONFIG_HOTPLUG_CPU
extern void cpu_exit_clear(void);
extern void cpu_uninit(void);
+extern void remove_siblinginfo(int cpu);
#endif
struct smp_ops
@@ -129,6 +132,8 @@ extern int __cpu_disable(void);
extern void __cpu_die(unsigned int cpu);
extern unsigned int num_processors;
+void __cpuinit smp_store_cpu_info(int id);
+
#endif /* !__ASSEMBLY__ */
#else /* CONFIG_SMP */
diff --git a/include/asm-i386/thread_info.h b/include/asm-i386/thread_info.h
index 4cb0f91ae64..54424e045e0 100644
--- a/include/asm-i386/thread_info.h
+++ b/include/asm-i386/thread_info.h
@@ -137,6 +137,7 @@ static inline struct thread_info *current_thread_info(void)
#define TIF_DEBUG 17 /* uses debug registers */
#define TIF_IO_BITMAP 18 /* uses I/O bitmap */
#define TIF_FREEZE 19 /* is freezing for suspend */
+#define TIF_NOTSC 20 /* TSC is not accessible in userland */
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
@@ -151,6 +152,7 @@ static inline struct thread_info *current_thread_info(void)
#define _TIF_DEBUG (1<<TIF_DEBUG)
#define _TIF_IO_BITMAP (1<<TIF_IO_BITMAP)
#define _TIF_FREEZE (1<<TIF_FREEZE)
+#define _TIF_NOTSC (1<<TIF_NOTSC)
/* work to do on interrupt/exception return */
#define _TIF_WORK_MASK \
@@ -160,7 +162,8 @@ static inline struct thread_info *current_thread_info(void)
#define _TIF_ALLWORK_MASK (0x0000FFFF & ~_TIF_SECCOMP)
/* flags to check in __switch_to() */
-#define _TIF_WORK_CTXSW (_TIF_DEBUG|_TIF_IO_BITMAP)
+#define _TIF_WORK_CTXSW_NEXT (_TIF_IO_BITMAP | _TIF_NOTSC | _TIF_DEBUG)
+#define _TIF_WORK_CTXSW_PREV (_TIF_IO_BITMAP | _TIF_NOTSC)
/*
* Thread-synchronous status.
diff --git a/include/asm-i386/timer.h b/include/asm-i386/timer.h
index 153770e25fa..51a713e33a9 100644
--- a/include/asm-i386/timer.h
+++ b/include/asm-i386/timer.h
@@ -15,8 +15,38 @@ extern int no_sync_cmos_clock;
extern int recalibrate_cpu_khz(void);
#ifndef CONFIG_PARAVIRT
-#define get_scheduled_cycles(val) rdtscll(val)
#define calculate_cpu_khz() native_calculate_cpu_khz()
#endif
+/* Accelerators for sched_clock()
+ * convert from cycles(64bits) => nanoseconds (64bits)
+ * basic equation:
+ * ns = cycles / (freq / ns_per_sec)
+ * ns = cycles * (ns_per_sec / freq)
+ * ns = cycles * (10^9 / (cpu_khz * 10^3))
+ * ns = cycles * (10^6 / cpu_khz)
+ *
+ * Then we use scaling math (suggested by george@mvista.com) to get:
+ * ns = cycles * (10^6 * SC / cpu_khz) / SC
+ * ns = cycles * cyc2ns_scale / SC
+ *
+ * And since SC is a constant power of two, we can convert the div
+ * into a shift.
+ *
+ * We can use khz divisor instead of mhz to keep a better precision, since
+ * cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits.
+ * (mathieu.desnoyers@polymtl.ca)
+ *
+ * -johnstul@us.ibm.com "math is hard, lets go shopping!"
+ */
+extern unsigned long cyc2ns_scale __read_mostly;
+
+#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */
+
+static inline unsigned long long cycles_2_ns(unsigned long long cyc)
+{
+ return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR;
+}
+
+
#endif
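
Worked through with numbers, the scaling in cycles_2_ns() amounts to precomputing cyc2ns_scale = (10^6 << CYC2NS_SCALE_FACTOR) / cpu_khz once, then replacing the per-call divide with a multiply and a shift. A small sketch of how the scale might be initialized (the function name is illustrative; the variable itself is the one declared extern above):

	/* ns = cycles * (10^6 / cpu_khz); precompute the ratio scaled by 2^10 */
	static void set_cyc2ns_scale_sketch(unsigned long khz)
	{
		cyc2ns_scale = (1000000 << CYC2NS_SCALE_FACTOR) / khz;
	}

	/* Example: at cpu_khz = 2,000,000 (2 GHz) the scale is 512, so
	 * cycles_2_ns(2,000,000,000) = (2e9 * 512) >> 10 = 1,000,000,000 ns = 1 s. */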
diff --git a/include/asm-i386/unistd.h b/include/asm-i386/unistd.h
index e84ace1ec8b..9b15545eb9b 100644
--- a/include/asm-i386/unistd.h
+++ b/include/asm-i386/unistd.h
@@ -329,10 +329,11 @@
#define __NR_signalfd 321
#define __NR_timerfd 322
#define __NR_eventfd 323
+#define __NR_fallocate 324
#ifdef __KERNEL__
-#define NR_syscalls 324
+#define NR_syscalls 325
#define __ARCH_WANT_IPC_PARSE_VERSION
#define __ARCH_WANT_OLD_READDIR
diff --git a/include/asm-i386/vmi_time.h b/include/asm-i386/vmi_time.h
index 213930b995c..47818813032 100644
--- a/include/asm-i386/vmi_time.h
+++ b/include/asm-i386/vmi_time.h
@@ -49,7 +49,7 @@ extern struct vmi_timer_ops {
extern void __init vmi_time_init(void);
extern unsigned long vmi_get_wallclock(void);
extern int vmi_set_wallclock(unsigned long now);
-extern unsigned long long vmi_get_sched_cycles(void);
+extern unsigned long long vmi_sched_clock(void);
extern unsigned long vmi_cpu_khz(void);
#ifdef CONFIG_X86_LOCAL_APIC
diff --git a/include/asm-i386/xen/hypercall.h b/include/asm-i386/xen/hypercall.h
new file mode 100644
index 00000000000..bc0ee7d961c
--- /dev/null
+++ b/include/asm-i386/xen/hypercall.h
@@ -0,0 +1,413 @@
+/******************************************************************************
+ * hypercall.h
+ *
+ * Linux-specific hypervisor handling.
+ *
+ * Copyright (c) 2002-2004, K A Fraser
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef __HYPERCALL_H__
+#define __HYPERCALL_H__
+
+#include <linux/errno.h>
+#include <linux/string.h>
+
+#include <xen/interface/xen.h>
+#include <xen/interface/sched.h>
+#include <xen/interface/physdev.h>
+
+extern struct { char _entry[32]; } hypercall_page[];
+
+#define _hypercall0(type, name) \
+({ \
+ long __res; \
+ asm volatile ( \
+ "call %[call]" \
+ : "=a" (__res) \
+ : [call] "m" (hypercall_page[__HYPERVISOR_##name]) \
+ : "memory" ); \
+ (type)__res; \
+})
+
+#define _hypercall1(type, name, a1) \
+({ \
+ long __res, __ign1; \
+ asm volatile ( \
+ "call %[call]" \
+ : "=a" (__res), "=b" (__ign1) \
+ : "1" ((long)(a1)), \
+ [call] "m" (hypercall_page[__HYPERVISOR_##name]) \
+ : "memory" ); \
+ (type)__res; \
+})
+
+#define _hypercall2(type, name, a1, a2) \
+({ \
+ long __res, __ign1, __ign2; \
+ asm volatile ( \
+ "call %[call]" \
+ : "=a" (__res), "=b" (__ign1), "=c" (__ign2) \
+ : "1" ((long)(a1)), "2" ((long)(a2)), \
+ [call] "m" (hypercall_page[__HYPERVISOR_##name]) \
+ : "memory" ); \
+ (type)__res; \
+})
+
+#define _hypercall3(type, name, a1, a2, a3) \
+({ \
+ long __res, __ign1, __ign2, __ign3; \
+ asm volatile ( \
+ "call %[call]" \
+ : "=a" (__res), "=b" (__ign1), "=c" (__ign2), \
+ "=d" (__ign3) \
+ : "1" ((long)(a1)), "2" ((long)(a2)), \
+ "3" ((long)(a3)), \
+ [call] "m" (hypercall_page[__HYPERVISOR_##name]) \
+ : "memory" ); \
+ (type)__res; \
+})
+
+#define _hypercall4(type, name, a1, a2, a3, a4) \
+({ \
+ long __res, __ign1, __ign2, __ign3, __ign4; \
+ asm volatile ( \
+ "call %[call]" \
+ : "=a" (__res), "=b" (__ign1), "=c" (__ign2), \
+ "=d" (__ign3), "=S" (__ign4) \
+ : "1" ((long)(a1)), "2" ((long)(a2)), \
+ "3" ((long)(a3)), "4" ((long)(a4)), \
+ [call] "m" (hypercall_page[__HYPERVISOR_##name]) \
+ : "memory" ); \
+ (type)__res; \
+})
+
+#define _hypercall5(type, name, a1, a2, a3, a4, a5) \
+({ \
+ long __res, __ign1, __ign2, __ign3, __ign4, __ign5; \
+ asm volatile ( \
+ "call %[call]" \
+ : "=a" (__res), "=b" (__ign1), "=c" (__ign2), \
+ "=d" (__ign3), "=S" (__ign4), "=D" (__ign5) \
+ : "1" ((long)(a1)), "2" ((long)(a2)), \
+ "3" ((long)(a3)), "4" ((long)(a4)), \
+ "5" ((long)(a5)), \
+ [call] "m" (hypercall_page[__HYPERVISOR_##name]) \
+ : "memory" ); \
+ (type)__res; \
+})
+
+static inline int
+HYPERVISOR_set_trap_table(struct trap_info *table)
+{
+ return _hypercall1(int, set_trap_table, table);
+}
+
+static inline int
+HYPERVISOR_mmu_update(struct mmu_update *req, int count,
+ int *success_count, domid_t domid)
+{
+ return _hypercall4(int, mmu_update, req, count, success_count, domid);
+}
+
+static inline int
+HYPERVISOR_mmuext_op(struct mmuext_op *op, int count,
+ int *success_count, domid_t domid)
+{
+ return _hypercall4(int, mmuext_op, op, count, success_count, domid);
+}
+
+static inline int
+HYPERVISOR_set_gdt(unsigned long *frame_list, int entries)
+{
+ return _hypercall2(int, set_gdt, frame_list, entries);
+}
+
+static inline int
+HYPERVISOR_stack_switch(unsigned long ss, unsigned long esp)
+{
+ return _hypercall2(int, stack_switch, ss, esp);
+}
+
+static inline int
+HYPERVISOR_set_callbacks(unsigned long event_selector,
+ unsigned long event_address,
+ unsigned long failsafe_selector,
+ unsigned long failsafe_address)
+{
+ return _hypercall4(int, set_callbacks,
+ event_selector, event_address,
+ failsafe_selector, failsafe_address);
+}
+
+static inline int
+HYPERVISOR_fpu_taskswitch(int set)
+{
+ return _hypercall1(int, fpu_taskswitch, set);
+}
+
+static inline int
+HYPERVISOR_sched_op(int cmd, unsigned long arg)
+{
+ return _hypercall2(int, sched_op, cmd, arg);
+}
+
+static inline long
+HYPERVISOR_set_timer_op(u64 timeout)
+{
+ unsigned long timeout_hi = (unsigned long)(timeout>>32);
+ unsigned long timeout_lo = (unsigned long)timeout;
+ return _hypercall2(long, set_timer_op, timeout_lo, timeout_hi);
+}
+
+static inline int
+HYPERVISOR_set_debugreg(int reg, unsigned long value)
+{
+ return _hypercall2(int, set_debugreg, reg, value);
+}
+
+static inline unsigned long
+HYPERVISOR_get_debugreg(int reg)
+{
+ return _hypercall1(unsigned long, get_debugreg, reg);
+}
+
+static inline int
+HYPERVISOR_update_descriptor(u64 ma, u64 desc)
+{
+ return _hypercall4(int, update_descriptor, ma, ma>>32, desc, desc>>32);
+}
+
+static inline int
+HYPERVISOR_memory_op(unsigned int cmd, void *arg)
+{
+ return _hypercall2(int, memory_op, cmd, arg);
+}
+
+static inline int
+HYPERVISOR_multicall(void *call_list, int nr_calls)
+{
+ return _hypercall2(int, multicall, call_list, nr_calls);
+}
+
+static inline int
+HYPERVISOR_update_va_mapping(unsigned long va, pte_t new_val,
+ unsigned long flags)
+{
+ unsigned long pte_hi = 0;
+#ifdef CONFIG_X86_PAE
+ pte_hi = new_val.pte_high;
+#endif
+ return _hypercall4(int, update_va_mapping, va,
+ new_val.pte_low, pte_hi, flags);
+}
+
+static inline int
+HYPERVISOR_event_channel_op(int cmd, void *arg)
+{
+ int rc = _hypercall2(int, event_channel_op, cmd, arg);
+ if (unlikely(rc == -ENOSYS)) {
+ struct evtchn_op op;
+ op.cmd = cmd;
+ memcpy(&op.u, arg, sizeof(op.u));
+ rc = _hypercall1(int, event_channel_op_compat, &op);
+ memcpy(arg, &op.u, sizeof(op.u));
+ }
+ return rc;
+}
+
+static inline int
+HYPERVISOR_xen_version(int cmd, void *arg)
+{
+ return _hypercall2(int, xen_version, cmd, arg);
+}
+
+static inline int
+HYPERVISOR_console_io(int cmd, int count, char *str)
+{
+ return _hypercall3(int, console_io, cmd, count, str);
+}
+
+static inline int
+HYPERVISOR_physdev_op(int cmd, void *arg)
+{
+ int rc = _hypercall2(int, physdev_op, cmd, arg);
+ if (unlikely(rc == -ENOSYS)) {
+ struct physdev_op op;
+ op.cmd = cmd;
+ memcpy(&op.u, arg, sizeof(op.u));
+ rc = _hypercall1(int, physdev_op_compat, &op);
+ memcpy(arg, &op.u, sizeof(op.u));
+ }
+ return rc;
+}
+
+static inline int
+HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count)
+{
+ return _hypercall3(int, grant_table_op, cmd, uop, count);
+}
+
+static inline int
+HYPERVISOR_update_va_mapping_otherdomain(unsigned long va, pte_t new_val,
+ unsigned long flags, domid_t domid)
+{
+ unsigned long pte_hi = 0;
+#ifdef CONFIG_X86_PAE
+ pte_hi = new_val.pte_high;
+#endif
+ return _hypercall5(int, update_va_mapping_otherdomain, va,
+ new_val.pte_low, pte_hi, flags, domid);
+}
+
+static inline int
+HYPERVISOR_vm_assist(unsigned int cmd, unsigned int type)
+{
+ return _hypercall2(int, vm_assist, cmd, type);
+}
+
+static inline int
+HYPERVISOR_vcpu_op(int cmd, int vcpuid, void *extra_args)
+{
+ return _hypercall3(int, vcpu_op, cmd, vcpuid, extra_args);
+}
+
+static inline int
+HYPERVISOR_suspend(unsigned long srec)
+{
+ return _hypercall3(int, sched_op, SCHEDOP_shutdown,
+ SHUTDOWN_suspend, srec);
+}
+
+static inline int
+HYPERVISOR_nmi_op(unsigned long op, unsigned long arg)
+{
+ return _hypercall2(int, nmi_op, op, arg);
+}
+
+static inline void
+MULTI_update_va_mapping(struct multicall_entry *mcl, unsigned long va,
+ pte_t new_val, unsigned long flags)
+{
+ mcl->op = __HYPERVISOR_update_va_mapping;
+ mcl->args[0] = va;
+#ifdef CONFIG_X86_PAE
+ mcl->args[1] = new_val.pte_low;
+ mcl->args[2] = new_val.pte_high;
+#else
+ mcl->args[1] = new_val.pte_low;
+ mcl->args[2] = 0;
+#endif
+ mcl->args[3] = flags;
+}
+
+static inline void
+MULTI_grant_table_op(struct multicall_entry *mcl, unsigned int cmd,
+ void *uop, unsigned int count)
+{
+ mcl->op = __HYPERVISOR_grant_table_op;
+ mcl->args[0] = cmd;
+ mcl->args[1] = (unsigned long)uop;
+ mcl->args[2] = count;
+}
+
+static inline void
+MULTI_update_va_mapping_otherdomain(struct multicall_entry *mcl, unsigned long va,
+ pte_t new_val, unsigned long flags,
+ domid_t domid)
+{
+ mcl->op = __HYPERVISOR_update_va_mapping_otherdomain;
+ mcl->args[0] = va;
+#ifdef CONFIG_X86_PAE
+ mcl->args[1] = new_val.pte_low;
+ mcl->args[2] = new_val.pte_high;
+#else
+ mcl->args[1] = new_val.pte_low;
+ mcl->args[2] = 0;
+#endif
+ mcl->args[3] = flags;
+ mcl->args[4] = domid;
+}
+
+static inline void
+MULTI_update_descriptor(struct multicall_entry *mcl, u64 maddr,
+ struct desc_struct desc)
+{
+ mcl->op = __HYPERVISOR_update_descriptor;
+ mcl->args[0] = maddr;
+ mcl->args[1] = maddr >> 32;
+ mcl->args[2] = desc.a;
+ mcl->args[3] = desc.b;
+}
+
+static inline void
+MULTI_memory_op(struct multicall_entry *mcl, unsigned int cmd, void *arg)
+{
+ mcl->op = __HYPERVISOR_memory_op;
+ mcl->args[0] = cmd;
+ mcl->args[1] = (unsigned long)arg;
+}
+
+static inline void
+MULTI_mmu_update(struct multicall_entry *mcl, struct mmu_update *req,
+ int count, int *success_count, domid_t domid)
+{
+ mcl->op = __HYPERVISOR_mmu_update;
+ mcl->args[0] = (unsigned long)req;
+ mcl->args[1] = count;
+ mcl->args[2] = (unsigned long)success_count;
+ mcl->args[3] = domid;
+}
+
+static inline void
+MULTI_mmuext_op(struct multicall_entry *mcl, struct mmuext_op *op, int count,
+ int *success_count, domid_t domid)
+{
+ mcl->op = __HYPERVISOR_mmuext_op;
+ mcl->args[0] = (unsigned long)op;
+ mcl->args[1] = count;
+ mcl->args[2] = (unsigned long)success_count;
+ mcl->args[3] = domid;
+}
+
+static inline void
+MULTI_set_gdt(struct multicall_entry *mcl, unsigned long *frames, int entries)
+{
+ mcl->op = __HYPERVISOR_set_gdt;
+ mcl->args[0] = (unsigned long)frames;
+ mcl->args[1] = entries;
+}
+
+static inline void
+MULTI_stack_switch(struct multicall_entry *mcl,
+ unsigned long ss, unsigned long esp)
+{
+ mcl->op = __HYPERVISOR_stack_switch;
+ mcl->args[0] = ss;
+ mcl->args[1] = esp;
+}
+
+#endif /* __HYPERCALL_H__ */
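
Each _hypercallN macro loads its arguments into EBX/ECX/EDX/ESI/EDI, calls the fixed slot for that hypercall in hypercall_page, and returns EAX; the HYPERVISOR_* wrappers above just pick the right arity. As a usage sketch, assuming the CONSOLEIO_write command from xen/interface/xen.h, writing a string to the Xen console could look like this (the helper name is illustrative):

	static void xen_raw_write(const char *msg, int len)
	{
		/* console_io is a 3-argument hypercall: cmd, count, buffer */
		HYPERVISOR_console_io(CONSOLEIO_write, len, (char *)msg);
	}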
diff --git a/include/asm-i386/xen/hypervisor.h b/include/asm-i386/xen/hypervisor.h
new file mode 100644
index 00000000000..8e15dd28c91
--- /dev/null
+++ b/include/asm-i386/xen/hypervisor.h
@@ -0,0 +1,73 @@
+/******************************************************************************
+ * hypervisor.h
+ *
+ * Linux-specific hypervisor handling.
+ *
+ * Copyright (c) 2002-2004, K A Fraser
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef __HYPERVISOR_H__
+#define __HYPERVISOR_H__
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/version.h>
+
+#include <xen/interface/xen.h>
+#include <xen/interface/version.h>
+
+#include <asm/ptrace.h>
+#include <asm/page.h>
+#include <asm/desc.h>
+#if defined(__i386__)
+# ifdef CONFIG_X86_PAE
+# include <asm-generic/pgtable-nopud.h>
+# else
+# include <asm-generic/pgtable-nopmd.h>
+# endif
+#endif
+#include <asm/xen/hypercall.h>
+
+/* arch/i386/kernel/setup.c */
+extern struct shared_info *HYPERVISOR_shared_info;
+extern struct start_info *xen_start_info;
+#define is_initial_xendomain() (xen_start_info->flags & SIF_INITDOMAIN)
+
+/* arch/i386/mach-xen/evtchn.c */
+/* Force a proper event-channel callback from Xen. */
+extern void force_evtchn_callback(void);
+
+/* Turn jiffies into Xen system time. */
+u64 jiffies_to_st(unsigned long jiffies);
+
+
+#define MULTI_UVMFLAGS_INDEX 3
+#define MULTI_UVMDOMID_INDEX 4
+
+#define is_running_on_xen() (xen_start_info ? 1 : 0)
+
+#endif /* __HYPERVISOR_H__ */
diff --git a/include/asm-i386/xen/interface.h b/include/asm-i386/xen/interface.h
new file mode 100644
index 00000000000..165c3968e13
--- /dev/null
+++ b/include/asm-i386/xen/interface.h
@@ -0,0 +1,188 @@
+/******************************************************************************
+ * arch-x86_32.h
+ *
+ * Guest OS interface to x86 32-bit Xen.
+ *
+ * Copyright (c) 2004, K A Fraser
+ */
+
+#ifndef __XEN_PUBLIC_ARCH_X86_32_H__
+#define __XEN_PUBLIC_ARCH_X86_32_H__
+
+#ifdef __XEN__
+#define __DEFINE_GUEST_HANDLE(name, type) \
+ typedef struct { type *p; } __guest_handle_ ## name
+#else
+#define __DEFINE_GUEST_HANDLE(name, type) \
+ typedef type * __guest_handle_ ## name
+#endif
+
+#define DEFINE_GUEST_HANDLE_STRUCT(name) \
+ __DEFINE_GUEST_HANDLE(name, struct name)
+#define DEFINE_GUEST_HANDLE(name) __DEFINE_GUEST_HANDLE(name, name)
+#define GUEST_HANDLE(name) __guest_handle_ ## name
+
+#ifndef __ASSEMBLY__
+/* Guest handles for primitive C types. */
+__DEFINE_GUEST_HANDLE(uchar, unsigned char);
+__DEFINE_GUEST_HANDLE(uint, unsigned int);
+__DEFINE_GUEST_HANDLE(ulong, unsigned long);
+DEFINE_GUEST_HANDLE(char);
+DEFINE_GUEST_HANDLE(int);
+DEFINE_GUEST_HANDLE(long);
+DEFINE_GUEST_HANDLE(void);
+#endif
+
+/*
+ * SEGMENT DESCRIPTOR TABLES
+ */
+/*
+ * A number of GDT entries are reserved by Xen. These are not situated at the
+ * start of the GDT because some stupid OSes export hard-coded selector values
+ * in their ABI. These hard-coded values are always near the start of the GDT,
+ * so Xen places itself out of the way, at the far end of the GDT.
+ */
+#define FIRST_RESERVED_GDT_PAGE 14
+#define FIRST_RESERVED_GDT_BYTE (FIRST_RESERVED_GDT_PAGE * 4096)
+#define FIRST_RESERVED_GDT_ENTRY (FIRST_RESERVED_GDT_BYTE / 8)
+
+/*
+ * These flat segments are in the Xen-private section of every GDT. Since these
+ * are also present in the initial GDT, many OSes will be able to avoid
+ * installing their own GDT.
+ */
+#define FLAT_RING1_CS 0xe019 /* GDT index 259 */
+#define FLAT_RING1_DS 0xe021 /* GDT index 260 */
+#define FLAT_RING1_SS 0xe021 /* GDT index 260 */
+#define FLAT_RING3_CS 0xe02b /* GDT index 261 */
+#define FLAT_RING3_DS 0xe033 /* GDT index 262 */
+#define FLAT_RING3_SS 0xe033 /* GDT index 262 */
+
+#define FLAT_KERNEL_CS FLAT_RING1_CS
+#define FLAT_KERNEL_DS FLAT_RING1_DS
+#define FLAT_KERNEL_SS FLAT_RING1_SS
+#define FLAT_USER_CS FLAT_RING3_CS
+#define FLAT_USER_DS FLAT_RING3_DS
+#define FLAT_USER_SS FLAT_RING3_SS
+
+/* And the trap vector is... */
+#define TRAP_INSTR "int $0x82"
+
+/*
+ * Virtual addresses beyond this are not modifiable by guest OSes. The
+ * machine->physical mapping table starts at this address, read-only.
+ */
+#ifdef CONFIG_X86_PAE
+#define __HYPERVISOR_VIRT_START 0xF5800000
+#else
+#define __HYPERVISOR_VIRT_START 0xFC000000
+#endif
+
+#ifndef HYPERVISOR_VIRT_START
+#define HYPERVISOR_VIRT_START mk_unsigned_long(__HYPERVISOR_VIRT_START)
+#endif
+
+#ifndef machine_to_phys_mapping
+#define machine_to_phys_mapping ((unsigned long *)HYPERVISOR_VIRT_START)
+#endif
+
+/* Maximum number of virtual CPUs in multi-processor guests. */
+#define MAX_VIRT_CPUS 32
+
+#ifndef __ASSEMBLY__
+
+/*
+ * Send an array of these to HYPERVISOR_set_trap_table()
+ */
+#define TI_GET_DPL(_ti) ((_ti)->flags & 3)
+#define TI_GET_IF(_ti) ((_ti)->flags & 4)
+#define TI_SET_DPL(_ti, _dpl) ((_ti)->flags |= (_dpl))
+#define TI_SET_IF(_ti, _if) ((_ti)->flags |= ((!!(_if))<<2))
+
+struct trap_info {
+ uint8_t vector; /* exception vector */
+ uint8_t flags; /* 0-3: privilege level; 4: clear event enable? */
+ uint16_t cs; /* code selector */
+ unsigned long address; /* code offset */
+};
+DEFINE_GUEST_HANDLE_STRUCT(trap_info);
+
+struct cpu_user_regs {
+ uint32_t ebx;
+ uint32_t ecx;
+ uint32_t edx;
+ uint32_t esi;
+ uint32_t edi;
+ uint32_t ebp;
+ uint32_t eax;
+ uint16_t error_code; /* private */
+ uint16_t entry_vector; /* private */
+ uint32_t eip;
+ uint16_t cs;
+ uint8_t saved_upcall_mask;
+ uint8_t _pad0;
+ uint32_t eflags; /* eflags.IF == !saved_upcall_mask */
+ uint32_t esp;
+ uint16_t ss, _pad1;
+ uint16_t es, _pad2;
+ uint16_t ds, _pad3;
+ uint16_t fs, _pad4;
+ uint16_t gs, _pad5;
+};
+DEFINE_GUEST_HANDLE_STRUCT(cpu_user_regs);
+
+typedef uint64_t tsc_timestamp_t; /* RDTSC timestamp */
+
+/*
+ * The following is all CPU context. Note that the fpu_ctxt block is filled
+ * in by FXSAVE if the CPU has feature FXSR; otherwise FSAVE is used.
+ */
+struct vcpu_guest_context {
+ /* FPU registers come first so they can be aligned for FXSAVE/FXRSTOR. */
+ struct { char x[512]; } fpu_ctxt; /* User-level FPU registers */
+#define VGCF_I387_VALID (1<<0)
+#define VGCF_HVM_GUEST (1<<1)
+#define VGCF_IN_KERNEL (1<<2)
+ unsigned long flags; /* VGCF_* flags */
+ struct cpu_user_regs user_regs; /* User-level CPU registers */
+ struct trap_info trap_ctxt[256]; /* Virtual IDT */
+ unsigned long ldt_base, ldt_ents; /* LDT (linear address, # ents) */
+ unsigned long gdt_frames[16], gdt_ents; /* GDT (machine frames, # ents) */
+ unsigned long kernel_ss, kernel_sp; /* Virtual TSS (only SS1/SP1) */
+ unsigned long ctrlreg[8]; /* CR0-CR7 (control registers) */
+ unsigned long debugreg[8]; /* DB0-DB7 (debug registers) */
+ unsigned long event_callback_cs; /* CS:EIP of event callback */
+ unsigned long event_callback_eip;
+ unsigned long failsafe_callback_cs; /* CS:EIP of failsafe callback */
+ unsigned long failsafe_callback_eip;
+ unsigned long vm_assist; /* VMASST_TYPE_* bitmap */
+};
+DEFINE_GUEST_HANDLE_STRUCT(vcpu_guest_context);
+
+struct arch_shared_info {
+ unsigned long max_pfn; /* max pfn that appears in table */
+ /* Frame containing list of mfns containing list of mfns containing p2m. */
+ unsigned long pfn_to_mfn_frame_list_list;
+ unsigned long nmi_reason;
+};
+
+struct arch_vcpu_info {
+ unsigned long cr2;
+ unsigned long pad[5]; /* sizeof(struct vcpu_info) == 64 */
+};
+
+#endif /* !__ASSEMBLY__ */
+
+/*
+ * Prefix forces emulation of some non-trapping instructions.
+ * Currently only CPUID.
+ */
+#ifdef __ASSEMBLY__
+#define XEN_EMULATE_PREFIX .byte 0x0f,0x0b,0x78,0x65,0x6e ;
+#define XEN_CPUID XEN_EMULATE_PREFIX cpuid
+#else
+#define XEN_EMULATE_PREFIX ".byte 0x0f,0x0b,0x78,0x65,0x6e ; "
+#define XEN_CPUID XEN_EMULATE_PREFIX "cpuid"
+#endif
+
+#endif
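
The emulate prefix is placed immediately before a CPUID instruction so Xen can trap and filter it even though CPUID does not normally fault from ring 1. A hedged sketch of how a guest might issue it from C, using only the XEN_CPUID macro defined above (register constraints follow the usual CPUID convention):

	static void xen_cpuid_sketch(unsigned int *ax, unsigned int *bx,
				     unsigned int *cx, unsigned int *dx)
	{
		/* XEN_CPUID expands to the emulate prefix followed by "cpuid" */
		asm volatile(XEN_CPUID
			     : "=a" (*ax), "=b" (*bx), "=c" (*cx), "=d" (*dx)
			     : "0" (*ax), "2" (*cx));
	}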