Diffstat (limited to 'arch/x86/include/asm')
-rw-r--r--arch/x86/include/asm/Kbuild28
-rw-r--r--arch/x86/include/asm/a.out.h20
-rw-r--r--arch/x86/include/asm/acenv.h49
-rw-r--r--arch/x86/include/asm/acpi.h63
-rw-r--r--arch/x86/include/asm/alternative-asm.h9
-rw-r--r--arch/x86/include/asm/alternative.h106
-rw-r--r--arch/x86/include/asm/amd_nb.h40
-rw-r--r--arch/x86/include/asm/apic.h141
-rw-r--r--arch/x86/include/asm/apicdef.h2
-rw-r--r--arch/x86/include/asm/archrandom.h63
-rw-r--r--arch/x86/include/asm/asm.h51
-rw-r--r--arch/x86/include/asm/atomic.h80
-rw-r--r--arch/x86/include/asm/atomic64_32.h156
-rw-r--r--arch/x86/include/asm/atomic64_64.h28
-rw-r--r--arch/x86/include/asm/auxvec.h12
-rw-r--r--arch/x86/include/asm/barrier.h157
-rw-r--r--arch/x86/include/asm/bitops.h99
-rw-r--r--arch/x86/include/asm/bitsperlong.h13
-rw-r--r--arch/x86/include/asm/boot.h11
-rw-r--r--arch/x86/include/asm/bootparam.h136
-rw-r--r--arch/x86/include/asm/bootparam_utils.h54
-rw-r--r--arch/x86/include/asm/bug.h4
-rw-r--r--arch/x86/include/asm/byteorder.h6
-rw-r--r--arch/x86/include/asm/cacheflush.h1
-rw-r--r--arch/x86/include/asm/calling.h100
-rw-r--r--arch/x86/include/asm/checksum.h4
-rw-r--r--arch/x86/include/asm/checksum_32.h22
-rw-r--r--arch/x86/include/asm/checksum_64.h11
-rw-r--r--arch/x86/include/asm/clocksource.h5
-rw-r--r--arch/x86/include/asm/cmdline.h6
-rw-r--r--arch/x86/include/asm/cmpxchg.h10
-rw-r--r--arch/x86/include/asm/cmpxchg_32.h55
-rw-r--r--arch/x86/include/asm/compat.h114
-rw-r--r--arch/x86/include/asm/context_tracking.h10
-rw-r--r--arch/x86/include/asm/cpu.h4
-rw-r--r--arch/x86/include/asm/cpu_device_id.h13
-rw-r--r--arch/x86/include/asm/cpufeature.h194
-rw-r--r--arch/x86/include/asm/cputime.h1
-rw-r--r--arch/x86/include/asm/crypto/aes.h (renamed from arch/x86/include/asm/aes.h)0
-rw-r--r--arch/x86/include/asm/crypto/camellia.h101
-rw-r--r--arch/x86/include/asm/crypto/glue_helper.h145
-rw-r--r--arch/x86/include/asm/crypto/serpent-avx.h48
-rw-r--r--arch/x86/include/asm/crypto/serpent-sse2.h (renamed from arch/x86/include/asm/serpent.h)4
-rw-r--r--arch/x86/include/asm/crypto/twofish.h46
-rw-r--r--arch/x86/include/asm/current.h2
-rw-r--r--arch/x86/include/asm/debugreg.h138
-rw-r--r--arch/x86/include/asm/desc.h125
-rw-r--r--arch/x86/include/asm/device.h7
-rw-r--r--arch/x86/include/asm/dma-contiguous.h12
-rw-r--r--arch/x86/include/asm/dma-mapping.h41
-rw-r--r--arch/x86/include/asm/dmi.h6
-rw-r--r--arch/x86/include/asm/e820.h76
-rw-r--r--arch/x86/include/asm/efi.h208
-rw-r--r--arch/x86/include/asm/elf.h67
-rw-r--r--arch/x86/include/asm/emergency-restart.h14
-rw-r--r--arch/x86/include/asm/entry_arch.h19
-rw-r--r--arch/x86/include/asm/errno.h1
-rw-r--r--arch/x86/include/asm/espfix.h16
-rw-r--r--arch/x86/include/asm/exec.h1
-rw-r--r--arch/x86/include/asm/fcntl.h1
-rw-r--r--arch/x86/include/asm/fixmap.h98
-rw-r--r--arch/x86/include/asm/floppy.h6
-rw-r--r--arch/x86/include/asm/fpu-internal.h619
-rw-r--r--arch/x86/include/asm/ftrace.h82
-rw-r--r--arch/x86/include/asm/futex.h45
-rw-r--r--arch/x86/include/asm/gpio.h57
-rw-r--r--arch/x86/include/asm/hardirq.h20
-rw-r--r--arch/x86/include/asm/hash.h7
-rw-r--r--arch/x86/include/asm/highmem.h2
-rw-r--r--arch/x86/include/asm/hpet.h8
-rw-r--r--arch/x86/include/asm/hugetlb.h6
-rw-r--r--arch/x86/include/asm/hw_breakpoint.h5
-rw-r--r--arch/x86/include/asm/hw_irq.h156
-rw-r--r--arch/x86/include/asm/hyperv.h194
-rw-r--r--arch/x86/include/asm/hypervisor.h30
-rw-r--r--arch/x86/include/asm/i387.h561
-rw-r--r--arch/x86/include/asm/ia32.h84
-rw-r--r--arch/x86/include/asm/idle.h1
-rw-r--r--arch/x86/include/asm/inat.h5
-rw-r--r--arch/x86/include/asm/init.h28
-rw-r--r--arch/x86/include/asm/insn.h18
-rw-r--r--arch/x86/include/asm/inst.h74
-rw-r--r--arch/x86/include/asm/intel-mid.h157
-rw-r--r--arch/x86/include/asm/intel_mid_vrtc.h9
-rw-r--r--arch/x86/include/asm/io.h23
-rw-r--r--arch/x86/include/asm/io_apic.h56
-rw-r--r--arch/x86/include/asm/ioctl.h1
-rw-r--r--arch/x86/include/asm/ioctls.h1
-rw-r--r--arch/x86/include/asm/iommu.h1
-rw-r--r--arch/x86/include/asm/iommu_table.h6
-rw-r--r--arch/x86/include/asm/iosf_mbi.h145
-rw-r--r--arch/x86/include/asm/ipcbuf.h1
-rw-r--r--arch/x86/include/asm/irq.h8
-rw-r--r--arch/x86/include/asm/irq_controller.h12
-rw-r--r--arch/x86/include/asm/irq_regs.h4
-rw-r--r--arch/x86/include/asm/irq_remapping.h124
-rw-r--r--arch/x86/include/asm/irq_vectors.h20
-rw-r--r--arch/x86/include/asm/irqflags.h2
-rw-r--r--arch/x86/include/asm/ist.h17
-rw-r--r--arch/x86/include/asm/jump_label.h13
-rw-r--r--arch/x86/include/asm/kbdleds.h17
-rw-r--r--arch/x86/include/asm/kdebug.h3
-rw-r--r--arch/x86/include/asm/kexec.h9
-rw-r--r--arch/x86/include/asm/kgdb.h10
-rw-r--r--arch/x86/include/asm/kprobes.h14
-rw-r--r--arch/x86/include/asm/kvm.h324
-rw-r--r--arch/x86/include/asm/kvm_emulate.h68
-rw-r--r--arch/x86/include/asm/kvm_guest.h6
-rw-r--r--arch/x86/include/asm/kvm_host.h318
-rw-r--r--arch/x86/include/asm/kvm_para.h148
-rw-r--r--arch/x86/include/asm/ldt.h40
-rw-r--r--arch/x86/include/asm/lguest.h17
-rw-r--r--arch/x86/include/asm/linkage.h18
-rw-r--r--arch/x86/include/asm/local.h47
-rw-r--r--arch/x86/include/asm/mc146818rtc.h5
-rw-r--r--arch/x86/include/asm/mca.h43
-rw-r--r--arch/x86/include/asm/mca_dma.h201
-rw-r--r--arch/x86/include/asm/mce.h122
-rw-r--r--arch/x86/include/asm/microcode.h40
-rw-r--r--arch/x86/include/asm/microcode_amd.h77
-rw-r--r--arch/x86/include/asm/microcode_intel.h87
-rw-r--r--arch/x86/include/asm/misc.h6
-rw-r--r--arch/x86/include/asm/mman.h8
-rw-r--r--arch/x86/include/asm/mmconfig.h4
-rw-r--r--arch/x86/include/asm/mmu.h2
-rw-r--r--arch/x86/include/asm/mmu_context.h32
-rw-r--r--arch/x86/include/asm/mmzone.h4
-rw-r--r--arch/x86/include/asm/mmzone_32.h15
-rw-r--r--arch/x86/include/asm/module.h2
-rw-r--r--arch/x86/include/asm/mpspec.h11
-rw-r--r--arch/x86/include/asm/mpspec_def.h3
-rw-r--r--arch/x86/include/asm/mrst-vrtc.h9
-rw-r--r--arch/x86/include/asm/mrst.h81
-rw-r--r--arch/x86/include/asm/msgbuf.h1
-rw-r--r--arch/x86/include/asm/mshyperv.h9
-rw-r--r--arch/x86/include/asm/msr-index.h465
-rw-r--r--arch/x86/include/asm/msr.h104
-rw-r--r--arch/x86/include/asm/mtrr.h95
-rw-r--r--arch/x86/include/asm/mutex.h4
-rw-r--r--arch/x86/include/asm/mutex_32.h11
-rw-r--r--arch/x86/include/asm/mutex_64.h41
-rw-r--r--arch/x86/include/asm/mwait.h46
-rw-r--r--arch/x86/include/asm/nmi.h29
-rw-r--r--arch/x86/include/asm/nops.h4
-rw-r--r--arch/x86/include/asm/numa.h14
-rw-r--r--arch/x86/include/asm/numa_64.h6
-rw-r--r--arch/x86/include/asm/numachip/numachip.h19
-rw-r--r--arch/x86/include/asm/numaq.h171
-rw-r--r--arch/x86/include/asm/olpc.h19
-rw-r--r--arch/x86/include/asm/page.h8
-rw-r--r--arch/x86/include/asm/page_32.h5
-rw-r--r--arch/x86/include/asm/page_32_types.h6
-rw-r--r--arch/x86/include/asm/page_64.h36
-rw-r--r--arch/x86/include/asm/page_64_types.h45
-rw-r--r--arch/x86/include/asm/page_types.h8
-rw-r--r--arch/x86/include/asm/param.h1
-rw-r--r--arch/x86/include/asm/paravirt.h104
-rw-r--r--arch/x86/include/asm/paravirt_types.h31
-rw-r--r--arch/x86/include/asm/parport.h4
-rw-r--r--arch/x86/include/asm/pci.h61
-rw-r--r--arch/x86/include/asm/pci_x86.h15
-rw-r--r--arch/x86/include/asm/percpu.h153
-rw-r--r--arch/x86/include/asm/perf_event.h76
-rw-r--r--arch/x86/include/asm/perf_event_p4.h62
-rw-r--r--arch/x86/include/asm/pgalloc.h11
-rw-r--r--arch/x86/include/asm/pgtable-2level.h63
-rw-r--r--arch/x86/include/asm/pgtable-3level.h63
-rw-r--r--arch/x86/include/asm/pgtable.h150
-rw-r--r--arch/x86/include/asm/pgtable_32.h6
-rw-r--r--arch/x86/include/asm/pgtable_64.h23
-rw-r--r--arch/x86/include/asm/pgtable_64_types.h8
-rw-r--r--arch/x86/include/asm/pgtable_types.h101
-rw-r--r--arch/x86/include/asm/poll.h1
-rw-r--r--arch/x86/include/asm/posix_types.h12
-rw-r--r--arch/x86/include/asm/posix_types_32.h85
-rw-r--r--arch/x86/include/asm/posix_types_64.h119
-rw-r--r--arch/x86/include/asm/prctl.h9
-rw-r--r--arch/x86/include/asm/preempt.h111
-rw-r--r--arch/x86/include/asm/processor-flags.h94
-rw-r--r--arch/x86/include/asm/processor.h309
-rw-r--r--arch/x86/include/asm/prom.h17
-rw-r--r--arch/x86/include/asm/proto.h4
-rw-r--r--arch/x86/include/asm/ptrace-abi.h87
-rw-r--r--arch/x86/include/asm/ptrace.h124
-rw-r--r--arch/x86/include/asm/pvclock-abi.h1
-rw-r--r--arch/x86/include/asm/pvclock.h48
-rw-r--r--arch/x86/include/asm/qrwlock.h17
-rw-r--r--arch/x86/include/asm/realmode.h64
-rw-r--r--arch/x86/include/asm/reboot.h4
-rw-r--r--arch/x86/include/asm/required-features.h8
-rw-r--r--arch/x86/include/asm/resource.h1
-rw-r--r--arch/x86/include/asm/rmwcc.h41
-rw-r--r--arch/x86/include/asm/rwsem.h28
-rw-r--r--arch/x86/include/asm/seccomp.h4
-rw-r--r--arch/x86/include/asm/segment.h63
-rw-r--r--arch/x86/include/asm/sembuf.h24
-rw-r--r--arch/x86/include/asm/setup.h26
-rw-r--r--arch/x86/include/asm/shmbuf.h1
-rw-r--r--arch/x86/include/asm/sigcontext.h213
-rw-r--r--arch/x86/include/asm/sigcontext32.h77
-rw-r--r--arch/x86/include/asm/sigframe.h13
-rw-r--r--arch/x86/include/asm/sighandling.h22
-rw-r--r--arch/x86/include/asm/siginfo.h10
-rw-r--r--arch/x86/include/asm/signal.h170
-rw-r--r--arch/x86/include/asm/simd.h11
-rw-r--r--arch/x86/include/asm/smap.h91
-rw-r--r--arch/x86/include/asm/smp.h40
-rw-r--r--arch/x86/include/asm/socket.h1
-rw-r--r--arch/x86/include/asm/sockios.h1
-rw-r--r--arch/x86/include/asm/special_insns.h207
-rw-r--r--arch/x86/include/asm/spinlock.h151
-rw-r--r--arch/x86/include/asm/spinlock_types.h21
-rw-r--r--arch/x86/include/asm/sta2x11.h12
-rw-r--r--arch/x86/include/asm/stackprotector.h5
-rw-r--r--arch/x86/include/asm/stat.h114
-rw-r--r--arch/x86/include/asm/statfs.h12
-rw-r--r--arch/x86/include/asm/string.h4
-rw-r--r--arch/x86/include/asm/suspend.h4
-rw-r--r--arch/x86/include/asm/suspend_32.h2
-rw-r--r--arch/x86/include/asm/suspend_64.h5
-rw-r--r--arch/x86/include/asm/svm.h79
-rw-r--r--arch/x86/include/asm/swab.h61
-rw-r--r--arch/x86/include/asm/swiotlb.h7
-rw-r--r--arch/x86/include/asm/switch_to.h129
-rw-r--r--arch/x86/include/asm/sync_bitops.h26
-rw-r--r--arch/x86/include/asm/sys_ia32.h43
-rw-r--r--arch/x86/include/asm/syscall.h29
-rw-r--r--arch/x86/include/asm/syscalls.h31
-rw-r--r--arch/x86/include/asm/sysfb.h98
-rw-r--r--arch/x86/include/asm/system.h523
-rw-r--r--arch/x86/include/asm/termbits.h1
-rw-r--r--arch/x86/include/asm/termios.h1
-rw-r--r--arch/x86/include/asm/thread_info.h142
-rw-r--r--arch/x86/include/asm/timer.h82
-rw-r--r--arch/x86/include/asm/tlb.h9
-rw-r--r--arch/x86/include/asm/tlbflush.h119
-rw-r--r--arch/x86/include/asm/topology.h64
-rw-r--r--arch/x86/include/asm/trace/exceptions.h52
-rw-r--r--arch/x86/include/asm/trace/irq_vectors.h115
-rw-r--r--arch/x86/include/asm/trace_clock.h20
-rw-r--r--arch/x86/include/asm/trampoline.h39
-rw-r--r--arch/x86/include/asm/traps.h59
-rw-r--r--arch/x86/include/asm/tsc.h8
-rw-r--r--arch/x86/include/asm/types.h6
-rw-r--r--arch/x86/include/asm/uaccess.h389
-rw-r--r--arch/x86/include/asm/uaccess_32.h54
-rw-r--r--arch/x86/include/asm/uaccess_64.h75
-rw-r--r--arch/x86/include/asm/ucontext.h12
-rw-r--r--arch/x86/include/asm/unistd.h37
-rw-r--r--arch/x86/include/asm/uprobes.h67
-rw-r--r--arch/x86/include/asm/user.h4
-rw-r--r--arch/x86/include/asm/uv/uv.h5
-rw-r--r--arch/x86/include/asm/uv/uv_bau.h32
-rw-r--r--arch/x86/include/asm/uv/uv_hub.h101
-rw-r--r--arch/x86/include/asm/uv/uv_mmrs.h1567
-rw-r--r--arch/x86/include/asm/vdso.h71
-rw-r--r--arch/x86/include/asm/vga.h6
-rw-r--r--arch/x86/include/asm/vgtod.h76
-rw-r--r--arch/x86/include/asm/virtext.h1
-rw-r--r--arch/x86/include/asm/visws/cobalt.h127
-rw-r--r--arch/x86/include/asm/visws/lithium.h53
-rw-r--r--arch/x86/include/asm/visws/piix4.h107
-rw-r--r--arch/x86/include/asm/visws/sgivw.h5
-rw-r--r--arch/x86/include/asm/vm86.h128
-rw-r--r--arch/x86/include/asm/vmx.h98
-rw-r--r--arch/x86/include/asm/vsyscall.h34
-rw-r--r--arch/x86/include/asm/vvar.h17
-rw-r--r--arch/x86/include/asm/word-at-a-time.h105
-rw-r--r--arch/x86/include/asm/x2apic.h17
-rw-r--r--arch/x86/include/asm/x86_init.h71
-rw-r--r--arch/x86/include/asm/xen/events.h5
-rw-r--r--arch/x86/include/asm/xen/hypercall.h31
-rw-r--r--arch/x86/include/asm/xen/hypervisor.h17
-rw-r--r--arch/x86/include/asm/xen/interface.h21
-rw-r--r--arch/x86/include/asm/xen/page-coherent.h38
-rw-r--r--arch/x86/include/asm/xen/page.h54
-rw-r--r--arch/x86/include/asm/xen/swiotlb-xen.h2
-rw-r--r--arch/x86/include/asm/xor.h495
-rw-r--r--arch/x86/include/asm/xor_32.h353
-rw-r--r--arch/x86/include/asm/xor_64.h352
-rw-r--r--arch/x86/include/asm/xor_avx.h184
-rw-r--r--arch/x86/include/asm/xsave.h59
282 files changed, 9602 insertions, 9030 deletions
diff --git a/arch/x86/include/asm/Kbuild b/arch/x86/include/asm/Kbuild
index b57e6a43a37..3ca9762e164 100644
--- a/arch/x86/include/asm/Kbuild
+++ b/arch/x86/include/asm/Kbuild
@@ -1,26 +1,10 @@
-include include/asm-generic/Kbuild.asm
-header-y += boot.h
-header-y += bootparam.h
-header-y += debugreg.h
-header-y += e820.h
-header-y += hw_breakpoint.h
-header-y += hyperv.h
-header-y += ist.h
-header-y += ldt.h
-header-y += mce.h
-header-y += msr-index.h
-header-y += msr.h
-header-y += mtrr.h
-header-y += posix_types_32.h
-header-y += posix_types_64.h
-header-y += prctl.h
-header-y += processor-flags.h
-header-y += ptrace-abi.h
-header-y += sigcontext32.h
-header-y += ucontext.h
-header-y += vm86.h
-header-y += vsyscall.h
genhdr-y += unistd_32.h
genhdr-y += unistd_64.h
+genhdr-y += unistd_x32.h
+
+generic-y += clkdev.h
+generic-y += early_ioremap.h
+generic-y += cputime.h
+generic-y += mcs_spinlock.h
diff --git a/arch/x86/include/asm/a.out.h b/arch/x86/include/asm/a.out.h
deleted file mode 100644
index 4684f97a5bb..00000000000
--- a/arch/x86/include/asm/a.out.h
+++ /dev/null
@@ -1,20 +0,0 @@
-#ifndef _ASM_X86_A_OUT_H
-#define _ASM_X86_A_OUT_H
-
-struct exec
-{
- unsigned int a_info; /* Use macros N_MAGIC, etc for access */
- unsigned a_text; /* length of text, in bytes */
- unsigned a_data; /* length of data, in bytes */
- unsigned a_bss; /* length of uninitialized data area for file, in bytes */
- unsigned a_syms; /* length of symbol table data in file, in bytes */
- unsigned a_entry; /* start address */
- unsigned a_trsize; /* length of relocation info for text, in bytes */
- unsigned a_drsize; /* length of relocation info for data, in bytes */
-};
-
-#define N_TRSIZE(a) ((a).a_trsize)
-#define N_DRSIZE(a) ((a).a_drsize)
-#define N_SYMSIZE(a) ((a).a_syms)
-
-#endif /* _ASM_X86_A_OUT_H */
diff --git a/arch/x86/include/asm/acenv.h b/arch/x86/include/asm/acenv.h
new file mode 100644
index 00000000000..66873297e9f
--- /dev/null
+++ b/arch/x86/include/asm/acenv.h
@@ -0,0 +1,49 @@
+/*
+ * X86 specific ACPICA environments and implementation
+ *
+ * Copyright (C) 2014, Intel Corporation
+ * Author: Lv Zheng <lv.zheng@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_X86_ACENV_H
+#define _ASM_X86_ACENV_H
+
+#include <asm/special_insns.h>
+
+/* Asm macros */
+
+#define ACPI_FLUSH_CPU_CACHE() wbinvd()
+
+#ifdef CONFIG_ACPI
+
+int __acpi_acquire_global_lock(unsigned int *lock);
+int __acpi_release_global_lock(unsigned int *lock);
+
+#define ACPI_ACQUIRE_GLOBAL_LOCK(facs, Acq) \
+ ((Acq) = __acpi_acquire_global_lock(&facs->global_lock))
+
+#define ACPI_RELEASE_GLOBAL_LOCK(facs, Acq) \
+ ((Acq) = __acpi_release_global_lock(&facs->global_lock))
+
+/*
+ * Math helper asm macros
+ */
+#define ACPI_DIV_64_BY_32(n_hi, n_lo, d32, q32, r32) \
+ asm("divl %2;" \
+ : "=a"(q32), "=d"(r32) \
+ : "r"(d32), \
+ "0"(n_lo), "1"(n_hi))
+
+#define ACPI_SHIFT_RIGHT_64(n_hi, n_lo) \
+ asm("shrl $1,%2 ;" \
+ "rcrl $1,%3;" \
+ : "=r"(n_hi), "=r"(n_lo) \
+ : "0"(n_hi), "1"(n_lo))
+
+#endif
+
+#endif /* _ASM_X86_ACENV_H */
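ACPI_DIV_64_BY_32() above hands the 64-bit dividend to the CPU in EDX:EAX and divides it with a single divl. A minimal user-space sketch of that same instruction sequence, assuming the quotient fits in 32 bits (ACPICA only calls it that way); the program is illustrative and not part of the patch:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t n_hi = 0x2, n_lo = 0x5;	/* dividend = 0x200000005 */
	uint32_t d32 = 16, q32, r32;

	/* divl divides EDX:EAX by the operand: quotient -> EAX, remainder -> EDX */
	asm("divl %2"
	    : "=a" (q32), "=d" (r32)
	    : "r" (d32), "0" (n_lo), "1" (n_hi));

	printf("q=%u r=%u\n", q32, r32);	/* q=536870912 r=5 */
	return 0;
}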
diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
index 610001d385d..e06225eda63 100644
--- a/arch/x86/include/asm/acpi.h
+++ b/arch/x86/include/asm/acpi.h
@@ -26,59 +26,11 @@
#include <acpi/pdc_intel.h>
#include <asm/numa.h>
+#include <asm/fixmap.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/mpspec.h>
-#include <asm/trampoline.h>
-
-#define COMPILER_DEPENDENT_INT64 long long
-#define COMPILER_DEPENDENT_UINT64 unsigned long long
-
-/*
- * Calling conventions:
- *
- * ACPI_SYSTEM_XFACE - Interfaces to host OS (handlers, threads)
- * ACPI_EXTERNAL_XFACE - External ACPI interfaces
- * ACPI_INTERNAL_XFACE - Internal ACPI interfaces
- * ACPI_INTERNAL_VAR_XFACE - Internal variable-parameter list interfaces
- */
-#define ACPI_SYSTEM_XFACE
-#define ACPI_EXTERNAL_XFACE
-#define ACPI_INTERNAL_XFACE
-#define ACPI_INTERNAL_VAR_XFACE
-
-/* Asm macros */
-
-#define ACPI_ASM_MACROS
-#define BREAKPOINT3
-#define ACPI_DISABLE_IRQS() local_irq_disable()
-#define ACPI_ENABLE_IRQS() local_irq_enable()
-#define ACPI_FLUSH_CPU_CACHE() wbinvd()
-
-int __acpi_acquire_global_lock(unsigned int *lock);
-int __acpi_release_global_lock(unsigned int *lock);
-
-#define ACPI_ACQUIRE_GLOBAL_LOCK(facs, Acq) \
- ((Acq) = __acpi_acquire_global_lock(&facs->global_lock))
-
-#define ACPI_RELEASE_GLOBAL_LOCK(facs, Acq) \
- ((Acq) = __acpi_release_global_lock(&facs->global_lock))
-
-/*
- * Math helper asm macros
- */
-#define ACPI_DIV_64_BY_32(n_hi, n_lo, d32, q32, r32) \
- asm("divl %2;" \
- : "=a"(q32), "=d"(r32) \
- : "r"(d32), \
- "0"(n_lo), "1"(n_hi))
-
-
-#define ACPI_SHIFT_RIGHT_64(n_hi, n_lo) \
- asm("shrl $1,%2 ;" \
- "rcrl $1,%3;" \
- : "=r"(n_hi), "=r"(n_lo) \
- : "0"(n_hi), "1"(n_lo))
+#include <asm/realmode.h>
#ifdef CONFIG_ACPI
extern int acpi_lapic;
@@ -90,6 +42,7 @@ extern int acpi_pci_disabled;
extern int acpi_skip_timer_override;
extern int acpi_use_timer_override;
extern int acpi_fix_pin2_polarity;
+extern int acpi_disable_cmcff;
extern u8 acpi_sci_flags;
extern int acpi_sci_override_gsi;
@@ -115,13 +68,10 @@ static inline void acpi_disable_pci(void)
}
/* Low-level suspend routine. */
-extern int acpi_suspend_lowlevel(void);
-
-extern const unsigned char acpi_wakeup_code[];
-#define acpi_wakeup_address (__pa(TRAMPOLINE_SYM(acpi_wakeup_code)))
+extern int (*acpi_suspend_lowlevel)(void);
-/* early initialization routine */
-extern void acpi_reserve_wakeup_memory(void);
+/* Physical address to resume after wakeup */
+#define acpi_wakeup_address ((unsigned long)(real_mode_header->wakeup_start))
/*
* Check if the CPU can handle C2 and deeper
@@ -175,6 +125,7 @@ static inline void arch_acpi_set_pdc_bits(u32 *buf)
#define acpi_lapic 0
#define acpi_ioapic 0
+#define acpi_disable_cmcff 0
static inline void acpi_noirq_set(void) { }
static inline void acpi_disable_pci(void) { }
static inline void disable_acpi(void) { }
diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
index 952bd0100c5..372231c22a4 100644
--- a/arch/x86/include/asm/alternative-asm.h
+++ b/arch/x86/include/asm/alternative-asm.h
@@ -1,3 +1,6 @@
+#ifndef _ASM_X86_ALTERNATIVE_ASM_H
+#define _ASM_X86_ALTERNATIVE_ASM_H
+
#ifdef __ASSEMBLY__
#include <asm/asm.h>
@@ -5,10 +8,10 @@
#ifdef CONFIG_SMP
.macro LOCK_PREFIX
672: lock
- .section .smp_locks,"a"
+ .pushsection .smp_locks,"a"
.balign 4
.long 672b - .
- .previous
+ .popsection
.endm
#else
.macro LOCK_PREFIX
@@ -24,3 +27,5 @@
.endm
#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_X86_ALTERNATIVE_ASM_H */
diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
index 37ad100a221..0a3f9c9f98d 100644
--- a/arch/x86/include/asm/alternative.h
+++ b/arch/x86/include/asm/alternative.h
@@ -5,6 +5,7 @@
#include <linux/stddef.h>
#include <linux/stringify.h>
#include <asm/asm.h>
+#include <asm/ptrace.h>
/*
* Alternative inline assembly for SMP.
@@ -29,10 +30,10 @@
#ifdef CONFIG_SMP
#define LOCK_PREFIX_HERE \
- ".section .smp_locks,\"a\"\n" \
- ".balign 4\n" \
- ".long 671f - .\n" /* offset */ \
- ".previous\n" \
+ ".pushsection .smp_locks,\"a\"\n" \
+ ".balign 4\n" \
+ ".long 671f - .\n" /* offset */ \
+ ".popsection\n" \
"671:"
#define LOCK_PREFIX LOCK_PREFIX_HERE "\n\tlock; "
@@ -60,7 +61,7 @@ extern void alternatives_smp_module_add(struct module *mod, char *name,
void *locks, void *locks_end,
void *text, void *text_end);
extern void alternatives_smp_module_del(struct module *mod);
-extern void alternatives_smp_switch(int smp);
+extern void alternatives_enable_smp(void);
extern int alternatives_text_reserved(void *start, void *end);
extern bool skip_smp_alternatives;
#else
@@ -68,30 +69,61 @@ static inline void alternatives_smp_module_add(struct module *mod, char *name,
void *locks, void *locks_end,
void *text, void *text_end) {}
static inline void alternatives_smp_module_del(struct module *mod) {}
-static inline void alternatives_smp_switch(int smp) {}
+static inline void alternatives_enable_smp(void) {}
static inline int alternatives_text_reserved(void *start, void *end)
{
return 0;
}
#endif /* CONFIG_SMP */
+#define OLDINSTR(oldinstr) "661:\n\t" oldinstr "\n662:\n"
+
+#define b_replacement(number) "663"#number
+#define e_replacement(number) "664"#number
+
+#define alt_slen "662b-661b"
+#define alt_rlen(number) e_replacement(number)"f-"b_replacement(number)"f"
+
+#define ALTINSTR_ENTRY(feature, number) \
+ " .long 661b - .\n" /* label */ \
+ " .long " b_replacement(number)"f - .\n" /* new instruction */ \
+ " .word " __stringify(feature) "\n" /* feature bit */ \
+ " .byte " alt_slen "\n" /* source len */ \
+ " .byte " alt_rlen(number) "\n" /* replacement len */
+
+#define DISCARD_ENTRY(number) /* rlen <= slen */ \
+ " .byte 0xff + (" alt_rlen(number) ") - (" alt_slen ")\n"
+
+#define ALTINSTR_REPLACEMENT(newinstr, feature, number) /* replacement */ \
+ b_replacement(number)":\n\t" newinstr "\n" e_replacement(number) ":\n\t"
+
/* alternative assembly primitive: */
#define ALTERNATIVE(oldinstr, newinstr, feature) \
- \
- "661:\n\t" oldinstr "\n662:\n" \
- ".section .altinstructions,\"a\"\n" \
- " .long 661b - .\n" /* label */ \
- " .long 663f - .\n" /* new instruction */ \
- " .word " __stringify(feature) "\n" /* feature bit */ \
- " .byte 662b-661b\n" /* sourcelen */ \
- " .byte 664f-663f\n" /* replacementlen */ \
- ".previous\n" \
- ".section .discard,\"aw\",@progbits\n" \
- " .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */ \
- ".previous\n" \
- ".section .altinstr_replacement, \"ax\"\n" \
- "663:\n\t" newinstr "\n664:\n" /* replacement */ \
- ".previous"
+ OLDINSTR(oldinstr) \
+ ".pushsection .altinstructions,\"a\"\n" \
+ ALTINSTR_ENTRY(feature, 1) \
+ ".popsection\n" \
+ ".pushsection .discard,\"aw\",@progbits\n" \
+ DISCARD_ENTRY(1) \
+ ".popsection\n" \
+ ".pushsection .altinstr_replacement, \"ax\"\n" \
+ ALTINSTR_REPLACEMENT(newinstr, feature, 1) \
+ ".popsection"
+
+#define ALTERNATIVE_2(oldinstr, newinstr1, feature1, newinstr2, feature2)\
+ OLDINSTR(oldinstr) \
+ ".pushsection .altinstructions,\"a\"\n" \
+ ALTINSTR_ENTRY(feature1, 1) \
+ ALTINSTR_ENTRY(feature2, 2) \
+ ".popsection\n" \
+ ".pushsection .discard,\"aw\",@progbits\n" \
+ DISCARD_ENTRY(1) \
+ DISCARD_ENTRY(2) \
+ ".popsection\n" \
+ ".pushsection .altinstr_replacement, \"ax\"\n" \
+ ALTINSTR_REPLACEMENT(newinstr1, feature1, 1) \
+ ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \
+ ".popsection"
/*
* This must be included *after* the definition of ALTERNATIVE due to
@@ -140,11 +172,30 @@ static inline int alternatives_text_reserved(void *start, void *end)
: output : [old] "i" (oldfunc), [new] "i" (newfunc), ## input)
/*
+ * Like alternative_call, but there are two features and respective functions.
+ * If CPU has feature2, function2 is used.
+ * Otherwise, if CPU has feature1, function1 is used.
+ * Otherwise, old function is used.
+ */
+#define alternative_call_2(oldfunc, newfunc1, feature1, newfunc2, feature2, \
+ output, input...) \
+ asm volatile (ALTERNATIVE_2("call %P[old]", "call %P[new1]", feature1,\
+ "call %P[new2]", feature2) \
+ : output : [old] "i" (oldfunc), [new1] "i" (newfunc1), \
+ [new2] "i" (newfunc2), ## input)
+
+/*
* use this macro(s) if you need more than one output parameter
* in alternative_io
*/
#define ASM_OUTPUT2(a...) a
+/*
+ * use this macro if you need clobbers but no inputs in
+ * alternative_{input,io,call}()
+ */
+#define ASM_NO_INPUT_CLOBBER(clbr...) "i" (0) : clbr
+
struct paravirt_patch_site;
#ifdef CONFIG_PARAVIRT
void apply_paravirt(struct paravirt_patch_site *start,
@@ -170,20 +221,11 @@ extern void *text_poke_early(void *addr, const void *opcode, size_t len);
* no thread can be preempted in the instructions being modified (no iret to an
* invalid instruction possible) or if the instructions are changed from a
* consistent state to another consistent state atomically.
- * More care must be taken when modifying code in the SMP case because of
- * Intel's errata. text_poke_smp() takes care that errata, but still
- * doesn't support NMI/MCE handler code modifying.
* On the local CPU you need to be protected again NMI or MCE handlers seeing an
* inconsistent instruction while you patch.
*/
-struct text_poke_param {
- void *addr;
- const void *opcode;
- size_t len;
-};
-
extern void *text_poke(void *addr, const void *opcode, size_t len);
-extern void *text_poke_smp(void *addr, const void *opcode, size_t len);
-extern void text_poke_smp_batch(struct text_poke_param *params, int n);
+extern int poke_int3_handler(struct pt_regs *regs);
+extern void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler);
#endif /* _ASM_X86_ALTERNATIVE_H */
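The single-replacement ALTERNATIVE() above is normally reached through the alternative()/alternative_io()/alternative_call() wrappers defined later in this header. A sketch of a typical caller; the function name is made up for illustration, but the same alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC) call appears verbatim in rdtsc_barrier() in <asm/barrier.h> further down:

static __always_inline void fence_if_rdtsc_needs_it(void)
{
	/* Runs a 3-byte NOP by default; apply_alternatives() patches in
	 * MFENCE at boot on CPUs that advertise X86_FEATURE_MFENCE_RDTSC. */
	alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC);
}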
diff --git a/arch/x86/include/asm/amd_nb.h b/arch/x86/include/asm/amd_nb.h
index 49ad773f4b9..aaac3b2fb74 100644
--- a/arch/x86/include/asm/amd_nb.h
+++ b/arch/x86/include/asm/amd_nb.h
@@ -19,17 +19,38 @@ extern int amd_cache_northbridges(void);
extern void amd_flush_garts(void);
extern int amd_numa_init(void);
extern int amd_get_subcaches(int);
-extern int amd_set_subcaches(int, int);
+extern int amd_set_subcaches(int, unsigned long);
struct amd_l3_cache {
unsigned indices;
u8 subcaches[4];
};
+struct threshold_block {
+ unsigned int block;
+ unsigned int bank;
+ unsigned int cpu;
+ u32 address;
+ u16 interrupt_enable;
+ bool interrupt_capable;
+ u16 threshold_limit;
+ struct kobject kobj;
+ struct list_head miscj;
+};
+
+struct threshold_bank {
+ struct kobject *kobj;
+ struct threshold_block *blocks;
+
+ /* initialized to the number of CPUs on the node sharing this bank */
+ atomic_t cpus;
+};
+
struct amd_northbridge {
struct pci_dev *misc;
struct pci_dev *link;
struct amd_l3_cache l3_cache;
+ struct threshold_bank *bank4;
};
struct amd_northbridge_info {
@@ -60,6 +81,23 @@ static inline struct amd_northbridge *node_to_amd_nb(int node)
return (node < amd_northbridges.num) ? &amd_northbridges.nb[node] : NULL;
}
+static inline u16 amd_get_node_id(struct pci_dev *pdev)
+{
+ struct pci_dev *misc;
+ int i;
+
+ for (i = 0; i != amd_nb_num(); i++) {
+ misc = node_to_amd_nb(i)->misc;
+
+ if (pci_domain_nr(misc->bus) == pci_domain_nr(pdev->bus) &&
+ PCI_SLOT(misc->devfn) == PCI_SLOT(pdev->devfn))
+ return i;
+ }
+
+ WARN(1, "Unable to find AMD Northbridge id for %s\n", pci_name(pdev));
+ return 0;
+}
+
#else
#define amd_nb_num(x) 0
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index 3ab9bdd87e7..19b0ebafcd3 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -11,8 +11,8 @@
#include <linux/atomic.h>
#include <asm/fixmap.h>
#include <asm/mpspec.h>
-#include <asm/system.h>
#include <asm/msr.h>
+#include <asm/idle.h>
#define ARCH_APICTIMER_STOPS_ON_C3 1
@@ -93,9 +93,6 @@ static inline int is_vsmp_box(void)
return 0;
}
#endif
-extern void xapic_wait_icr_idle(void);
-extern u32 safe_xapic_wait_icr_idle(void);
-extern void xapic_icr_write(u32, u32);
extern int setup_profiling_timer(unsigned int);
static inline void native_apic_mem_write(u32 reg, u32 v)
@@ -139,6 +136,11 @@ static inline void native_apic_msr_write(u32 reg, u32 v)
wrmsr(APIC_BASE_MSR + (reg >> 4), v, 0);
}
+static inline void native_apic_msr_eoi_write(u32 reg, u32 v)
+{
+ wrmsr(APIC_BASE_MSR + (APIC_EOI >> 4), APIC_EOI_ACK, 0);
+}
+
static inline u32 native_apic_msr_read(u32 reg)
{
u64 msr;
@@ -179,7 +181,6 @@ extern int x2apic_phys;
extern int x2apic_preenabled;
extern void check_x2apic(void);
extern void enable_x2apic(void);
-extern void x2apic_icr_write(u32 low, u32 id);
static inline int x2apic_enabled(void)
{
u64 msr;
@@ -216,7 +217,6 @@ static inline void x2apic_force_phys(void)
{
}
-#define nox2apic 0
#define x2apic_preenabled 0
#define x2apic_supported() 0
#endif
@@ -288,6 +288,7 @@ struct apic {
int (*probe)(void);
int (*acpi_madt_oem_check)(char *oem_id, char *oem_table_id);
+ int (*apic_id_valid)(int apicid);
int (*apic_id_registered)(void);
u32 irq_delivery_mode;
@@ -301,7 +302,8 @@ struct apic {
unsigned long (*check_apicid_used)(physid_mask_t *map, int apicid);
unsigned long (*check_apicid_present)(int apicid);
- void (*vector_allocation_domain)(int cpu, struct cpumask *retmask);
+ void (*vector_allocation_domain)(int cpu, struct cpumask *retmask,
+ const struct cpumask *mask);
void (*init_apic_ldr)(void);
void (*ioapic_phys_id_map)(physid_mask_t *phys_map, physid_mask_t *retmap);
@@ -326,9 +328,9 @@ struct apic {
unsigned long (*set_apic_id)(unsigned int id);
unsigned long apic_id_mask;
- unsigned int (*cpu_mask_to_apicid)(const struct cpumask *cpumask);
- unsigned int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask,
- const struct cpumask *andmask);
+ int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask,
+ const struct cpumask *andmask,
+ unsigned int *apicid);
/* ipi */
void (*send_IPI_mask)(const struct cpumask *mask, int vector);
@@ -344,13 +346,21 @@ struct apic {
int trampoline_phys_low;
int trampoline_phys_high;
- void (*wait_for_init_deassert)(atomic_t *deassert);
+ bool wait_for_init_deassert;
void (*smp_callin_clear_local_apic)(void);
void (*inquire_remote_apic)(int apicid);
/* apic ops */
u32 (*read)(u32 reg);
void (*write)(u32 reg, u32 v);
+ /*
+ * ->eoi_write() has the same signature as ->write().
+ *
+ * Drivers can support both ->eoi_write() and ->write() by passing the same
+ * callback value. Kernel can override ->eoi_write() and fall back
+ * on write for EOI.
+ */
+ void (*eoi_write)(u32 reg, u32 v);
u64 (*icr_read)(void);
void (*icr_write)(u32 low, u32 high);
void (*wait_icr_idle)(void);
@@ -395,7 +405,7 @@ extern struct apic *apic;
* to enforce the order with in them.
*/
#define apic_driver(sym) \
- static struct apic *__apicdrivers_##sym __used \
+ static const struct apic *__apicdrivers_##sym __used \
__aligned(sizeof(struct apic *)) \
__section(.apicdrivers) = { &sym }
@@ -426,6 +436,11 @@ static inline void apic_write(u32 reg, u32 val)
apic->write(reg, val);
}
+static inline void apic_eoi(void)
+{
+ apic->eoi_write(APIC_EOI, APIC_EOI_ACK);
+}
+
static inline u64 apic_icr_read(void)
{
return apic->icr_read();
@@ -446,14 +461,18 @@ static inline u32 safe_apic_wait_icr_idle(void)
return apic->safe_wait_icr_idle();
}
+extern void __init apic_set_eoi_write(void (*eoi_write)(u32 reg, u32 v));
+
#else /* CONFIG_X86_LOCAL_APIC */
static inline u32 apic_read(u32 reg) { return 0; }
static inline void apic_write(u32 reg, u32 val) { }
+static inline void apic_eoi(void) { }
static inline u64 apic_icr_read(void) { return 0; }
static inline void apic_icr_write(u32 low, u32 high) { }
static inline void apic_wait_icr_idle(void) { }
static inline u32 safe_apic_wait_icr_idle(void) { return 0; }
+static inline void apic_set_eoi_write(void (*eoi_write)(u32 reg, u32 v)) {}
#endif /* CONFIG_X86_LOCAL_APIC */
@@ -463,9 +482,7 @@ static inline void ack_APIC_irq(void)
* ack_APIC_irq() actually gets compiled as a single instruction
* ... yummie.
*/
-
- /* Docs say use 0 for future compatibility */
- apic_write(APIC_EOI, 0);
+ apic_eoi();
}
static inline unsigned default_get_apic_id(unsigned long x)
@@ -495,13 +512,6 @@ extern int default_cpu_present_to_apicid(int mps_cpu);
extern int default_check_phys_apicid_present(int phys_apicid);
#endif
-static inline void default_wait_for_init_deassert(atomic_t *deassert)
-{
- while (!atomic_read(deassert))
- cpu_relax();
- return;
-}
-
extern void generic_bigsmp_probe(void);
@@ -520,7 +530,12 @@ static inline const struct cpumask *default_target_cpus(void)
#endif
}
-DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);
+static inline const struct cpumask *online_target_cpus(void)
+{
+ return cpu_online_mask;
+}
+
+DECLARE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_bios_cpu_apicid);
static inline unsigned int read_apic_id(void)
@@ -532,6 +547,11 @@ static inline unsigned int read_apic_id(void)
return apic->get_apic_id(reg);
}
+static inline int default_apic_id_valid(int apicid)
+{
+ return (apicid < 255);
+}
+
extern void default_setup_apic_routing(void);
extern struct apic apic_noop;
@@ -564,21 +584,50 @@ static inline int default_phys_pkg_id(int cpuid_apic, int index_msb)
#endif
-static inline unsigned int
-default_cpu_mask_to_apicid(const struct cpumask *cpumask)
+static inline int
+flat_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+ const struct cpumask *andmask,
+ unsigned int *apicid)
{
- return cpumask_bits(cpumask)[0] & APIC_ALL_CPUS;
+ unsigned long cpu_mask = cpumask_bits(cpumask)[0] &
+ cpumask_bits(andmask)[0] &
+ cpumask_bits(cpu_online_mask)[0] &
+ APIC_ALL_CPUS;
+
+ if (likely(cpu_mask)) {
+ *apicid = (unsigned int)cpu_mask;
+ return 0;
+ } else {
+ return -EINVAL;
+ }
}
-static inline unsigned int
+extern int
default_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
- const struct cpumask *andmask)
+ const struct cpumask *andmask,
+ unsigned int *apicid);
+
+static inline void
+flat_vector_allocation_domain(int cpu, struct cpumask *retmask,
+ const struct cpumask *mask)
{
- unsigned long mask1 = cpumask_bits(cpumask)[0];
- unsigned long mask2 = cpumask_bits(andmask)[0];
- unsigned long mask3 = cpumask_bits(cpu_online_mask)[0];
+ /* Careful. Some cpus do not strictly honor the set of cpus
+ * specified in the interrupt destination when using lowest
+ * priority interrupt delivery mode.
+ *
+ * In particular there was a hyperthreading cpu observed to
+ * deliver interrupts to the wrong hyperthread when only one
+ * hyperthread was specified in the interrupt destination.
+ */
+ cpumask_clear(retmask);
+ cpumask_bits(retmask)[0] = APIC_ALL_CPUS;
+}
- return (unsigned int)(mask1 & mask2 & mask3);
+static inline void
+default_vector_allocation_domain(int cpu, struct cpumask *retmask,
+ const struct cpumask *mask)
+{
+ cpumask_copy(retmask, cpumask_of(cpu));
}
static inline unsigned long default_check_apicid_used(physid_mask_t *map, int apicid)
@@ -627,5 +676,33 @@ extern int default_check_phys_apicid_present(int phys_apicid);
#endif
#endif /* CONFIG_X86_LOCAL_APIC */
+extern void irq_enter(void);
+extern void irq_exit(void);
+
+static inline void entering_irq(void)
+{
+ irq_enter();
+ exit_idle();
+}
+
+static inline void entering_ack_irq(void)
+{
+ ack_APIC_irq();
+ entering_irq();
+}
+
+static inline void exiting_irq(void)
+{
+ irq_exit();
+}
+
+static inline void exiting_ack_irq(void)
+{
+ irq_exit();
+ /* Ack only at the end to avoid potential reentry */
+ ack_APIC_irq();
+}
+
+extern void ioapic_zap_locks(void);
#endif /* _ASM_X86_APIC_H */
diff --git a/arch/x86/include/asm/apicdef.h b/arch/x86/include/asm/apicdef.h
index 134bba00df0..c46bb99d5fb 100644
--- a/arch/x86/include/asm/apicdef.h
+++ b/arch/x86/include/asm/apicdef.h
@@ -37,7 +37,7 @@
#define APIC_ARBPRI_MASK 0xFFu
#define APIC_PROCPRI 0xA0
#define APIC_EOI 0xB0
-#define APIC_EIO_ACK 0x0
+#define APIC_EOI_ACK 0x0 /* Docs say 0 for future compat. */
#define APIC_RRR 0xC0
#define APIC_LDR 0xD0
#define APIC_LDR_MASK (0xFFu << 24)
diff --git a/arch/x86/include/asm/archrandom.h b/arch/x86/include/asm/archrandom.h
index 0d9ec770f2f..69f1366f1aa 100644
--- a/arch/x86/include/asm/archrandom.h
+++ b/arch/x86/include/asm/archrandom.h
@@ -1,7 +1,7 @@
/*
* This file is part of the Linux kernel.
*
- * Copyright (c) 2011, Intel Corporation
+ * Copyright (c) 2011-2014, Intel Corporation
* Authors: Fenghua Yu <fenghua.yu@intel.com>,
* H. Peter Anvin <hpa@linux.intel.com>
*
@@ -31,14 +31,41 @@
#define RDRAND_RETRY_LOOPS 10
#define RDRAND_INT ".byte 0x0f,0xc7,0xf0"
+#define RDSEED_INT ".byte 0x0f,0xc7,0xf8"
#ifdef CONFIG_X86_64
# define RDRAND_LONG ".byte 0x48,0x0f,0xc7,0xf0"
+# define RDSEED_LONG ".byte 0x48,0x0f,0xc7,0xf8"
#else
# define RDRAND_LONG RDRAND_INT
+# define RDSEED_LONG RDSEED_INT
#endif
#ifdef CONFIG_ARCH_RANDOM
+/* Instead of arch_get_random_long() when alternatives haven't run. */
+static inline int rdrand_long(unsigned long *v)
+{
+ int ok;
+ asm volatile("1: " RDRAND_LONG "\n\t"
+ "jc 2f\n\t"
+ "decl %0\n\t"
+ "jnz 1b\n\t"
+ "2:"
+ : "=r" (ok), "=a" (*v)
+ : "0" (RDRAND_RETRY_LOOPS));
+ return ok;
+}
+
+/* A single attempt at RDSEED */
+static inline bool rdseed_long(unsigned long *v)
+{
+ unsigned char ok;
+ asm volatile(RDSEED_LONG "\n\t"
+ "setc %0"
+ : "=qm" (ok), "=a" (*v));
+ return ok;
+}
+
#define GET_RANDOM(name, type, rdrand, nop) \
static inline int name(type *v) \
{ \
@@ -56,18 +83,52 @@ static inline int name(type *v) \
return ok; \
}
+#define GET_SEED(name, type, rdseed, nop) \
+static inline int name(type *v) \
+{ \
+ unsigned char ok; \
+ alternative_io("movb $0, %0\n\t" \
+ nop, \
+ rdseed "\n\t" \
+ "setc %0", \
+ X86_FEATURE_RDSEED, \
+ ASM_OUTPUT2("=q" (ok), "=a" (*v))); \
+ return ok; \
+}
+
#ifdef CONFIG_X86_64
GET_RANDOM(arch_get_random_long, unsigned long, RDRAND_LONG, ASM_NOP5);
GET_RANDOM(arch_get_random_int, unsigned int, RDRAND_INT, ASM_NOP4);
+GET_SEED(arch_get_random_seed_long, unsigned long, RDSEED_LONG, ASM_NOP5);
+GET_SEED(arch_get_random_seed_int, unsigned int, RDSEED_INT, ASM_NOP4);
+
#else
GET_RANDOM(arch_get_random_long, unsigned long, RDRAND_LONG, ASM_NOP3);
GET_RANDOM(arch_get_random_int, unsigned int, RDRAND_INT, ASM_NOP3);
+GET_SEED(arch_get_random_seed_long, unsigned long, RDSEED_LONG, ASM_NOP4);
+GET_SEED(arch_get_random_seed_int, unsigned int, RDSEED_INT, ASM_NOP4);
+
#endif /* CONFIG_X86_64 */
+#define arch_has_random() static_cpu_has(X86_FEATURE_RDRAND)
+#define arch_has_random_seed() static_cpu_has(X86_FEATURE_RDSEED)
+
+#else
+
+static inline int rdrand_long(unsigned long *v)
+{
+ return 0;
+}
+
+static inline bool rdseed_long(unsigned long *v)
+{
+ return 0;
+}
+
#endif /* CONFIG_ARCH_RANDOM */
extern void x86_init_rdrand(struct cpuinfo_x86 *c);
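The rdrand_long() retry loop added above can be exercised from user space with the same logic. A standalone sketch, assuming a CPU with RDRAND and an assembler that accepts the mnemonic (the kernel keeps the .byte encoding for older toolchains):

#include <stdio.h>

#define RDRAND_RETRY_LOOPS 10

static int rdrand_long(unsigned long *v)
{
	int ok;
	asm volatile("1: rdrand %1\n\t"
		     "jc 2f\n\t"		/* CF=1: hardware produced a value */
		     "decl %0\n\t"
		     "jnz 1b\n\t"
		     "2:"
		     : "=r" (ok), "=a" (*v)
		     : "0" (RDRAND_RETRY_LOOPS));
	return ok;			/* non-zero means *v holds a random value */
}

int main(void)
{
	unsigned long r;
	if (rdrand_long(&r))
		printf("rdrand: %#lx\n", r);
	else
		printf("rdrand gave no data after %d retries\n", RDRAND_RETRY_LOOPS);
	return 0;
}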
diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h
index 9412d6558c8..7730c1c5c83 100644
--- a/arch/x86/include/asm/asm.h
+++ b/arch/x86/include/asm/asm.h
@@ -3,23 +3,25 @@
#ifdef __ASSEMBLY__
# define __ASM_FORM(x) x
+# define __ASM_FORM_RAW(x) x
# define __ASM_FORM_COMMA(x) x,
-# define __ASM_EX_SEC .section __ex_table, "a"
#else
# define __ASM_FORM(x) " " #x " "
+# define __ASM_FORM_RAW(x) #x
# define __ASM_FORM_COMMA(x) " " #x ","
-# define __ASM_EX_SEC " .section __ex_table,\"a\"\n"
#endif
#ifdef CONFIG_X86_32
# define __ASM_SEL(a,b) __ASM_FORM(a)
+# define __ASM_SEL_RAW(a,b) __ASM_FORM_RAW(a)
#else
# define __ASM_SEL(a,b) __ASM_FORM(b)
+# define __ASM_SEL_RAW(a,b) __ASM_FORM_RAW(b)
#endif
#define __ASM_SIZE(inst, ...) __ASM_SEL(inst##l##__VA_ARGS__, \
inst##q##__VA_ARGS__)
-#define __ASM_REG(reg) __ASM_SEL(e##reg, r##reg)
+#define __ASM_REG(reg) __ASM_SEL_RAW(e##reg, r##reg)
#define _ASM_PTR __ASM_SEL(.long, .quad)
#define _ASM_ALIGN __ASM_SEL(.balign 4, .balign 8)
@@ -42,17 +44,40 @@
/* Exception table entry */
#ifdef __ASSEMBLY__
-# define _ASM_EXTABLE(from,to) \
- __ASM_EX_SEC ; \
- _ASM_ALIGN ; \
- _ASM_PTR from , to ; \
- .previous
+# define _ASM_EXTABLE(from,to) \
+ .pushsection "__ex_table","a" ; \
+ .balign 8 ; \
+ .long (from) - . ; \
+ .long (to) - . ; \
+ .popsection
+
+# define _ASM_EXTABLE_EX(from,to) \
+ .pushsection "__ex_table","a" ; \
+ .balign 8 ; \
+ .long (from) - . ; \
+ .long (to) - . + 0x7ffffff0 ; \
+ .popsection
+
+# define _ASM_NOKPROBE(entry) \
+ .pushsection "_kprobe_blacklist","aw" ; \
+ _ASM_ALIGN ; \
+ _ASM_PTR (entry); \
+ .popsection
#else
-# define _ASM_EXTABLE(from,to) \
- __ASM_EX_SEC \
- _ASM_ALIGN "\n" \
- _ASM_PTR #from "," #to "\n" \
- " .previous\n"
+# define _ASM_EXTABLE(from,to) \
+ " .pushsection \"__ex_table\",\"a\"\n" \
+ " .balign 8\n" \
+ " .long (" #from ") - .\n" \
+ " .long (" #to ") - .\n" \
+ " .popsection\n"
+
+# define _ASM_EXTABLE_EX(from,to) \
+ " .pushsection \"__ex_table\",\"a\"\n" \
+ " .balign 8\n" \
+ " .long (" #from ") - .\n" \
+ " .long (" #to ") - . + 0x7ffffff0\n" \
+ " .popsection\n"
+/* For C file, we already have NOKPROBE_SYMBOL macro */
#endif
#endif /* _ASM_X86_ASM_H */
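The relative-offset _ASM_EXTABLE() entries above are consumed by the fixup machinery exactly as the old absolute ones were. A hedged sketch of the canonical C-side use, modelled on the kernel's __get_user_asm() pattern; the helper name and the open-coded -14 (-EFAULT) are illustrative, not part of this patch:

static inline int try_load(const unsigned long *addr, unsigned long *val)
{
	int err = 0;

	asm volatile("1:	mov %2, %1\n"
		     "2:\n"
		     ".section .fixup,\"ax\"\n"
		     "3:	mov %3, %0\n"		/* fault: report -EFAULT */
		     "	jmp 2b\n"
		     ".previous\n"
		     _ASM_EXTABLE(1b, 3b)		/* fault at 1: resume at 3 */
		     : "=r" (err), "=r" (*val)
		     : "m" (*addr), "i" (-14), "0" (err));
	return err;
}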
diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
index 58cb6d4085f..6dd1c7dd047 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -6,6 +6,8 @@
#include <asm/processor.h>
#include <asm/alternative.h>
#include <asm/cmpxchg.h>
+#include <asm/rmwcc.h>
+#include <asm/barrier.h>
/*
* Atomic operations that C can't guarantee us. Useful for
@@ -76,12 +78,7 @@ static inline void atomic_sub(int i, atomic_t *v)
*/
static inline int atomic_sub_and_test(int i, atomic_t *v)
{
- unsigned char c;
-
- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
- : "+m" (v->counter), "=qm" (c)
- : "ir" (i) : "memory");
- return c;
+ GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", "e");
}
/**
@@ -118,12 +115,7 @@ static inline void atomic_dec(atomic_t *v)
*/
static inline int atomic_dec_and_test(atomic_t *v)
{
- unsigned char c;
-
- asm volatile(LOCK_PREFIX "decl %0; sete %1"
- : "+m" (v->counter), "=qm" (c)
- : : "memory");
- return c != 0;
+ GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", "e");
}
/**
@@ -136,12 +128,7 @@ static inline int atomic_dec_and_test(atomic_t *v)
*/
static inline int atomic_inc_and_test(atomic_t *v)
{
- unsigned char c;
-
- asm volatile(LOCK_PREFIX "incl %0; sete %1"
- : "+m" (v->counter), "=qm" (c)
- : : "memory");
- return c != 0;
+ GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", "e");
}
/**
@@ -155,12 +142,7 @@ static inline int atomic_inc_and_test(atomic_t *v)
*/
static inline int atomic_add_negative(int i, atomic_t *v)
{
- unsigned char c;
-
- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
- : "+m" (v->counter), "=qm" (c)
- : "ir" (i) : "memory");
- return c;
+ GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", "s");
}
/**
@@ -172,23 +154,7 @@ static inline int atomic_add_negative(int i, atomic_t *v)
*/
static inline int atomic_add_return(int i, atomic_t *v)
{
-#ifdef CONFIG_M386
- int __i;
- unsigned long flags;
- if (unlikely(boot_cpu_data.x86 <= 3))
- goto no_xadd;
-#endif
- /* Modern 486+ processor */
return i + xadd(&v->counter, i);
-
-#ifdef CONFIG_M386
-no_xadd: /* Legacy 386 processor */
- raw_local_irq_save(flags);
- __i = atomic_read(v);
- atomic_set(v, i + __i);
- raw_local_irq_restore(flags);
- return i + __i;
-#endif
}
/**
@@ -240,30 +206,6 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
return c;
}
-
-/*
- * atomic_dec_if_positive - decrement by 1 if old value positive
- * @v: pointer of type atomic_t
- *
- * The function returns the old value of *v minus 1, even if
- * the atomic variable, v, was not decremented.
- */
-static inline int atomic_dec_if_positive(atomic_t *v)
-{
- int c, old, dec;
- c = atomic_read(v);
- for (;;) {
- dec = c - 1;
- if (unlikely(dec < 0))
- break;
- old = atomic_cmpxchg((v), c, dec);
- if (likely(old == c))
- break;
- c = old;
- }
- return dec;
-}
-
/**
* atomic_inc_short - increment of a short integer
* @v: pointer to type int
@@ -302,16 +244,10 @@ static inline void atomic_or_long(unsigned long *v1, unsigned long v2)
: : "r" ((unsigned)(mask)), "m" (*(addr)) \
: "memory")
-/* Atomic operations are already serializing on x86 */
-#define smp_mb__before_atomic_dec() barrier()
-#define smp_mb__after_atomic_dec() barrier()
-#define smp_mb__before_atomic_inc() barrier()
-#define smp_mb__after_atomic_inc() barrier()
-
#ifdef CONFIG_X86_32
-# include "atomic64_32.h"
+# include <asm/atomic64_32.h>
#else
-# include "atomic64_64.h"
+# include <asm/atomic64_64.h>
#endif
#endif /* _ASM_X86_ATOMIC_H */
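The GEN_UNARY_RMWcc()/GEN_BINARY_RMWcc() conversions above emit the same instruction sequences the deleted open-coded asm did. A user-space sketch of what atomic_dec_and_test() boils down to, mirroring the removed "lock decl; sete" version rather than the asm-goto form in <asm/rmwcc.h>:

#include <stdio.h>

typedef struct { volatile int counter; } atomic_t;

static int atomic_dec_and_test(atomic_t *v)
{
	unsigned char c;

	asm volatile("lock; decl %0; sete %1"
		     : "+m" (v->counter), "=qm" (c)
		     : : "memory");
	return c != 0;			/* true once the counter hits zero */
}

int main(void)
{
	atomic_t v = { .counter = 2 };

	printf("%d\n", atomic_dec_and_test(&v));	/* 0: counter is now 1 */
	printf("%d\n", atomic_dec_and_test(&v));	/* 1: counter reached 0 */
	return 0;
}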
diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
index fa13f0ec287..b154de75c90 100644
--- a/arch/x86/include/asm/atomic64_32.h
+++ b/arch/x86/include/asm/atomic64_32.h
@@ -14,17 +14,56 @@ typedef struct {
#define ATOMIC64_INIT(val) { (val) }
+#define __ATOMIC64_DECL(sym) void atomic64_##sym(atomic64_t *, ...)
+#ifndef ATOMIC64_EXPORT
+#define ATOMIC64_DECL_ONE __ATOMIC64_DECL
+#else
+#define ATOMIC64_DECL_ONE(sym) __ATOMIC64_DECL(sym); \
+ ATOMIC64_EXPORT(atomic64_##sym)
+#endif
+
#ifdef CONFIG_X86_CMPXCHG64
-#define ATOMIC64_ALTERNATIVE_(f, g) "call atomic64_" #g "_cx8"
+#define __alternative_atomic64(f, g, out, in...) \
+ asm volatile("call %P[func]" \
+ : out : [func] "i" (atomic64_##g##_cx8), ## in)
+
+#define ATOMIC64_DECL(sym) ATOMIC64_DECL_ONE(sym##_cx8)
#else
-#define ATOMIC64_ALTERNATIVE_(f, g) ALTERNATIVE("call atomic64_" #f "_386", "call atomic64_" #g "_cx8", X86_FEATURE_CX8)
+#define __alternative_atomic64(f, g, out, in...) \
+ alternative_call(atomic64_##f##_386, atomic64_##g##_cx8, \
+ X86_FEATURE_CX8, ASM_OUTPUT2(out), ## in)
+
+#define ATOMIC64_DECL(sym) ATOMIC64_DECL_ONE(sym##_cx8); \
+ ATOMIC64_DECL_ONE(sym##_386)
+
+ATOMIC64_DECL_ONE(add_386);
+ATOMIC64_DECL_ONE(sub_386);
+ATOMIC64_DECL_ONE(inc_386);
+ATOMIC64_DECL_ONE(dec_386);
#endif
-#define ATOMIC64_ALTERNATIVE(f) ATOMIC64_ALTERNATIVE_(f, f)
+#define alternative_atomic64(f, out, in...) \
+ __alternative_atomic64(f, f, ASM_OUTPUT2(out), ## in)
+
+ATOMIC64_DECL(read);
+ATOMIC64_DECL(set);
+ATOMIC64_DECL(xchg);
+ATOMIC64_DECL(add_return);
+ATOMIC64_DECL(sub_return);
+ATOMIC64_DECL(inc_return);
+ATOMIC64_DECL(dec_return);
+ATOMIC64_DECL(dec_if_positive);
+ATOMIC64_DECL(inc_not_zero);
+ATOMIC64_DECL(add_unless);
+
+#undef ATOMIC64_DECL
+#undef ATOMIC64_DECL_ONE
+#undef __ATOMIC64_DECL
+#undef ATOMIC64_EXPORT
/**
* atomic64_cmpxchg - cmpxchg atomic64 variable
- * @p: pointer to type atomic64_t
+ * @v: pointer to type atomic64_t
* @o: expected value
* @n: new value
*
@@ -50,18 +89,16 @@ static inline long long atomic64_xchg(atomic64_t *v, long long n)
long long o;
unsigned high = (unsigned)(n >> 32);
unsigned low = (unsigned)n;
- asm volatile(ATOMIC64_ALTERNATIVE(xchg)
- : "=A" (o), "+b" (low), "+c" (high)
- : "S" (v)
- : "memory"
- );
+ alternative_atomic64(xchg, "=&A" (o),
+ "S" (v), "b" (low), "c" (high)
+ : "memory");
return o;
}
/**
* atomic64_set - set atomic64 variable
* @v: pointer to type atomic64_t
- * @n: value to assign
+ * @i: value to assign
*
* Atomically sets the value of @v to @n.
*/
@@ -69,11 +106,9 @@ static inline void atomic64_set(atomic64_t *v, long long i)
{
unsigned high = (unsigned)(i >> 32);
unsigned low = (unsigned)i;
- asm volatile(ATOMIC64_ALTERNATIVE(set)
- : "+b" (low), "+c" (high)
- : "S" (v)
- : "eax", "edx", "memory"
- );
+ alternative_atomic64(set, /* no output */,
+ "S" (v), "b" (low), "c" (high)
+ : "eax", "edx", "memory");
}
/**
@@ -85,10 +120,7 @@ static inline void atomic64_set(atomic64_t *v, long long i)
static inline long long atomic64_read(const atomic64_t *v)
{
long long r;
- asm volatile(ATOMIC64_ALTERNATIVE(read)
- : "=A" (r), "+c" (v)
- : : "memory"
- );
+ alternative_atomic64(read, "=&A" (r), "c" (v) : "memory");
return r;
}
@@ -101,10 +133,9 @@ static inline long long atomic64_read(const atomic64_t *v)
*/
static inline long long atomic64_add_return(long long i, atomic64_t *v)
{
- asm volatile(ATOMIC64_ALTERNATIVE(add_return)
- : "+A" (i), "+c" (v)
- : : "memory"
- );
+ alternative_atomic64(add_return,
+ ASM_OUTPUT2("+A" (i), "+c" (v)),
+ ASM_NO_INPUT_CLOBBER("memory"));
return i;
}
@@ -113,32 +144,25 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
*/
static inline long long atomic64_sub_return(long long i, atomic64_t *v)
{
- asm volatile(ATOMIC64_ALTERNATIVE(sub_return)
- : "+A" (i), "+c" (v)
- : : "memory"
- );
+ alternative_atomic64(sub_return,
+ ASM_OUTPUT2("+A" (i), "+c" (v)),
+ ASM_NO_INPUT_CLOBBER("memory"));
return i;
}
static inline long long atomic64_inc_return(atomic64_t *v)
{
long long a;
- asm volatile(ATOMIC64_ALTERNATIVE(inc_return)
- : "=A" (a)
- : "S" (v)
- : "memory", "ecx"
- );
+ alternative_atomic64(inc_return, "=&A" (a),
+ "S" (v) : "memory", "ecx");
return a;
}
static inline long long atomic64_dec_return(atomic64_t *v)
{
long long a;
- asm volatile(ATOMIC64_ALTERNATIVE(dec_return)
- : "=A" (a)
- : "S" (v)
- : "memory", "ecx"
- );
+ alternative_atomic64(dec_return, "=&A" (a),
+ "S" (v) : "memory", "ecx");
return a;
}
@@ -151,10 +175,9 @@ static inline long long atomic64_dec_return(atomic64_t *v)
*/
static inline long long atomic64_add(long long i, atomic64_t *v)
{
- asm volatile(ATOMIC64_ALTERNATIVE_(add, add_return)
- : "+A" (i), "+c" (v)
- : : "memory"
- );
+ __alternative_atomic64(add, add_return,
+ ASM_OUTPUT2("+A" (i), "+c" (v)),
+ ASM_NO_INPUT_CLOBBER("memory"));
return i;
}
@@ -167,10 +190,9 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
*/
static inline long long atomic64_sub(long long i, atomic64_t *v)
{
- asm volatile(ATOMIC64_ALTERNATIVE_(sub, sub_return)
- : "+A" (i), "+c" (v)
- : : "memory"
- );
+ __alternative_atomic64(sub, sub_return,
+ ASM_OUTPUT2("+A" (i), "+c" (v)),
+ ASM_NO_INPUT_CLOBBER("memory"));
return i;
}
@@ -178,7 +200,7 @@ static inline long long atomic64_sub(long long i, atomic64_t *v)
* atomic64_sub_and_test - subtract value from variable and test result
* @i: integer value to subtract
* @v: pointer to type atomic64_t
- *
+ *
* Atomically subtracts @i from @v and returns
* true if the result is zero, or false for all
* other cases.
@@ -196,24 +218,20 @@ static inline int atomic64_sub_and_test(long long i, atomic64_t *v)
*/
static inline void atomic64_inc(atomic64_t *v)
{
- asm volatile(ATOMIC64_ALTERNATIVE_(inc, inc_return)
- : : "S" (v)
- : "memory", "eax", "ecx", "edx"
- );
+ __alternative_atomic64(inc, inc_return, /* no output */,
+ "S" (v) : "memory", "eax", "ecx", "edx");
}
/**
* atomic64_dec - decrement atomic64 variable
- * @ptr: pointer to type atomic64_t
+ * @v: pointer to type atomic64_t
*
- * Atomically decrements @ptr by 1.
+ * Atomically decrements @v by 1.
*/
static inline void atomic64_dec(atomic64_t *v)
{
- asm volatile(ATOMIC64_ALTERNATIVE_(dec, dec_return)
- : : "S" (v)
- : "memory", "eax", "ecx", "edx"
- );
+ __alternative_atomic64(dec, dec_return, /* no output */,
+ "S" (v) : "memory", "eax", "ecx", "edx");
}
/**
@@ -263,15 +281,15 @@ static inline int atomic64_add_negative(long long i, atomic64_t *v)
* @u: ...unless v is equal to u.
*
* Atomically adds @a to @v, so long as it was not @u.
- * Returns the old value of @v.
+ * Returns non-zero if the add was done, zero otherwise.
*/
static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
{
unsigned low = (unsigned)u;
unsigned high = (unsigned)(u >> 32);
- asm volatile(ATOMIC64_ALTERNATIVE(add_unless) "\n\t"
- : "+A" (a), "+c" (v), "+S" (low), "+D" (high)
- : : "memory");
+ alternative_atomic64(add_unless,
+ ASM_OUTPUT2("+A" (a), "+c" (low), "+D" (high)),
+ "S" (v) : "memory");
return (int)a;
}
@@ -279,26 +297,20 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
static inline int atomic64_inc_not_zero(atomic64_t *v)
{
int r;
- asm volatile(ATOMIC64_ALTERNATIVE(inc_not_zero)
- : "=a" (r)
- : "S" (v)
- : "ecx", "edx", "memory"
- );
+ alternative_atomic64(inc_not_zero, "=&a" (r),
+ "S" (v) : "ecx", "edx", "memory");
return r;
}
static inline long long atomic64_dec_if_positive(atomic64_t *v)
{
long long r;
- asm volatile(ATOMIC64_ALTERNATIVE(dec_if_positive)
- : "=A" (r)
- : "S" (v)
- : "ecx", "memory"
- );
+ alternative_atomic64(dec_if_positive, "=&A" (r),
+ "S" (v) : "ecx", "memory");
return r;
}
-#undef ATOMIC64_ALTERNATIVE
-#undef ATOMIC64_ALTERNATIVE_
+#undef alternative_atomic64
+#undef __alternative_atomic64
#endif /* _ASM_X86_ATOMIC64_32_H */
diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
index 0e1cbfc8ee0..46e9052bbd2 100644
--- a/arch/x86/include/asm/atomic64_64.h
+++ b/arch/x86/include/asm/atomic64_64.h
@@ -72,12 +72,7 @@ static inline void atomic64_sub(long i, atomic64_t *v)
*/
static inline int atomic64_sub_and_test(long i, atomic64_t *v)
{
- unsigned char c;
-
- asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
- : "=m" (v->counter), "=qm" (c)
- : "er" (i), "m" (v->counter) : "memory");
- return c;
+ GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, "er", i, "%0", "e");
}
/**
@@ -116,12 +111,7 @@ static inline void atomic64_dec(atomic64_t *v)
*/
static inline int atomic64_dec_and_test(atomic64_t *v)
{
- unsigned char c;
-
- asm volatile(LOCK_PREFIX "decq %0; sete %1"
- : "=m" (v->counter), "=qm" (c)
- : "m" (v->counter) : "memory");
- return c != 0;
+ GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, "%0", "e");
}
/**
@@ -134,12 +124,7 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
*/
static inline int atomic64_inc_and_test(atomic64_t *v)
{
- unsigned char c;
-
- asm volatile(LOCK_PREFIX "incq %0; sete %1"
- : "=m" (v->counter), "=qm" (c)
- : "m" (v->counter) : "memory");
- return c != 0;
+ GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, "%0", "e");
}
/**
@@ -153,12 +138,7 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
*/
static inline int atomic64_add_negative(long i, atomic64_t *v)
{
- unsigned char c;
-
- asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
- : "=m" (v->counter), "=qm" (c)
- : "er" (i), "m" (v->counter) : "memory");
- return c;
+ GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, "er", i, "%0", "s");
}
/**
diff --git a/arch/x86/include/asm/auxvec.h b/arch/x86/include/asm/auxvec.h
deleted file mode 100644
index 1316b4c3542..00000000000
--- a/arch/x86/include/asm/auxvec.h
+++ /dev/null
@@ -1,12 +0,0 @@
-#ifndef _ASM_X86_AUXVEC_H
-#define _ASM_X86_AUXVEC_H
-/*
- * Architecture-neutral AT_ values in 0-17, leave some room
- * for more of them, start the x86-specific ones at 32.
- */
-#ifdef __i386__
-#define AT_SYSINFO 32
-#endif
-#define AT_SYSINFO_EHDR 33
-
-#endif /* _ASM_X86_AUXVEC_H */
diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
new file mode 100644
index 00000000000..5c7198cca5e
--- /dev/null
+++ b/arch/x86/include/asm/barrier.h
@@ -0,0 +1,157 @@
+#ifndef _ASM_X86_BARRIER_H
+#define _ASM_X86_BARRIER_H
+
+#include <asm/alternative.h>
+#include <asm/nops.h>
+
+/*
+ * Force strict CPU ordering.
+ * And yes, this is required on UP too when we're talking
+ * to devices.
+ */
+
+#ifdef CONFIG_X86_32
+/*
+ * Some non-Intel clones support out of order store. wmb() ceases to be a
+ * nop for these.
+ */
+#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
+#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
+#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
+#else
+#define mb() asm volatile("mfence":::"memory")
+#define rmb() asm volatile("lfence":::"memory")
+#define wmb() asm volatile("sfence" ::: "memory")
+#endif
+
+/**
+ * read_barrier_depends - Flush all pending reads that subsequent reads
+ * depend on.
+ *
+ * No data-dependent reads from memory-like regions are ever reordered
+ * over this barrier. All reads preceding this primitive are guaranteed
+ * to access memory (but not necessarily other CPUs' caches) before any
+ * reads following this primitive that depend on the data returned by
+ * any of the preceding reads. This primitive is much lighter weight than
+ * rmb() on most CPUs, and is never heavier weight than is
+ * rmb().
+ *
+ * These ordering constraints are respected by both the local CPU
+ * and the compiler.
+ *
+ * Ordering is not guaranteed by anything other than these primitives,
+ * not even by data dependencies. See the documentation for
+ * memory_barrier() for examples and URLs to more information.
+ *
+ * For example, the following code would force ordering (the initial
+ * value of "a" is zero, "b" is one, and "p" is "&a"):
+ *
+ * <programlisting>
+ * CPU 0 CPU 1
+ *
+ * b = 2;
+ * memory_barrier();
+ * p = &b; q = p;
+ * read_barrier_depends();
+ * d = *q;
+ * </programlisting>
+ *
+ * because the read of "*q" depends on the read of "p" and these
+ * two reads are separated by a read_barrier_depends(). However,
+ * the following code, with the same initial values for "a" and "b":
+ *
+ * <programlisting>
+ * CPU 0 CPU 1
+ *
+ * a = 2;
+ * memory_barrier();
+ * b = 3; y = b;
+ * read_barrier_depends();
+ * x = a;
+ * </programlisting>
+ *
+ * does not enforce ordering, since there is no data dependency between
+ * the read of "a" and the read of "b". Therefore, on some CPUs, such
+ * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
+ * in cases like this where there are no data dependencies.
+ **/
+
+#define read_barrier_depends() do { } while (0)
+
+#ifdef CONFIG_SMP
+#define smp_mb() mb()
+#ifdef CONFIG_X86_PPRO_FENCE
+# define smp_rmb() rmb()
+#else
+# define smp_rmb() barrier()
+#endif
+#define smp_wmb() barrier()
+#define smp_read_barrier_depends() read_barrier_depends()
+#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
+#else /* !SMP */
+#define smp_mb() barrier()
+#define smp_rmb() barrier()
+#define smp_wmb() barrier()
+#define smp_read_barrier_depends() do { } while (0)
+#define set_mb(var, value) do { var = value; barrier(); } while (0)
+#endif /* SMP */
+
+#if defined(CONFIG_X86_PPRO_FENCE)
+
+/*
+ * With CONFIG_X86_PPRO_FENCE, x86 doesn't have a strong TSO memory
+ * model and we should fall back to full barriers.
+ */
+
+#define smp_store_release(p, v) \
+do { \
+ compiletime_assert_atomic_type(*p); \
+ smp_mb(); \
+ ACCESS_ONCE(*p) = (v); \
+} while (0)
+
+#define smp_load_acquire(p) \
+({ \
+ typeof(*p) ___p1 = ACCESS_ONCE(*p); \
+ compiletime_assert_atomic_type(*p); \
+ smp_mb(); \
+ ___p1; \
+})
+
+#else /* regular x86 TSO memory ordering */
+
+#define smp_store_release(p, v) \
+do { \
+ compiletime_assert_atomic_type(*p); \
+ barrier(); \
+ ACCESS_ONCE(*p) = (v); \
+} while (0)
+
+#define smp_load_acquire(p) \
+({ \
+ typeof(*p) ___p1 = ACCESS_ONCE(*p); \
+ compiletime_assert_atomic_type(*p); \
+ barrier(); \
+ ___p1; \
+})
+
+#endif
+
+/* Atomic operations are already serializing on x86 */
+#define smp_mb__before_atomic() barrier()
+#define smp_mb__after_atomic() barrier()
+
+/*
+ * Stop RDTSC speculation. This is needed when you need to use RDTSC
+ * (or get_cycles or vread that possibly accesses the TSC) in a defined
+ * code region.
+ *
+ * (Could use a three-way alternative for this if there were one.)
+ */
+static __always_inline void rdtsc_barrier(void)
+{
+ alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC);
+ alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC);
+}
+
+#endif /* _ASM_X86_BARRIER_H */
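[Editor's sketch, not part of this patch] The smp_store_release()/smp_load_acquire() pair added above exists for the classic publish/consume handoff between two CPUs; on x86's TSO model both sides compile down to a plain access plus a compiler barrier. A minimal illustration of the intended usage follows; the msg/m_ready variables and the two functions are invented for this example.

struct msg { int payload; };
static struct msg m;
static int m_ready;	/* 0 = not published yet, 1 = published */

/* producer side */
static void publish(int value)
{
	m.payload = value;		/* fill in the data first */
	smp_store_release(&m_ready, 1);	/* all prior stores ordered before the flag */
}

/* consumer side */
static int consume(int *out)
{
	if (!smp_load_acquire(&m_ready))	/* flag load ordered before later loads */
		return 0;
	*out = m.payload;			/* sees the payload published above */
	return 1;
}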
diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
index b97596e2b68..afcd35d331d 100644
--- a/arch/x86/include/asm/bitops.h
+++ b/arch/x86/include/asm/bitops.h
@@ -14,6 +14,18 @@
#include <linux/compiler.h>
#include <asm/alternative.h>
+#include <asm/rmwcc.h>
+#include <asm/barrier.h>
+
+#if BITS_PER_LONG == 32
+# define _BITOPS_LONG_SHIFT 5
+#elif BITS_PER_LONG == 64
+# define _BITOPS_LONG_SHIFT 6
+#else
+# error "Unexpected BITS_PER_LONG"
+#endif
+
+#define BIT_64(n) (U64_C(1) << (n))
/*
* These have to be done with inline assembly: that way the bit-setting
@@ -57,7 +69,7 @@
* restricted to acting on a single-word quantity.
*/
static __always_inline void
-set_bit(unsigned int nr, volatile unsigned long *addr)
+set_bit(long nr, volatile unsigned long *addr)
{
if (IS_IMMEDIATE(nr)) {
asm volatile(LOCK_PREFIX "orb %1,%0"
@@ -79,7 +91,7 @@ set_bit(unsigned int nr, volatile unsigned long *addr)
* If it's called on the same region of memory simultaneously, the effect
* may be that only one operation succeeds.
*/
-static inline void __set_bit(int nr, volatile unsigned long *addr)
+static inline void __set_bit(long nr, volatile unsigned long *addr)
{
asm volatile("bts %1,%0" : ADDR : "Ir" (nr) : "memory");
}
@@ -91,11 +103,11 @@ static inline void __set_bit(int nr, volatile unsigned long *addr)
*
* clear_bit() is atomic and may not be reordered. However, it does
* not contain a memory barrier, so if it is used for locking purposes,
- * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
+ * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
* in order to ensure changes are visible on other processors.
*/
static __always_inline void
-clear_bit(int nr, volatile unsigned long *addr)
+clear_bit(long nr, volatile unsigned long *addr)
{
if (IS_IMMEDIATE(nr)) {
asm volatile(LOCK_PREFIX "andb %1,%0"
@@ -116,13 +128,13 @@ clear_bit(int nr, volatile unsigned long *addr)
* clear_bit() is atomic and implies release semantics before the memory
* operation. It can be used for an unlock.
*/
-static inline void clear_bit_unlock(unsigned nr, volatile unsigned long *addr)
+static inline void clear_bit_unlock(long nr, volatile unsigned long *addr)
{
barrier();
clear_bit(nr, addr);
}
-static inline void __clear_bit(int nr, volatile unsigned long *addr)
+static inline void __clear_bit(long nr, volatile unsigned long *addr)
{
asm volatile("btr %1,%0" : ADDR : "Ir" (nr));
}
@@ -139,15 +151,12 @@ static inline void __clear_bit(int nr, volatile unsigned long *addr)
* No memory barrier is required here, because x86 cannot reorder stores past
* older loads. Same principle as spin_unlock.
*/
-static inline void __clear_bit_unlock(unsigned nr, volatile unsigned long *addr)
+static inline void __clear_bit_unlock(long nr, volatile unsigned long *addr)
{
barrier();
__clear_bit(nr, addr);
}
-#define smp_mb__before_clear_bit() barrier()
-#define smp_mb__after_clear_bit() barrier()
-
/**
* __change_bit - Toggle a bit in memory
* @nr: the bit to change
@@ -157,7 +166,7 @@ static inline void __clear_bit_unlock(unsigned nr, volatile unsigned long *addr)
* If it's called on the same region of memory simultaneously, the effect
* may be that only one operation succeeds.
*/
-static inline void __change_bit(int nr, volatile unsigned long *addr)
+static inline void __change_bit(long nr, volatile unsigned long *addr)
{
asm volatile("btc %1,%0" : ADDR : "Ir" (nr));
}
@@ -171,7 +180,7 @@ static inline void __change_bit(int nr, volatile unsigned long *addr)
* Note that @nr may be almost arbitrarily large; this function is not
* restricted to acting on a single-word quantity.
*/
-static inline void change_bit(int nr, volatile unsigned long *addr)
+static inline void change_bit(long nr, volatile unsigned long *addr)
{
if (IS_IMMEDIATE(nr)) {
asm volatile(LOCK_PREFIX "xorb %1,%0"
@@ -192,14 +201,9 @@ static inline void change_bit(int nr, volatile unsigned long *addr)
* This operation is atomic and cannot be reordered.
* It also implies a memory barrier.
*/
-static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
+static inline int test_and_set_bit(long nr, volatile unsigned long *addr)
{
- int oldbit;
-
- asm volatile(LOCK_PREFIX "bts %2,%1\n\t"
- "sbb %0,%0" : "=r" (oldbit), ADDR : "Ir" (nr) : "memory");
-
- return oldbit;
+ GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", "c");
}
/**
@@ -210,7 +214,7 @@ static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
* This is the same as test_and_set_bit on x86.
*/
static __always_inline int
-test_and_set_bit_lock(int nr, volatile unsigned long *addr)
+test_and_set_bit_lock(long nr, volatile unsigned long *addr)
{
return test_and_set_bit(nr, addr);
}
@@ -224,7 +228,7 @@ test_and_set_bit_lock(int nr, volatile unsigned long *addr)
* If two examples of this operation race, one can appear to succeed
* but actually fail. You must protect multiple accesses with a lock.
*/
-static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
+static inline int __test_and_set_bit(long nr, volatile unsigned long *addr)
{
int oldbit;
@@ -243,15 +247,9 @@ static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
* This operation is atomic and cannot be reordered.
* It also implies a memory barrier.
*/
-static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
+static inline int test_and_clear_bit(long nr, volatile unsigned long *addr)
{
- int oldbit;
-
- asm volatile(LOCK_PREFIX "btr %2,%1\n\t"
- "sbb %0,%0"
- : "=r" (oldbit), ADDR : "Ir" (nr) : "memory");
-
- return oldbit;
+ GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", "c");
}
/**
@@ -262,8 +260,15 @@ static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
* This operation is non-atomic and can be reordered.
* If two examples of this operation race, one can appear to succeed
* but actually fail. You must protect multiple accesses with a lock.
+ *
+ * Note: the operation is performed atomically with respect to
+ * the local CPU, but not other CPUs. Portable code should not
+ * rely on this behaviour.
+ * KVM relies on this behaviour on x86 for modifying memory that is also
+ * accessed from a hypervisor on the same CPU if running in a VM: don't change
+ * this without also updating arch/x86/kernel/kvm.c
*/
-static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
+static inline int __test_and_clear_bit(long nr, volatile unsigned long *addr)
{
int oldbit;
@@ -275,7 +280,7 @@ static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
}
/* WARNING: non atomic and it can be reordered! */
-static inline int __test_and_change_bit(int nr, volatile unsigned long *addr)
+static inline int __test_and_change_bit(long nr, volatile unsigned long *addr)
{
int oldbit;
@@ -295,24 +300,18 @@ static inline int __test_and_change_bit(int nr, volatile unsigned long *addr)
* This operation is atomic and cannot be reordered.
* It also implies a memory barrier.
*/
-static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
+static inline int test_and_change_bit(long nr, volatile unsigned long *addr)
{
- int oldbit;
-
- asm volatile(LOCK_PREFIX "btc %2,%1\n\t"
- "sbb %0,%0"
- : "=r" (oldbit), ADDR : "Ir" (nr) : "memory");
-
- return oldbit;
+ GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", "c");
}
-static __always_inline int constant_test_bit(unsigned int nr, const volatile unsigned long *addr)
+static __always_inline int constant_test_bit(long nr, const volatile unsigned long *addr)
{
- return ((1UL << (nr % BITS_PER_LONG)) &
- (addr[nr / BITS_PER_LONG])) != 0;
+ return ((1UL << (nr & (BITS_PER_LONG-1))) &
+ (addr[nr >> _BITOPS_LONG_SHIFT])) != 0;
}
-static inline int variable_test_bit(int nr, volatile const unsigned long *addr)
+static inline int variable_test_bit(long nr, volatile const unsigned long *addr)
{
int oldbit;
@@ -346,7 +345,7 @@ static int test_bit(int nr, const volatile unsigned long *addr);
*/
static inline unsigned long __ffs(unsigned long word)
{
- asm("bsf %1,%0"
+ asm("rep; bsf %1,%0"
: "=r" (word)
: "rm" (word));
return word;
@@ -360,7 +359,7 @@ static inline unsigned long __ffs(unsigned long word)
*/
static inline unsigned long ffz(unsigned long word)
{
- asm("bsf %1,%0"
+ asm("rep; bsf %1,%0"
: "=r" (word)
: "r" (~word));
return word;
@@ -408,10 +407,9 @@ static inline int ffs(int x)
* We cannot do this on 32 bits because at the very least some
* 486 CPUs did not behave this way.
*/
- long tmp = -1;
asm("bsfl %1,%0"
: "=r" (r)
- : "rm" (x), "0" (tmp));
+ : "rm" (x), "0" (-1));
#elif defined(CONFIG_X86_CMOV)
asm("bsfl %1,%0\n\t"
"cmovzl %2,%0"
@@ -450,10 +448,9 @@ static inline int fls(int x)
* We cannot do this on 32 bits because at the very least some
* 486 CPUs did not behave this way.
*/
- long tmp = -1;
asm("bsrl %1,%0"
: "=r" (r)
- : "rm" (x), "0" (tmp));
+ : "rm" (x), "0" (-1));
#elif defined(CONFIG_X86_CMOV)
asm("bsrl %1,%0\n\t"
"cmovzl %2,%0"
@@ -481,13 +478,13 @@ static inline int fls(int x)
#ifdef CONFIG_X86_64
static __always_inline int fls64(__u64 x)
{
- long bitpos = -1;
+ int bitpos = -1;
/*
* AMD64 says BSRQ won't clobber the dest reg if x==0; Intel64 says the
* dest reg is undefined if x==0, but their CPU architect says its
* value is written to set it to the same as before.
*/
- asm("bsrq %1,%0"
+ asm("bsrq %1,%q0"
: "+r" (bitpos)
: "rm" (x));
return bitpos + 1;
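[Editor's sketch, not part of this patch] The reworked constant_test_bit() above splits a bit number into a word index (nr >> _BITOPS_LONG_SHIFT) and a bit offset within that word (nr & (BITS_PER_LONG-1)). A small self-contained illustration of the same indexing, written as ordinary userspace C and assuming a 64-bit long; the helper names are invented.

#include <stdio.h>

#define BITS_PER_LONG	(8 * sizeof(unsigned long))
#define LONG_SHIFT	6	/* log2(64), mirrors _BITOPS_LONG_SHIFT on 64-bit */

static int test_bit_sketch(long nr, const unsigned long *addr)
{
	/* word index: nr >> LONG_SHIFT; bit within word: nr & (BITS_PER_LONG-1) */
	return (addr[nr >> LONG_SHIFT] >> (nr & (BITS_PER_LONG - 1))) & 1;
}

int main(void)
{
	unsigned long bitmap[2] = { 0, 0 };

	bitmap[70 >> LONG_SHIFT] |= 1UL << (70 & (BITS_PER_LONG - 1));	/* set bit 70 */
	printf("bit 70: %d, bit 3: %d\n",
	       test_bit_sketch(70, bitmap), test_bit_sketch(3, bitmap));
	return 0;
}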
diff --git a/arch/x86/include/asm/bitsperlong.h b/arch/x86/include/asm/bitsperlong.h
deleted file mode 100644
index b0ae1c4dc79..00000000000
--- a/arch/x86/include/asm/bitsperlong.h
+++ /dev/null
@@ -1,13 +0,0 @@
-#ifndef __ASM_X86_BITSPERLONG_H
-#define __ASM_X86_BITSPERLONG_H
-
-#ifdef __x86_64__
-# define __BITS_PER_LONG 64
-#else
-# define __BITS_PER_LONG 32
-#endif
-
-#include <asm-generic/bitsperlong.h>
-
-#endif /* __ASM_X86_BITSPERLONG_H */
-
diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
index 5e1a2eef3e7..4fa687a47a6 100644
--- a/arch/x86/include/asm/boot.h
+++ b/arch/x86/include/asm/boot.h
@@ -1,14 +1,9 @@
#ifndef _ASM_X86_BOOT_H
#define _ASM_X86_BOOT_H
-/* Internal svga startup constants */
-#define NORMAL_VGA 0xffff /* 80x25 mode */
-#define EXTENDED_VGA 0xfffe /* 80x50 mode */
-#define ASK_VGA 0xfffd /* ask for it at bootup */
-
-#ifdef __KERNEL__
#include <asm/pgtable_types.h>
+#include <uapi/asm/boot.h>
/* Physical address where kernel should be loaded. */
#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
@@ -19,7 +14,7 @@
#ifdef CONFIG_X86_64
#define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
#else
-#define MIN_KERNEL_ALIGN_LG2 (PAGE_SHIFT + THREAD_ORDER)
+#define MIN_KERNEL_ALIGN_LG2 (PAGE_SHIFT + THREAD_SIZE_ORDER)
#endif
#define MIN_KERNEL_ALIGN (_AC(1, UL) << MIN_KERNEL_ALIGN_LG2)
@@ -42,6 +37,4 @@
#define BOOT_STACK_SIZE 0x1000
#endif
-#endif /* __KERNEL__ */
-
#endif /* _ASM_X86_BOOT_H */
diff --git a/arch/x86/include/asm/bootparam.h b/arch/x86/include/asm/bootparam.h
deleted file mode 100644
index 2f90c51cc49..00000000000
--- a/arch/x86/include/asm/bootparam.h
+++ /dev/null
@@ -1,136 +0,0 @@
-#ifndef _ASM_X86_BOOTPARAM_H
-#define _ASM_X86_BOOTPARAM_H
-
-#include <linux/types.h>
-#include <linux/screen_info.h>
-#include <linux/apm_bios.h>
-#include <linux/edd.h>
-#include <asm/e820.h>
-#include <asm/ist.h>
-#include <video/edid.h>
-
-/* setup data types */
-#define SETUP_NONE 0
-#define SETUP_E820_EXT 1
-#define SETUP_DTB 2
-
-/* extensible setup data list node */
-struct setup_data {
- __u64 next;
- __u32 type;
- __u32 len;
- __u8 data[0];
-};
-
-struct setup_header {
- __u8 setup_sects;
- __u16 root_flags;
- __u32 syssize;
- __u16 ram_size;
-#define RAMDISK_IMAGE_START_MASK 0x07FF
-#define RAMDISK_PROMPT_FLAG 0x8000
-#define RAMDISK_LOAD_FLAG 0x4000
- __u16 vid_mode;
- __u16 root_dev;
- __u16 boot_flag;
- __u16 jump;
- __u32 header;
- __u16 version;
- __u32 realmode_swtch;
- __u16 start_sys;
- __u16 kernel_version;
- __u8 type_of_loader;
- __u8 loadflags;
-#define LOADED_HIGH (1<<0)
-#define QUIET_FLAG (1<<5)
-#define KEEP_SEGMENTS (1<<6)
-#define CAN_USE_HEAP (1<<7)
- __u16 setup_move_size;
- __u32 code32_start;
- __u32 ramdisk_image;
- __u32 ramdisk_size;
- __u32 bootsect_kludge;
- __u16 heap_end_ptr;
- __u8 ext_loader_ver;
- __u8 ext_loader_type;
- __u32 cmd_line_ptr;
- __u32 initrd_addr_max;
- __u32 kernel_alignment;
- __u8 relocatable_kernel;
- __u8 _pad2[3];
- __u32 cmdline_size;
- __u32 hardware_subarch;
- __u64 hardware_subarch_data;
- __u32 payload_offset;
- __u32 payload_length;
- __u64 setup_data;
- __u64 pref_address;
- __u32 init_size;
-} __attribute__((packed));
-
-struct sys_desc_table {
- __u16 length;
- __u8 table[14];
-};
-
-/* Gleaned from OFW's set-parameters in cpu/x86/pc/linux.fth */
-struct olpc_ofw_header {
- __u32 ofw_magic; /* OFW signature */
- __u32 ofw_version;
- __u32 cif_handler; /* callback into OFW */
- __u32 irq_desc_table;
-} __attribute__((packed));
-
-struct efi_info {
- __u32 efi_loader_signature;
- __u32 efi_systab;
- __u32 efi_memdesc_size;
- __u32 efi_memdesc_version;
- __u32 efi_memmap;
- __u32 efi_memmap_size;
- __u32 efi_systab_hi;
- __u32 efi_memmap_hi;
-};
-
-/* The so-called "zeropage" */
-struct boot_params {
- struct screen_info screen_info; /* 0x000 */
- struct apm_bios_info apm_bios_info; /* 0x040 */
- __u8 _pad2[4]; /* 0x054 */
- __u64 tboot_addr; /* 0x058 */
- struct ist_info ist_info; /* 0x060 */
- __u8 _pad3[16]; /* 0x070 */
- __u8 hd0_info[16]; /* obsolete! */ /* 0x080 */
- __u8 hd1_info[16]; /* obsolete! */ /* 0x090 */
- struct sys_desc_table sys_desc_table; /* 0x0a0 */
- struct olpc_ofw_header olpc_ofw_header; /* 0x0b0 */
- __u8 _pad4[128]; /* 0x0c0 */
- struct edid_info edid_info; /* 0x140 */
- struct efi_info efi_info; /* 0x1c0 */
- __u32 alt_mem_k; /* 0x1e0 */
- __u32 scratch; /* Scratch field! */ /* 0x1e4 */
- __u8 e820_entries; /* 0x1e8 */
- __u8 eddbuf_entries; /* 0x1e9 */
- __u8 edd_mbr_sig_buf_entries; /* 0x1ea */
- __u8 _pad6[6]; /* 0x1eb */
- struct setup_header hdr; /* setup header */ /* 0x1f1 */
- __u8 _pad7[0x290-0x1f1-sizeof(struct setup_header)];
- __u32 edd_mbr_sig_buffer[EDD_MBR_SIG_MAX]; /* 0x290 */
- struct e820entry e820_map[E820MAX]; /* 0x2d0 */
- __u8 _pad8[48]; /* 0xcd0 */
- struct edd_info eddbuf[EDDMAXNR]; /* 0xd00 */
- __u8 _pad9[276]; /* 0xeec */
-} __attribute__((packed));
-
-enum {
- X86_SUBARCH_PC = 0,
- X86_SUBARCH_LGUEST,
- X86_SUBARCH_XEN,
- X86_SUBARCH_MRST,
- X86_SUBARCH_CE4100,
- X86_NR_SUBARCHS,
-};
-
-
-
-#endif /* _ASM_X86_BOOTPARAM_H */
diff --git a/arch/x86/include/asm/bootparam_utils.h b/arch/x86/include/asm/bootparam_utils.h
new file mode 100644
index 00000000000..4a8cb8d7cbd
--- /dev/null
+++ b/arch/x86/include/asm/bootparam_utils.h
@@ -0,0 +1,54 @@
+#ifndef _ASM_X86_BOOTPARAM_UTILS_H
+#define _ASM_X86_BOOTPARAM_UTILS_H
+
+#include <asm/bootparam.h>
+
+/*
+ * This file is included from multiple environments. Do not
+ * add the #includes that would make it standalone.
+ */
+
+/*
+ * Deal with bootloaders which fail to initialize unknown fields in
+ * boot_params to zero. The fields in this list are taken from
+ * analysis of kexec-tools; if other broken bootloaders initialize a
+ * different set of fields we will need to figure out how to disambiguate.
+ *
+ * Note: efi_info is commonly left uninitialized, but that field has a
+ * private magic, so it is better to leave it unchanged.
+ */
+static void sanitize_boot_params(struct boot_params *boot_params)
+{
+ /*
+ * IMPORTANT NOTE TO BOOTLOADER AUTHORS: do not simply clear
+ * this field. The purpose of this field is to guarantee
+ * compliance with the x86 boot spec located in
+ * Documentation/x86/boot.txt . That spec says that the
+ * *whole* structure should be cleared, after which only the
+ * portion defined by struct setup_header (boot_params->hdr)
+ * should be copied in.
+ *
+ * If you're having an issue because the sentinel is set, you
+ * need to change the whole structure to be cleared, not this
+ * (or any other) individual field, or you will soon have
+ * problems again.
+ */
+ if (boot_params->sentinel) {
+ /* fields in boot_params are left uninitialized, clear them */
+ memset(&boot_params->ext_ramdisk_image, 0,
+ (char *)&boot_params->efi_info -
+ (char *)&boot_params->ext_ramdisk_image);
+ memset(&boot_params->kbd_status, 0,
+ (char *)&boot_params->hdr -
+ (char *)&boot_params->kbd_status);
+ memset(&boot_params->_pad7[0], 0,
+ (char *)&boot_params->edd_mbr_sig_buffer[0] -
+ (char *)&boot_params->_pad7[0]);
+ memset(&boot_params->_pad8[0], 0,
+ (char *)&boot_params->eddbuf[0] -
+ (char *)&boot_params->_pad8[0]);
+ memset(&boot_params->_pad9[0], 0, sizeof(boot_params->_pad9));
+ }
+}
+
+#endif /* _ASM_X86_BOOTPARAM_UTILS_H */
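[Editor's sketch, not part of this patch] sanitize_boot_params() above zeroes each suspect range by subtracting the addresses of two struct members. A tiny userspace illustration of that "clear everything between field A and field B" idiom, using an invented struct that is not the real boot_params layout.

#include <stdio.h>
#include <string.h>

struct demo {
	int keep_a;	/* preserved */
	int scratch1;	/* first field of the range to clear */
	int scratch2;
	int keep_b;	/* first field after the range, preserved */
};

int main(void)
{
	struct demo d = { 1, 2, 3, 4 };

	/* clear from &scratch1 up to (but not including) &keep_b */
	memset(&d.scratch1, 0, (char *)&d.keep_b - (char *)&d.scratch1);
	printf("%d %d %d %d\n", d.keep_a, d.scratch1, d.scratch2, d.keep_b);	/* 1 0 0 4 */
	return 0;
}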
diff --git a/arch/x86/include/asm/bug.h b/arch/x86/include/asm/bug.h
index f654d1bb17f..ba38ebbaced 100644
--- a/arch/x86/include/asm/bug.h
+++ b/arch/x86/include/asm/bug.h
@@ -1,7 +1,6 @@
#ifndef _ASM_X86_BUG_H
#define _ASM_X86_BUG_H
-#ifdef CONFIG_BUG
#define HAVE_ARCH_BUG
#ifdef CONFIG_DEBUG_BUGVERBOSE
@@ -33,7 +32,6 @@ do { \
} while (0)
#endif
-#endif /* !CONFIG_BUG */
-
#include <asm-generic/bug.h>
+
#endif /* _ASM_X86_BUG_H */
diff --git a/arch/x86/include/asm/byteorder.h b/arch/x86/include/asm/byteorder.h
deleted file mode 100644
index b13a7a88f3e..00000000000
--- a/arch/x86/include/asm/byteorder.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _ASM_X86_BYTEORDER_H
-#define _ASM_X86_BYTEORDER_H
-
-#include <linux/byteorder/little_endian.h>
-
-#endif /* _ASM_X86_BYTEORDER_H */
diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
index 4e12668711e..9863ee3747d 100644
--- a/arch/x86/include/asm/cacheflush.h
+++ b/arch/x86/include/asm/cacheflush.h
@@ -3,6 +3,7 @@
/* Caches aren't brain-dead on the intel. */
#include <asm-generic/cacheflush.h>
+#include <asm/special_insns.h>
#ifdef CONFIG_X86_PAT
/*
diff --git a/arch/x86/include/asm/calling.h b/arch/x86/include/asm/calling.h
index a9e3a740f69..cb4c73bfeb4 100644
--- a/arch/x86/include/asm/calling.h
+++ b/arch/x86/include/asm/calling.h
@@ -46,41 +46,41 @@ For 32-bit we have the following conventions - kernel is built with
*/
-#include "dwarf2.h"
+#include <asm/dwarf2.h>
+
+#ifdef CONFIG_X86_64
/*
- * 64-bit system call stack frame layout defines and helpers, for
- * assembly code (note that the seemingly unnecessary parentheses
- * are to prevent cpp from inserting spaces in expressions that get
- * passed to macros):
+ * 64-bit system call stack frame layout defines and helpers,
+ * for assembly code:
*/
-#define R15 (0)
-#define R14 (8)
-#define R13 (16)
-#define R12 (24)
-#define RBP (32)
-#define RBX (40)
+#define R15 0
+#define R14 8
+#define R13 16
+#define R12 24
+#define RBP 32
+#define RBX 40
/* arguments: interrupts/non tracing syscalls only save up to here: */
-#define R11 (48)
-#define R10 (56)
-#define R9 (64)
-#define R8 (72)
-#define RAX (80)
-#define RCX (88)
-#define RDX (96)
-#define RSI (104)
-#define RDI (112)
-#define ORIG_RAX (120) /* + error_code */
+#define R11 48
+#define R10 56
+#define R9 64
+#define R8 72
+#define RAX 80
+#define RCX 88
+#define RDX 96
+#define RSI 104
+#define RDI 112
+#define ORIG_RAX 120 /* + error_code */
/* end of arguments */
/* cpu exception frame or undefined in case of fast syscall: */
-#define RIP (128)
-#define CS (136)
-#define EFLAGS (144)
-#define RSP (152)
-#define SS (160)
+#define RIP 128
+#define CS 136
+#define EFLAGS 144
+#define RSP 152
+#define SS 160
#define ARGOFFSET R11
#define SWFRAME ORIG_RAX
@@ -194,3 +194,51 @@ For 32-bit we have the following conventions - kernel is built with
.macro icebp
.byte 0xf1
.endm
+
+#else /* CONFIG_X86_64 */
+
+/*
+ * For 32-bit, these are simplified versions of SAVE_ALL/RESTORE_ALL. They
+ * differ from the entry_32.S versions in that they do not change the segment
+ * registers, so they are only suitable for in-kernel use, not when transitioning
+ * from or to user space. The resulting stack frame is not a standard
+ * pt_regs frame. The main use case is calling C code from assembler
+ * when all the registers need to be preserved.
+ */
+
+ .macro SAVE_ALL
+ pushl_cfi %eax
+ CFI_REL_OFFSET eax, 0
+ pushl_cfi %ebp
+ CFI_REL_OFFSET ebp, 0
+ pushl_cfi %edi
+ CFI_REL_OFFSET edi, 0
+ pushl_cfi %esi
+ CFI_REL_OFFSET esi, 0
+ pushl_cfi %edx
+ CFI_REL_OFFSET edx, 0
+ pushl_cfi %ecx
+ CFI_REL_OFFSET ecx, 0
+ pushl_cfi %ebx
+ CFI_REL_OFFSET ebx, 0
+ .endm
+
+ .macro RESTORE_ALL
+ popl_cfi %ebx
+ CFI_RESTORE ebx
+ popl_cfi %ecx
+ CFI_RESTORE ecx
+ popl_cfi %edx
+ CFI_RESTORE edx
+ popl_cfi %esi
+ CFI_RESTORE esi
+ popl_cfi %edi
+ CFI_RESTORE edi
+ popl_cfi %ebp
+ CFI_RESTORE ebp
+ popl_cfi %eax
+ CFI_RESTORE eax
+ .endm
+
+#endif /* CONFIG_X86_64 */
+
diff --git a/arch/x86/include/asm/checksum.h b/arch/x86/include/asm/checksum.h
index 848850fd7d6..5f5bb0f9736 100644
--- a/arch/x86/include/asm/checksum.h
+++ b/arch/x86/include/asm/checksum.h
@@ -1,5 +1,5 @@
#ifdef CONFIG_X86_32
-# include "checksum_32.h"
+# include <asm/checksum_32.h>
#else
-# include "checksum_64.h"
+# include <asm/checksum_64.h>
#endif
diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
index 46fc474fd81..f50de695173 100644
--- a/arch/x86/include/asm/checksum_32.h
+++ b/arch/x86/include/asm/checksum_32.h
@@ -49,9 +49,15 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
int len, __wsum sum,
int *err_ptr)
{
+ __wsum ret;
+
might_sleep();
- return csum_partial_copy_generic((__force void *)src, dst,
- len, sum, err_ptr, NULL);
+ stac();
+ ret = csum_partial_copy_generic((__force void *)src, dst,
+ len, sum, err_ptr, NULL);
+ clac();
+
+ return ret;
}
/*
@@ -176,10 +182,16 @@ static inline __wsum csum_and_copy_to_user(const void *src,
int len, __wsum sum,
int *err_ptr)
{
+ __wsum ret;
+
might_sleep();
- if (access_ok(VERIFY_WRITE, dst, len))
- return csum_partial_copy_generic(src, (__force void *)dst,
- len, sum, NULL, err_ptr);
+ if (access_ok(VERIFY_WRITE, dst, len)) {
+ stac();
+ ret = csum_partial_copy_generic(src, (__force void *)dst,
+ len, sum, NULL, err_ptr);
+ clac();
+ return ret;
+ }
if (len)
*err_ptr = -EFAULT;
diff --git a/arch/x86/include/asm/checksum_64.h b/arch/x86/include/asm/checksum_64.h
index 9bfdc41629e..cd00e177449 100644
--- a/arch/x86/include/asm/checksum_64.h
+++ b/arch/x86/include/asm/checksum_64.h
@@ -133,7 +133,7 @@ extern __wsum csum_partial(const void *buff, int len, __wsum sum);
/* Do not call this directly. Use the wrappers below */
-extern __wsum csum_partial_copy_generic(const void *src, const void *dst,
+extern __visible __wsum csum_partial_copy_generic(const void *src, const void *dst,
int len, __wsum sum,
int *src_err_ptr, int *dst_err_ptr);
@@ -184,8 +184,15 @@ static inline unsigned add32_with_carry(unsigned a, unsigned b)
asm("addl %2,%0\n\t"
"adcl $0,%0"
: "=r" (a)
- : "0" (a), "r" (b));
+ : "0" (a), "rm" (b));
return a;
}
+#define HAVE_ARCH_CSUM_ADD
+static inline __wsum csum_add(__wsum csum, __wsum addend)
+{
+ return (__force __wsum)add32_with_carry((__force unsigned)csum,
+ (__force unsigned)addend);
+}
+
#endif /* _ASM_X86_CHECKSUM_64_H */
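[Editor's sketch, not part of this patch] The add32_with_carry()/csum_add() pair above performs an end-around-carry add: any carry out of bit 31 is folded back into bit 0, which is how ones' complement Internet checksums are combined. A portable illustration of the same arithmetic without inline assembly; the function name is invented.

#include <stdint.h>
#include <stdio.h>

static uint32_t add32_with_carry_sketch(uint32_t a, uint32_t b)
{
	uint64_t sum = (uint64_t)a + b;

	/* fold the carry (bit 32) back into the low 32 bits; cannot overflow again */
	return (uint32_t)sum + (uint32_t)(sum >> 32);
}

int main(void)
{
	/* 0xffffffff + 2 = 0x1_00000001; low word 1 plus carry 1 gives 2 */
	printf("0x%08x\n", (unsigned)add32_with_carry_sketch(0xffffffffu, 2));
	return 0;
}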
diff --git a/arch/x86/include/asm/clocksource.h b/arch/x86/include/asm/clocksource.h
index 0bdbbb3b9ce..eda81dc0f4a 100644
--- a/arch/x86/include/asm/clocksource.h
+++ b/arch/x86/include/asm/clocksource.h
@@ -3,16 +3,13 @@
#ifndef _ASM_X86_CLOCKSOURCE_H
#define _ASM_X86_CLOCKSOURCE_H
-#ifdef CONFIG_X86_64
-
#define VCLOCK_NONE 0 /* No vDSO clock available. */
#define VCLOCK_TSC 1 /* vDSO should use vread_tsc. */
#define VCLOCK_HPET 2 /* vDSO should use vread_hpet. */
+#define VCLOCK_PVCLOCK 3 /* vDSO should use vread_pvclock. */
struct arch_clocksource_data {
int vclock_mode;
};
-#endif /* CONFIG_X86_64 */
-
#endif /* _ASM_X86_CLOCKSOURCE_H */
diff --git a/arch/x86/include/asm/cmdline.h b/arch/x86/include/asm/cmdline.h
new file mode 100644
index 00000000000..e01f7f7ccb0
--- /dev/null
+++ b/arch/x86/include/asm/cmdline.h
@@ -0,0 +1,6 @@
+#ifndef _ASM_X86_CMDLINE_H
+#define _ASM_X86_CMDLINE_H
+
+int cmdline_find_option_bool(const char *cmdline_ptr, const char *option);
+
+#endif /* _ASM_X86_CMDLINE_H */
diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
index b3b73326290..d47786acb01 100644
--- a/arch/x86/include/asm/cmpxchg.h
+++ b/arch/x86/include/asm/cmpxchg.h
@@ -35,7 +35,7 @@ extern void __add_wrong_size(void)
/*
* An exchange-type operation, which takes a value and a pointer, and
- * returns a the old value.
+ * returns the old value.
*/
#define __xchg_op(ptr, arg, op, lock) \
({ \
@@ -43,7 +43,7 @@ extern void __add_wrong_size(void)
switch (sizeof(*(ptr))) { \
case __X86_CASE_B: \
asm volatile (lock #op "b %b0, %1\n" \
- : "+r" (__ret), "+m" (*(ptr)) \
+ : "+q" (__ret), "+m" (*(ptr)) \
: : "memory", "cc"); \
break; \
case __X86_CASE_W: \
@@ -138,9 +138,9 @@ extern void __add_wrong_size(void)
__raw_cmpxchg((ptr), (old), (new), (size), "")
#ifdef CONFIG_X86_32
-# include "cmpxchg_32.h"
+# include <asm/cmpxchg_32.h>
#else
-# include "cmpxchg_64.h"
+# include <asm/cmpxchg_64.h>
#endif
#ifdef __HAVE_ARCH_CMPXCHG
@@ -173,7 +173,7 @@ extern void __add_wrong_size(void)
switch (sizeof(*(ptr))) { \
case __X86_CASE_B: \
asm volatile (lock "addb %b1, %0\n" \
- : "+m" (*(ptr)) : "ri" (inc) \
+ : "+m" (*(ptr)) : "qi" (inc) \
: "memory", "cc"); \
break; \
case __X86_CASE_W: \
diff --git a/arch/x86/include/asm/cmpxchg_32.h b/arch/x86/include/asm/cmpxchg_32.h
index 53f4b219336..f8bf2eecab8 100644
--- a/arch/x86/include/asm/cmpxchg_32.h
+++ b/arch/x86/include/asm/cmpxchg_32.h
@@ -34,9 +34,7 @@ static inline void set_64bit(volatile u64 *ptr, u64 value)
: "memory");
}
-#ifdef CONFIG_X86_CMPXCHG
#define __HAVE_ARCH_CMPXCHG 1
-#endif
#ifdef CONFIG_X86_CMPXCHG64
#define cmpxchg64(ptr, o, n) \
@@ -73,59 +71,6 @@ static inline u64 __cmpxchg64_local(volatile u64 *ptr, u64 old, u64 new)
return prev;
}
-#ifndef CONFIG_X86_CMPXCHG
-/*
- * Building a kernel capable running on 80386. It may be necessary to
- * simulate the cmpxchg on the 80386 CPU. For that purpose we define
- * a function for each of the sizes we support.
- */
-
-extern unsigned long cmpxchg_386_u8(volatile void *, u8, u8);
-extern unsigned long cmpxchg_386_u16(volatile void *, u16, u16);
-extern unsigned long cmpxchg_386_u32(volatile void *, u32, u32);
-
-static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old,
- unsigned long new, int size)
-{
- switch (size) {
- case 1:
- return cmpxchg_386_u8(ptr, old, new);
- case 2:
- return cmpxchg_386_u16(ptr, old, new);
- case 4:
- return cmpxchg_386_u32(ptr, old, new);
- }
- return old;
-}
-
-#define cmpxchg(ptr, o, n) \
-({ \
- __typeof__(*(ptr)) __ret; \
- if (likely(boot_cpu_data.x86 > 3)) \
- __ret = (__typeof__(*(ptr)))__cmpxchg((ptr), \
- (unsigned long)(o), (unsigned long)(n), \
- sizeof(*(ptr))); \
- else \
- __ret = (__typeof__(*(ptr)))cmpxchg_386((ptr), \
- (unsigned long)(o), (unsigned long)(n), \
- sizeof(*(ptr))); \
- __ret; \
-})
-#define cmpxchg_local(ptr, o, n) \
-({ \
- __typeof__(*(ptr)) __ret; \
- if (likely(boot_cpu_data.x86 > 3)) \
- __ret = (__typeof__(*(ptr)))__cmpxchg_local((ptr), \
- (unsigned long)(o), (unsigned long)(n), \
- sizeof(*(ptr))); \
- else \
- __ret = (__typeof__(*(ptr)))cmpxchg_386((ptr), \
- (unsigned long)(o), (unsigned long)(n), \
- sizeof(*(ptr))); \
- __ret; \
-})
-#endif
-
#ifndef CONFIG_X86_CMPXCHG64
/*
* Building a kernel capable of running on 80386 and 80486. It may be necessary
diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
index 30d737ef2a4..59c6c401f79 100644
--- a/arch/x86/include/asm/compat.h
+++ b/arch/x86/include/asm/compat.h
@@ -6,7 +6,9 @@
*/
#include <linux/types.h>
#include <linux/sched.h>
+#include <asm/processor.h>
#include <asm/user32.h>
+#include <asm/unistd.h>
#define COMPAT_USER_HZ 100
#define COMPAT_UTS_MACHINE "i686\0\0"
@@ -39,6 +41,7 @@ typedef s64 __attribute__((aligned(4))) compat_s64;
typedef u32 compat_uint_t;
typedef u32 compat_ulong_t;
typedef u64 __attribute__((aligned(4))) compat_u64;
+typedef u32 compat_uptr_t;
struct compat_timespec {
compat_time_t tv_sec;
@@ -122,6 +125,78 @@ typedef u32 compat_old_sigset_t; /* at least 32 bits */
typedef u32 compat_sigset_word;
+typedef union compat_sigval {
+ compat_int_t sival_int;
+ compat_uptr_t sival_ptr;
+} compat_sigval_t;
+
+typedef struct compat_siginfo {
+ int si_signo;
+ int si_errno;
+ int si_code;
+
+ union {
+ int _pad[128/sizeof(int) - 3];
+
+ /* kill() */
+ struct {
+ unsigned int _pid; /* sender's pid */
+ unsigned int _uid; /* sender's uid */
+ } _kill;
+
+ /* POSIX.1b timers */
+ struct {
+ compat_timer_t _tid; /* timer id */
+ int _overrun; /* overrun count */
+ compat_sigval_t _sigval; /* same as below */
+ int _sys_private; /* not to be passed to user */
+ int _overrun_incr; /* amount to add to overrun */
+ } _timer;
+
+ /* POSIX.1b signals */
+ struct {
+ unsigned int _pid; /* sender's pid */
+ unsigned int _uid; /* sender's uid */
+ compat_sigval_t _sigval;
+ } _rt;
+
+ /* SIGCHLD */
+ struct {
+ unsigned int _pid; /* which child */
+ unsigned int _uid; /* sender's uid */
+ int _status; /* exit code */
+ compat_clock_t _utime;
+ compat_clock_t _stime;
+ } _sigchld;
+
+ /* SIGCHLD (x32 version) */
+ struct {
+ unsigned int _pid; /* which child */
+ unsigned int _uid; /* sender's uid */
+ int _status; /* exit code */
+ compat_s64 _utime;
+ compat_s64 _stime;
+ } _sigchld_x32;
+
+ /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
+ struct {
+ unsigned int _addr; /* faulting insn/memory ref. */
+ } _sigfault;
+
+ /* SIGPOLL */
+ struct {
+ int _band; /* POLL_IN, POLL_OUT, POLL_MSG */
+ int _fd;
+ } _sigpoll;
+
+ struct {
+ unsigned int _call_addr; /* calling insn */
+ int _syscall; /* triggering system call number */
+ unsigned int _arch; /* AUDIT_ARCH_* of syscall */
+ } _sigsys;
+ } _sifields;
+} compat_siginfo_t;
+
#define COMPAT_OFF_T_MAX 0x7fffffff
#define COMPAT_LOFF_T_MAX 0x7fffffffffffffffL
@@ -186,7 +261,20 @@ struct compat_shmid64_ds {
/*
* The type of struct elf_prstatus.pr_reg in compatible core dumps.
*/
+#ifdef CONFIG_X86_X32_ABI
+typedef struct user_regs_struct compat_elf_gregset_t;
+
+#define PR_REG_SIZE(S) (test_thread_flag(TIF_IA32) ? 68 : 216)
+#define PRSTATUS_SIZE(S) (test_thread_flag(TIF_IA32) ? 144 : 296)
+#define SET_PR_FPVALID(S,V) \
+ do { *(int *) (((void *) &((S)->pr_reg)) + PR_REG_SIZE(0)) = (V); } \
+ while (0)
+
+#define COMPAT_USE_64BIT_TIME \
+ (!!(task_pt_regs(current)->orig_ax & __X32_SYSCALL_BIT))
+#else
typedef struct user_regs_struct32 compat_elf_gregset_t;
+#endif
/*
* A pointer passed in from user mode. This should not
@@ -194,7 +282,6 @@ typedef struct user_regs_struct32 compat_elf_gregset_t;
* as pointers because the syscall entry code will have
* appropriately converted them already.
*/
-typedef u32 compat_uptr_t;
static inline void __user *compat_ptr(compat_uptr_t uptr)
{
@@ -208,13 +295,30 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr)
static inline void __user *arch_compat_alloc_user_space(long len)
{
- struct pt_regs *regs = task_pt_regs(current);
- return (void __user *)regs->sp - len;
+ compat_uptr_t sp;
+
+ if (test_thread_flag(TIF_IA32)) {
+ sp = task_pt_regs(current)->sp;
+ } else {
+ /* -128 for the x32 ABI redzone */
+ sp = this_cpu_read(old_rsp) - 128;
+ }
+
+ return (void __user *)round_down(sp - len, 16);
+}
+
+static inline bool is_x32_task(void)
+{
+#ifdef CONFIG_X86_X32_ABI
+ if (task_pt_regs(current)->orig_ax & __X32_SYSCALL_BIT)
+ return true;
+#endif
+ return false;
}
-static inline int is_compat_task(void)
+static inline bool is_compat_task(void)
{
- return current_thread_info()->status & TS_COMPAT;
+ return is_ia32_task() || is_x32_task();
}
#endif /* _ASM_X86_COMPAT_H */
diff --git a/arch/x86/include/asm/context_tracking.h b/arch/x86/include/asm/context_tracking.h
new file mode 100644
index 00000000000..1fe49704b14
--- /dev/null
+++ b/arch/x86/include/asm/context_tracking.h
@@ -0,0 +1,10 @@
+#ifndef _ASM_X86_CONTEXT_TRACKING_H
+#define _ASM_X86_CONTEXT_TRACKING_H
+
+#ifdef CONFIG_CONTEXT_TRACKING
+# define SCHEDULE_USER call schedule_user
+#else
+# define SCHEDULE_USER call schedule
+#endif
+
+#endif
diff --git a/arch/x86/include/asm/cpu.h b/arch/x86/include/asm/cpu.h
index 4564c8e28a3..d2b12988d2e 100644
--- a/arch/x86/include/asm/cpu.h
+++ b/arch/x86/include/asm/cpu.h
@@ -28,6 +28,10 @@ struct x86_cpu {
#ifdef CONFIG_HOTPLUG_CPU
extern int arch_register_cpu(int num);
extern void arch_unregister_cpu(int);
+extern void start_cpu0(void);
+#ifdef CONFIG_DEBUG_HOTPLUG_CPU0
+extern int _debug_hotplug_cpu(int cpu, int action);
+#endif
#endif
DECLARE_PER_CPU(int, cpu_state);
diff --git a/arch/x86/include/asm/cpu_device_id.h b/arch/x86/include/asm/cpu_device_id.h
new file mode 100644
index 00000000000..ff501e511d9
--- /dev/null
+++ b/arch/x86/include/asm/cpu_device_id.h
@@ -0,0 +1,13 @@
+#ifndef _CPU_DEVICE_ID
+#define _CPU_DEVICE_ID 1
+
+/*
+ * Declare drivers belonging to specific x86 CPUs
+ * Similar in spirit to pci_device_id and related PCI functions
+ */
+
+#include <linux/mod_devicetable.h>
+
+extern const struct x86_cpu_id *x86_match_cpu(const struct x86_cpu_id *match);
+
+#endif
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 8d67d428b0f..e265ff95d16 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -4,9 +4,12 @@
#ifndef _ASM_X86_CPUFEATURE_H
#define _ASM_X86_CPUFEATURE_H
+#ifndef _ASM_X86_REQUIRED_FEATURES_H
#include <asm/required-features.h>
+#endif
#define NCAPINTS 10 /* N 32-bit words worth of info */
+#define NBUGINTS 1 /* N 32-bit bug flags */
/*
* Note: If the comment begins with a quoted string, that string is used
@@ -34,7 +37,7 @@
#define X86_FEATURE_PAT (0*32+16) /* Page Attribute Table */
#define X86_FEATURE_PSE36 (0*32+17) /* 36-bit PSEs */
#define X86_FEATURE_PN (0*32+18) /* Processor serial number */
-#define X86_FEATURE_CLFLSH (0*32+19) /* "clflush" CLFLUSH instruction */
+#define X86_FEATURE_CLFLUSH (0*32+19) /* CLFLUSH instruction */
#define X86_FEATURE_DS (0*32+21) /* "dts" Debug Store */
#define X86_FEATURE_ACPI (0*32+22) /* ACPI via MSR */
#define X86_FEATURE_MMX (0*32+23) /* Multimedia Extensions */
@@ -89,7 +92,7 @@
#define X86_FEATURE_LFENCE_RDTSC (3*32+18) /* "" Lfence synchronizes RDTSC */
#define X86_FEATURE_11AP (3*32+19) /* "" Bad local APIC aka 11AP */
#define X86_FEATURE_NOPL (3*32+20) /* The NOPL (0F 1F) instructions */
- /* 21 available, was AMD_C1E */
+#define X86_FEATURE_ALWAYS (3*32+21) /* "" Always-present feature */
#define X86_FEATURE_XTOPOLOGY (3*32+22) /* cpu topology enum extensions */
#define X86_FEATURE_TSC_RELIABLE (3*32+23) /* TSC is known to be reliable */
#define X86_FEATURE_NONSTOP_TSC (3*32+24) /* TSC does not stop in C states */
@@ -97,6 +100,8 @@
#define X86_FEATURE_EXTD_APICID (3*32+26) /* has extended APICID (8 bits) */
#define X86_FEATURE_AMD_DCM (3*32+27) /* multi-node processor */
#define X86_FEATURE_APERFMPERF (3*32+28) /* APERFMPERF */
+#define X86_FEATURE_EAGER_FPU (3*32+29) /* "eagerfpu" Non lazy FPU restore */
+#define X86_FEATURE_NONSTOP_TSC_S3 (3*32+30) /* TSC doesn't stop in S3 state */
/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
#define X86_FEATURE_XMM3 (4*32+ 0) /* "pni" SSE-3 */
@@ -164,6 +169,8 @@
#define X86_FEATURE_TBM (6*32+21) /* trailing bit manipulations */
#define X86_FEATURE_TOPOEXT (6*32+22) /* topology extensions CPUID leafs */
#define X86_FEATURE_PERFCTR_CORE (6*32+23) /* core performance counter extensions */
+#define X86_FEATURE_PERFCTR_NB (6*32+24) /* NB performance counter extensions */
+#define X86_FEATURE_PERFCTR_L2 (6*32+28) /* L2 performance counter extensions */
/*
* Auxiliary flags: Linux defined - For features scattered in various
@@ -176,7 +183,9 @@
#define X86_FEATURE_XSAVEOPT (7*32+ 4) /* Optimized Xsave */
#define X86_FEATURE_PLN (7*32+ 5) /* Intel Power Limit Notification */
#define X86_FEATURE_PTS (7*32+ 6) /* Intel Package Thermal Status */
-#define X86_FEATURE_DTS (7*32+ 7) /* Digital Thermal Sensor */
+#define X86_FEATURE_DTHERM (7*32+ 7) /* Digital Thermal Sensor */
+#define X86_FEATURE_HW_PSTATE (7*32+ 8) /* AMD HW-PState */
+#define X86_FEATURE_PROC_FEEDBACK (7*32+ 9) /* AMD ProcFeedbackInterface */
/* Virtualization flags: Linux defined, word 8 */
#define X86_FEATURE_TPR_SHADOW (8*32+ 0) /* Intel TPR Shadow */
@@ -198,11 +207,35 @@
/* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
#define X86_FEATURE_FSGSBASE (9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/
+#define X86_FEATURE_TSC_ADJUST (9*32+ 1) /* TSC adjustment MSR 0x3b */
#define X86_FEATURE_BMI1 (9*32+ 3) /* 1st group bit manipulation extensions */
+#define X86_FEATURE_HLE (9*32+ 4) /* Hardware Lock Elision */
#define X86_FEATURE_AVX2 (9*32+ 5) /* AVX2 instructions */
#define X86_FEATURE_SMEP (9*32+ 7) /* Supervisor Mode Execution Protection */
#define X86_FEATURE_BMI2 (9*32+ 8) /* 2nd group bit manipulation extensions */
#define X86_FEATURE_ERMS (9*32+ 9) /* Enhanced REP MOVSB/STOSB */
+#define X86_FEATURE_INVPCID (9*32+10) /* Invalidate Processor Context ID */
+#define X86_FEATURE_RTM (9*32+11) /* Restricted Transactional Memory */
+#define X86_FEATURE_MPX (9*32+14) /* Memory Protection Extension */
+#define X86_FEATURE_AVX512F (9*32+16) /* AVX-512 Foundation */
+#define X86_FEATURE_RDSEED (9*32+18) /* The RDSEED instruction */
+#define X86_FEATURE_ADX (9*32+19) /* The ADCX and ADOX instructions */
+#define X86_FEATURE_SMAP (9*32+20) /* Supervisor Mode Access Prevention */
+#define X86_FEATURE_CLFLUSHOPT (9*32+23) /* CLFLUSHOPT instruction */
+#define X86_FEATURE_AVX512PF (9*32+26) /* AVX-512 Prefetch */
+#define X86_FEATURE_AVX512ER (9*32+27) /* AVX-512 Exponential and Reciprocal */
+#define X86_FEATURE_AVX512CD (9*32+28) /* AVX-512 Conflict Detection */
+
+/*
+ * BUG word(s)
+ */
+#define X86_BUG(x) (NCAPINTS*32 + (x))
+
+#define X86_BUG_F00F X86_BUG(0) /* Intel F00F */
+#define X86_BUG_FDIV X86_BUG(1) /* FPU FDIV */
+#define X86_BUG_COMA X86_BUG(2) /* Cyrix 6x86 coma */
+#define X86_BUG_AMD_TLB_MMATCH X86_BUG(3) /* AMD Erratum 383 */
+#define X86_BUG_AMD_APIC_C1E X86_BUG(4) /* AMD Erratum 400 */
#if defined(__KERNEL__) && !defined(__ASSEMBLY__)
@@ -266,6 +299,7 @@ extern const char * const x86_power_flags[32];
#define cpu_has_ssse3 boot_cpu_has(X86_FEATURE_SSSE3)
#define cpu_has_aes boot_cpu_has(X86_FEATURE_AES)
#define cpu_has_avx boot_cpu_has(X86_FEATURE_AVX)
+#define cpu_has_avx2 boot_cpu_has(X86_FEATURE_AVX2)
#define cpu_has_ht boot_cpu_has(X86_FEATURE_HT)
#define cpu_has_mp boot_cpu_has(X86_FEATURE_MP)
#define cpu_has_nx boot_cpu_has(X86_FEATURE_NX)
@@ -284,7 +318,7 @@ extern const char * const x86_power_flags[32];
#define cpu_has_pmm_enabled boot_cpu_has(X86_FEATURE_PMM_EN)
#define cpu_has_ds boot_cpu_has(X86_FEATURE_DS)
#define cpu_has_pebs boot_cpu_has(X86_FEATURE_PEBS)
-#define cpu_has_clflush boot_cpu_has(X86_FEATURE_CLFLSH)
+#define cpu_has_clflush boot_cpu_has(X86_FEATURE_CLFLUSH)
#define cpu_has_bts boot_cpu_has(X86_FEATURE_BTS)
#define cpu_has_gbpages boot_cpu_has(X86_FEATURE_GBPAGES)
#define cpu_has_arch_perfmon boot_cpu_has(X86_FEATURE_ARCH_PERFMON)
@@ -293,18 +327,17 @@ extern const char * const x86_power_flags[32];
#define cpu_has_xmm4_2 boot_cpu_has(X86_FEATURE_XMM4_2)
#define cpu_has_x2apic boot_cpu_has(X86_FEATURE_X2APIC)
#define cpu_has_xsave boot_cpu_has(X86_FEATURE_XSAVE)
+#define cpu_has_xsaveopt boot_cpu_has(X86_FEATURE_XSAVEOPT)
#define cpu_has_osxsave boot_cpu_has(X86_FEATURE_OSXSAVE)
#define cpu_has_hypervisor boot_cpu_has(X86_FEATURE_HYPERVISOR)
#define cpu_has_pclmulqdq boot_cpu_has(X86_FEATURE_PCLMULQDQ)
#define cpu_has_perfctr_core boot_cpu_has(X86_FEATURE_PERFCTR_CORE)
+#define cpu_has_perfctr_nb boot_cpu_has(X86_FEATURE_PERFCTR_NB)
+#define cpu_has_perfctr_l2 boot_cpu_has(X86_FEATURE_PERFCTR_L2)
#define cpu_has_cx8 boot_cpu_has(X86_FEATURE_CX8)
#define cpu_has_cx16 boot_cpu_has(X86_FEATURE_CX16)
-
-#if defined(CONFIG_X86_INVLPG) || defined(CONFIG_X86_64)
-# define cpu_has_invlpg 1
-#else
-# define cpu_has_invlpg (boot_cpu_data.x86 > 3)
-#endif
+#define cpu_has_eager_fpu boot_cpu_has(X86_FEATURE_EAGER_FPU)
+#define cpu_has_topoext boot_cpu_has(X86_FEATURE_TOPOEXT)
#ifdef CONFIG_X86_64
@@ -329,16 +362,39 @@ extern const char * const x86_power_flags[32];
#endif /* CONFIG_X86_64 */
#if __GNUC__ >= 4
+extern void warn_pre_alternatives(void);
+extern bool __static_cpu_has_safe(u16 bit);
+
/*
* Static testing of CPU features. Used the same as boot_cpu_has().
* These are only valid after alternatives have run, but will statically
* patch the target code for additional performance.
- *
*/
static __always_inline __pure bool __static_cpu_has(u16 bit)
{
-#if __GNUC__ > 4 || __GNUC_MINOR__ >= 5
- asm goto("1: jmp %l[t_no]\n"
+#ifdef CC_HAVE_ASM_GOTO
+
+#ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS
+
+ /*
+ * Catch too early usage of this before alternatives
+ * have run.
+ */
+ asm_volatile_goto("1: jmp %l[t_warn]\n"
+ "2:\n"
+ ".section .altinstructions,\"a\"\n"
+ " .long 1b - .\n"
+ " .long 0\n" /* no replacement */
+ " .word %P0\n" /* 1: do replace */
+ " .byte 2b - 1b\n" /* source len */
+ " .byte 0\n" /* replacement len */
+ ".previous\n"
+ /* skipping size check since replacement size = 0 */
+ : : "i" (X86_FEATURE_ALWAYS) : : t_warn);
+
+#endif
+
+ asm_volatile_goto("1: jmp %l[t_no]\n"
"2:\n"
".section .altinstructions,\"a\"\n"
" .long 1b - .\n"
@@ -352,7 +408,15 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
return true;
t_no:
return false;
-#else
+
+#ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS
+ t_warn:
+ warn_pre_alternatives();
+ return false;
+#endif
+
+#else /* CC_HAVE_ASM_GOTO */
+
u8 flag;
/* Open-coded due to __stringify() in ALTERNATIVE() */
asm volatile("1: movb $0,%0\n"
@@ -373,7 +437,8 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
".previous\n"
: "=qm" (flag) : "i" (bit));
return flag;
-#endif
+
+#endif /* CC_HAVE_ASM_GOTO */
}
#define static_cpu_has(bit) \
@@ -384,13 +449,110 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
__static_cpu_has(bit) : \
boot_cpu_has(bit) \
)
+
+static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
+{
+#ifdef CC_HAVE_ASM_GOTO
+/*
+ * We need to spell out the jumps for the compiler because, depending on the
+ * offset, the replacement jump can be bigger than the original jump, and this
+ * we cannot have. Thus, we force the jump to the widest, 4-byte, signed
+ * relative offset even though the latter would often fit in fewer bytes.
+ */
+ asm_volatile_goto("1: .byte 0xe9\n .long %l[t_dynamic] - 2f\n"
+ "2:\n"
+ ".section .altinstructions,\"a\"\n"
+ " .long 1b - .\n" /* src offset */
+ " .long 3f - .\n" /* repl offset */
+ " .word %P1\n" /* always replace */
+ " .byte 2b - 1b\n" /* src len */
+ " .byte 4f - 3f\n" /* repl len */
+ ".previous\n"
+ ".section .altinstr_replacement,\"ax\"\n"
+ "3: .byte 0xe9\n .long %l[t_no] - 2b\n"
+ "4:\n"
+ ".previous\n"
+ ".section .altinstructions,\"a\"\n"
+ " .long 1b - .\n" /* src offset */
+ " .long 0\n" /* no replacement */
+ " .word %P0\n" /* feature bit */
+ " .byte 2b - 1b\n" /* src len */
+ " .byte 0\n" /* repl len */
+ ".previous\n"
+ : : "i" (bit), "i" (X86_FEATURE_ALWAYS)
+ : : t_dynamic, t_no);
+ return true;
+ t_no:
+ return false;
+ t_dynamic:
+ return __static_cpu_has_safe(bit);
+#else
+ u8 flag;
+ /* Open-coded due to __stringify() in ALTERNATIVE() */
+ asm volatile("1: movb $2,%0\n"
+ "2:\n"
+ ".section .altinstructions,\"a\"\n"
+ " .long 1b - .\n" /* src offset */
+ " .long 3f - .\n" /* repl offset */
+ " .word %P2\n" /* always replace */
+ " .byte 2b - 1b\n" /* source len */
+ " .byte 4f - 3f\n" /* replacement len */
+ ".previous\n"
+ ".section .discard,\"aw\",@progbits\n"
+ " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
+ ".previous\n"
+ ".section .altinstr_replacement,\"ax\"\n"
+ "3: movb $0,%0\n"
+ "4:\n"
+ ".previous\n"
+ ".section .altinstructions,\"a\"\n"
+ " .long 1b - .\n" /* src offset */
+ " .long 5f - .\n" /* repl offset */
+ " .word %P1\n" /* feature bit */
+ " .byte 4b - 3b\n" /* src len */
+ " .byte 6f - 5f\n" /* repl len */
+ ".previous\n"
+ ".section .discard,\"aw\",@progbits\n"
+ " .byte 0xff + (6f-5f) - (4b-3b)\n" /* size check */
+ ".previous\n"
+ ".section .altinstr_replacement,\"ax\"\n"
+ "5: movb $1,%0\n"
+ "6:\n"
+ ".previous\n"
+ : "=qm" (flag)
+ : "i" (bit), "i" (X86_FEATURE_ALWAYS));
+ return (flag == 2 ? __static_cpu_has_safe(bit) : flag);
+#endif /* CC_HAVE_ASM_GOTO */
+}
+
+#define static_cpu_has_safe(bit) \
+( \
+ __builtin_constant_p(boot_cpu_has(bit)) ? \
+ boot_cpu_has(bit) : \
+ _static_cpu_has_safe(bit) \
+)
#else
/*
* gcc 3.x is too stupid to do the static test; fall back to dynamic.
*/
-#define static_cpu_has(bit) boot_cpu_has(bit)
+#define static_cpu_has(bit) boot_cpu_has(bit)
+#define static_cpu_has_safe(bit) boot_cpu_has(bit)
#endif
+#define cpu_has_bug(c, bit) cpu_has(c, (bit))
+#define set_cpu_bug(c, bit) set_cpu_cap(c, (bit))
+#define clear_cpu_bug(c, bit) clear_cpu_cap(c, (bit));
+
+#define static_cpu_has_bug(bit) static_cpu_has((bit))
+#define boot_cpu_has_bug(bit) cpu_has_bug(&boot_cpu_data, (bit))
+
+#define MAX_CPU_FEATURES (NCAPINTS * 32)
+#define cpu_have_feature boot_cpu_has
+
+#define CPU_FEATURE_TYPEFMT "x86,ven%04Xfam%04Xmod%04X"
+#define CPU_FEATURE_TYPEVAL boot_cpu_data.x86_vendor, boot_cpu_data.x86, \
+ boot_cpu_data.x86_model
+
#endif /* defined(__KERNEL__) && !defined(__ASSEMBLY__) */
#endif /* _ASM_X86_CPUFEATURE_H */
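[Editor's sketch, not part of this patch] static_cpu_has() and the new static_cpu_has_safe() are drop-in replacements for boot_cpu_has(): the call site is patched by the alternatives machinery instead of testing the capability bitmap at run time. A short illustrative caller, assuming the existing clflush()/wbinvd() helpers from asm/special_insns.h; the wrapper function itself is invented.

static void flush_one_cacheline(void *addr)
{
	if (static_cpu_has(X86_FEATURE_CLFLUSH))
		clflush(addr);	/* becomes a straight-line path once alternatives have run */
	else
		wbinvd();	/* heavyweight fallback for CPUs without CLFLUSH */
}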
diff --git a/arch/x86/include/asm/cputime.h b/arch/x86/include/asm/cputime.h
deleted file mode 100644
index 6d68ad7e0ea..00000000000
--- a/arch/x86/include/asm/cputime.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/cputime.h>
diff --git a/arch/x86/include/asm/aes.h b/arch/x86/include/asm/crypto/aes.h
index 80545a1cbe3..80545a1cbe3 100644
--- a/arch/x86/include/asm/aes.h
+++ b/arch/x86/include/asm/crypto/aes.h
diff --git a/arch/x86/include/asm/crypto/camellia.h b/arch/x86/include/asm/crypto/camellia.h
new file mode 100644
index 00000000000..bb93333d920
--- /dev/null
+++ b/arch/x86/include/asm/crypto/camellia.h
@@ -0,0 +1,101 @@
+#ifndef ASM_X86_CAMELLIA_H
+#define ASM_X86_CAMELLIA_H
+
+#include <linux/kernel.h>
+#include <linux/crypto.h>
+
+#define CAMELLIA_MIN_KEY_SIZE 16
+#define CAMELLIA_MAX_KEY_SIZE 32
+#define CAMELLIA_BLOCK_SIZE 16
+#define CAMELLIA_TABLE_BYTE_LEN 272
+#define CAMELLIA_PARALLEL_BLOCKS 2
+
+struct camellia_ctx {
+ u64 key_table[CAMELLIA_TABLE_BYTE_LEN / sizeof(u64)];
+ u32 key_length;
+};
+
+struct camellia_lrw_ctx {
+ struct lrw_table_ctx lrw_table;
+ struct camellia_ctx camellia_ctx;
+};
+
+struct camellia_xts_ctx {
+ struct camellia_ctx tweak_ctx;
+ struct camellia_ctx crypt_ctx;
+};
+
+extern int __camellia_setkey(struct camellia_ctx *cctx,
+ const unsigned char *key,
+ unsigned int key_len, u32 *flags);
+
+extern int lrw_camellia_setkey(struct crypto_tfm *tfm, const u8 *key,
+ unsigned int keylen);
+extern void lrw_camellia_exit_tfm(struct crypto_tfm *tfm);
+
+extern int xts_camellia_setkey(struct crypto_tfm *tfm, const u8 *key,
+ unsigned int keylen);
+
+/* regular block cipher functions */
+asmlinkage void __camellia_enc_blk(struct camellia_ctx *ctx, u8 *dst,
+ const u8 *src, bool xor);
+asmlinkage void camellia_dec_blk(struct camellia_ctx *ctx, u8 *dst,
+ const u8 *src);
+
+/* 2-way parallel cipher functions */
+asmlinkage void __camellia_enc_blk_2way(struct camellia_ctx *ctx, u8 *dst,
+ const u8 *src, bool xor);
+asmlinkage void camellia_dec_blk_2way(struct camellia_ctx *ctx, u8 *dst,
+ const u8 *src);
+
+/* 16-way parallel cipher functions (avx/aes-ni) */
+asmlinkage void camellia_ecb_enc_16way(struct camellia_ctx *ctx, u8 *dst,
+ const u8 *src);
+asmlinkage void camellia_ecb_dec_16way(struct camellia_ctx *ctx, u8 *dst,
+ const u8 *src);
+
+asmlinkage void camellia_cbc_dec_16way(struct camellia_ctx *ctx, u8 *dst,
+ const u8 *src);
+asmlinkage void camellia_ctr_16way(struct camellia_ctx *ctx, u8 *dst,
+ const u8 *src, le128 *iv);
+
+asmlinkage void camellia_xts_enc_16way(struct camellia_ctx *ctx, u8 *dst,
+ const u8 *src, le128 *iv);
+asmlinkage void camellia_xts_dec_16way(struct camellia_ctx *ctx, u8 *dst,
+ const u8 *src, le128 *iv);
+
+static inline void camellia_enc_blk(struct camellia_ctx *ctx, u8 *dst,
+ const u8 *src)
+{
+ __camellia_enc_blk(ctx, dst, src, false);
+}
+
+static inline void camellia_enc_blk_xor(struct camellia_ctx *ctx, u8 *dst,
+ const u8 *src)
+{
+ __camellia_enc_blk(ctx, dst, src, true);
+}
+
+static inline void camellia_enc_blk_2way(struct camellia_ctx *ctx, u8 *dst,
+ const u8 *src)
+{
+ __camellia_enc_blk_2way(ctx, dst, src, false);
+}
+
+static inline void camellia_enc_blk_xor_2way(struct camellia_ctx *ctx, u8 *dst,
+ const u8 *src)
+{
+ __camellia_enc_blk_2way(ctx, dst, src, true);
+}
+
+/* glue helpers */
+extern void camellia_decrypt_cbc_2way(void *ctx, u128 *dst, const u128 *src);
+extern void camellia_crypt_ctr(void *ctx, u128 *dst, const u128 *src,
+ le128 *iv);
+extern void camellia_crypt_ctr_2way(void *ctx, u128 *dst, const u128 *src,
+ le128 *iv);
+
+extern void camellia_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv);
+extern void camellia_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv);
+
+#endif /* ASM_X86_CAMELLIA_H */
diff --git a/arch/x86/include/asm/crypto/glue_helper.h b/arch/x86/include/asm/crypto/glue_helper.h
new file mode 100644
index 00000000000..1eef55596e8
--- /dev/null
+++ b/arch/x86/include/asm/crypto/glue_helper.h
@@ -0,0 +1,145 @@
+/*
+ * Shared glue code for 128bit block ciphers
+ */
+
+#ifndef _CRYPTO_GLUE_HELPER_H
+#define _CRYPTO_GLUE_HELPER_H
+
+#include <linux/kernel.h>
+#include <linux/crypto.h>
+#include <asm/i387.h>
+#include <crypto/b128ops.h>
+
+typedef void (*common_glue_func_t)(void *ctx, u8 *dst, const u8 *src);
+typedef void (*common_glue_cbc_func_t)(void *ctx, u128 *dst, const u128 *src);
+typedef void (*common_glue_ctr_func_t)(void *ctx, u128 *dst, const u128 *src,
+ le128 *iv);
+typedef void (*common_glue_xts_func_t)(void *ctx, u128 *dst, const u128 *src,
+ le128 *iv);
+
+#define GLUE_FUNC_CAST(fn) ((common_glue_func_t)(fn))
+#define GLUE_CBC_FUNC_CAST(fn) ((common_glue_cbc_func_t)(fn))
+#define GLUE_CTR_FUNC_CAST(fn) ((common_glue_ctr_func_t)(fn))
+#define GLUE_XTS_FUNC_CAST(fn) ((common_glue_xts_func_t)(fn))
+
+struct common_glue_func_entry {
+ unsigned int num_blocks; /* number of blocks that @fn will process */
+ union {
+ common_glue_func_t ecb;
+ common_glue_cbc_func_t cbc;
+ common_glue_ctr_func_t ctr;
+ common_glue_xts_func_t xts;
+ } fn_u;
+};
+
+struct common_glue_ctx {
+ unsigned int num_funcs;
+ int fpu_blocks_limit; /* -1 means fpu not needed at all */
+
+ /*
+ * First funcs entry must have largest num_blocks and last funcs entry
+ * must have num_blocks == 1!
+ */
+ struct common_glue_func_entry funcs[];
+};
+
+static inline bool glue_fpu_begin(unsigned int bsize, int fpu_blocks_limit,
+ struct blkcipher_desc *desc,
+ bool fpu_enabled, unsigned int nbytes)
+{
+ if (likely(fpu_blocks_limit < 0))
+ return false;
+
+ if (fpu_enabled)
+ return true;
+
+ /*
+ * Vector registers are only used when the chunk to be processed is large
+ * enough, so do not enable the FPU until it is necessary.
+ */
+ if (nbytes < bsize * (unsigned int)fpu_blocks_limit)
+ return false;
+
+ if (desc) {
+ /* prevent sleeping if FPU is in use */
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+ }
+
+ kernel_fpu_begin();
+ return true;
+}
+
+static inline void glue_fpu_end(bool fpu_enabled)
+{
+ if (fpu_enabled)
+ kernel_fpu_end();
+}
+
+static inline void le128_to_be128(be128 *dst, const le128 *src)
+{
+ dst->a = cpu_to_be64(le64_to_cpu(src->a));
+ dst->b = cpu_to_be64(le64_to_cpu(src->b));
+}
+
+static inline void be128_to_le128(le128 *dst, const be128 *src)
+{
+ dst->a = cpu_to_le64(be64_to_cpu(src->a));
+ dst->b = cpu_to_le64(be64_to_cpu(src->b));
+}
+
+static inline void le128_inc(le128 *i)
+{
+ u64 a = le64_to_cpu(i->a);
+ u64 b = le64_to_cpu(i->b);
+
+ b++;
+ if (!b)
+ a++;
+
+ i->a = cpu_to_le64(a);
+ i->b = cpu_to_le64(b);
+}
+
+static inline void le128_gf128mul_x_ble(le128 *dst, const le128 *src)
+{
+ u64 a = le64_to_cpu(src->a);
+ u64 b = le64_to_cpu(src->b);
+ u64 _tt = ((s64)a >> 63) & 0x87;
+
+ dst->a = cpu_to_le64((a << 1) ^ (b >> 63));
+ dst->b = cpu_to_le64((b << 1) ^ _tt);
+}
+
+extern int glue_ecb_crypt_128bit(const struct common_glue_ctx *gctx,
+ struct blkcipher_desc *desc,
+ struct scatterlist *dst,
+ struct scatterlist *src, unsigned int nbytes);
+
+extern int glue_cbc_encrypt_128bit(const common_glue_func_t fn,
+ struct blkcipher_desc *desc,
+ struct scatterlist *dst,
+ struct scatterlist *src,
+ unsigned int nbytes);
+
+extern int glue_cbc_decrypt_128bit(const struct common_glue_ctx *gctx,
+ struct blkcipher_desc *desc,
+ struct scatterlist *dst,
+ struct scatterlist *src,
+ unsigned int nbytes);
+
+extern int glue_ctr_crypt_128bit(const struct common_glue_ctx *gctx,
+ struct blkcipher_desc *desc,
+ struct scatterlist *dst,
+ struct scatterlist *src, unsigned int nbytes);
+
+extern int glue_xts_crypt_128bit(const struct common_glue_ctx *gctx,
+ struct blkcipher_desc *desc,
+ struct scatterlist *dst,
+ struct scatterlist *src, unsigned int nbytes,
+ common_glue_func_t tweak_fn, void *tweak_ctx,
+ void *crypt_ctx);
+
+extern void glue_xts_crypt_128bit_one(void *ctx, u128 *dst, const u128 *src,
+ le128 *iv, common_glue_func_t fn);
+
+#endif /* _CRYPTO_GLUE_HELPER_H */
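[Editor's sketch, not part of this patch] The glue_fpu_begin()/glue_fpu_end() helpers above defer kernel_fpu_begin() until the remaining data is large enough to justify the vector path. A rough, simplified illustration of the loop shape the glue_* routines build around them, using only the one-block (last) funcs entry; this is not the actual helper body.

static int ecb_walk_sketch(const struct common_glue_ctx *gctx,
			   struct blkcipher_desc *desc, void *ctx,
			   u8 *dst, const u8 *src, unsigned int nbytes)
{
	const unsigned int bsize = 16;	/* 128-bit block ciphers only */
	bool fpu_enabled = false;

	while (nbytes >= bsize) {
		/* turn the FPU on only once enough data remains to amortize it */
		fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
					     desc, fpu_enabled, nbytes);
		/* the last funcs entry is guaranteed to process a single block */
		gctx->funcs[gctx->num_funcs - 1].fn_u.ecb(ctx, dst, src);
		src += bsize;
		dst += bsize;
		nbytes -= bsize;
	}
	glue_fpu_end(fpu_enabled);
	return nbytes;
}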
diff --git a/arch/x86/include/asm/crypto/serpent-avx.h b/arch/x86/include/asm/crypto/serpent-avx.h
new file mode 100644
index 00000000000..33c2b8a435d
--- /dev/null
+++ b/arch/x86/include/asm/crypto/serpent-avx.h
@@ -0,0 +1,48 @@
+#ifndef ASM_X86_SERPENT_AVX_H
+#define ASM_X86_SERPENT_AVX_H
+
+#include <linux/crypto.h>
+#include <crypto/serpent.h>
+
+#define SERPENT_PARALLEL_BLOCKS 8
+
+struct serpent_lrw_ctx {
+ struct lrw_table_ctx lrw_table;
+ struct serpent_ctx serpent_ctx;
+};
+
+struct serpent_xts_ctx {
+ struct serpent_ctx tweak_ctx;
+ struct serpent_ctx crypt_ctx;
+};
+
+asmlinkage void serpent_ecb_enc_8way_avx(struct serpent_ctx *ctx, u8 *dst,
+ const u8 *src);
+asmlinkage void serpent_ecb_dec_8way_avx(struct serpent_ctx *ctx, u8 *dst,
+ const u8 *src);
+
+asmlinkage void serpent_cbc_dec_8way_avx(struct serpent_ctx *ctx, u8 *dst,
+ const u8 *src);
+asmlinkage void serpent_ctr_8way_avx(struct serpent_ctx *ctx, u8 *dst,
+ const u8 *src, le128 *iv);
+
+asmlinkage void serpent_xts_enc_8way_avx(struct serpent_ctx *ctx, u8 *dst,
+ const u8 *src, le128 *iv);
+asmlinkage void serpent_xts_dec_8way_avx(struct serpent_ctx *ctx, u8 *dst,
+ const u8 *src, le128 *iv);
+
+extern void __serpent_crypt_ctr(void *ctx, u128 *dst, const u128 *src,
+ le128 *iv);
+
+extern void serpent_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv);
+extern void serpent_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv);
+
+extern int lrw_serpent_setkey(struct crypto_tfm *tfm, const u8 *key,
+ unsigned int keylen);
+
+extern void lrw_serpent_exit_tfm(struct crypto_tfm *tfm);
+
+extern int xts_serpent_setkey(struct crypto_tfm *tfm, const u8 *key,
+ unsigned int keylen);
+
+#endif
diff --git a/arch/x86/include/asm/serpent.h b/arch/x86/include/asm/crypto/serpent-sse2.h
index d3ef63fe0c8..e6e77dffbda 100644
--- a/arch/x86/include/asm/serpent.h
+++ b/arch/x86/include/asm/crypto/serpent-sse2.h
@@ -1,5 +1,5 @@
-#ifndef ASM_X86_SERPENT_H
-#define ASM_X86_SERPENT_H
+#ifndef ASM_X86_SERPENT_SSE2_H
+#define ASM_X86_SERPENT_SSE2_H
#include <linux/crypto.h>
#include <crypto/serpent.h>
diff --git a/arch/x86/include/asm/crypto/twofish.h b/arch/x86/include/asm/crypto/twofish.h
new file mode 100644
index 00000000000..878c51ceebb
--- /dev/null
+++ b/arch/x86/include/asm/crypto/twofish.h
@@ -0,0 +1,46 @@
+#ifndef ASM_X86_TWOFISH_H
+#define ASM_X86_TWOFISH_H
+
+#include <linux/crypto.h>
+#include <crypto/twofish.h>
+#include <crypto/lrw.h>
+#include <crypto/b128ops.h>
+
+struct twofish_lrw_ctx {
+ struct lrw_table_ctx lrw_table;
+ struct twofish_ctx twofish_ctx;
+};
+
+struct twofish_xts_ctx {
+ struct twofish_ctx tweak_ctx;
+ struct twofish_ctx crypt_ctx;
+};
+
+/* regular block cipher functions from twofish_x86_64 module */
+asmlinkage void twofish_enc_blk(struct twofish_ctx *ctx, u8 *dst,
+ const u8 *src);
+asmlinkage void twofish_dec_blk(struct twofish_ctx *ctx, u8 *dst,
+ const u8 *src);
+
+/* 3-way parallel cipher functions */
+asmlinkage void __twofish_enc_blk_3way(struct twofish_ctx *ctx, u8 *dst,
+ const u8 *src, bool xor);
+asmlinkage void twofish_dec_blk_3way(struct twofish_ctx *ctx, u8 *dst,
+ const u8 *src);
+
+/* helpers from twofish_x86_64-3way module */
+extern void twofish_dec_blk_cbc_3way(void *ctx, u128 *dst, const u128 *src);
+extern void twofish_enc_blk_ctr(void *ctx, u128 *dst, const u128 *src,
+ le128 *iv);
+extern void twofish_enc_blk_ctr_3way(void *ctx, u128 *dst, const u128 *src,
+ le128 *iv);
+
+extern int lrw_twofish_setkey(struct crypto_tfm *tfm, const u8 *key,
+ unsigned int keylen);
+
+extern void lrw_twofish_exit_tfm(struct crypto_tfm *tfm);
+
+extern int xts_twofish_setkey(struct crypto_tfm *tfm, const u8 *key,
+ unsigned int keylen);
+
+#endif /* ASM_X86_TWOFISH_H */
diff --git a/arch/x86/include/asm/current.h b/arch/x86/include/asm/current.h
index 4d447b732d8..9476c04ee63 100644
--- a/arch/x86/include/asm/current.h
+++ b/arch/x86/include/asm/current.h
@@ -11,7 +11,7 @@ DECLARE_PER_CPU(struct task_struct *, current_task);
static __always_inline struct task_struct *get_current(void)
{
- return percpu_read_stable(current_task);
+ return this_cpu_read_stable(current_task);
}
#define current get_current()
diff --git a/arch/x86/include/asm/debugreg.h b/arch/x86/include/asm/debugreg.h
index b903d5ea394..4b528a970bd 100644
--- a/arch/x86/include/asm/debugreg.h
+++ b/arch/x86/include/asm/debugreg.h
@@ -2,83 +2,75 @@
#define _ASM_X86_DEBUGREG_H
-/* Indicate the register numbers for a number of the specific
- debug registers. Registers 0-3 contain the addresses we wish to trap on */
-#define DR_FIRSTADDR 0 /* u_debugreg[DR_FIRSTADDR] */
-#define DR_LASTADDR 3 /* u_debugreg[DR_LASTADDR] */
+#include <linux/bug.h>
+#include <uapi/asm/debugreg.h>
-#define DR_STATUS 6 /* u_debugreg[DR_STATUS] */
-#define DR_CONTROL 7 /* u_debugreg[DR_CONTROL] */
-
-/* Define a few things for the status register. We can use this to determine
- which debugging register was responsible for the trap. The other bits
- are either reserved or not of interest to us. */
-
-/* Define reserved bits in DR6 which are always set to 1 */
-#define DR6_RESERVED (0xFFFF0FF0)
-
-#define DR_TRAP0 (0x1) /* db0 */
-#define DR_TRAP1 (0x2) /* db1 */
-#define DR_TRAP2 (0x4) /* db2 */
-#define DR_TRAP3 (0x8) /* db3 */
-#define DR_TRAP_BITS (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)
-
-#define DR_STEP (0x4000) /* single-step */
-#define DR_SWITCH (0x8000) /* task switch */
-
-/* Now define a bunch of things for manipulating the control register.
- The top two bytes of the control register consist of 4 fields of 4
- bits - each field corresponds to one of the four debug registers,
- and indicates what types of access we trap on, and how large the data
- field is that we are looking at */
-
-#define DR_CONTROL_SHIFT 16 /* Skip this many bits in ctl register */
-#define DR_CONTROL_SIZE 4 /* 4 control bits per register */
-
-#define DR_RW_EXECUTE (0x0) /* Settings for the access types to trap on */
-#define DR_RW_WRITE (0x1)
-#define DR_RW_READ (0x3)
-
-#define DR_LEN_1 (0x0) /* Settings for data length to trap on */
-#define DR_LEN_2 (0x4)
-#define DR_LEN_4 (0xC)
-#define DR_LEN_8 (0x8)
-
-/* The low byte to the control register determine which registers are
- enabled. There are 4 fields of two bits. One bit is "local", meaning
- that the processor will reset the bit after a task switch and the other
- is global meaning that we have to explicitly reset the bit. With linux,
- you can use either one, since we explicitly zero the register when we enter
- kernel mode. */
-
-#define DR_LOCAL_ENABLE_SHIFT 0 /* Extra shift to the local enable bit */
-#define DR_GLOBAL_ENABLE_SHIFT 1 /* Extra shift to the global enable bit */
-#define DR_LOCAL_ENABLE (0x1) /* Local enable for reg 0 */
-#define DR_GLOBAL_ENABLE (0x2) /* Global enable for reg 0 */
-#define DR_ENABLE_SIZE 2 /* 2 enable bits per register */
-
-#define DR_LOCAL_ENABLE_MASK (0x55) /* Set local bits for all 4 regs */
-#define DR_GLOBAL_ENABLE_MASK (0xAA) /* Set global bits for all 4 regs */
-
-/* The second byte to the control register has a few special things.
- We can slow the instruction pipeline for instructions coming via the
- gdt or the ldt if we want to. I am not sure why this is an advantage */
-
-#ifdef __i386__
-#define DR_CONTROL_RESERVED (0xFC00) /* Reserved by Intel */
-#else
-#define DR_CONTROL_RESERVED (0xFFFFFFFF0000FC00UL) /* Reserved */
-#endif
-
-#define DR_LOCAL_SLOWDOWN (0x100) /* Local slow the pipeline */
-#define DR_GLOBAL_SLOWDOWN (0x200) /* Global slow the pipeline */
+DECLARE_PER_CPU(unsigned long, cpu_dr7);
+#ifndef CONFIG_PARAVIRT
/*
- * HW breakpoint additions
+ * These special macros can be used to get or set a debugging register
*/
-#ifdef __KERNEL__
+#define get_debugreg(var, register) \
+ (var) = native_get_debugreg(register)
+#define set_debugreg(value, register) \
+ native_set_debugreg(register, value)
+#endif
-DECLARE_PER_CPU(unsigned long, cpu_dr7);
+static inline unsigned long native_get_debugreg(int regno)
+{
+ unsigned long val = 0; /* silence gcc's spurious "may be used uninitialized" warning */
+
+ switch (regno) {
+ case 0:
+ asm("mov %%db0, %0" :"=r" (val));
+ break;
+ case 1:
+ asm("mov %%db1, %0" :"=r" (val));
+ break;
+ case 2:
+ asm("mov %%db2, %0" :"=r" (val));
+ break;
+ case 3:
+ asm("mov %%db3, %0" :"=r" (val));
+ break;
+ case 6:
+ asm("mov %%db6, %0" :"=r" (val));
+ break;
+ case 7:
+ asm("mov %%db7, %0" :"=r" (val));
+ break;
+ default:
+ BUG();
+ }
+ return val;
+}
+
+static inline void native_set_debugreg(int regno, unsigned long value)
+{
+ switch (regno) {
+ case 0:
+ asm("mov %0, %%db0" ::"r" (value));
+ break;
+ case 1:
+ asm("mov %0, %%db1" ::"r" (value));
+ break;
+ case 2:
+ asm("mov %0, %%db2" ::"r" (value));
+ break;
+ case 3:
+ asm("mov %0, %%db3" ::"r" (value));
+ break;
+ case 6:
+ asm("mov %0, %%db6" ::"r" (value));
+ break;
+ case 7:
+ asm("mov %0, %%db7" ::"r" (value));
+ break;
+ default:
+ BUG();
+ }
+}
static inline void hw_breakpoint_disable(void)
{
@@ -123,6 +115,4 @@ static inline void debug_stack_usage_dec(void) { }
#endif /* X86_64 */
-#endif /* __KERNEL__ */
-
#endif /* _ASM_X86_DEBUGREG_H */
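
For orientation, a hedged sketch of how a kernel caller might combine the get_debugreg()/set_debugreg() accessors above with the DR_* constants (now provided by uapi/asm/debugreg.h) to arm a write watchpoint in debug-register slot 0. The helper name and field choices are illustrative only, not part of the patch:

/* Arm DR0 as a 4-byte, globally enabled write watchpoint on addr. */
static void arm_dr0_write_watch(unsigned long addr)
{
	unsigned long dr7;

	set_debugreg(addr, 0);			/* DR0 = linear address to watch */
	get_debugreg(dr7, 7);
	dr7 |= DR_GLOBAL_ENABLE;		/* enable slot 0 globally */
	dr7 |= (DR_RW_WRITE | DR_LEN_4) << DR_CONTROL_SHIFT;	/* trap 4-byte writes */
	set_debugreg(dr7, 7);
}
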
diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
index e95822d683f..50d033a8947 100644
--- a/arch/x86/include/asm/desc.h
+++ b/arch/x86/include/asm/desc.h
@@ -6,6 +6,7 @@
#include <asm/mmu.h>
#include <linux/smp.h>
+#include <linux/percpu.h>
static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *info)
{
@@ -35,8 +36,8 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
extern struct desc_ptr idt_descr;
extern gate_desc idt_table[];
-extern struct desc_ptr nmi_idt_descr;
-extern gate_desc nmi_idt_table[];
+extern struct desc_ptr debug_idt_descr;
+extern gate_desc debug_idt_table[];
struct gdt_page {
struct desc_struct gdt[GDT_ENTRIES];
@@ -315,10 +316,38 @@ static inline void set_nmi_gate(int gate, void *addr)
gate_desc s;
pack_gate(&s, GATE_INTERRUPT, (unsigned long)addr, 0, 0, __KERNEL_CS);
- write_idt_entry(nmi_idt_table, gate, &s);
+ write_idt_entry(debug_idt_table, gate, &s);
}
#endif
+#ifdef CONFIG_TRACING
+extern struct desc_ptr trace_idt_descr;
+extern gate_desc trace_idt_table[];
+static inline void write_trace_idt_entry(int entry, const gate_desc *gate)
+{
+ write_idt_entry(trace_idt_table, entry, gate);
+}
+
+static inline void _trace_set_gate(int gate, unsigned type, void *addr,
+ unsigned dpl, unsigned ist, unsigned seg)
+{
+ gate_desc s;
+
+ pack_gate(&s, type, (unsigned long)addr, dpl, ist, seg);
+ /*
+ * does not need to be atomic because it is only done once at
+ * setup time
+ */
+ write_trace_idt_entry(gate, &s);
+}
+#else
+static inline void write_trace_idt_entry(int entry, const gate_desc *gate)
+{
+}
+
+#define _trace_set_gate(gate, type, addr, dpl, ist, seg)
+#endif
+
static inline void _set_gate(int gate, unsigned type, void *addr,
unsigned dpl, unsigned ist, unsigned seg)
{
@@ -330,6 +359,7 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
* setup time
*/
write_idt_entry(idt_table, gate, &s);
+ write_trace_idt_entry(gate, &s);
}
/*
@@ -338,11 +368,14 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
* Pentium F0 0F bugfix can have resulted in the mapped
* IDT being write-protected.
*/
-static inline void set_intr_gate(unsigned int n, void *addr)
-{
- BUG_ON((unsigned)n > 0xFF);
- _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
-}
+#define set_intr_gate(n, addr) \
+ do { \
+ BUG_ON((unsigned)n > 0xFF); \
+ _set_gate(n, GATE_INTERRUPT, (void *)addr, 0, 0, \
+ __KERNEL_CS); \
+ _trace_set_gate(n, GATE_INTERRUPT, (void *)trace_##addr,\
+ 0, 0, __KERNEL_CS); \
+ } while (0)
extern int first_system_vector;
/* used_vectors is BITMAP for irq is not managed by percpu vector_irq */
@@ -359,11 +392,11 @@ static inline void alloc_system_vector(int vector)
}
}
-static inline void alloc_intr_gate(unsigned int n, void *addr)
-{
- alloc_system_vector(n);
- set_intr_gate(n, addr);
-}
+#define alloc_intr_gate(n, addr) \
+ do { \
+ alloc_system_vector(n); \
+ set_intr_gate(n, addr); \
+ } while (0)
/*
* This routine sets up an interrupt gate at directory privilege level 3.
@@ -404,4 +437,70 @@ static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
_set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
}
+#ifdef CONFIG_X86_64
+DECLARE_PER_CPU(u32, debug_idt_ctr);
+static inline bool is_debug_idt_enabled(void)
+{
+ if (this_cpu_read(debug_idt_ctr))
+ return true;
+
+ return false;
+}
+
+static inline void load_debug_idt(void)
+{
+ load_idt((const struct desc_ptr *)&debug_idt_descr);
+}
+#else
+static inline bool is_debug_idt_enabled(void)
+{
+ return false;
+}
+
+static inline void load_debug_idt(void)
+{
+}
+#endif
+
+#ifdef CONFIG_TRACING
+extern atomic_t trace_idt_ctr;
+static inline bool is_trace_idt_enabled(void)
+{
+ if (atomic_read(&trace_idt_ctr))
+ return true;
+
+ return false;
+}
+
+static inline void load_trace_idt(void)
+{
+ load_idt((const struct desc_ptr *)&trace_idt_descr);
+}
+#else
+static inline bool is_trace_idt_enabled(void)
+{
+ return false;
+}
+
+static inline void load_trace_idt(void)
+{
+}
+#endif
+
+/*
+ * load_current_idt() must be called with interrupts disabled
+ * to avoid races, so that the IDT is always set back to the expected
+ * descriptor. It is also called when a CPU is being initialized; at
+ * that point interrupts need not be disabled, as nothing else should
+ * be bothering the CPU yet.
+ */
+static inline void load_current_idt(void)
+{
+ if (is_debug_idt_enabled())
+ load_debug_idt();
+ else if (is_trace_idt_enabled())
+ load_trace_idt();
+ else
+ load_idt((const struct desc_ptr *)&idt_descr);
+}
#endif /* _ASM_X86_DESC_H */
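
With CONFIG_TRACING, the set_intr_gate(n, addr) macro above installs addr in the regular IDT and a token-pasted trace_##addr variant in trace_idt_table, so every traced vector needs a matching trace_ handler symbol. A self-contained toy illustration of that token-pasting pattern (all names below are made up for the example and are not kernel APIs):

#include <stdio.h>

typedef void (*handler_t)(void);

static handler_t idt[256];
static handler_t trace_idt[256];

/* Register both the handler and its trace_##handler companion. */
#define SET_GATE(n, addr)			\
	do {					\
		idt[(n)] = (addr);		\
		trace_idt[(n)] = trace_##addr;	\
	} while (0)

static void page_fault(void)       { printf("page_fault\n"); }
static void trace_page_fault(void) { printf("trace_page_fault\n"); }

int main(void)
{
	SET_GATE(14, page_fault);	/* vector 14 is #PF on x86 */
	idt[14]();
	trace_idt[14]();
	return 0;
}
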
diff --git a/arch/x86/include/asm/device.h b/arch/x86/include/asm/device.h
index 63a2a03d7d5..03dd72957d2 100644
--- a/arch/x86/include/asm/device.h
+++ b/arch/x86/include/asm/device.h
@@ -2,11 +2,8 @@
#define _ASM_X86_DEVICE_H
struct dev_archdata {
-#ifdef CONFIG_ACPI
- void *acpi_handle;
-#endif
-#ifdef CONFIG_X86_64
-struct dma_map_ops *dma_ops;
+#ifdef CONFIG_X86_DEV_DMA_OPS
+ struct dma_map_ops *dma_ops;
#endif
#if defined(CONFIG_INTEL_IOMMU) || defined(CONFIG_AMD_IOMMU)
void *iommu; /* hook for IOMMU specific extension */
diff --git a/arch/x86/include/asm/dma-contiguous.h b/arch/x86/include/asm/dma-contiguous.h
new file mode 100644
index 00000000000..b4b38bacb40
--- /dev/null
+++ b/arch/x86/include/asm/dma-contiguous.h
@@ -0,0 +1,12 @@
+#ifndef ASMX86_DMA_CONTIGUOUS_H
+#define ASMX86_DMA_CONTIGUOUS_H
+
+#ifdef __KERNEL__
+
+#include <linux/types.h>
+
+static inline void
+dma_contiguous_early_fixup(phys_addr_t base, unsigned long size) { }
+
+#endif
+#endif
diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
index ed3065fd631..808dae63eee 100644
--- a/arch/x86/include/asm/dma-mapping.h
+++ b/arch/x86/include/asm/dma-mapping.h
@@ -13,6 +13,7 @@
#include <asm/io.h>
#include <asm/swiotlb.h>
#include <asm-generic/dma-coherent.h>
+#include <linux/dma-contiguous.h>
#ifdef CONFIG_ISA
# define ISA_DMA_BIT_MASK DMA_BIT_MASK(24)
@@ -30,7 +31,7 @@ extern struct dma_map_ops *dma_ops;
static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
-#ifdef CONFIG_X86_32
+#ifndef CONFIG_X86_DEV_DMA_OPS
return dma_ops;
#else
if (unlikely(!dev) || !dev->archdata.dma_ops)
@@ -46,6 +47,7 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
struct dma_map_ops *ops = get_dma_ops(dev);
+ debug_dma_mapping_error(dev, dma_addr);
if (ops->mapping_error)
return ops->mapping_error(dev, dma_addr);
@@ -59,7 +61,18 @@ extern int dma_supported(struct device *hwdev, u64 mask);
extern int dma_set_mask(struct device *dev, u64 mask);
extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_addr, gfp_t flag);
+ dma_addr_t *dma_addr, gfp_t flag,
+ struct dma_attrs *attrs);
+
+extern void dma_generic_free_coherent(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_addr,
+ struct dma_attrs *attrs);
+
+#ifdef CONFIG_X86_DMA_REMAP /* Platform code defines bridge-specific code */
+extern bool dma_capable(struct device *dev, dma_addr_t addr, size_t size);
+extern dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr);
+extern phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr);
+#else
static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
@@ -78,6 +91,7 @@ static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
{
return daddr;
}
+#endif /* CONFIG_X86_DMA_REMAP */
static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
@@ -111,9 +125,11 @@ static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp)
return gfp;
}
+#define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL)
+
static inline void *
-dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
- gfp_t gfp)
+dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
+ gfp_t gfp, struct dma_attrs *attrs)
{
struct dma_map_ops *ops = get_dma_ops(dev);
void *memory;
@@ -129,18 +145,21 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
if (!is_device_dma_capable(dev))
return NULL;
- if (!ops->alloc_coherent)
+ if (!ops->alloc)
return NULL;
- memory = ops->alloc_coherent(dev, size, dma_handle,
- dma_alloc_coherent_gfp_flags(dev, gfp));
+ memory = ops->alloc(dev, size, dma_handle,
+ dma_alloc_coherent_gfp_flags(dev, gfp), attrs);
debug_dma_alloc_coherent(dev, size, *dma_handle, memory);
return memory;
}
-static inline void dma_free_coherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t bus)
+#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL)
+
+static inline void dma_free_attrs(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t bus,
+ struct dma_attrs *attrs)
{
struct dma_map_ops *ops = get_dma_ops(dev);
@@ -150,8 +169,8 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
return;
debug_dma_free_coherent(dev, size, vaddr, bus);
- if (ops->free_coherent)
- ops->free_coherent(dev, size, vaddr, bus);
+ if (ops->free)
+ ops->free(dev, size, vaddr, bus, attrs);
}
#endif
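
A hedged usage sketch of the reworked allocation path above: dma_alloc_coherent()/dma_free_coherent() are now thin wrappers over dma_alloc_attrs()/dma_free_attrs() with a NULL attrs argument, which in turn dispatch to ops->alloc()/ops->free(). The driver-side helpers below are illustrative only, not part of the patch:

static void *example_alloc(struct device *dev, dma_addr_t *handle)
{
	void *cpu_addr;

	/* Expands to dma_alloc_attrs(dev, PAGE_SIZE, handle, GFP_KERNEL, NULL). */
	cpu_addr = dma_alloc_coherent(dev, PAGE_SIZE, handle, GFP_KERNEL);
	if (!cpu_addr)
		return NULL;
	/* ... program *handle into the device ... */
	return cpu_addr;
}

static void example_free(struct device *dev, void *cpu_addr, dma_addr_t handle)
{
	/* Expands to dma_free_attrs(dev, PAGE_SIZE, cpu_addr, handle, NULL). */
	dma_free_coherent(dev, PAGE_SIZE, cpu_addr, handle);
}
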
diff --git a/arch/x86/include/asm/dmi.h b/arch/x86/include/asm/dmi.h
index fd8f9e2ca35..535192f6bfa 100644
--- a/arch/x86/include/asm/dmi.h
+++ b/arch/x86/include/asm/dmi.h
@@ -13,7 +13,9 @@ static __always_inline __init void *dmi_alloc(unsigned len)
}
/* Use early IO mappings for DMI because it's initialized early */
-#define dmi_ioremap early_ioremap
-#define dmi_iounmap early_iounmap
+#define dmi_early_remap early_ioremap
+#define dmi_early_unmap early_iounmap
+#define dmi_remap ioremap
+#define dmi_unmap iounmap
#endif /* _ASM_X86_DMI_H */
diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
index 37782566af2..779c2efe2e9 100644
--- a/arch/x86/include/asm/e820.h
+++ b/arch/x86/include/asm/e820.h
@@ -1,81 +1,14 @@
#ifndef _ASM_X86_E820_H
#define _ASM_X86_E820_H
-#define E820MAP 0x2d0 /* our map */
-#define E820MAX 128 /* number of entries in E820MAP */
-/*
- * Legacy E820 BIOS limits us to 128 (E820MAX) nodes due to the
- * constrained space in the zeropage. If we have more nodes than
- * that, and if we've booted off EFI firmware, then the EFI tables
- * passed us from the EFI firmware can list more nodes. Size our
- * internal memory map tables to have room for these additional
- * nodes, based on up to three entries per node for which the
- * kernel was built: MAX_NUMNODES == (1 << CONFIG_NODES_SHIFT),
- * plus E820MAX, allowing space for the possible duplicate E820
- * entries that might need room in the same arrays, prior to the
- * call to sanitize_e820_map() to remove duplicates. The allowance
- * of three memory map entries per node is "enough" entries for
- * the initial hardware platform motivating this mechanism to make
- * use of additional EFI map entries. Future platforms may want
- * to allow more than three entries per node or otherwise refine
- * this size.
- */
-
-/*
- * Odd: 'make headers_check' complains about numa.h if I try
- * to collapse the next two #ifdef lines to a single line:
- * #if defined(__KERNEL__) && defined(CONFIG_EFI)
- */
-#ifdef __KERNEL__
#ifdef CONFIG_EFI
#include <linux/numa.h>
#define E820_X_MAX (E820MAX + 3 * MAX_NUMNODES)
#else /* ! CONFIG_EFI */
#define E820_X_MAX E820MAX
#endif
-#else /* ! __KERNEL__ */
-#define E820_X_MAX E820MAX
-#endif
-
-#define E820NR 0x1e8 /* # entries in E820MAP */
-
-#define E820_RAM 1
-#define E820_RESERVED 2
-#define E820_ACPI 3
-#define E820_NVS 4
-#define E820_UNUSABLE 5
-
-/*
- * reserved RAM used by kernel itself
- * if CONFIG_INTEL_TXT is enabled, memory of this type will be
- * included in the S3 integrity calculation and so should not include
- * any memory that BIOS might alter over the S3 transition
- */
-#define E820_RESERVED_KERN 128
-
+#include <uapi/asm/e820.h>
#ifndef __ASSEMBLY__
-#include <linux/types.h>
-struct e820entry {
- __u64 addr; /* start of memory segment */
- __u64 size; /* size of memory segment */
- __u32 type; /* type of memory segment */
-} __attribute__((packed));
-
-struct e820map {
- __u32 nr_map;
- struct e820entry map[E820_X_MAX];
-};
-
-#define ISA_START_ADDRESS 0xa0000
-#define ISA_END_ADDRESS 0x100000
-
-#define BIOS_BEGIN 0x000a0000
-#define BIOS_END 0x00100000
-
-#define BIOS_ROM_BASE 0xffe00000
-#define BIOS_ROM_END 0xffffffff
-
-#ifdef __KERNEL__
/* see comment in arch/x86/kernel/e820.c */
extern struct e820map e820;
extern struct e820map e820_saved;
@@ -96,7 +29,7 @@ extern void e820_setup_gap(void);
extern int e820_search_gap(unsigned long *gapstart, unsigned long *gapsize,
unsigned long start_addr, unsigned long long end_addr);
struct setup_data;
-extern void parse_e820_ext(struct setup_data *data);
+extern void parse_e820_ext(u64 phys_addr, u32 data_len);
#if defined(CONFIG_X86_64) || \
(defined(CONFIG_X86_32) && defined(CONFIG_HIBERNATION))
@@ -137,13 +70,8 @@ static inline bool is_ISA_range(u64 s, u64 e)
return s >= ISA_START_ADDRESS && e <= ISA_END_ADDRESS;
}
-#endif /* __KERNEL__ */
#endif /* __ASSEMBLY__ */
-
-#ifdef __KERNEL__
#include <linux/ioport.h>
#define HIGH_MEMORY (1024*1024)
-#endif /* __KERNEL__ */
-
#endif /* _ASM_X86_E820_H */
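
To make the E820_X_MAX sizing concrete: with CONFIG_EFI and, say, CONFIG_NODES_SHIFT=6 (an assumed example), MAX_NUMNODES is 1 << 6 = 64, so E820_X_MAX = E820MAX + 3 * 64 = 128 + 192 = 320 entries; without CONFIG_EFI it stays at E820MAX (128).
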
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
index 844f735fd63..1eb5f6433ad 100644
--- a/arch/x86/include/asm/efi.h
+++ b/arch/x86/include/asm/efi.h
@@ -1,105 +1,162 @@
#ifndef _ASM_X86_EFI_H
#define _ASM_X86_EFI_H
+#include <asm/i387.h>
+/*
+ * We map the EFI regions needed for runtime services non-contiguously,
+ * with preserved alignment on virtual addresses starting from -4G down
+ * for a total max space of 64G. This way, we provide for stable runtime
+ * services addresses across kernels so that a kexec'd kernel can still
+ * use them.
+ *
+ * This is the main reason why we're doing stable VA mappings for RT
+ * services.
+ *
+ * This flag is used in conjunction with a chicken bit called
+ * "efi=old_map" which can be used as a fallback to the old runtime
+ * services mapping method in case a particular EFI implementation
+ * turns out to be broken.
+ */
+#define EFI_OLD_MEMMAP EFI_ARCH_1
+
+#define EFI32_LOADER_SIGNATURE "EL32"
+#define EFI64_LOADER_SIGNATURE "EL64"
+
#ifdef CONFIG_X86_32
-#define EFI_LOADER_SIGNATURE "EL32"
extern unsigned long asmlinkage efi_call_phys(void *, ...);
-#define efi_call_phys0(f) efi_call_phys(f)
-#define efi_call_phys1(f, a1) efi_call_phys(f, a1)
-#define efi_call_phys2(f, a1, a2) efi_call_phys(f, a1, a2)
-#define efi_call_phys3(f, a1, a2, a3) efi_call_phys(f, a1, a2, a3)
-#define efi_call_phys4(f, a1, a2, a3, a4) \
- efi_call_phys(f, a1, a2, a3, a4)
-#define efi_call_phys5(f, a1, a2, a3, a4, a5) \
- efi_call_phys(f, a1, a2, a3, a4, a5)
-#define efi_call_phys6(f, a1, a2, a3, a4, a5, a6) \
- efi_call_phys(f, a1, a2, a3, a4, a5, a6)
/*
* Wrap all the virtual calls in a way that forces the parameters on the stack.
*/
+/* Use this macro if your virtual call returns a non-void value */
#define efi_call_virt(f, args...) \
- ((efi_##f##_t __attribute__((regparm(0)))*)efi.systab->runtime->f)(args)
-
-#define efi_call_virt0(f) efi_call_virt(f)
-#define efi_call_virt1(f, a1) efi_call_virt(f, a1)
-#define efi_call_virt2(f, a1, a2) efi_call_virt(f, a1, a2)
-#define efi_call_virt3(f, a1, a2, a3) efi_call_virt(f, a1, a2, a3)
-#define efi_call_virt4(f, a1, a2, a3, a4) \
- efi_call_virt(f, a1, a2, a3, a4)
-#define efi_call_virt5(f, a1, a2, a3, a4, a5) \
- efi_call_virt(f, a1, a2, a3, a4, a5)
-#define efi_call_virt6(f, a1, a2, a3, a4, a5, a6) \
- efi_call_virt(f, a1, a2, a3, a4, a5, a6)
-
-#define efi_ioremap(addr, size, type) ioremap_cache(addr, size)
+({ \
+ efi_status_t __s; \
+ kernel_fpu_begin(); \
+ __s = ((efi_##f##_t __attribute__((regparm(0)))*) \
+ efi.systab->runtime->f)(args); \
+ kernel_fpu_end(); \
+ __s; \
+})
+
+/* Use this macro if your virtual call does not return any value */
+#define __efi_call_virt(f, args...) \
+({ \
+ kernel_fpu_begin(); \
+ ((efi_##f##_t __attribute__((regparm(0)))*) \
+ efi.systab->runtime->f)(args); \
+ kernel_fpu_end(); \
+})
+
+#define efi_ioremap(addr, size, type, attr) ioremap_cache(addr, size)
#else /* !CONFIG_X86_32 */
#define EFI_LOADER_SIGNATURE "EL64"
-extern u64 efi_call0(void *fp);
-extern u64 efi_call1(void *fp, u64 arg1);
-extern u64 efi_call2(void *fp, u64 arg1, u64 arg2);
-extern u64 efi_call3(void *fp, u64 arg1, u64 arg2, u64 arg3);
-extern u64 efi_call4(void *fp, u64 arg1, u64 arg2, u64 arg3, u64 arg4);
-extern u64 efi_call5(void *fp, u64 arg1, u64 arg2, u64 arg3,
- u64 arg4, u64 arg5);
-extern u64 efi_call6(void *fp, u64 arg1, u64 arg2, u64 arg3,
- u64 arg4, u64 arg5, u64 arg6);
-
-#define efi_call_phys0(f) \
- efi_call0((void *)(f))
-#define efi_call_phys1(f, a1) \
- efi_call1((void *)(f), (u64)(a1))
-#define efi_call_phys2(f, a1, a2) \
- efi_call2((void *)(f), (u64)(a1), (u64)(a2))
-#define efi_call_phys3(f, a1, a2, a3) \
- efi_call3((void *)(f), (u64)(a1), (u64)(a2), (u64)(a3))
-#define efi_call_phys4(f, a1, a2, a3, a4) \
- efi_call4((void *)(f), (u64)(a1), (u64)(a2), (u64)(a3), \
- (u64)(a4))
-#define efi_call_phys5(f, a1, a2, a3, a4, a5) \
- efi_call5((void *)(f), (u64)(a1), (u64)(a2), (u64)(a3), \
- (u64)(a4), (u64)(a5))
-#define efi_call_phys6(f, a1, a2, a3, a4, a5, a6) \
- efi_call6((void *)(f), (u64)(a1), (u64)(a2), (u64)(a3), \
- (u64)(a4), (u64)(a5), (u64)(a6))
-
-#define efi_call_virt0(f) \
- efi_call0((void *)(efi.systab->runtime->f))
-#define efi_call_virt1(f, a1) \
- efi_call1((void *)(efi.systab->runtime->f), (u64)(a1))
-#define efi_call_virt2(f, a1, a2) \
- efi_call2((void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2))
-#define efi_call_virt3(f, a1, a2, a3) \
- efi_call3((void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2), \
- (u64)(a3))
-#define efi_call_virt4(f, a1, a2, a3, a4) \
- efi_call4((void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2), \
- (u64)(a3), (u64)(a4))
-#define efi_call_virt5(f, a1, a2, a3, a4, a5) \
- efi_call5((void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2), \
- (u64)(a3), (u64)(a4), (u64)(a5))
-#define efi_call_virt6(f, a1, a2, a3, a4, a5, a6) \
- efi_call6((void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2), \
- (u64)(a3), (u64)(a4), (u64)(a5), (u64)(a6))
+extern u64 asmlinkage efi_call(void *fp, ...);
+
+#define efi_call_phys(f, args...) efi_call((f), args)
+
+#define efi_call_virt(f, ...) \
+({ \
+ efi_status_t __s; \
+ \
+ efi_sync_low_kernel_mappings(); \
+ preempt_disable(); \
+ __kernel_fpu_begin(); \
+ __s = efi_call((void *)efi.systab->runtime->f, __VA_ARGS__); \
+ __kernel_fpu_end(); \
+ preempt_enable(); \
+ __s; \
+})
+
+/*
+ * All X86_64 virt calls return non-void values. Thus, use non-void call for
+ * virt calls that would be void on X86_32.
+ */
+#define __efi_call_virt(f, args...) efi_call_virt(f, args)
extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size,
- u32 type);
+ u32 type, u64 attribute);
#endif /* CONFIG_X86_32 */
extern int add_efi_memmap;
+extern struct efi_scratch efi_scratch;
extern void efi_set_executable(efi_memory_desc_t *md, bool executable);
-extern void efi_memblock_x86_reserve_range(void);
+extern int efi_memblock_x86_reserve_range(void);
extern void efi_call_phys_prelog(void);
extern void efi_call_phys_epilog(void);
-
-#ifndef CONFIG_EFI
+extern void efi_unmap_memmap(void);
+extern void efi_memory_uc(u64 addr, unsigned long size);
+extern void __init efi_map_region(efi_memory_desc_t *md);
+extern void __init efi_map_region_fixed(efi_memory_desc_t *md);
+extern void efi_sync_low_kernel_mappings(void);
+extern int efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages);
+extern void efi_cleanup_page_tables(unsigned long pa_memmap, unsigned num_pages);
+extern void __init old_map_region(efi_memory_desc_t *md);
+extern void __init runtime_code_page_mkexec(void);
+extern void __init efi_runtime_mkexec(void);
+extern void __init efi_dump_pagetable(void);
+extern void __init efi_apply_memmap_quirks(void);
+
+struct efi_setup_data {
+ u64 fw_vendor;
+ u64 runtime;
+ u64 tables;
+ u64 smbios;
+ u64 reserved[8];
+};
+
+extern u64 efi_setup;
+
+#ifdef CONFIG_EFI
+
+static inline bool efi_is_native(void)
+{
+ return IS_ENABLED(CONFIG_X86_64) == efi_enabled(EFI_64BIT);
+}
+
+static inline bool efi_runtime_supported(void)
+{
+ if (efi_is_native())
+ return true;
+
+ if (IS_ENABLED(CONFIG_EFI_MIXED) && !efi_enabled(EFI_OLD_MEMMAP))
+ return true;
+
+ return false;
+}
+
+extern struct console early_efi_console;
+extern void parse_efi_setup(u64 phys_addr, u32 data_len);
+
+#ifdef CONFIG_EFI_MIXED
+extern void efi_thunk_runtime_setup(void);
+extern efi_status_t efi_thunk_set_virtual_address_map(
+ void *phys_set_virtual_address_map,
+ unsigned long memory_map_size,
+ unsigned long descriptor_size,
+ u32 descriptor_version,
+ efi_memory_desc_t *virtual_map);
+#else
+static inline void efi_thunk_runtime_setup(void) {}
+static inline efi_status_t efi_thunk_set_virtual_address_map(
+ void *phys_set_virtual_address_map,
+ unsigned long memory_map_size,
+ unsigned long descriptor_size,
+ u32 descriptor_version,
+ efi_memory_desc_t *virtual_map)
+{
+ return EFI_SUCCESS;
+}
+#endif /* CONFIG_EFI_MIXED */
+#else
/*
* IF EFI is not configured, have the EFI calls return -ENOSYS.
*/
@@ -110,6 +167,7 @@ extern void efi_call_phys_epilog(void);
#define efi_call4(_f, _a1, _a2, _a3, _a4) (-ENOSYS)
#define efi_call5(_f, _a1, _a2, _a3, _a4, _a5) (-ENOSYS)
#define efi_call6(_f, _a1, _a2, _a3, _a4, _a5, _a6) (-ENOSYS)
+static inline void parse_efi_setup(u64 phys_addr, u32 data_len) {}
#endif /* CONFIG_EFI */
#endif /* _ASM_X86_EFI_H */
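
As a hedged illustration of the 64-bit efi_call_virt() wrapper above: a call site simply names the runtime service and its arguments, and the macro takes care of syncing the EFI mappings, disabling preemption and bracketing the call with __kernel_fpu_begin()/__kernel_fpu_end(). The wrapper function below is illustrative, not part of the patch:

static efi_status_t example_get_time(efi_time_t *tm, efi_time_cap_t *cap)
{
	/* Expands to efi_call((void *)efi.systab->runtime->get_time, tm, cap)
	 * inside the FPU/preemption bracket shown above. */
	return efi_call_virt(get_time, tm, cap);
}
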
diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
index 5f962df30d0..1a055c81d86 100644
--- a/arch/x86/include/asm/elf.h
+++ b/arch/x86/include/asm/elf.h
@@ -75,7 +75,12 @@ typedef struct user_fxsr_struct elf_fpxregset_t;
#include <asm/vdso.h>
-extern unsigned int vdso_enabled;
+#ifdef CONFIG_X86_64
+extern unsigned int vdso64_enabled;
+#endif
+#if defined(CONFIG_X86_32) || defined(CONFIG_COMPAT)
+extern unsigned int vdso32_enabled;
+#endif
/*
* This is used to ensure we don't load something for the wrong architecture.
@@ -84,7 +89,6 @@ extern unsigned int vdso_enabled;
(((x)->e_machine == EM_386) || ((x)->e_machine == EM_486))
#include <asm/processor.h>
-#include <asm/system.h>
#ifdef CONFIG_X86_32
#include <asm/desc.h>
@@ -156,7 +160,12 @@ do { \
#define elf_check_arch(x) \
((x)->e_machine == EM_X86_64)
-#define compat_elf_check_arch(x) elf_check_arch_ia32(x)
+#define compat_elf_check_arch(x) \
+ (elf_check_arch_ia32(x) || (x)->e_machine == EM_X86_64)
+
+#if __USER32_DS != __USER_DS
+# error "The following code assumes __USER32_DS == __USER_DS"
+#endif
static inline void elf_common_init(struct thread_struct *t,
struct pt_regs *regs, const u16 ds)
@@ -179,8 +188,9 @@ static inline void elf_common_init(struct thread_struct *t,
void start_thread_ia32(struct pt_regs *regs, u32 new_ip, u32 new_sp);
#define compat_start_thread start_thread_ia32
-void set_personality_ia32(void);
-#define COMPAT_SET_PERSONALITY(ex) set_personality_ia32()
+void set_personality_ia32(bool);
+#define COMPAT_SET_PERSONALITY(ex) \
+ set_personality_ia32((ex).e_machine == EM_X86_64)
#define COMPAT_ELF_PLATFORM ("i686")
@@ -264,9 +274,9 @@ extern int force_personality32;
struct task_struct;
-#define ARCH_DLINFO_IA32(vdso_enabled) \
+#define ARCH_DLINFO_IA32 \
do { \
- if (vdso_enabled) { \
+ if (vdso32_enabled) { \
NEW_AUX_ENT(AT_SYSINFO, VDSO_ENTRY); \
NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_CURRENT_BASE); \
} \
@@ -276,29 +286,37 @@ do { \
#define STACK_RND_MASK (0x7ff)
-#define VDSO_HIGH_BASE (__fix_to_virt(FIX_VDSO))
-
-#define ARCH_DLINFO ARCH_DLINFO_IA32(vdso_enabled)
+#define ARCH_DLINFO ARCH_DLINFO_IA32
/* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */
#else /* CONFIG_X86_32 */
-#define VDSO_HIGH_BASE 0xffffe000U /* CONFIG_COMPAT_VDSO address */
-
/* 1GB for 64bit, 8MB for 32bit */
-#define STACK_RND_MASK (test_thread_flag(TIF_IA32) ? 0x7ff : 0x3fffff)
+#define STACK_RND_MASK (test_thread_flag(TIF_ADDR32) ? 0x7ff : 0x3fffff)
#define ARCH_DLINFO \
do { \
- if (vdso_enabled) \
+ if (vdso64_enabled) \
NEW_AUX_ENT(AT_SYSINFO_EHDR, \
- (unsigned long)current->mm->context.vdso); \
+ (unsigned long __force)current->mm->context.vdso); \
+} while (0)
+
+/* As a historical oddity, the x32 and x86_64 vDSOs are controlled together. */
+#define ARCH_DLINFO_X32 \
+do { \
+ if (vdso64_enabled) \
+ NEW_AUX_ENT(AT_SYSINFO_EHDR, \
+ (unsigned long __force)current->mm->context.vdso); \
} while (0)
#define AT_SYSINFO 32
-#define COMPAT_ARCH_DLINFO ARCH_DLINFO_IA32(sysctl_vsyscall32)
+#define COMPAT_ARCH_DLINFO \
+if (test_thread_flag(TIF_X32)) \
+ ARCH_DLINFO_X32; \
+else \
+ ARCH_DLINFO_IA32
#define COMPAT_ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
@@ -307,16 +325,17 @@ do { \
#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
#define VDSO_ENTRY \
- ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
+ ((unsigned long)current->mm->context.vdso + \
+ selected_vdso32->sym___kernel_vsyscall)
struct linux_binprm;
#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
extern int arch_setup_additional_pages(struct linux_binprm *bprm,
int uses_interp);
-
-extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
-#define compat_arch_setup_additional_pages syscall32_setup_pages
+extern int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
+ int uses_interp);
+#define compat_arch_setup_additional_pages compat_arch_setup_additional_pages
extern unsigned long arch_randomize_brk(struct mm_struct *mm);
#define arch_randomize_brk arch_randomize_brk
@@ -330,18 +349,16 @@ static inline int mmap_is_ia32(void)
return 1;
#endif
#ifdef CONFIG_IA32_EMULATION
- if (test_thread_flag(TIF_IA32))
+ if (test_thread_flag(TIF_ADDR32))
return 1;
#endif
return 0;
}
-/* The first two values are special, do not change. See align_addr() */
+/* Do not change the values. See get_align_mask() */
enum align_flags {
ALIGN_VA_32 = BIT(0),
ALIGN_VA_64 = BIT(1),
- ALIGN_VDSO = BIT(2),
- ALIGN_TOPDOWN = BIT(3),
};
struct va_alignment {
@@ -350,5 +367,5 @@ struct va_alignment {
} ____cacheline_aligned;
extern struct va_alignment va_align;
-extern unsigned long align_addr(unsigned long, struct file *, enum align_flags);
+extern unsigned long align_vdso_addr(unsigned long);
#endif /* _ASM_X86_ELF_H */
diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
index cc70c1c78ca..77a99ac06d0 100644
--- a/arch/x86/include/asm/emergency-restart.h
+++ b/arch/x86/include/asm/emergency-restart.h
@@ -1,20 +1,6 @@
#ifndef _ASM_X86_EMERGENCY_RESTART_H
#define _ASM_X86_EMERGENCY_RESTART_H
-enum reboot_type {
- BOOT_TRIPLE = 't',
- BOOT_KBD = 'k',
-#ifdef CONFIG_X86_32
- BOOT_BIOS = 'b',
-#endif
- BOOT_ACPI = 'a',
- BOOT_EFI = 'e',
- BOOT_CF9 = 'p',
- BOOT_CF9_COND = 'q',
-};
-
-extern enum reboot_type reboot_type;
-
extern void machine_emergency_restart(void);
#endif /* _ASM_X86_EMERGENCY_RESTART_H */
diff --git a/arch/x86/include/asm/entry_arch.h b/arch/x86/include/asm/entry_arch.h
index 0baa628e330..dc5fa661465 100644
--- a/arch/x86/include/asm/entry_arch.h
+++ b/arch/x86/include/asm/entry_arch.h
@@ -13,21 +13,18 @@
BUILD_INTERRUPT(reschedule_interrupt,RESCHEDULE_VECTOR)
BUILD_INTERRUPT(call_function_interrupt,CALL_FUNCTION_VECTOR)
BUILD_INTERRUPT(call_function_single_interrupt,CALL_FUNCTION_SINGLE_VECTOR)
-BUILD_INTERRUPT(irq_move_cleanup_interrupt,IRQ_MOVE_CLEANUP_VECTOR)
-BUILD_INTERRUPT(reboot_interrupt,REBOOT_VECTOR)
-
-.irp idx,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15, \
- 16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
-.if NUM_INVALIDATE_TLB_VECTORS > \idx
-BUILD_INTERRUPT3(invalidate_interrupt\idx,
- (INVALIDATE_TLB_VECTOR_START)+\idx,
- smp_invalidate_interrupt)
-.endif
-.endr
+BUILD_INTERRUPT3(irq_move_cleanup_interrupt, IRQ_MOVE_CLEANUP_VECTOR,
+ smp_irq_move_cleanup_interrupt)
+BUILD_INTERRUPT3(reboot_interrupt, REBOOT_VECTOR, smp_reboot_interrupt)
#endif
BUILD_INTERRUPT(x86_platform_ipi, X86_PLATFORM_IPI_VECTOR)
+#ifdef CONFIG_HAVE_KVM
+BUILD_INTERRUPT3(kvm_posted_intr_ipi, POSTED_INTR_VECTOR,
+ smp_kvm_posted_intr_ipi)
+#endif
+
/*
* every pentium local APIC has two 'local interrupts', with a
* soft-definable vector attached to both interrupts, one of
diff --git a/arch/x86/include/asm/errno.h b/arch/x86/include/asm/errno.h
deleted file mode 100644
index 4c82b503d92..00000000000
--- a/arch/x86/include/asm/errno.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/errno.h>
diff --git a/arch/x86/include/asm/espfix.h b/arch/x86/include/asm/espfix.h
new file mode 100644
index 00000000000..99efebb2f69
--- /dev/null
+++ b/arch/x86/include/asm/espfix.h
@@ -0,0 +1,16 @@
+#ifndef _ASM_X86_ESPFIX_H
+#define _ASM_X86_ESPFIX_H
+
+#ifdef CONFIG_X86_64
+
+#include <asm/percpu.h>
+
+DECLARE_PER_CPU_READ_MOSTLY(unsigned long, espfix_stack);
+DECLARE_PER_CPU_READ_MOSTLY(unsigned long, espfix_waddr);
+
+extern void init_espfix_bsp(void);
+extern void init_espfix_ap(void);
+
+#endif /* CONFIG_X86_64 */
+
+#endif /* _ASM_X86_ESPFIX_H */
diff --git a/arch/x86/include/asm/exec.h b/arch/x86/include/asm/exec.h
new file mode 100644
index 00000000000..54c2e1db274
--- /dev/null
+++ b/arch/x86/include/asm/exec.h
@@ -0,0 +1 @@
+/* define arch_align_stack() here */
diff --git a/arch/x86/include/asm/fcntl.h b/arch/x86/include/asm/fcntl.h
deleted file mode 100644
index 46ab12db573..00000000000
--- a/arch/x86/include/asm/fcntl.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/fcntl.h>
diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
index 4da3c0c4c97..b0910f97a3e 100644
--- a/arch/x86/include/asm/fixmap.h
+++ b/arch/x86/include/asm/fixmap.h
@@ -19,11 +19,12 @@
#include <asm/acpi.h>
#include <asm/apicdef.h>
#include <asm/page.h>
+#include <asm/pvclock.h>
#ifdef CONFIG_X86_32
#include <linux/threads.h>
#include <asm/kmap_types.h>
#else
-#include <asm/vsyscall.h>
+#include <uapi/asm/vsyscall.h>
#endif
/*
@@ -39,15 +40,9 @@
*/
extern unsigned long __FIXADDR_TOP;
#define FIXADDR_TOP ((unsigned long)__FIXADDR_TOP)
-
-#define FIXADDR_USER_START __fix_to_virt(FIX_VDSO)
-#define FIXADDR_USER_END __fix_to_virt(FIX_VDSO - 1)
#else
-#define FIXADDR_TOP (VSYSCALL_END-PAGE_SIZE)
-
-/* Only covers 32bit vsyscalls currently. Need another set for 64bit. */
-#define FIXADDR_USER_START ((unsigned long)VSYSCALL32_VSYSCALL)
-#define FIXADDR_USER_END (FIXADDR_USER_START + PAGE_SIZE)
+#define FIXADDR_TOP (round_up(VSYSCALL_ADDR + PAGE_SIZE, 1<<PMD_SHIFT) - \
+ PAGE_SIZE)
#endif
@@ -73,13 +68,12 @@ extern unsigned long __FIXADDR_TOP;
enum fixed_addresses {
#ifdef CONFIG_X86_32
FIX_HOLE,
- FIX_VDSO,
#else
- VSYSCALL_LAST_PAGE,
- VSYSCALL_FIRST_PAGE = VSYSCALL_LAST_PAGE
- + ((VSYSCALL_END-VSYSCALL_START) >> PAGE_SHIFT) - 1,
- VVAR_PAGE,
- VSYSCALL_HPET,
+ VSYSCALL_PAGE = (FIXADDR_TOP - VSYSCALL_ADDR) >> PAGE_SHIFT,
+#ifdef CONFIG_PARAVIRT_CLOCK
+ PVCLOCK_FIXMAP_BEGIN,
+ PVCLOCK_FIXMAP_END = PVCLOCK_FIXMAP_BEGIN+PVCLOCK_VSYSCALL_NR_PAGES-1,
+#endif
#endif
FIX_DBGP_BASE,
FIX_EARLYCON_MEM_BASE,
@@ -93,18 +87,7 @@ enum fixed_addresses {
FIX_IO_APIC_BASE_0,
FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS - 1,
#endif
-#ifdef CONFIG_X86_VISWS_APIC
- FIX_CO_CPU, /* Cobalt timer */
- FIX_CO_APIC, /* Cobalt APIC Redirection Table */
- FIX_LI_PCIA, /* Lithium PCI Bridge A */
- FIX_LI_PCIB, /* Lithium PCI Bridge B */
-#endif
-#ifdef CONFIG_X86_F00F_BUG
- FIX_F00F_IDT, /* Virtual mapping for IDT */
-#endif
-#ifdef CONFIG_X86_CYCLONE_TIMER
- FIX_CYCLONE_TIMER, /*cyclone timer register*/
-#endif
+ FIX_RO_IDT, /* Virtual mapping for read-only IDT */
#ifdef CONFIG_X86_32
FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
@@ -175,64 +158,13 @@ static inline void __set_fixmap(enum fixed_addresses idx,
}
#endif
-#define set_fixmap(idx, phys) \
- __set_fixmap(idx, phys, PAGE_KERNEL)
-
-/*
- * Some hardware wants to get fixmapped without caching.
- */
-#define set_fixmap_nocache(idx, phys) \
- __set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE)
-
-#define clear_fixmap(idx) \
- __set_fixmap(idx, 0, __pgprot(0))
-
-#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT))
-#define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT)
-
-extern void __this_fixmap_does_not_exist(void);
-
-/*
- * 'index to address' translation. If anyone tries to use the idx
- * directly without translation, we catch the bug with a NULL-deference
- * kernel oops. Illegal ranges of incoming indices are caught too.
- */
-static __always_inline unsigned long fix_to_virt(const unsigned int idx)
-{
- /*
- * this branch gets completely eliminated after inlining,
- * except when someone tries to use fixaddr indices in an
- * illegal way. (such as mixing up address types or using
- * out-of-range indices).
- *
- * If it doesn't get removed, the linker will complain
- * loudly with a reasonably clear error message..
- */
- if (idx >= __end_of_fixed_addresses)
- __this_fixmap_does_not_exist();
-
- return __fix_to_virt(idx);
-}
-
-static inline unsigned long virt_to_fix(const unsigned long vaddr)
-{
- BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
- return __virt_to_fix(vaddr);
-}
-
-/* Return an pointer with offset calculated */
-static __always_inline unsigned long
-__set_fixmap_offset(enum fixed_addresses idx, phys_addr_t phys, pgprot_t flags)
-{
- __set_fixmap(idx, phys, flags);
- return fix_to_virt(idx) + (phys & (PAGE_SIZE - 1));
-}
+#include <asm-generic/fixmap.h>
-#define set_fixmap_offset(idx, phys) \
- __set_fixmap_offset(idx, phys, PAGE_KERNEL)
+#define __late_set_fixmap(idx, phys, flags) __set_fixmap(idx, phys, flags)
+#define __late_clear_fixmap(idx) __set_fixmap(idx, 0, __pgprot(0))
-#define set_fixmap_offset_nocache(idx, phys) \
- __set_fixmap_offset(idx, phys, PAGE_KERNEL_NOCACHE)
+void __early_set_fixmap(enum fixed_addresses idx,
+ phys_addr_t phys, pgprot_t flags);
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_X86_FIXMAP_H */
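
For the 64-bit FIXADDR_TOP formula above, assuming the usual VSYSCALL_ADDR of 0xffffffffff600000 and a 2 MB PMD (PMD_SHIFT = 21): round_up(0xffffffffff600000 + 0x1000, 0x200000) - 0x1000 = 0xffffffffff800000 - 0x1000 = 0xffffffffff7ff000, i.e. the fixmap now ends one page below the 2 MB boundary above the single vsyscall page.
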
diff --git a/arch/x86/include/asm/floppy.h b/arch/x86/include/asm/floppy.h
index dbe82a5c5ea..1c7eefe3250 100644
--- a/arch/x86/include/asm/floppy.h
+++ b/arch/x86/include/asm/floppy.h
@@ -99,7 +99,7 @@ static irqreturn_t floppy_hardint(int irq, void *dev_id)
virtual_dma_residue += virtual_dma_count;
virtual_dma_count = 0;
#ifdef TRACE_FLPY_INT
- printk("count=%x, residue=%x calls=%d bytes=%d dma_wait=%d\n",
+ printk(KERN_DEBUG "count=%x, residue=%x calls=%d bytes=%d dma_wait=%d\n",
virtual_dma_count, virtual_dma_residue, calls, bytes,
dma_wait);
calls = 0;
@@ -145,10 +145,10 @@ static int fd_request_irq(void)
{
if (can_use_virtual_dma)
return request_irq(FLOPPY_IRQ, floppy_hardint,
- IRQF_DISABLED, "floppy", NULL);
+ 0, "floppy", NULL);
else
return request_irq(FLOPPY_IRQ, floppy_interrupt,
- IRQF_DISABLED, "floppy", NULL);
+ 0, "floppy", NULL);
}
static unsigned long dma_mem_alloc(unsigned long size)
diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
new file mode 100644
index 00000000000..115e3689cd5
--- /dev/null
+++ b/arch/x86/include/asm/fpu-internal.h
@@ -0,0 +1,619 @@
+/*
+ * Copyright (C) 1994 Linus Torvalds
+ *
+ * Pentium III FXSR, SSE support
+ * General FPU state handling cleanups
+ * Gareth Hughes <gareth@valinux.com>, May 2000
+ * x86-64 work by Andi Kleen 2002
+ */
+
+#ifndef _FPU_INTERNAL_H
+#define _FPU_INTERNAL_H
+
+#include <linux/kernel_stat.h>
+#include <linux/regset.h>
+#include <linux/compat.h>
+#include <linux/slab.h>
+#include <asm/asm.h>
+#include <asm/cpufeature.h>
+#include <asm/processor.h>
+#include <asm/sigcontext.h>
+#include <asm/user.h>
+#include <asm/uaccess.h>
+#include <asm/xsave.h>
+#include <asm/smap.h>
+
+#ifdef CONFIG_X86_64
+# include <asm/sigcontext32.h>
+# include <asm/user32.h>
+struct ksignal;
+int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
+ compat_sigset_t *set, struct pt_regs *regs);
+int ia32_setup_frame(int sig, struct ksignal *ksig,
+ compat_sigset_t *set, struct pt_regs *regs);
+#else
+# define user_i387_ia32_struct user_i387_struct
+# define user32_fxsr_struct user_fxsr_struct
+# define ia32_setup_frame __setup_frame
+# define ia32_setup_rt_frame __setup_rt_frame
+#endif
+
+extern unsigned int mxcsr_feature_mask;
+extern void fpu_init(void);
+extern void eager_fpu_init(void);
+
+DECLARE_PER_CPU(struct task_struct *, fpu_owner_task);
+
+extern void convert_from_fxsr(struct user_i387_ia32_struct *env,
+ struct task_struct *tsk);
+extern void convert_to_fxsr(struct task_struct *tsk,
+ const struct user_i387_ia32_struct *env);
+
+extern user_regset_active_fn fpregs_active, xfpregs_active;
+extern user_regset_get_fn fpregs_get, xfpregs_get, fpregs_soft_get,
+ xstateregs_get;
+extern user_regset_set_fn fpregs_set, xfpregs_set, fpregs_soft_set,
+ xstateregs_set;
+
+/*
+ * xstateregs_active == fpregs_active. Please refer to the comment
+ * at the definition of fpregs_active.
+ */
+#define xstateregs_active fpregs_active
+
+#ifdef CONFIG_MATH_EMULATION
+extern void finit_soft_fpu(struct i387_soft_struct *soft);
+#else
+static inline void finit_soft_fpu(struct i387_soft_struct *soft) {}
+#endif
+
+static inline int is_ia32_compat_frame(void)
+{
+ return config_enabled(CONFIG_IA32_EMULATION) &&
+ test_thread_flag(TIF_IA32);
+}
+
+static inline int is_ia32_frame(void)
+{
+ return config_enabled(CONFIG_X86_32) || is_ia32_compat_frame();
+}
+
+static inline int is_x32_frame(void)
+{
+ return config_enabled(CONFIG_X86_X32_ABI) && test_thread_flag(TIF_X32);
+}
+
+#define X87_FSW_ES (1 << 7) /* Exception Summary */
+
+static __always_inline __pure bool use_eager_fpu(void)
+{
+ return static_cpu_has_safe(X86_FEATURE_EAGER_FPU);
+}
+
+static __always_inline __pure bool use_xsaveopt(void)
+{
+ return static_cpu_has_safe(X86_FEATURE_XSAVEOPT);
+}
+
+static __always_inline __pure bool use_xsave(void)
+{
+ return static_cpu_has_safe(X86_FEATURE_XSAVE);
+}
+
+static __always_inline __pure bool use_fxsr(void)
+{
+ return static_cpu_has_safe(X86_FEATURE_FXSR);
+}
+
+static inline void fx_finit(struct i387_fxsave_struct *fx)
+{
+ memset(fx, 0, xstate_size);
+ fx->cwd = 0x37f;
+ fx->mxcsr = MXCSR_DEFAULT;
+}
+
+extern void __sanitize_i387_state(struct task_struct *);
+
+static inline void sanitize_i387_state(struct task_struct *tsk)
+{
+ if (!use_xsaveopt())
+ return;
+ __sanitize_i387_state(tsk);
+}
+
+#define user_insn(insn, output, input...) \
+({ \
+ int err; \
+ asm volatile(ASM_STAC "\n" \
+ "1:" #insn "\n\t" \
+ "2: " ASM_CLAC "\n" \
+ ".section .fixup,\"ax\"\n" \
+ "3: movl $-1,%[err]\n" \
+ " jmp 2b\n" \
+ ".previous\n" \
+ _ASM_EXTABLE(1b, 3b) \
+ : [err] "=r" (err), output \
+ : "0"(0), input); \
+ err; \
+})
+
+#define check_insn(insn, output, input...) \
+({ \
+ int err; \
+ asm volatile("1:" #insn "\n\t" \
+ "2:\n" \
+ ".section .fixup,\"ax\"\n" \
+ "3: movl $-1,%[err]\n" \
+ " jmp 2b\n" \
+ ".previous\n" \
+ _ASM_EXTABLE(1b, 3b) \
+ : [err] "=r" (err), output \
+ : "0"(0), input); \
+ err; \
+})
+
+static inline int fsave_user(struct i387_fsave_struct __user *fx)
+{
+ return user_insn(fnsave %[fx]; fwait, [fx] "=m" (*fx), "m" (*fx));
+}
+
+static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
+{
+ if (config_enabled(CONFIG_X86_32))
+ return user_insn(fxsave %[fx], [fx] "=m" (*fx), "m" (*fx));
+ else if (config_enabled(CONFIG_AS_FXSAVEQ))
+ return user_insn(fxsaveq %[fx], [fx] "=m" (*fx), "m" (*fx));
+
+ /* See comment in fpu_fxsave() below. */
+ return user_insn(rex64/fxsave (%[fx]), "=m" (*fx), [fx] "R" (fx));
+}
+
+static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
+{
+ if (config_enabled(CONFIG_X86_32))
+ return check_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
+ else if (config_enabled(CONFIG_AS_FXSAVEQ))
+ return check_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
+
+ /* See comment in fpu_fxsave() below. */
+ return check_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx),
+ "m" (*fx));
+}
+
+static inline int fxrstor_user(struct i387_fxsave_struct __user *fx)
+{
+ if (config_enabled(CONFIG_X86_32))
+ return user_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
+ else if (config_enabled(CONFIG_AS_FXSAVEQ))
+ return user_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
+
+ /* See comment in fpu_fxsave() below. */
+ return user_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx),
+ "m" (*fx));
+}
+
+static inline int frstor_checking(struct i387_fsave_struct *fx)
+{
+ return check_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
+}
+
+static inline int frstor_user(struct i387_fsave_struct __user *fx)
+{
+ return user_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
+}
+
+static inline void fpu_fxsave(struct fpu *fpu)
+{
+ if (config_enabled(CONFIG_X86_32))
+ asm volatile( "fxsave %[fx]" : [fx] "=m" (fpu->state->fxsave));
+ else if (config_enabled(CONFIG_AS_FXSAVEQ))
+ asm volatile("fxsaveq %0" : "=m" (fpu->state->fxsave));
+ else {
+ /* Using "rex64; fxsave %0" is broken because, if the memory
+ * operand uses any extended registers for addressing, a second
+ * REX prefix will be generated (to the assembler, rex64
+ * followed by semicolon is a separate instruction), and hence
+ * the 64-bitness is lost.
+ *
+ * Using "fxsaveq %0" would be the ideal choice, but is only
+ * supported starting with gas 2.16.
+ *
+ * Using, as a workaround, the properly prefixed form below
+ * isn't accepted by any binutils version so far released,
+ * complaining that the same type of prefix is used twice if
+ * an extended register is needed for addressing (fix submitted
+ * to mainline 2005-11-21).
+ *
+ * asm volatile("rex64/fxsave %0" : "=m" (fpu->state->fxsave));
+ *
+ * This, however, we can work around by forcing the compiler to
+ * select an addressing mode that doesn't require extended
+ * registers.
+ */
+ asm volatile( "rex64/fxsave (%[fx])"
+ : "=m" (fpu->state->fxsave)
+ : [fx] "R" (&fpu->state->fxsave));
+ }
+}
+
+/*
+ * These must be called with preempt disabled. Returns
+ * 'true' if the FPU state is still intact.
+ */
+static inline int fpu_save_init(struct fpu *fpu)
+{
+ if (use_xsave()) {
+ fpu_xsave(fpu);
+
+ /*
+ * xsave header may indicate the init state of the FP.
+ */
+ if (!(fpu->state->xsave.xsave_hdr.xstate_bv & XSTATE_FP))
+ return 1;
+ } else if (use_fxsr()) {
+ fpu_fxsave(fpu);
+ } else {
+ asm volatile("fnsave %[fx]; fwait"
+ : [fx] "=m" (fpu->state->fsave));
+ return 0;
+ }
+
+ /*
+ * If exceptions are pending, we need to clear them so
+ * that we don't randomly get exceptions later.
+ *
+ * FIXME! Is this perhaps only true for the old-style
+ * irq13 case? Maybe we could leave the x87 state
+ * intact otherwise?
+ */
+ if (unlikely(fpu->state->fxsave.swd & X87_FSW_ES)) {
+ asm volatile("fnclex");
+ return 0;
+ }
+ return 1;
+}
+
+static inline int __save_init_fpu(struct task_struct *tsk)
+{
+ return fpu_save_init(&tsk->thread.fpu);
+}
+
+static inline int fpu_restore_checking(struct fpu *fpu)
+{
+ if (use_xsave())
+ return fpu_xrstor_checking(&fpu->state->xsave);
+ else if (use_fxsr())
+ return fxrstor_checking(&fpu->state->fxsave);
+ else
+ return frstor_checking(&fpu->state->fsave);
+}
+
+static inline int restore_fpu_checking(struct task_struct *tsk)
+{
+ /* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
+ is pending. Clear the x87 state here by setting it to fixed
+ values. The "m" memory operand below only needs to reference a
+ variable that is likely to be resident in the L1 cache. */
+ if (unlikely(static_cpu_has_safe(X86_FEATURE_FXSAVE_LEAK))) {
+ asm volatile(
+ "fnclex\n\t"
+ "emms\n\t"
+ "fildl %P[addr]" /* set F?P to defined value */
+ : : [addr] "m" (tsk->thread.fpu.has_fpu));
+ }
+
+ return fpu_restore_checking(&tsk->thread.fpu);
+}
+
+/*
+ * Software FPU state helpers. Careful: these need to
+ * be used with preemption protection *and* they need to be
+ * properly paired with the CR0.TS changes!
+ */
+static inline int __thread_has_fpu(struct task_struct *tsk)
+{
+ return tsk->thread.fpu.has_fpu;
+}
+
+/* Must be paired with an 'stts' after! */
+static inline void __thread_clear_has_fpu(struct task_struct *tsk)
+{
+ tsk->thread.fpu.has_fpu = 0;
+ this_cpu_write(fpu_owner_task, NULL);
+}
+
+/* Must be paired with a 'clts' before! */
+static inline void __thread_set_has_fpu(struct task_struct *tsk)
+{
+ tsk->thread.fpu.has_fpu = 1;
+ this_cpu_write(fpu_owner_task, tsk);
+}
+
+/*
+ * Encapsulate the CR0.TS handling together with the
+ * software flag.
+ *
+ * These generally need preemption protection to work,
+ * do try to avoid using these on their own.
+ */
+static inline void __thread_fpu_end(struct task_struct *tsk)
+{
+ __thread_clear_has_fpu(tsk);
+ if (!use_eager_fpu())
+ stts();
+}
+
+static inline void __thread_fpu_begin(struct task_struct *tsk)
+{
+ if (!static_cpu_has_safe(X86_FEATURE_EAGER_FPU))
+ clts();
+ __thread_set_has_fpu(tsk);
+}
+
+static inline void __drop_fpu(struct task_struct *tsk)
+{
+ if (__thread_has_fpu(tsk)) {
+ /* Ignore delayed exceptions from user space */
+ asm volatile("1: fwait\n"
+ "2:\n"
+ _ASM_EXTABLE(1b, 2b));
+ __thread_fpu_end(tsk);
+ }
+}
+
+static inline void drop_fpu(struct task_struct *tsk)
+{
+ /*
+ * Forget coprocessor state..
+ */
+ preempt_disable();
+ tsk->thread.fpu_counter = 0;
+ __drop_fpu(tsk);
+ clear_used_math();
+ preempt_enable();
+}
+
+static inline void drop_init_fpu(struct task_struct *tsk)
+{
+ if (!use_eager_fpu())
+ drop_fpu(tsk);
+ else {
+ if (use_xsave())
+ xrstor_state(init_xstate_buf, -1);
+ else
+ fxrstor_checking(&init_xstate_buf->i387);
+ }
+}
+
+/*
+ * FPU state switching for scheduling.
+ *
+ * This is a two-stage process:
+ *
+ * - switch_fpu_prepare() saves the old state and
+ * sets the new state of the CR0.TS bit. This is
+ * done within the context of the old process.
+ *
+ * - switch_fpu_finish() restores the new state as
+ * necessary.
+ */
+typedef struct { int preload; } fpu_switch_t;
+
+/*
+ * Must be run with preemption disabled: this clears fpu_owner_task
+ * on this CPU.
+ *
+ * This disables any lazy restore of the current FPU state, but if the
+ * current thread owns the FPU, its state will still be saved before
+ * the thread is switched out.
+ */
+static inline void __cpu_disable_lazy_restore(unsigned int cpu)
+{
+ per_cpu(fpu_owner_task, cpu) = NULL;
+}
+
+static inline int fpu_lazy_restore(struct task_struct *new, unsigned int cpu)
+{
+ return new == this_cpu_read_stable(fpu_owner_task) &&
+ cpu == new->thread.fpu.last_cpu;
+}
+
+static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct task_struct *new, int cpu)
+{
+ fpu_switch_t fpu;
+
+ /*
+ * If the task has used the FPU, pre-load its state: always on
+ * eager-FPU (xsave) processors, or if the last 5 consecutive
+ * context switches used the FPU.
+ */
+ fpu.preload = tsk_used_math(new) && (use_eager_fpu() ||
+ new->thread.fpu_counter > 5);
+ if (__thread_has_fpu(old)) {
+ if (!__save_init_fpu(old))
+ cpu = ~0;
+ old->thread.fpu.last_cpu = cpu;
+ old->thread.fpu.has_fpu = 0; /* But leave fpu_owner_task! */
+
+ /* Don't change CR0.TS if we just switch! */
+ if (fpu.preload) {
+ new->thread.fpu_counter++;
+ __thread_set_has_fpu(new);
+ prefetch(new->thread.fpu.state);
+ } else if (!use_eager_fpu())
+ stts();
+ } else {
+ old->thread.fpu_counter = 0;
+ old->thread.fpu.last_cpu = ~0;
+ if (fpu.preload) {
+ new->thread.fpu_counter++;
+ if (!use_eager_fpu() && fpu_lazy_restore(new, cpu))
+ fpu.preload = 0;
+ else
+ prefetch(new->thread.fpu.state);
+ __thread_fpu_begin(new);
+ }
+ }
+ return fpu;
+}
+
+/*
+ * By the time this gets called, we've already cleared CR0.TS and
+ * given the process the FPU if we are going to preload the FPU
+ * state - all we need to do is to conditionally restore the register
+ * state itself.
+ */
+static inline void switch_fpu_finish(struct task_struct *new, fpu_switch_t fpu)
+{
+ if (fpu.preload) {
+ if (unlikely(restore_fpu_checking(new)))
+ drop_init_fpu(new);
+ }
+}
+
+/*
+ * Signal frame handlers...
+ */
+extern int save_xstate_sig(void __user *buf, void __user *fx, int size);
+extern int __restore_xstate_sig(void __user *buf, void __user *fx, int size);
+
+static inline int xstate_sigframe_size(void)
+{
+ return use_xsave() ? xstate_size + FP_XSTATE_MAGIC2_SIZE : xstate_size;
+}
+
+static inline int restore_xstate_sig(void __user *buf, int ia32_frame)
+{
+ void __user *buf_fx = buf;
+ int size = xstate_sigframe_size();
+
+ if (ia32_frame && use_fxsr()) {
+ buf_fx = buf + sizeof(struct i387_fsave_struct);
+ size += sizeof(struct i387_fsave_struct);
+ }
+
+ return __restore_xstate_sig(buf, buf_fx, size);
+}
+
+/*
+ * Needs to be preemption-safe.
+ *
+ * NOTE! user_fpu_begin() must be used only immediately before restoring
+ * the FPU state. This function does not do any save/restore on its own.
+ */
+static inline void user_fpu_begin(void)
+{
+ preempt_disable();
+ if (!user_has_fpu())
+ __thread_fpu_begin(current);
+ preempt_enable();
+}
+
+static inline void __save_fpu(struct task_struct *tsk)
+{
+ if (use_xsave())
+ xsave_state(&tsk->thread.fpu.state->xsave, -1);
+ else
+ fpu_fxsave(&tsk->thread.fpu);
+}
+
+/*
+ * These disable preemption on their own and are safe
+ */
+static inline void save_init_fpu(struct task_struct *tsk)
+{
+ WARN_ON_ONCE(!__thread_has_fpu(tsk));
+
+ if (use_eager_fpu()) {
+ __save_fpu(tsk);
+ return;
+ }
+
+ preempt_disable();
+ __save_init_fpu(tsk);
+ __thread_fpu_end(tsk);
+ preempt_enable();
+}
+
+/*
+ * i387 state interaction
+ */
+static inline unsigned short get_fpu_cwd(struct task_struct *tsk)
+{
+ if (cpu_has_fxsr) {
+ return tsk->thread.fpu.state->fxsave.cwd;
+ } else {
+ return (unsigned short)tsk->thread.fpu.state->fsave.cwd;
+ }
+}
+
+static inline unsigned short get_fpu_swd(struct task_struct *tsk)
+{
+ if (cpu_has_fxsr) {
+ return tsk->thread.fpu.state->fxsave.swd;
+ } else {
+ return (unsigned short)tsk->thread.fpu.state->fsave.swd;
+ }
+}
+
+static inline unsigned short get_fpu_mxcsr(struct task_struct *tsk)
+{
+ if (cpu_has_xmm) {
+ return tsk->thread.fpu.state->fxsave.mxcsr;
+ } else {
+ return MXCSR_DEFAULT;
+ }
+}
+
+static bool fpu_allocated(struct fpu *fpu)
+{
+ return fpu->state != NULL;
+}
+
+static inline int fpu_alloc(struct fpu *fpu)
+{
+ if (fpu_allocated(fpu))
+ return 0;
+ fpu->state = kmem_cache_alloc(task_xstate_cachep, GFP_KERNEL);
+ if (!fpu->state)
+ return -ENOMEM;
+ WARN_ON((unsigned long)fpu->state & 15);
+ return 0;
+}
+
+static inline void fpu_free(struct fpu *fpu)
+{
+ if (fpu->state) {
+ kmem_cache_free(task_xstate_cachep, fpu->state);
+ fpu->state = NULL;
+ }
+}
+
+static inline void fpu_copy(struct task_struct *dst, struct task_struct *src)
+{
+ if (use_eager_fpu()) {
+ memset(&dst->thread.fpu.state->xsave, 0, xstate_size);
+ __save_fpu(dst);
+ } else {
+ struct fpu *dfpu = &dst->thread.fpu;
+ struct fpu *sfpu = &src->thread.fpu;
+
+ unlazy_fpu(src);
+ memcpy(dfpu->state, sfpu->state, xstate_size);
+ }
+}
+
+static inline unsigned long
+alloc_mathframe(unsigned long sp, int ia32_frame, unsigned long *buf_fx,
+ unsigned long *size)
+{
+ unsigned long frame_size = xstate_sigframe_size();
+
+ *buf_fx = sp = round_down(sp - frame_size, 64);
+ if (ia32_frame && use_fxsr()) {
+ frame_size += sizeof(struct i387_fsave_struct);
+ sp -= sizeof(struct i387_fsave_struct);
+ }
+
+ *size = frame_size;
+ return sp;
+}
+
+#endif
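A minimal sketch of how the two-stage switch_fpu_prepare()/switch_fpu_finish() pair above is meant to be driven from the arch context-switch path. The caller shape below (a __switch_to()-style function and its names) is illustrative only, not part of this patch:

	/* Runs with preemption disabled, on the context-switch path. */
	static struct task_struct *example_switch_to(struct task_struct *prev,
						     struct task_struct *next)
	{
		int cpu = smp_processor_id();
		fpu_switch_t fpu;

		/* Stage 1: save prev's FPU state, decide whether to preload next's. */
		fpu = switch_fpu_prepare(prev, next, cpu);

		/* ... switch stacks, segments, TLS, per-cpu current, etc. ... */

		/* Stage 2: conditionally restore next's FPU register state. */
		switch_fpu_finish(next, fpu);

		return prev;
	}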
diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h
index 268c783ab1c..0525a8bdf65 100644
--- a/arch/x86/include/asm/ftrace.h
+++ b/arch/x86/include/asm/ftrace.h
@@ -3,37 +3,53 @@
#ifdef __ASSEMBLY__
- .macro MCOUNT_SAVE_FRAME
- /* taken from glibc */
- subq $0x38, %rsp
- movq %rax, (%rsp)
- movq %rcx, 8(%rsp)
- movq %rdx, 16(%rsp)
- movq %rsi, 24(%rsp)
- movq %rdi, 32(%rsp)
- movq %r8, 40(%rsp)
- movq %r9, 48(%rsp)
+ /* skip is set if the stack was already partially adjusted */
+ .macro MCOUNT_SAVE_FRAME skip=0
+ /*
+ * We add enough stack to save all regs.
+ */
+ subq $(SS+8-\skip), %rsp
+ movq %rax, RAX(%rsp)
+ movq %rcx, RCX(%rsp)
+ movq %rdx, RDX(%rsp)
+ movq %rsi, RSI(%rsp)
+ movq %rdi, RDI(%rsp)
+ movq %r8, R8(%rsp)
+ movq %r9, R9(%rsp)
+ /* Move RIP to its proper location */
+ movq SS+8(%rsp), %rdx
+ movq %rdx, RIP(%rsp)
.endm
- .macro MCOUNT_RESTORE_FRAME
- movq 48(%rsp), %r9
- movq 40(%rsp), %r8
- movq 32(%rsp), %rdi
- movq 24(%rsp), %rsi
- movq 16(%rsp), %rdx
- movq 8(%rsp), %rcx
- movq (%rsp), %rax
- addq $0x38, %rsp
+ .macro MCOUNT_RESTORE_FRAME skip=0
+ movq R9(%rsp), %r9
+ movq R8(%rsp), %r8
+ movq RDI(%rsp), %rdi
+ movq RSI(%rsp), %rsi
+ movq RDX(%rsp), %rdx
+ movq RCX(%rsp), %rcx
+ movq RAX(%rsp), %rax
+ addq $(SS+8-\skip), %rsp
.endm
#endif
#ifdef CONFIG_FUNCTION_TRACER
-#define MCOUNT_ADDR ((long)(mcount))
+#ifdef CC_USING_FENTRY
+# define MCOUNT_ADDR ((long)(__fentry__))
+#else
+# define MCOUNT_ADDR ((long)(mcount))
+#endif
#define MCOUNT_INSN_SIZE 5 /* sizeof mcount call */
+#ifdef CONFIG_DYNAMIC_FTRACE
+#define ARCH_SUPPORTS_FTRACE_OPS 1
+#endif
+
#ifndef __ASSEMBLY__
extern void mcount(void);
+extern atomic_t modifying_ftrace_code;
+extern void __fentry__(void);
static inline unsigned long ftrace_call_adjust(unsigned long addr)
{
@@ -50,8 +66,34 @@ struct dyn_arch_ftrace {
/* No extra data needed for x86 */
};
+int ftrace_int3_handler(struct pt_regs *regs);
+
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* __ASSEMBLY__ */
#endif /* CONFIG_FUNCTION_TRACER */
+
+#if !defined(__ASSEMBLY__) && !defined(COMPILE_OFFSETS)
+
+#if defined(CONFIG_FTRACE_SYSCALLS) && defined(CONFIG_IA32_EMULATION)
+#include <asm/compat.h>
+
+/*
+ * Because ia32 syscalls do not map to x86_64 syscall numbers,
+ * this screws up the trace output when tracing an ia32 task.
+ * Instead of reporting bogus syscalls, just do not trace them.
+ *
+ * If the user really wants these, then they should use the
+ * raw syscall tracepoints with filtering.
+ */
+#define ARCH_TRACE_IGNORE_COMPAT_SYSCALLS 1
+static inline bool arch_trace_is_compat_syscall(struct pt_regs *regs)
+{
+ if (is_compat_task())
+ return true;
+ return false;
+}
+#endif /* CONFIG_FTRACE_SYSCALLS && CONFIG_IA32_EMULATION */
+#endif /* !__ASSEMBLY__ && !COMPILE_OFFSETS */
+
#endif /* _ASM_X86_FTRACE_H */
diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
index d09bb03653f..b4c1f545343 100644
--- a/arch/x86/include/asm/futex.h
+++ b/arch/x86/include/asm/futex.h
@@ -9,11 +9,13 @@
#include <asm/asm.h>
#include <asm/errno.h>
#include <asm/processor.h>
-#include <asm/system.h>
+#include <asm/smap.h>
#define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
- asm volatile("1:\t" insn "\n" \
- "2:\t.section .fixup,\"ax\"\n" \
+ asm volatile("\t" ASM_STAC "\n" \
+ "1:\t" insn "\n" \
+ "2:\t" ASM_CLAC "\n" \
+ "\t.section .fixup,\"ax\"\n" \
"3:\tmov\t%3, %1\n" \
"\tjmp\t2b\n" \
"\t.previous\n" \
@@ -22,12 +24,14 @@
: "i" (-EFAULT), "0" (oparg), "1" (0))
#define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
- asm volatile("1:\tmovl %2, %0\n" \
+ asm volatile("\t" ASM_STAC "\n" \
+ "1:\tmovl %2, %0\n" \
"\tmovl\t%0, %3\n" \
"\t" insn "\n" \
"2:\t" LOCK_PREFIX "cmpxchgl %3, %2\n" \
"\tjnz\t1b\n" \
- "3:\t.section .fixup,\"ax\"\n" \
+ "3:\t" ASM_CLAC "\n" \
+ "\t.section .fixup,\"ax\"\n" \
"4:\tmov\t%5, %1\n" \
"\tjmp\t3b\n" \
"\t.previous\n" \
@@ -51,12 +55,6 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
return -EFAULT;
-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_BSWAP)
- /* Real i386 machines can only support FUTEX_OP_SET */
- if (op != FUTEX_OP_SET && boot_cpu_data.x86 == 3)
- return -ENOSYS;
-#endif
-
pagefault_disable();
switch (op) {
@@ -112,30 +110,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
u32 oldval, u32 newval)
{
- int ret = 0;
-
-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_BSWAP)
- /* Real i386 machines have no cmpxchg instruction */
- if (boot_cpu_data.x86 == 3)
- return -ENOSYS;
-#endif
-
- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
- return -EFAULT;
-
- asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
- "2:\t.section .fixup, \"ax\"\n"
- "3:\tmov %3, %0\n"
- "\tjmp 2b\n"
- "\t.previous\n"
- _ASM_EXTABLE(1b, 3b)
- : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
- : "i" (-EFAULT), "r" (newval), "1" (oldval)
- : "memory"
- );
-
- *uval = oldval;
- return ret;
+ return user_atomic_cmpxchg_inatomic(uval, uaddr, oldval, newval);
}
#endif
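For reference, a hedged sketch of how the first macro is invoked for a FUTEX_OP_SET-style operation; with the new definitions the ASM_STAC/ASM_CLAC pair brackets the user-space access for SMAP. The wrapper function and its name are illustrative, not part of this patch:

	static int example_futex_set(u32 __user *uaddr, int oparg)
	{
		int oldval = 0, ret;

		pagefault_disable();
		/* Atomically exchange oparg with the user word at uaddr. */
		__futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
		pagefault_enable();

		return ret ? ret : oldval;
	}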
diff --git a/arch/x86/include/asm/gpio.h b/arch/x86/include/asm/gpio.h
index 91d915a6525..b3799d88ffc 100644
--- a/arch/x86/include/asm/gpio.h
+++ b/arch/x86/include/asm/gpio.h
@@ -1,53 +1,4 @@
-/*
- * Generic GPIO API implementation for x86.
- *
- * Derived from the generic GPIO API for powerpc:
- *
- * Copyright (c) 2007-2008 MontaVista Software, Inc.
- *
- * Author: Anton Vorontsov <avorontsov@ru.mvista.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef _ASM_X86_GPIO_H
-#define _ASM_X86_GPIO_H
-
-#include <asm-generic/gpio.h>
-
-#ifdef CONFIG_GPIOLIB
-
-/*
- * Just call gpiolib.
- */
-static inline int gpio_get_value(unsigned int gpio)
-{
- return __gpio_get_value(gpio);
-}
-
-static inline void gpio_set_value(unsigned int gpio, int value)
-{
- __gpio_set_value(gpio, value);
-}
-
-static inline int gpio_cansleep(unsigned int gpio)
-{
- return __gpio_cansleep(gpio);
-}
-
-static inline int gpio_to_irq(unsigned int gpio)
-{
- return __gpio_to_irq(gpio);
-}
-
-static inline int irq_to_gpio(unsigned int irq)
-{
- return -EINVAL;
-}
-
-#endif /* CONFIG_GPIOLIB */
-
-#endif /* _ASM_X86_GPIO_H */
+#ifndef __LINUX_GPIO_H
+#warning Include linux/gpio.h instead of asm/gpio.h
+#include <linux/gpio.h>
+#endif
diff --git a/arch/x86/include/asm/hardirq.h b/arch/x86/include/asm/hardirq.h
index da0b3ca815b..230853da4ec 100644
--- a/arch/x86/include/asm/hardirq.h
+++ b/arch/x86/include/asm/hardirq.h
@@ -7,18 +7,24 @@
typedef struct {
unsigned int __softirq_pending;
unsigned int __nmi_count; /* arch dependent */
- unsigned int irq0_irqs;
#ifdef CONFIG_X86_LOCAL_APIC
unsigned int apic_timer_irqs; /* arch dependent */
unsigned int irq_spurious_count;
unsigned int icr_read_retry_count;
#endif
+#ifdef CONFIG_HAVE_KVM
+ unsigned int kvm_posted_intr_ipis;
+#endif
unsigned int x86_platform_ipis; /* arch dependent */
unsigned int apic_perf_irqs;
unsigned int apic_irq_work_irqs;
#ifdef CONFIG_SMP
unsigned int irq_resched_count;
unsigned int irq_call_count;
+ /*
+ * irq_tlb_count is double-counted in irq_call_count, so it must be
+ * subtracted when irq_call_count is displayed
+ */
unsigned int irq_tlb_count;
#endif
#ifdef CONFIG_X86_THERMAL_VECTOR
@@ -27,6 +33,9 @@ typedef struct {
#ifdef CONFIG_X86_MCE_THRESHOLD
unsigned int irq_threshold_count;
#endif
+#if IS_ENABLED(CONFIG_HYPERV) || defined(CONFIG_XEN)
+ unsigned int irq_hv_callback_count;
+#endif
} ____cacheline_aligned irq_cpustat_t;
DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
@@ -36,14 +45,15 @@ DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
#define __ARCH_IRQ_STAT
-#define inc_irq_stat(member) percpu_inc(irq_stat.member)
+#define inc_irq_stat(member) this_cpu_inc(irq_stat.member)
-#define local_softirq_pending() percpu_read(irq_stat.__softirq_pending)
+#define local_softirq_pending() this_cpu_read(irq_stat.__softirq_pending)
#define __ARCH_SET_SOFTIRQ_PENDING
-#define set_softirq_pending(x) percpu_write(irq_stat.__softirq_pending, (x))
-#define or_softirq_pending(x) percpu_or(irq_stat.__softirq_pending, (x))
+#define set_softirq_pending(x) \
+ this_cpu_write(irq_stat.__softirq_pending, (x))
+#define or_softirq_pending(x) this_cpu_or(irq_stat.__softirq_pending, (x))
extern void ack_bad_irq(unsigned int irq);
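Call sites of these helpers are unchanged by the percpu_* to this_cpu_* conversion. A small illustrative wrapper (the function name is made up, and CONFIG_SMP is assumed so that irq_resched_count exists):

	static inline void example_account_resched_ipi(void)
	{
		/* After this change, expands to this_cpu_inc(irq_stat.irq_resched_count). */
		inc_irq_stat(irq_resched_count);
	}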
diff --git a/arch/x86/include/asm/hash.h b/arch/x86/include/asm/hash.h
new file mode 100644
index 00000000000..e8c58f88b1d
--- /dev/null
+++ b/arch/x86/include/asm/hash.h
@@ -0,0 +1,7 @@
+#ifndef _ASM_X86_HASH_H
+#define _ASM_X86_HASH_H
+
+struct fast_hash_ops;
+extern void setup_arch_fast_hash(struct fast_hash_ops *ops);
+
+#endif /* _ASM_X86_HASH_H */
diff --git a/arch/x86/include/asm/highmem.h b/arch/x86/include/asm/highmem.h
index 3bd04022fd0..302a323b3f6 100644
--- a/arch/x86/include/asm/highmem.h
+++ b/arch/x86/include/asm/highmem.h
@@ -61,7 +61,7 @@ void *kmap(struct page *page);
void kunmap(struct page *page);
void *kmap_atomic_prot(struct page *page, pgprot_t prot);
-void *__kmap_atomic(struct page *page);
+void *kmap_atomic(struct page *page);
void __kunmap_atomic(void *kvaddr);
void *kmap_atomic_pfn(unsigned long pfn);
void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot);
diff --git a/arch/x86/include/asm/hpet.h b/arch/x86/include/asm/hpet.h
index 2c392d663dc..36f7125945e 100644
--- a/arch/x86/include/asm/hpet.h
+++ b/arch/x86/include/asm/hpet.h
@@ -35,8 +35,6 @@
#define HPET_ID_NUMBER_SHIFT 8
#define HPET_ID_VENDOR_SHIFT 16
-#define HPET_ID_VENDOR_8086 0x8086
-
#define HPET_CFG_ENABLE 0x001
#define HPET_CFG_LEGACY 0x002
#define HPET_LEGACY_8254 2
@@ -65,6 +63,7 @@
/* hpet memory map physical address */
extern unsigned long hpet_address;
extern unsigned long force_hpet_address;
+extern int boot_hpet_disable;
extern u8 hpet_blockid;
extern int hpet_force_user;
extern u8 hpet_msi_disable;
@@ -82,9 +81,9 @@ extern void hpet_msi_write(struct hpet_dev *hdev, struct msi_msg *msg);
extern void hpet_msi_read(struct hpet_dev *hdev, struct msi_msg *msg);
#ifdef CONFIG_PCI_MSI
-extern int arch_setup_hpet_msi(unsigned int irq, unsigned int id);
+extern int default_setup_hpet_msi(unsigned int irq, unsigned int id);
#else
-static inline int arch_setup_hpet_msi(unsigned int irq, unsigned int id)
+static inline int default_setup_hpet_msi(unsigned int irq, unsigned int id)
{
return -EINVAL;
}
@@ -113,6 +112,7 @@ extern void hpet_unregister_irq_handler(rtc_irq_handler handler);
static inline int hpet_enable(void) { return 0; }
static inline int is_hpet_enabled(void) { return 0; }
#define hpet_readl(a) 0
+#define default_setup_hpet_msi NULL
#endif
#endif /* _ASM_X86_HPET_H */
diff --git a/arch/x86/include/asm/hugetlb.h b/arch/x86/include/asm/hugetlb.h
index 439a9acc132..68c05398bba 100644
--- a/arch/x86/include/asm/hugetlb.h
+++ b/arch/x86/include/asm/hugetlb.h
@@ -2,6 +2,7 @@
#define _ASM_X86_HUGETLB_H
#include <asm/page.h>
+#include <asm-generic/hugetlb.h>
static inline int is_hugepage_only_range(struct mm_struct *mm,
@@ -51,6 +52,7 @@ static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep)
{
+ ptep_clear_flush(vma, addr, ptep);
}
static inline int huge_pte_none(pte_t pte)
@@ -90,4 +92,8 @@ static inline void arch_release_hugepage(struct page *page)
{
}
+static inline void arch_clear_hugepage_flags(struct page *page)
+{
+}
+
#endif /* _ASM_X86_HUGETLB_H */
diff --git a/arch/x86/include/asm/hw_breakpoint.h b/arch/x86/include/asm/hw_breakpoint.h
index 824ca07860d..ef1c4d2d41e 100644
--- a/arch/x86/include/asm/hw_breakpoint.h
+++ b/arch/x86/include/asm/hw_breakpoint.h
@@ -1,7 +1,8 @@
#ifndef _I386_HW_BREAKPOINT_H
#define _I386_HW_BREAKPOINT_H
-#ifdef __KERNEL__
+#include <uapi/asm/hw_breakpoint.h>
+
#define __ARCH_HW_BREAKPOINT_H
/*
@@ -71,6 +72,4 @@ extern int arch_bp_generic_fields(int x86_len, int x86_type,
extern struct pmu perf_ops_bp;
-#endif /* __KERNEL__ */
#endif /* _I386_HW_BREAKPOINT_H */
-
diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
index eb92a6ed2be..4615906d83d 100644
--- a/arch/x86/include/asm/hw_irq.h
+++ b/arch/x86/include/asm/hw_irq.h
@@ -26,61 +26,78 @@
#include <asm/sections.h>
/* Interrupt handlers registered during init_IRQ */
-extern void apic_timer_interrupt(void);
-extern void x86_platform_ipi(void);
-extern void error_interrupt(void);
-extern void irq_work_interrupt(void);
-
-extern void spurious_interrupt(void);
-extern void thermal_interrupt(void);
-extern void reschedule_interrupt(void);
-
-extern void invalidate_interrupt(void);
-extern void invalidate_interrupt0(void);
-extern void invalidate_interrupt1(void);
-extern void invalidate_interrupt2(void);
-extern void invalidate_interrupt3(void);
-extern void invalidate_interrupt4(void);
-extern void invalidate_interrupt5(void);
-extern void invalidate_interrupt6(void);
-extern void invalidate_interrupt7(void);
-extern void invalidate_interrupt8(void);
-extern void invalidate_interrupt9(void);
-extern void invalidate_interrupt10(void);
-extern void invalidate_interrupt11(void);
-extern void invalidate_interrupt12(void);
-extern void invalidate_interrupt13(void);
-extern void invalidate_interrupt14(void);
-extern void invalidate_interrupt15(void);
-extern void invalidate_interrupt16(void);
-extern void invalidate_interrupt17(void);
-extern void invalidate_interrupt18(void);
-extern void invalidate_interrupt19(void);
-extern void invalidate_interrupt20(void);
-extern void invalidate_interrupt21(void);
-extern void invalidate_interrupt22(void);
-extern void invalidate_interrupt23(void);
-extern void invalidate_interrupt24(void);
-extern void invalidate_interrupt25(void);
-extern void invalidate_interrupt26(void);
-extern void invalidate_interrupt27(void);
-extern void invalidate_interrupt28(void);
-extern void invalidate_interrupt29(void);
-extern void invalidate_interrupt30(void);
-extern void invalidate_interrupt31(void);
-
-extern void irq_move_cleanup_interrupt(void);
-extern void reboot_interrupt(void);
-extern void threshold_interrupt(void);
-
-extern void call_function_interrupt(void);
-extern void call_function_single_interrupt(void);
+extern asmlinkage void apic_timer_interrupt(void);
+extern asmlinkage void x86_platform_ipi(void);
+extern asmlinkage void kvm_posted_intr_ipi(void);
+extern asmlinkage void error_interrupt(void);
+extern asmlinkage void irq_work_interrupt(void);
+
+extern asmlinkage void spurious_interrupt(void);
+extern asmlinkage void thermal_interrupt(void);
+extern asmlinkage void reschedule_interrupt(void);
+
+extern asmlinkage void invalidate_interrupt(void);
+extern asmlinkage void invalidate_interrupt0(void);
+extern asmlinkage void invalidate_interrupt1(void);
+extern asmlinkage void invalidate_interrupt2(void);
+extern asmlinkage void invalidate_interrupt3(void);
+extern asmlinkage void invalidate_interrupt4(void);
+extern asmlinkage void invalidate_interrupt5(void);
+extern asmlinkage void invalidate_interrupt6(void);
+extern asmlinkage void invalidate_interrupt7(void);
+extern asmlinkage void invalidate_interrupt8(void);
+extern asmlinkage void invalidate_interrupt9(void);
+extern asmlinkage void invalidate_interrupt10(void);
+extern asmlinkage void invalidate_interrupt11(void);
+extern asmlinkage void invalidate_interrupt12(void);
+extern asmlinkage void invalidate_interrupt13(void);
+extern asmlinkage void invalidate_interrupt14(void);
+extern asmlinkage void invalidate_interrupt15(void);
+extern asmlinkage void invalidate_interrupt16(void);
+extern asmlinkage void invalidate_interrupt17(void);
+extern asmlinkage void invalidate_interrupt18(void);
+extern asmlinkage void invalidate_interrupt19(void);
+extern asmlinkage void invalidate_interrupt20(void);
+extern asmlinkage void invalidate_interrupt21(void);
+extern asmlinkage void invalidate_interrupt22(void);
+extern asmlinkage void invalidate_interrupt23(void);
+extern asmlinkage void invalidate_interrupt24(void);
+extern asmlinkage void invalidate_interrupt25(void);
+extern asmlinkage void invalidate_interrupt26(void);
+extern asmlinkage void invalidate_interrupt27(void);
+extern asmlinkage void invalidate_interrupt28(void);
+extern asmlinkage void invalidate_interrupt29(void);
+extern asmlinkage void invalidate_interrupt30(void);
+extern asmlinkage void invalidate_interrupt31(void);
+
+extern asmlinkage void irq_move_cleanup_interrupt(void);
+extern asmlinkage void reboot_interrupt(void);
+extern asmlinkage void threshold_interrupt(void);
+
+extern asmlinkage void call_function_interrupt(void);
+extern asmlinkage void call_function_single_interrupt(void);
+
+#ifdef CONFIG_TRACING
+/* Interrupt handlers registered during init_IRQ */
+extern void trace_apic_timer_interrupt(void);
+extern void trace_x86_platform_ipi(void);
+extern void trace_error_interrupt(void);
+extern void trace_irq_work_interrupt(void);
+extern void trace_spurious_interrupt(void);
+extern void trace_thermal_interrupt(void);
+extern void trace_reschedule_interrupt(void);
+extern void trace_threshold_interrupt(void);
+extern void trace_call_function_interrupt(void);
+extern void trace_call_function_single_interrupt(void);
+#define trace_irq_move_cleanup_interrupt irq_move_cleanup_interrupt
+#define trace_reboot_interrupt reboot_interrupt
+#define trace_kvm_posted_intr_ipi kvm_posted_intr_ipi
+#endif /* CONFIG_TRACING */
/* IOAPIC */
#define IO_APIC_IRQ(x) (((x) >= NR_IRQS_LEGACY) || ((1<<(x)) & io_apic_irqs))
extern unsigned long io_apic_irqs;
-extern void init_VISWS_APIC_irqs(void);
extern void setup_IO_APIC(void);
extern void disable_IO_APIC(void);
@@ -101,6 +118,7 @@ static inline void set_io_apic_irq_attr(struct io_apic_irq_attr *irq_attr,
irq_attr->polarity = polarity;
}
+/* Intel specific interrupt remapping information */
struct irq_2_iommu {
struct intel_iommu *iommu;
u16 irte_index;
@@ -108,6 +126,12 @@ struct irq_2_iommu {
u8 irte_mask;
};
+/* AMD specific interrupt remapping information */
+struct irq_2_irte {
+ u16 devid; /* Device ID for IRTE table */
+ u16 index; /* Index into IRTE table */
+};
+
/*
* This is performance-critical, we want to do it O(1)
*
@@ -120,7 +144,11 @@ struct irq_cfg {
u8 vector;
u8 move_in_progress : 1;
#ifdef CONFIG_IRQ_REMAP
- struct irq_2_iommu irq_2_iommu;
+ u8 remapped : 1;
+ union {
+ struct irq_2_iommu irq_2_iommu;
+ struct irq_2_irte irq_2_irte;
+ };
#endif
};
@@ -143,25 +171,27 @@ extern atomic_t irq_mis_count;
extern void eisa_set_level_irq(unsigned int irq);
/* SMP */
-extern void smp_apic_timer_interrupt(struct pt_regs *);
-extern void smp_spurious_interrupt(struct pt_regs *);
-extern void smp_x86_platform_ipi(struct pt_regs *);
-extern void smp_error_interrupt(struct pt_regs *);
+extern __visible void smp_apic_timer_interrupt(struct pt_regs *);
+extern __visible void smp_spurious_interrupt(struct pt_regs *);
+extern __visible void smp_x86_platform_ipi(struct pt_regs *);
+extern __visible void smp_error_interrupt(struct pt_regs *);
#ifdef CONFIG_X86_IO_APIC
extern asmlinkage void smp_irq_move_cleanup_interrupt(void);
#endif
#ifdef CONFIG_SMP
-extern void smp_reschedule_interrupt(struct pt_regs *);
-extern void smp_call_function_interrupt(struct pt_regs *);
-extern void smp_call_function_single_interrupt(struct pt_regs *);
-#ifdef CONFIG_X86_32
-extern void smp_invalidate_interrupt(struct pt_regs *);
-#else
-extern asmlinkage void smp_invalidate_interrupt(struct pt_regs *);
-#endif
+extern __visible void smp_reschedule_interrupt(struct pt_regs *);
+extern __visible void smp_call_function_interrupt(struct pt_regs *);
+extern __visible void smp_call_function_single_interrupt(struct pt_regs *);
+extern __visible void smp_invalidate_interrupt(struct pt_regs *);
#endif
extern void (*__initconst interrupt[NR_VECTORS-FIRST_EXTERNAL_VECTOR])(void);
+#ifdef CONFIG_TRACING
+#define trace_interrupt interrupt
+#endif
+
+#define VECTOR_UNDEFINED (-1)
+#define VECTOR_RETRIGGERED (-2)
typedef int vector_irq_t[NR_VECTORS];
DECLARE_PER_CPU(vector_irq_t, vector_irq);
diff --git a/arch/x86/include/asm/hyperv.h b/arch/x86/include/asm/hyperv.h
deleted file mode 100644
index b80420bcd09..00000000000
--- a/arch/x86/include/asm/hyperv.h
+++ /dev/null
@@ -1,194 +0,0 @@
-#ifndef _ASM_X86_HYPERV_H
-#define _ASM_X86_HYPERV_H
-
-#include <linux/types.h>
-
-/*
- * The below CPUID leaves are present if VersionAndFeatures.HypervisorPresent
- * is set by CPUID(HvCpuIdFunctionVersionAndFeatures).
- */
-#define HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS 0x40000000
-#define HYPERV_CPUID_INTERFACE 0x40000001
-#define HYPERV_CPUID_VERSION 0x40000002
-#define HYPERV_CPUID_FEATURES 0x40000003
-#define HYPERV_CPUID_ENLIGHTMENT_INFO 0x40000004
-#define HYPERV_CPUID_IMPLEMENT_LIMITS 0x40000005
-
-#define HYPERV_HYPERVISOR_PRESENT_BIT 0x80000000
-#define HYPERV_CPUID_MIN 0x40000005
-#define HYPERV_CPUID_MAX 0x4000ffff
-
-/*
- * Feature identification. EAX indicates which features are available
- * to the partition based upon the current partition privileges.
- */
-
-/* VP Runtime (HV_X64_MSR_VP_RUNTIME) available */
-#define HV_X64_MSR_VP_RUNTIME_AVAILABLE (1 << 0)
-/* Partition Reference Counter (HV_X64_MSR_TIME_REF_COUNT) available*/
-#define HV_X64_MSR_TIME_REF_COUNT_AVAILABLE (1 << 1)
-/*
- * Basic SynIC MSRs (HV_X64_MSR_SCONTROL through HV_X64_MSR_EOM
- * and HV_X64_MSR_SINT0 through HV_X64_MSR_SINT15) available
- */
-#define HV_X64_MSR_SYNIC_AVAILABLE (1 << 2)
-/*
- * Synthetic Timer MSRs (HV_X64_MSR_STIMER0_CONFIG through
- * HV_X64_MSR_STIMER3_COUNT) available
- */
-#define HV_X64_MSR_SYNTIMER_AVAILABLE (1 << 3)
-/*
- * APIC access MSRs (HV_X64_MSR_EOI, HV_X64_MSR_ICR and HV_X64_MSR_TPR)
- * are available
- */
-#define HV_X64_MSR_APIC_ACCESS_AVAILABLE (1 << 4)
-/* Hypercall MSRs (HV_X64_MSR_GUEST_OS_ID and HV_X64_MSR_HYPERCALL) available*/
-#define HV_X64_MSR_HYPERCALL_AVAILABLE (1 << 5)
-/* Access virtual processor index MSR (HV_X64_MSR_VP_INDEX) available*/
-#define HV_X64_MSR_VP_INDEX_AVAILABLE (1 << 6)
-/* Virtual system reset MSR (HV_X64_MSR_RESET) is available*/
-#define HV_X64_MSR_RESET_AVAILABLE (1 << 7)
- /*
- * Access statistics pages MSRs (HV_X64_MSR_STATS_PARTITION_RETAIL_PAGE,
- * HV_X64_MSR_STATS_PARTITION_INTERNAL_PAGE, HV_X64_MSR_STATS_VP_RETAIL_PAGE,
- * HV_X64_MSR_STATS_VP_INTERNAL_PAGE) available
- */
-#define HV_X64_MSR_STAT_PAGES_AVAILABLE (1 << 8)
-
-/*
- * Feature identification: EBX indicates which flags were specified at
- * partition creation. The format is the same as the partition creation
- * flag structure defined in section Partition Creation Flags.
- */
-#define HV_X64_CREATE_PARTITIONS (1 << 0)
-#define HV_X64_ACCESS_PARTITION_ID (1 << 1)
-#define HV_X64_ACCESS_MEMORY_POOL (1 << 2)
-#define HV_X64_ADJUST_MESSAGE_BUFFERS (1 << 3)
-#define HV_X64_POST_MESSAGES (1 << 4)
-#define HV_X64_SIGNAL_EVENTS (1 << 5)
-#define HV_X64_CREATE_PORT (1 << 6)
-#define HV_X64_CONNECT_PORT (1 << 7)
-#define HV_X64_ACCESS_STATS (1 << 8)
-#define HV_X64_DEBUGGING (1 << 11)
-#define HV_X64_CPU_POWER_MANAGEMENT (1 << 12)
-#define HV_X64_CONFIGURE_PROFILER (1 << 13)
-
-/*
- * Feature identification. EDX indicates which miscellaneous features
- * are available to the partition.
- */
-/* The MWAIT instruction is available (per section MONITOR / MWAIT) */
-#define HV_X64_MWAIT_AVAILABLE (1 << 0)
-/* Guest debugging support is available */
-#define HV_X64_GUEST_DEBUGGING_AVAILABLE (1 << 1)
-/* Performance Monitor support is available*/
-#define HV_X64_PERF_MONITOR_AVAILABLE (1 << 2)
-/* Support for physical CPU dynamic partitioning events is available*/
-#define HV_X64_CPU_DYNAMIC_PARTITIONING_AVAILABLE (1 << 3)
-/*
- * Support for passing hypercall input parameter block via XMM
- * registers is available
- */
-#define HV_X64_HYPERCALL_PARAMS_XMM_AVAILABLE (1 << 4)
-/* Support for a virtual guest idle state is available */
-#define HV_X64_GUEST_IDLE_STATE_AVAILABLE (1 << 5)
-
-/*
- * Implementation recommendations. Indicates which behaviors the hypervisor
- * recommends the OS implement for optimal performance.
- */
- /*
- * Recommend using hypercall for address space switches rather
- * than MOV to CR3 instruction
- */
-#define HV_X64_MWAIT_RECOMMENDED (1 << 0)
-/* Recommend using hypercall for local TLB flushes rather
- * than INVLPG or MOV to CR3 instructions */
-#define HV_X64_LOCAL_TLB_FLUSH_RECOMMENDED (1 << 1)
-/*
- * Recommend using hypercall for remote TLB flushes rather
- * than inter-processor interrupts
- */
-#define HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED (1 << 2)
-/*
- * Recommend using MSRs for accessing APIC registers
- * EOI, ICR and TPR rather than their memory-mapped counterparts
- */
-#define HV_X64_APIC_ACCESS_RECOMMENDED (1 << 3)
-/* Recommend using the hypervisor-provided MSR to initiate a system RESET */
-#define HV_X64_SYSTEM_RESET_RECOMMENDED (1 << 4)
-/*
- * Recommend using relaxed timing for this partition. If used,
- * the VM should disable any watchdog timeouts that rely on the
- * timely delivery of external interrupts
- */
-#define HV_X64_RELAXED_TIMING_RECOMMENDED (1 << 5)
-
-/* MSR used to identify the guest OS. */
-#define HV_X64_MSR_GUEST_OS_ID 0x40000000
-
-/* MSR used to setup pages used to communicate with the hypervisor. */
-#define HV_X64_MSR_HYPERCALL 0x40000001
-
-/* MSR used to provide vcpu index */
-#define HV_X64_MSR_VP_INDEX 0x40000002
-
-/* MSR used to read the per-partition time reference counter */
-#define HV_X64_MSR_TIME_REF_COUNT 0x40000020
-
-/* Define the virtual APIC registers */
-#define HV_X64_MSR_EOI 0x40000070
-#define HV_X64_MSR_ICR 0x40000071
-#define HV_X64_MSR_TPR 0x40000072
-#define HV_X64_MSR_APIC_ASSIST_PAGE 0x40000073
-
-/* Define synthetic interrupt controller model specific registers. */
-#define HV_X64_MSR_SCONTROL 0x40000080
-#define HV_X64_MSR_SVERSION 0x40000081
-#define HV_X64_MSR_SIEFP 0x40000082
-#define HV_X64_MSR_SIMP 0x40000083
-#define HV_X64_MSR_EOM 0x40000084
-#define HV_X64_MSR_SINT0 0x40000090
-#define HV_X64_MSR_SINT1 0x40000091
-#define HV_X64_MSR_SINT2 0x40000092
-#define HV_X64_MSR_SINT3 0x40000093
-#define HV_X64_MSR_SINT4 0x40000094
-#define HV_X64_MSR_SINT5 0x40000095
-#define HV_X64_MSR_SINT6 0x40000096
-#define HV_X64_MSR_SINT7 0x40000097
-#define HV_X64_MSR_SINT8 0x40000098
-#define HV_X64_MSR_SINT9 0x40000099
-#define HV_X64_MSR_SINT10 0x4000009A
-#define HV_X64_MSR_SINT11 0x4000009B
-#define HV_X64_MSR_SINT12 0x4000009C
-#define HV_X64_MSR_SINT13 0x4000009D
-#define HV_X64_MSR_SINT14 0x4000009E
-#define HV_X64_MSR_SINT15 0x4000009F
-
-
-#define HV_X64_MSR_HYPERCALL_ENABLE 0x00000001
-#define HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT 12
-#define HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_MASK \
- (~((1ull << HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT) - 1))
-
-/* Declare the various hypercall operations. */
-#define HV_X64_HV_NOTIFY_LONG_SPIN_WAIT 0x0008
-
-#define HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE 0x00000001
-#define HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT 12
-#define HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_MASK \
- (~((1ull << HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT) - 1))
-
-#define HV_PROCESSOR_POWER_STATE_C0 0
-#define HV_PROCESSOR_POWER_STATE_C1 1
-#define HV_PROCESSOR_POWER_STATE_C2 2
-#define HV_PROCESSOR_POWER_STATE_C3 3
-
-/* hypercall status code */
-#define HV_STATUS_SUCCESS 0
-#define HV_STATUS_INVALID_HYPERCALL_CODE 2
-#define HV_STATUS_INVALID_HYPERCALL_INPUT 3
-#define HV_STATUS_INVALID_ALIGNMENT 4
-#define HV_STATUS_INSUFFICIENT_BUFFERS 19
-
-#endif
diff --git a/arch/x86/include/asm/hypervisor.h b/arch/x86/include/asm/hypervisor.h
index 7a15153c675..e42f758a0fb 100644
--- a/arch/x86/include/asm/hypervisor.h
+++ b/arch/x86/include/asm/hypervisor.h
@@ -20,12 +20,11 @@
#ifndef _ASM_X86_HYPERVISOR_H
#define _ASM_X86_HYPERVISOR_H
+#ifdef CONFIG_HYPERVISOR_GUEST
+
#include <asm/kvm_para.h>
#include <asm/xen/hypervisor.h>
-extern void init_hypervisor(struct cpuinfo_x86 *c);
-extern void init_hypervisor_platform(void);
-
/*
* x86 hypervisor information
*/
@@ -34,13 +33,16 @@ struct hypervisor_x86 {
const char *name;
/* Detection routine */
- bool (*detect)(void);
+ uint32_t (*detect)(void);
/* Adjust CPU feature bits (run once per CPU) */
void (*set_cpu_features)(struct cpuinfo_x86 *);
/* Platform setup (run once per boot) */
void (*init_platform)(void);
+
+ /* X2APIC detection (run once per boot) */
+ bool (*x2apic_available)(void);
};
extern const struct hypervisor_x86 *x86_hyper;
@@ -49,14 +51,14 @@ extern const struct hypervisor_x86 *x86_hyper;
extern const struct hypervisor_x86 x86_hyper_vmware;
extern const struct hypervisor_x86 x86_hyper_ms_hyperv;
extern const struct hypervisor_x86 x86_hyper_xen_hvm;
+extern const struct hypervisor_x86 x86_hyper_kvm;
-static inline bool hypervisor_x2apic_available(void)
-{
- if (kvm_para_available())
- return true;
- if (xen_x2apic_para_available())
- return true;
- return false;
-}
-
-#endif
+extern void init_hypervisor(struct cpuinfo_x86 *c);
+extern void init_hypervisor_platform(void);
+extern bool hypervisor_x2apic_available(void);
+#else
+static inline void init_hypervisor(struct cpuinfo_x86 *c) { }
+static inline void init_hypervisor_platform(void) { }
+static inline bool hypervisor_x2apic_available(void) { return false; }
+#endif /* CONFIG_HYPERVISOR_GUEST */
+#endif /* _ASM_X86_HYPERVISOR_H */
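With detect() now returning a uint32_t priority instead of a bool, a hypervisor descriptor looks roughly like the sketch below. The hypervisor, its callbacks and their bodies are hypothetical, shown only to illustrate the new callback signatures:

	static uint32_t example_hv_detect(void)
	{
		/* Return 0 if not running on this hypervisor, otherwise a
		 * non-zero priority (e.g. derived from a CPUID signature). */
		return 0;
	}

	static bool example_hv_x2apic_available(void)
	{
		return false;
	}

	static const struct hypervisor_x86 x86_hyper_example = {
		.name			= "Example Hypervisor",
		.detect			= example_hv_detect,
		.x2apic_available	= example_hv_x2apic_available,
	};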
diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
index a850b4d8d14..ed8089d6909 100644
--- a/arch/x86/include/asm/i387.h
+++ b/arch/x86/include/asm/i387.h
@@ -13,451 +13,41 @@
#ifndef __ASSEMBLY__
#include <linux/sched.h>
-#include <linux/kernel_stat.h>
-#include <linux/regset.h>
#include <linux/hardirq.h>
-#include <linux/slab.h>
-#include <asm/asm.h>
-#include <asm/cpufeature.h>
-#include <asm/processor.h>
-#include <asm/sigcontext.h>
-#include <asm/user.h>
-#include <asm/uaccess.h>
-#include <asm/xsave.h>
-extern unsigned int sig_xstate_size;
-extern void fpu_init(void);
-extern void mxcsr_feature_mask_init(void);
+struct pt_regs;
+struct user_i387_struct;
+
extern int init_fpu(struct task_struct *child);
-extern void __math_state_restore(struct task_struct *);
-extern void math_state_restore(void);
+extern void fpu_finit(struct fpu *fpu);
extern int dump_fpu(struct pt_regs *, struct user_i387_struct *);
+extern void math_state_restore(void);
-extern user_regset_active_fn fpregs_active, xfpregs_active;
-extern user_regset_get_fn fpregs_get, xfpregs_get, fpregs_soft_get,
- xstateregs_get;
-extern user_regset_set_fn fpregs_set, xfpregs_set, fpregs_soft_set,
- xstateregs_set;
-
-/*
- * xstateregs_active == fpregs_active. Please refer to the comment
- * at the definition of fpregs_active.
- */
-#define xstateregs_active fpregs_active
-
-extern struct _fpx_sw_bytes fx_sw_reserved;
-#ifdef CONFIG_IA32_EMULATION
-extern unsigned int sig_xstate_ia32_size;
-extern struct _fpx_sw_bytes fx_sw_reserved_ia32;
-struct _fpstate_ia32;
-struct _xstate_ia32;
-extern int save_i387_xstate_ia32(void __user *buf);
-extern int restore_i387_xstate_ia32(void __user *buf);
-#endif
-
-#ifdef CONFIG_MATH_EMULATION
-extern void finit_soft_fpu(struct i387_soft_struct *soft);
-#else
-static inline void finit_soft_fpu(struct i387_soft_struct *soft) {}
-#endif
-
-#define X87_FSW_ES (1 << 7) /* Exception Summary */
-
-static __always_inline __pure bool use_xsaveopt(void)
-{
- return static_cpu_has(X86_FEATURE_XSAVEOPT);
-}
-
-static __always_inline __pure bool use_xsave(void)
-{
- return static_cpu_has(X86_FEATURE_XSAVE);
-}
-
-static __always_inline __pure bool use_fxsr(void)
-{
- return static_cpu_has(X86_FEATURE_FXSR);
-}
-
-extern void __sanitize_i387_state(struct task_struct *);
-
-static inline void sanitize_i387_state(struct task_struct *tsk)
-{
- if (!use_xsaveopt())
- return;
- __sanitize_i387_state(tsk);
-}
-
-#ifdef CONFIG_X86_64
-static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
-{
- int err;
-
- /* See comment in fxsave() below. */
-#ifdef CONFIG_AS_FXSAVEQ
- asm volatile("1: fxrstorq %[fx]\n\t"
- "2:\n"
- ".section .fixup,\"ax\"\n"
- "3: movl $-1,%[err]\n"
- " jmp 2b\n"
- ".previous\n"
- _ASM_EXTABLE(1b, 3b)
- : [err] "=r" (err)
- : [fx] "m" (*fx), "0" (0));
-#else
- asm volatile("1: rex64/fxrstor (%[fx])\n\t"
- "2:\n"
- ".section .fixup,\"ax\"\n"
- "3: movl $-1,%[err]\n"
- " jmp 2b\n"
- ".previous\n"
- _ASM_EXTABLE(1b, 3b)
- : [err] "=r" (err)
- : [fx] "R" (fx), "m" (*fx), "0" (0));
-#endif
- return err;
-}
-
-static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
-{
- int err;
-
- /*
- * Clear the bytes not touched by the fxsave and reserved
- * for the SW usage.
- */
- err = __clear_user(&fx->sw_reserved,
- sizeof(struct _fpx_sw_bytes));
- if (unlikely(err))
- return -EFAULT;
-
- /* See comment in fxsave() below. */
-#ifdef CONFIG_AS_FXSAVEQ
- asm volatile("1: fxsaveq %[fx]\n\t"
- "2:\n"
- ".section .fixup,\"ax\"\n"
- "3: movl $-1,%[err]\n"
- " jmp 2b\n"
- ".previous\n"
- _ASM_EXTABLE(1b, 3b)
- : [err] "=r" (err), [fx] "=m" (*fx)
- : "0" (0));
-#else
- asm volatile("1: rex64/fxsave (%[fx])\n\t"
- "2:\n"
- ".section .fixup,\"ax\"\n"
- "3: movl $-1,%[err]\n"
- " jmp 2b\n"
- ".previous\n"
- _ASM_EXTABLE(1b, 3b)
- : [err] "=r" (err), "=m" (*fx)
- : [fx] "R" (fx), "0" (0));
-#endif
- if (unlikely(err) &&
- __clear_user(fx, sizeof(struct i387_fxsave_struct)))
- err = -EFAULT;
- /* No need to clear here because the caller clears USED_MATH */
- return err;
-}
-
-static inline void fpu_fxsave(struct fpu *fpu)
-{
- /* Using "rex64; fxsave %0" is broken because, if the memory operand
- uses any extended registers for addressing, a second REX prefix
- will be generated (to the assembler, rex64 followed by semicolon
- is a separate instruction), and hence the 64-bitness is lost. */
-
-#ifdef CONFIG_AS_FXSAVEQ
- /* Using "fxsaveq %0" would be the ideal choice, but is only supported
- starting with gas 2.16. */
- __asm__ __volatile__("fxsaveq %0"
- : "=m" (fpu->state->fxsave));
-#else
- /* Using, as a workaround, the properly prefixed form below isn't
- accepted by any binutils version so far released, complaining that
- the same type of prefix is used twice if an extended register is
- needed for addressing (fix submitted to mainline 2005-11-21).
- asm volatile("rex64/fxsave %0"
- : "=m" (fpu->state->fxsave));
- This, however, we can work around by forcing the compiler to select
- an addressing mode that doesn't require extended registers. */
- asm volatile("rex64/fxsave (%[fx])"
- : "=m" (fpu->state->fxsave)
- : [fx] "R" (&fpu->state->fxsave));
-#endif
-}
-
-#else /* CONFIG_X86_32 */
-
-/* perform fxrstor iff the processor has extended states, otherwise frstor */
-static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
-{
- /*
- * The "nop" is needed to make the instructions the same
- * length.
- */
- alternative_input(
- "nop ; frstor %1",
- "fxrstor %1",
- X86_FEATURE_FXSR,
- "m" (*fx));
-
- return 0;
-}
-
-static inline void fpu_fxsave(struct fpu *fpu)
-{
- asm volatile("fxsave %[fx]"
- : [fx] "=m" (fpu->state->fxsave));
-}
-
-#endif /* CONFIG_X86_64 */
-
-/*
- * These must be called with preempt disabled. Returns
- * 'true' if the FPU state is still intact.
- */
-static inline int fpu_save_init(struct fpu *fpu)
-{
- if (use_xsave()) {
- fpu_xsave(fpu);
-
- /*
- * xsave header may indicate the init state of the FP.
- */
- if (!(fpu->state->xsave.xsave_hdr.xstate_bv & XSTATE_FP))
- return 1;
- } else if (use_fxsr()) {
- fpu_fxsave(fpu);
- } else {
- asm volatile("fnsave %[fx]; fwait"
- : [fx] "=m" (fpu->state->fsave));
- return 0;
- }
-
- /*
- * If exceptions are pending, we need to clear them so
- * that we don't randomly get exceptions later.
- *
- * FIXME! Is this perhaps only true for the old-style
- * irq13 case? Maybe we could leave the x87 state
- * intact otherwise?
- */
- if (unlikely(fpu->state->fxsave.swd & X87_FSW_ES)) {
- asm volatile("fnclex");
- return 0;
- }
- return 1;
-}
-
-static inline int __save_init_fpu(struct task_struct *tsk)
-{
- return fpu_save_init(&tsk->thread.fpu);
-}
-
-static inline int fpu_fxrstor_checking(struct fpu *fpu)
-{
- return fxrstor_checking(&fpu->state->fxsave);
-}
-
-static inline int fpu_restore_checking(struct fpu *fpu)
-{
- if (use_xsave())
- return fpu_xrstor_checking(fpu);
- else
- return fpu_fxrstor_checking(fpu);
-}
-
-static inline int restore_fpu_checking(struct task_struct *tsk)
-{
- return fpu_restore_checking(&tsk->thread.fpu);
-}
-
-/*
- * Software FPU state helpers. Careful: these need to
- * be preemption protection *and* they need to be
- * properly paired with the CR0.TS changes!
- */
-static inline int __thread_has_fpu(struct task_struct *tsk)
-{
- return tsk->thread.has_fpu;
-}
-
-/* Must be paired with an 'stts' after! */
-static inline void __thread_clear_has_fpu(struct task_struct *tsk)
-{
- tsk->thread.has_fpu = 0;
-}
-
-/* Must be paired with a 'clts' before! */
-static inline void __thread_set_has_fpu(struct task_struct *tsk)
-{
- tsk->thread.has_fpu = 1;
-}
-
-/*
- * Encapsulate the CR0.TS handling together with the
- * software flag.
- *
- * These generally need preemption protection to work,
- * do try to avoid using these on their own.
- */
-static inline void __thread_fpu_end(struct task_struct *tsk)
-{
- __thread_clear_has_fpu(tsk);
- stts();
-}
-
-static inline void __thread_fpu_begin(struct task_struct *tsk)
-{
- clts();
- __thread_set_has_fpu(tsk);
-}
-
-/*
- * FPU state switching for scheduling.
- *
- * This is a two-stage process:
- *
- * - switch_fpu_prepare() saves the old state and
- * sets the new state of the CR0.TS bit. This is
- * done within the context of the old process.
- *
- * - switch_fpu_finish() restores the new state as
- * necessary.
- */
-typedef struct { int preload; } fpu_switch_t;
-
-/*
- * FIXME! We could do a totally lazy restore, but we need to
- * add a per-cpu "this was the task that last touched the FPU
- * on this CPU" variable, and the task needs to have a "I last
- * touched the FPU on this CPU" and check them.
- *
- * We don't do that yet, so "fpu_lazy_restore()" always returns
- * false, but some day..
- */
-#define fpu_lazy_restore(tsk) (0)
-#define fpu_lazy_state_intact(tsk) do { } while (0)
-
-static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct task_struct *new)
-{
- fpu_switch_t fpu;
-
- fpu.preload = tsk_used_math(new) && new->fpu_counter > 5;
- if (__thread_has_fpu(old)) {
- if (__save_init_fpu(old))
- fpu_lazy_state_intact(old);
- __thread_clear_has_fpu(old);
- old->fpu_counter++;
-
- /* Don't change CR0.TS if we just switch! */
- if (fpu.preload) {
- __thread_set_has_fpu(new);
- prefetch(new->thread.fpu.state);
- } else
- stts();
- } else {
- old->fpu_counter = 0;
- if (fpu.preload) {
- if (fpu_lazy_restore(new))
- fpu.preload = 0;
- else
- prefetch(new->thread.fpu.state);
- __thread_fpu_begin(new);
- }
- }
- return fpu;
-}
-
-/*
- * By the time this gets called, we've already cleared CR0.TS and
- * given the process the FPU if we are going to preload the FPU
- * state - all we need to do is to conditionally restore the register
- * state itself.
- */
-static inline void switch_fpu_finish(struct task_struct *new, fpu_switch_t fpu)
-{
- if (fpu.preload)
- __math_state_restore(new);
-}
-
-/*
- * Signal frame handlers...
- */
-extern int save_i387_xstate(void __user *buf);
-extern int restore_i387_xstate(void __user *buf);
-
-static inline void __clear_fpu(struct task_struct *tsk)
-{
- if (__thread_has_fpu(tsk)) {
- /* Ignore delayed exceptions from user space */
- asm volatile("1: fwait\n"
- "2:\n"
- _ASM_EXTABLE(1b, 2b));
- __thread_fpu_end(tsk);
- }
-}
-
-/*
- * Were we in an interrupt that interrupted kernel mode?
- *
- * We can do a kernel_fpu_begin/end() pair *ONLY* if that
- * pair does nothing at all: the thread must not have fpu (so
- * that we don't try to save the FPU state), and TS must
- * be set (so that the clts/stts pair does nothing that is
- * visible in the interrupted kernel thread).
- */
-static inline bool interrupted_kernel_fpu_idle(void)
-{
- return !__thread_has_fpu(current) &&
- (read_cr0() & X86_CR0_TS);
-}
-
-/*
- * Were we in user mode (or vm86 mode) when we were
- * interrupted?
- *
- * Doing kernel_fpu_begin/end() is ok if we are running
- * in an interrupt context from user mode - we'll just
- * save the FPU state as required.
- */
-static inline bool interrupted_user_mode(void)
-{
- struct pt_regs *regs = get_irq_regs();
- return regs && user_mode_vm(regs);
-}
+extern bool irq_fpu_usable(void);
/*
- * Can we use the FPU in kernel mode with the
- * whole "kernel_fpu_begin/end()" sequence?
+ * Careful: __kernel_fpu_begin/end() must be called with preempt disabled;
+ * they don't touch the preempt state on their own.
+ * If you enable preemption after __kernel_fpu_begin(), the preempt notifier
+ * should call __kernel_fpu_end() to prevent the kernel/user FPU
+ * state from getting corrupted. KVM, for example, uses this model.
*
- * It's always ok in process context (ie "not interrupt")
- * but it is sometimes ok even from an irq.
+ * All other cases use kernel_fpu_begin/end() which disable preemption
+ * during kernel FPU usage.
*/
-static inline bool irq_fpu_usable(void)
-{
- return !in_interrupt() ||
- interrupted_user_mode() ||
- interrupted_kernel_fpu_idle();
-}
+extern void __kernel_fpu_begin(void);
+extern void __kernel_fpu_end(void);
static inline void kernel_fpu_begin(void)
{
- struct task_struct *me = current;
-
WARN_ON_ONCE(!irq_fpu_usable());
preempt_disable();
- if (__thread_has_fpu(me)) {
- __save_init_fpu(me);
- __thread_clear_has_fpu(me);
- /* We do 'stts()' in kernel_fpu_end() */
- } else
- clts();
+ __kernel_fpu_begin();
}
static inline void kernel_fpu_end(void)
{
- stts();
+ __kernel_fpu_end();
preempt_enable();
}
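A minimal sketch of the two usage models described in the comment above; the SIMD payload is elided:

	/* Common case: the wrappers manage preemption themselves. */
	kernel_fpu_begin();
	/* ... SSE/AVX-using kernel code ... */
	kernel_fpu_end();

	/* Split (KVM-style) use: the caller owns the preempt state. */
	preempt_disable();
	__kernel_fpu_begin();
	/* ... FPU-using code; if preemption is re-enabled here, a preempt
	 * notifier must take care of calling __kernel_fpu_end() ... */
	__kernel_fpu_end();
	preempt_enable();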
@@ -501,126 +91,13 @@ static inline void irq_ts_restore(int TS_state)
* we can just assume we have FPU access - typically
* to save the FP state - we'll just take a #NM
* fault and get the FPU access back.
- *
- * The actual user_fpu_begin/end() functions
- * need to be preemption-safe, though.
- *
- * NOTE! user_fpu_end() must be used only after you
- * have saved the FP state, and user_fpu_begin() must
- * be used only immediately before restoring it.
- * These functions do not do any save/restore on
- * their own.
*/
static inline int user_has_fpu(void)
{
- return __thread_has_fpu(current);
-}
-
-static inline void user_fpu_end(void)
-{
- preempt_disable();
- __thread_fpu_end(current);
- preempt_enable();
+ return current->thread.fpu.has_fpu;
}
-static inline void user_fpu_begin(void)
-{
- preempt_disable();
- if (!user_has_fpu())
- __thread_fpu_begin(current);
- preempt_enable();
-}
-
-/*
- * These disable preemption on their own and are safe
- */
-static inline void save_init_fpu(struct task_struct *tsk)
-{
- WARN_ON_ONCE(!__thread_has_fpu(tsk));
- preempt_disable();
- __save_init_fpu(tsk);
- __thread_fpu_end(tsk);
- preempt_enable();
-}
-
-static inline void unlazy_fpu(struct task_struct *tsk)
-{
- preempt_disable();
- if (__thread_has_fpu(tsk)) {
- __save_init_fpu(tsk);
- __thread_fpu_end(tsk);
- } else
- tsk->fpu_counter = 0;
- preempt_enable();
-}
-
-static inline void clear_fpu(struct task_struct *tsk)
-{
- preempt_disable();
- __clear_fpu(tsk);
- preempt_enable();
-}
-
-/*
- * i387 state interaction
- */
-static inline unsigned short get_fpu_cwd(struct task_struct *tsk)
-{
- if (cpu_has_fxsr) {
- return tsk->thread.fpu.state->fxsave.cwd;
- } else {
- return (unsigned short)tsk->thread.fpu.state->fsave.cwd;
- }
-}
-
-static inline unsigned short get_fpu_swd(struct task_struct *tsk)
-{
- if (cpu_has_fxsr) {
- return tsk->thread.fpu.state->fxsave.swd;
- } else {
- return (unsigned short)tsk->thread.fpu.state->fsave.swd;
- }
-}
-
-static inline unsigned short get_fpu_mxcsr(struct task_struct *tsk)
-{
- if (cpu_has_xmm) {
- return tsk->thread.fpu.state->fxsave.mxcsr;
- } else {
- return MXCSR_DEFAULT;
- }
-}
-
-static bool fpu_allocated(struct fpu *fpu)
-{
- return fpu->state != NULL;
-}
-
-static inline int fpu_alloc(struct fpu *fpu)
-{
- if (fpu_allocated(fpu))
- return 0;
- fpu->state = kmem_cache_alloc(task_xstate_cachep, GFP_KERNEL);
- if (!fpu->state)
- return -ENOMEM;
- WARN_ON((unsigned long)fpu->state & 15);
- return 0;
-}
-
-static inline void fpu_free(struct fpu *fpu)
-{
- if (fpu->state) {
- kmem_cache_free(task_xstate_cachep, fpu->state);
- fpu->state = NULL;
- }
-}
-
-static inline void fpu_copy(struct fpu *dst, struct fpu *src)
-{
- memcpy(dst->state, src->state, xstate_size);
-}
-
-extern void fpu_finit(struct fpu *fpu);
+extern void unlazy_fpu(struct task_struct *tsk);
#endif /* __ASSEMBLY__ */
diff --git a/arch/x86/include/asm/ia32.h b/arch/x86/include/asm/ia32.h
index 1f7e6251728..d0e8e014104 100644
--- a/arch/x86/include/asm/ia32.h
+++ b/arch/x86/include/asm/ia32.h
@@ -13,36 +13,24 @@
#include <asm/sigcontext32.h>
/* signal.h */
-struct sigaction32 {
- unsigned int sa_handler; /* Really a pointer, but need to deal
- with 32 bits */
- unsigned int sa_flags;
- unsigned int sa_restorer; /* Another 32 bit pointer */
- compat_sigset_t sa_mask; /* A 32 bit mask */
-};
-
-struct old_sigaction32 {
- unsigned int sa_handler; /* Really a pointer, but need to deal
- with 32 bits */
- compat_old_sigset_t sa_mask; /* A 32 bit mask */
- unsigned int sa_flags;
- unsigned int sa_restorer; /* Another 32 bit pointer */
-};
-
-typedef struct sigaltstack_ia32 {
- unsigned int ss_sp;
- int ss_flags;
- unsigned int ss_size;
-} stack_ia32_t;
struct ucontext_ia32 {
unsigned int uc_flags;
unsigned int uc_link;
- stack_ia32_t uc_stack;
+ compat_stack_t uc_stack;
struct sigcontext_ia32 uc_mcontext;
compat_sigset_t uc_sigmask; /* mask last for extensibility */
};
+struct ucontext_x32 {
+ unsigned int uc_flags;
+ unsigned int uc_link;
+ compat_stack_t uc_stack;
+ unsigned int uc__pad0; /* needed for alignment */
+ struct sigcontext uc_mcontext; /* the 64-bit sigcontext type */
+ compat_sigset_t uc_sigmask; /* mask last for extensibility */
+};
+
/* This matches struct stat64 in glibc2.2, hence the absolutely
* insane amounts of padding around dev_t's.
*/
@@ -77,58 +65,6 @@ struct stat64 {
unsigned long long st_ino;
} __attribute__((packed));
-typedef struct compat_siginfo {
- int si_signo;
- int si_errno;
- int si_code;
-
- union {
- int _pad[((128 / sizeof(int)) - 3)];
-
- /* kill() */
- struct {
- unsigned int _pid; /* sender's pid */
- unsigned int _uid; /* sender's uid */
- } _kill;
-
- /* POSIX.1b timers */
- struct {
- compat_timer_t _tid; /* timer id */
- int _overrun; /* overrun count */
- compat_sigval_t _sigval; /* same as below */
- int _sys_private; /* not to be passed to user */
- int _overrun_incr; /* amount to add to overrun */
- } _timer;
-
- /* POSIX.1b signals */
- struct {
- unsigned int _pid; /* sender's pid */
- unsigned int _uid; /* sender's uid */
- compat_sigval_t _sigval;
- } _rt;
-
- /* SIGCHLD */
- struct {
- unsigned int _pid; /* which child */
- unsigned int _uid; /* sender's uid */
- int _status; /* exit code */
- compat_clock_t _utime;
- compat_clock_t _stime;
- } _sigchld;
-
- /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
- struct {
- unsigned int _addr; /* faulting insn/memory ref. */
- } _sigfault;
-
- /* SIGPOLL */
- struct {
- int _band; /* POLL_IN, POLL_OUT, POLL_MSG */
- int _fd;
- } _sigpoll;
- } _sifields;
-} compat_siginfo_t;
-
#define IA32_STACK_TOP IA32_PAGE_OFFSET
#ifdef __KERNEL__
diff --git a/arch/x86/include/asm/idle.h b/arch/x86/include/asm/idle.h
index f49253d7571..c5d1785373e 100644
--- a/arch/x86/include/asm/idle.h
+++ b/arch/x86/include/asm/idle.h
@@ -14,6 +14,7 @@ void exit_idle(void);
#else /* !CONFIG_X86_64 */
static inline void enter_idle(void) { }
static inline void exit_idle(void) { }
+static inline void __exit_idle(void) { }
#endif /* CONFIG_X86_64 */
void amd_e400_remove_cpu(int cpu);
diff --git a/arch/x86/include/asm/inat.h b/arch/x86/include/asm/inat.h
index 205b063e3e3..74a2e312e8a 100644
--- a/arch/x86/include/asm/inat.h
+++ b/arch/x86/include/asm/inat.h
@@ -97,11 +97,12 @@
/* Attribute search APIs */
extern insn_attr_t inat_get_opcode_attribute(insn_byte_t opcode);
+extern int inat_get_last_prefix_id(insn_byte_t last_pfx);
extern insn_attr_t inat_get_escape_attribute(insn_byte_t opcode,
- insn_byte_t last_pfx,
+ int lpfx_id,
insn_attr_t esc_attr);
extern insn_attr_t inat_get_group_attribute(insn_byte_t modrm,
- insn_byte_t last_pfx,
+ int lpfx_id,
insn_attr_t esc_attr);
extern insn_attr_t inat_get_avx_attribute(insn_byte_t opcode,
insn_byte_t vex_m,
diff --git a/arch/x86/include/asm/init.h b/arch/x86/include/asm/init.h
index adcc0ae73d0..223042086f4 100644
--- a/arch/x86/include/asm/init.h
+++ b/arch/x86/include/asm/init.h
@@ -1,20 +1,14 @@
-#ifndef _ASM_X86_INIT_32_H
-#define _ASM_X86_INIT_32_H
+#ifndef _ASM_X86_INIT_H
+#define _ASM_X86_INIT_H
-#ifdef CONFIG_X86_32
-extern void __init early_ioremap_page_table_range_init(void);
-#endif
+struct x86_mapping_info {
+ void *(*alloc_pgt_page)(void *); /* allocate buf for page table */
+ void *context; /* context for alloc_pgt_page */
+ unsigned long pmd_flag; /* page flag for PMD entry */
+ bool kernel_mapping; /* kernel mapping or ident mapping */
+};
-extern void __init zone_sizes_init(void);
+int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page,
+ unsigned long addr, unsigned long end);
-extern unsigned long __init
-kernel_physical_mapping_init(unsigned long start,
- unsigned long end,
- unsigned long page_size_mask);
-
-
-extern unsigned long __initdata pgt_buf_start;
-extern unsigned long __meminitdata pgt_buf_end;
-extern unsigned long __meminitdata pgt_buf_top;
-
-#endif /* _ASM_X86_INIT_32_H */
+#endif /* _ASM_X86_INIT_H */
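A hedged sketch of how a caller would fill in x86_mapping_info and use kernel_ident_mapping_init() to build an identity mapping; the allocator, flags and function names here are illustrative assumptions, not taken from this patch:

	static void *example_alloc_pgt_page(void *context)
	{
		/* Hand out one zeroed page per call for new page-table levels. */
		return (void *)get_zeroed_page(GFP_KERNEL);
	}

	static int example_build_ident_map(pgd_t *pgd, unsigned long start,
					   unsigned long end)
	{
		struct x86_mapping_info info = {
			.alloc_pgt_page	= example_alloc_pgt_page,
			.context	= NULL,
			.pmd_flag	= __PAGE_KERNEL_LARGE_EXEC,
			.kernel_mapping	= false,	/* identity, not kernel, mapping */
		};

		return kernel_ident_mapping_init(&info, pgd, start, end);
	}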
diff --git a/arch/x86/include/asm/insn.h b/arch/x86/include/asm/insn.h
index 74df3f1eddf..48eb30a8606 100644
--- a/arch/x86/include/asm/insn.h
+++ b/arch/x86/include/asm/insn.h
@@ -96,12 +96,6 @@ struct insn {
#define X86_VEX_P(vex) ((vex) & 0x03) /* VEX3 Byte2, VEX2 Byte1 */
#define X86_VEX_M_MAX 0x1f /* VEX3.M Maximum value */
-/* The last prefix is needed for two-byte and three-byte opcodes */
-static inline insn_byte_t insn_last_prefix(struct insn *insn)
-{
- return insn->prefixes.bytes[3];
-}
-
extern void insn_init(struct insn *insn, const void *kaddr, int x86_64);
extern void insn_get_prefixes(struct insn *insn);
extern void insn_get_opcode(struct insn *insn);
@@ -160,6 +154,18 @@ static inline insn_byte_t insn_vex_p_bits(struct insn *insn)
return X86_VEX_P(insn->vex_prefix.bytes[2]);
}
+/* Get the last prefix id from last prefix or VEX prefix */
+static inline int insn_last_prefix_id(struct insn *insn)
+{
+ if (insn_is_avx(insn))
+ return insn_vex_p_bits(insn); /* VEX_p is a SIMD prefix id */
+
+ if (insn->prefixes.bytes[3])
+ return inat_get_last_prefix_id(insn->prefixes.bytes[3]);
+
+ return 0;
+}
+
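A rough sketch of where the new prefix-id plumbing is used: after decoding, insn_last_prefix_id() yields a small integer that feeds the inat_get_*_attribute() helpers. The wrapper below is illustrative only (the real consumer is the in-kernel instruction decoder), and the attribute lookup it redoes is shown purely for clarity:

	static insn_attr_t example_two_byte_attr(const void *kaddr)
	{
		struct insn insn;
		insn_attr_t attr;
		int lpfx_id;

		insn_init(&insn, kaddr, 1);	/* decode as 64-bit code */
		insn_get_prefixes(&insn);
		insn_get_opcode(&insn);		/* fills insn.opcode */

		lpfx_id = insn_last_prefix_id(&insn);
		attr = inat_get_opcode_attribute(insn.opcode.bytes[0]);
		if (inat_is_escape(attr))	/* 0x0f escape byte */
			attr = inat_get_escape_attribute(insn.opcode.bytes[1],
							 lpfx_id, attr);
		return attr;
	}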
/* Offset of each field from kaddr */
static inline int insn_offset_rex_prefix(struct insn *insn)
{
diff --git a/arch/x86/include/asm/inst.h b/arch/x86/include/asm/inst.h
index 280bf7fb6ab..3e115273ed8 100644
--- a/arch/x86/include/asm/inst.h
+++ b/arch/x86/include/asm/inst.h
@@ -9,12 +9,68 @@
#define REG_NUM_INVALID 100
-#define REG_TYPE_R64 0
-#define REG_TYPE_XMM 1
+#define REG_TYPE_R32 0
+#define REG_TYPE_R64 1
+#define REG_TYPE_XMM 2
#define REG_TYPE_INVALID 100
+ .macro R32_NUM opd r32
+ \opd = REG_NUM_INVALID
+ .ifc \r32,%eax
+ \opd = 0
+ .endif
+ .ifc \r32,%ecx
+ \opd = 1
+ .endif
+ .ifc \r32,%edx
+ \opd = 2
+ .endif
+ .ifc \r32,%ebx
+ \opd = 3
+ .endif
+ .ifc \r32,%esp
+ \opd = 4
+ .endif
+ .ifc \r32,%ebp
+ \opd = 5
+ .endif
+ .ifc \r32,%esi
+ \opd = 6
+ .endif
+ .ifc \r32,%edi
+ \opd = 7
+ .endif
+#ifdef CONFIG_X86_64
+ .ifc \r32,%r8d
+ \opd = 8
+ .endif
+ .ifc \r32,%r9d
+ \opd = 9
+ .endif
+ .ifc \r32,%r10d
+ \opd = 10
+ .endif
+ .ifc \r32,%r11d
+ \opd = 11
+ .endif
+ .ifc \r32,%r12d
+ \opd = 12
+ .endif
+ .ifc \r32,%r13d
+ \opd = 13
+ .endif
+ .ifc \r32,%r14d
+ \opd = 14
+ .endif
+ .ifc \r32,%r15d
+ \opd = 15
+ .endif
+#endif
+ .endm
+
.macro R64_NUM opd r64
\opd = REG_NUM_INVALID
+#ifdef CONFIG_X86_64
.ifc \r64,%rax
\opd = 0
.endif
@@ -63,6 +119,7 @@
.ifc \r64,%r15
\opd = 15
.endif
+#endif
.endm
.macro XMM_NUM opd xmm
@@ -118,10 +175,13 @@
.endm
.macro REG_TYPE type reg
+ R32_NUM reg_type_r32 \reg
R64_NUM reg_type_r64 \reg
XMM_NUM reg_type_xmm \reg
.if reg_type_r64 <> REG_NUM_INVALID
\type = REG_TYPE_R64
+ .elseif reg_type_r32 <> REG_NUM_INVALID
+ \type = REG_TYPE_R32
.elseif reg_type_xmm <> REG_NUM_INVALID
\type = REG_TYPE_XMM
.else
@@ -162,6 +222,16 @@
.byte \imm8
.endm
+ .macro PEXTRD imm8 xmm gpr
+ R32_NUM extrd_opd1 \gpr
+ XMM_NUM extrd_opd2 \xmm
+ PFX_OPD_SIZE
+ PFX_REX extrd_opd1 extrd_opd2
+ .byte 0x0f, 0x3a, 0x16
+ MODRM 0xc0 extrd_opd1 extrd_opd2
+ .byte \imm8
+ .endm
+
.macro AESKEYGENASSIST rcon xmm1 xmm2
XMM_NUM aeskeygen_opd1 \xmm1
XMM_NUM aeskeygen_opd2 \xmm2
diff --git a/arch/x86/include/asm/intel-mid.h b/arch/x86/include/asm/intel-mid.h
new file mode 100644
index 00000000000..e34e097b6f9
--- /dev/null
+++ b/arch/x86/include/asm/intel-mid.h
@@ -0,0 +1,157 @@
+/*
+ * intel-mid.h: Intel MID specific setup code
+ *
+ * (C) Copyright 2009 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _ASM_X86_INTEL_MID_H
+#define _ASM_X86_INTEL_MID_H
+
+#include <linux/sfi.h>
+#include <linux/platform_device.h>
+
+extern int intel_mid_pci_init(void);
+extern int get_gpio_by_name(const char *name);
+extern void intel_scu_device_register(struct platform_device *pdev);
+extern int __init sfi_parse_mrtc(struct sfi_table_header *table);
+extern int __init sfi_parse_mtmr(struct sfi_table_header *table);
+extern int sfi_mrtc_num;
+extern struct sfi_rtc_table_entry sfi_mrtc_array[];
+
+/*
+ * This defines the array of device platform data that the IAFW exports
+ * through the SFI "DEVS" table; we use name and type to match a device
+ * with its platform data.
+ */
+struct devs_id {
+ char name[SFI_NAME_LEN + 1];
+ u8 type;
+ u8 delay;
+ void *(*get_platform_data)(void *info);
+ /* Custom handler for devices */
+ void (*device_handler)(struct sfi_device_table_entry *pentry,
+ struct devs_id *dev);
+};
+
+#define sfi_device(i) \
+ static const struct devs_id *const __intel_mid_sfi_##i##_dev __used \
+ __attribute__((__section__(".x86_intel_mid_dev.init"))) = &i
+
+/*
+ * Medfield is the follow-up to Moorestown: it combines a two-chip solution
+ * into one, and also adds an always-on, constant TSC and LAPIC timers.
+ * Medfield is the platform name; the chip itself is called Penwell, so we
+ * treat Medfield/Penwell as a variant of Moorestown. Penwell can be
+ * identified via MSRs.
+ */
+enum intel_mid_cpu_type {
+ /* 1 was Moorestown */
+ INTEL_MID_CPU_CHIP_PENWELL = 2,
+ INTEL_MID_CPU_CHIP_CLOVERVIEW,
+ INTEL_MID_CPU_CHIP_TANGIER,
+};
+
+extern enum intel_mid_cpu_type __intel_mid_cpu_chip;
+
+/**
+ * struct intel_mid_ops - Interface between intel-mid & sub archs
+ * @arch_setup: arch_setup function to re-initialize platform
+ * structures (x86_init, x86_platform_init)
+ *
+ * This structure can be extended if any new interface is required
+ * between intel-mid & its sub arch files.
+ */
+struct intel_mid_ops {
+ void (*arch_setup)(void);
+};
+
+/* Helper API's for INTEL_MID_OPS_INIT */
+#define DECLARE_INTEL_MID_OPS_INIT(cpuname, cpuid) \
+ [cpuid] = get_##cpuname##_ops
+
+/* Maximum number of CPU ops */
+#define MAX_CPU_OPS(a) (sizeof(a)/sizeof(void *))
+
+/*
+ * For every new cpu addition, a weak get_<cpuname>_ops() function needs to be
+ * declared in arch/x86/platform/intel_mid/intel_mid_weak_decls.h.
+ */
+#define INTEL_MID_OPS_INIT {\
+ DECLARE_INTEL_MID_OPS_INIT(penwell, INTEL_MID_CPU_CHIP_PENWELL), \
+ DECLARE_INTEL_MID_OPS_INIT(cloverview, INTEL_MID_CPU_CHIP_CLOVERVIEW), \
+ DECLARE_INTEL_MID_OPS_INIT(tangier, INTEL_MID_CPU_CHIP_TANGIER) \
+};
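
A rough sketch of how this ops table might be consumed at setup time. The get_*_ops() prototypes below are assumptions for illustration (the real weak declarations live in intel_mid_weak_decls.h), and the function name is hypothetical.

#include <asm/intel-mid.h>

/* assumed prototypes; in the kernel these are weak per-SoC symbols */
void *get_penwell_ops(void);
void *get_cloverview_ops(void);
void *get_tangier_ops(void);

/* note: INTEL_MID_OPS_INIT already supplies the closing "};" */
static void *(*get_intel_mid_ops[])(void) = INTEL_MID_OPS_INIT

static struct intel_mid_ops *intel_mid_ops;

static void intel_mid_setup_arch(void)
{
	unsigned int id = intel_mid_identify_cpu();

	if (id < MAX_CPU_OPS(get_intel_mid_ops) && get_intel_mid_ops[id])
		intel_mid_ops = get_intel_mid_ops[id]();

	if (intel_mid_ops && intel_mid_ops->arch_setup)
		intel_mid_ops->arch_setup();
}
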
+
+#ifdef CONFIG_X86_INTEL_MID
+
+static inline enum intel_mid_cpu_type intel_mid_identify_cpu(void)
+{
+ return __intel_mid_cpu_chip;
+}
+
+static inline bool intel_mid_has_msic(void)
+{
+ return (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_PENWELL);
+}
+
+#else /* !CONFIG_X86_INTEL_MID */
+
+#define intel_mid_identify_cpu() (0)
+#define intel_mid_has_msic() (0)
+
+#endif /* !CONFIG_X86_INTEL_MID */
+
+enum intel_mid_timer_options {
+ INTEL_MID_TIMER_DEFAULT,
+ INTEL_MID_TIMER_APBT_ONLY,
+ INTEL_MID_TIMER_LAPIC_APBT,
+};
+
+extern enum intel_mid_timer_options intel_mid_timer_options;
+
+/*
+ * Penwell uses a spread spectrum clock, so the frequency numbers are not
+ * exactly what the MSR reports according to the SDM.
+ */
+#define FSB_FREQ_83SKU 83200
+#define FSB_FREQ_100SKU 99840
+#define FSB_FREQ_133SKU 133000
+
+#define FSB_FREQ_167SKU 167000
+#define FSB_FREQ_200SKU 200000
+#define FSB_FREQ_267SKU 267000
+#define FSB_FREQ_333SKU 333000
+#define FSB_FREQ_400SKU 400000
+
+/* Bus Select SoC Fuse value */
+#define BSEL_SOC_FUSE_MASK 0x7
+#define BSEL_SOC_FUSE_001 0x1 /* FSB 133MHz */
+#define BSEL_SOC_FUSE_101 0x5 /* FSB 100MHz */
+#define BSEL_SOC_FUSE_111 0x7 /* FSB 83MHz */
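
Purely as an illustration of how the fuse values relate to the FSB SKU constants above, a sketch that maps the BSEL fuse field to a frequency in kHz (reading the fuse register itself is out of scope here, and the function name is hypothetical):

static unsigned long fsb_freq_from_bsel(unsigned int fuse)
{
	switch (fuse & BSEL_SOC_FUSE_MASK) {
	case BSEL_SOC_FUSE_001:
		return FSB_FREQ_133SKU;	/* 133 MHz SKU */
	case BSEL_SOC_FUSE_101:
		return FSB_FREQ_100SKU;	/* 100 MHz SKU */
	case BSEL_SOC_FUSE_111:
		return FSB_FREQ_83SKU;	/* 83 MHz SKU */
	default:
		return 0;		/* unknown/reserved fuse value */
	}
}
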
+
+#define SFI_MTMR_MAX_NUM 8
+#define SFI_MRTC_MAX 8
+
+extern struct console early_mrst_console;
+extern void mrst_early_console_init(void);
+
+extern struct console early_hsu_console;
+extern void hsu_early_console_init(const char *);
+
+extern void intel_scu_devices_create(void);
+extern void intel_scu_devices_destroy(void);
+
+/* VRTC timer */
+#define MRST_VRTC_MAP_SZ (1024)
+/*#define MRST_VRTC_PGOFFSET (0xc00) */
+
+extern void intel_mid_rtc_init(void);
+
+/* the offset for the mapping of global gpio pin to irq */
+#define INTEL_MID_IRQ_OFFSET 0x100
+
+#endif /* _ASM_X86_INTEL_MID_H */
diff --git a/arch/x86/include/asm/intel_mid_vrtc.h b/arch/x86/include/asm/intel_mid_vrtc.h
new file mode 100644
index 00000000000..86ff4685c40
--- /dev/null
+++ b/arch/x86/include/asm/intel_mid_vrtc.h
@@ -0,0 +1,9 @@
+#ifndef _INTEL_MID_VRTC_H
+#define _INTEL_MID_VRTC_H
+
+extern unsigned char vrtc_cmos_read(unsigned char reg);
+extern void vrtc_cmos_write(unsigned char val, unsigned char reg);
+extern void vrtc_get_time(struct timespec *now);
+extern int vrtc_set_mmss(const struct timespec *now);
+
+#endif
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index d8e8eefbe24..b8237d8a1e0 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -39,6 +39,7 @@
#include <linux/string.h>
#include <linux/compiler.h>
#include <asm/page.h>
+#include <asm/early_ioremap.h>
#define build_mmio_read(name, size, type, reg, barrier) \
static inline type name(const volatile void __iomem *addr) \
@@ -237,7 +238,7 @@ memcpy_toio(volatile void __iomem *dst, const void *src, size_t count)
static inline void flush_write_buffers(void)
{
-#if defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE)
+#if defined(CONFIG_X86_PPRO_FENCE)
asm volatile("lock; addl $0,0(%%esp)": : :"memory");
#endif
}
@@ -316,19 +317,6 @@ extern int ioremap_change_attr(unsigned long vaddr, unsigned long size,
unsigned long prot_val);
extern void __iomem *ioremap_wc(resource_size_t offset, unsigned long size);
-/*
- * early_ioremap() and early_iounmap() are for temporary early boot-time
- * mappings, before the real ioremap() is functional.
- * A boot-time mapping is currently limited to at most 16 pages.
- */
-extern void early_ioremap_init(void);
-extern void early_ioremap_reset(void);
-extern void __iomem *early_ioremap(resource_size_t phys_addr,
- unsigned long size);
-extern void __iomem *early_memremap(resource_size_t phys_addr,
- unsigned long size);
-extern void early_iounmap(void __iomem *addr, unsigned long size);
-extern void fixup_early_ioremap(void);
extern bool is_early_ioremap_ptep(pte_t *ptep);
#ifdef CONFIG_XEN
@@ -345,4 +333,11 @@ extern bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
#define IO_SPACE_LIMIT 0xffff
+#ifdef CONFIG_MTRR
+extern int __must_check arch_phys_wc_add(unsigned long base,
+ unsigned long size);
+extern void arch_phys_wc_del(int handle);
+#define arch_phys_wc_add arch_phys_wc_add
+#endif
+
#endif /* _ASM_X86_IO_H */
diff --git a/arch/x86/include/asm/io_apic.h b/arch/x86/include/asm/io_apic.h
index 690d1cc9a87..90f97b4b934 100644
--- a/arch/x86/include/asm/io_apic.h
+++ b/arch/x86/include/asm/io_apic.h
@@ -5,7 +5,7 @@
#include <asm/mpspec.h>
#include <asm/apicdef.h>
#include <asm/irq_vectors.h>
-
+#include <asm/x86_init.h>
/*
* Intel IO-APIC support for SMP and UP systems.
*
@@ -144,20 +144,30 @@ extern int timer_through_8259;
(mp_irq_entries && !skip_ioapic_setup && io_apic_irqs)
struct io_apic_irq_attr;
+struct irq_cfg;
extern int io_apic_set_pci_routing(struct device *dev, int irq,
struct io_apic_irq_attr *irq_attr);
void setup_IO_APIC_irq_extra(u32 gsi);
-extern void ioapic_and_gsi_init(void);
extern void ioapic_insert_resources(void);
+extern int native_setup_ioapic_entry(int, struct IO_APIC_route_entry *,
+ unsigned int, int,
+ struct io_apic_irq_attr *);
+extern void eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg);
+
+extern void native_compose_msi_msg(struct pci_dev *pdev,
+ unsigned int irq, unsigned int dest,
+ struct msi_msg *msg, u8 hpet_id);
+extern void native_eoi_ioapic_pin(int apic, int pin, int vector);
int io_apic_setup_irq_pin_once(unsigned int irq, int node, struct io_apic_irq_attr *attr);
extern int save_ioapic_entries(void);
extern void mask_ioapic_entries(void);
extern int restore_ioapic_entries(void);
-extern int get_nr_irqs_gsi(void);
-
extern void setup_ioapic_ids_from_mpc(void);
extern void setup_ioapic_ids_from_mpc_nocheck(void);
@@ -176,12 +186,38 @@ extern void mp_save_irq(struct mpc_intsrc *m);
extern void disable_ioapic_support(void);
+extern void __init native_io_apic_init_mappings(void);
+extern unsigned int native_io_apic_read(unsigned int apic, unsigned int reg);
+extern void native_io_apic_write(unsigned int apic, unsigned int reg, unsigned int val);
+extern void native_io_apic_modify(unsigned int apic, unsigned int reg, unsigned int val);
+extern void native_disable_io_apic(void);
+extern void native_io_apic_print_entries(unsigned int apic, unsigned int nr_entries);
+extern void intel_ir_io_apic_print_entries(unsigned int apic, unsigned int nr_entries);
+extern int native_ioapic_set_affinity(struct irq_data *,
+ const struct cpumask *,
+ bool);
+
+static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg)
+{
+ return x86_io_apic_ops.read(apic, reg);
+}
+
+static inline void io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
+{
+ x86_io_apic_ops.write(apic, reg, value);
+}
+static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned int value)
+{
+ x86_io_apic_ops.modify(apic, reg, value);
+}
+
+extern void io_apic_eoi(unsigned int apic, unsigned int vector);
+
#else /* !CONFIG_X86_IO_APIC */
#define io_apic_assign_pci_irqs 0
#define setup_ioapic_ids_from_mpc x86_init_noop
static const int timer_through_8259 = 0;
-static inline void ioapic_and_gsi_init(void) { }
static inline void ioapic_insert_resources(void) { }
#define gsi_top (NR_IRQS_LEGACY)
static inline int mp_find_ioapic(u32 gsi) { return 0; }
@@ -203,6 +239,16 @@ static inline int restore_ioapic_entries(void)
static inline void mp_save_irq(struct mpc_intsrc *m) { };
static inline void disable_ioapic_support(void) { }
+#define native_io_apic_init_mappings NULL
+#define native_io_apic_read NULL
+#define native_io_apic_write NULL
+#define native_io_apic_modify NULL
+#define native_disable_io_apic NULL
+#define native_io_apic_print_entries NULL
+#define native_ioapic_set_affinity NULL
+#define native_setup_ioapic_entry NULL
+#define native_compose_msi_msg NULL
+#define native_eoi_ioapic_pin NULL
#endif
#endif /* _ASM_X86_IO_APIC_H */
diff --git a/arch/x86/include/asm/ioctl.h b/arch/x86/include/asm/ioctl.h
deleted file mode 100644
index b279fe06dfe..00000000000
--- a/arch/x86/include/asm/ioctl.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/ioctl.h>
diff --git a/arch/x86/include/asm/ioctls.h b/arch/x86/include/asm/ioctls.h
deleted file mode 100644
index ec34c760665..00000000000
--- a/arch/x86/include/asm/ioctls.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/ioctls.h>
diff --git a/arch/x86/include/asm/iommu.h b/arch/x86/include/asm/iommu.h
index dffc38ee625..345c99cef15 100644
--- a/arch/x86/include/asm/iommu.h
+++ b/arch/x86/include/asm/iommu.h
@@ -5,7 +5,6 @@ extern struct dma_map_ops nommu_dma_ops;
extern int force_iommu, no_iommu;
extern int iommu_detected;
extern int iommu_pass_through;
-extern int iommu_group_mf;
/* 10 seconds */
#define DMAR_OPERATION_TIMEOUT ((cycles_t) tsc_khz*10*1000)
diff --git a/arch/x86/include/asm/iommu_table.h b/arch/x86/include/asm/iommu_table.h
index f229b13a5f3..f42a04735a0 100644
--- a/arch/x86/include/asm/iommu_table.h
+++ b/arch/x86/include/asm/iommu_table.h
@@ -48,7 +48,7 @@ struct iommu_table_entry {
#define __IOMMU_INIT(_detect, _depend, _early_init, _late_init, _finish)\
- static const struct iommu_table_entry const \
+ static const struct iommu_table_entry \
__iommu_entry_##_detect __used \
__attribute__ ((unused, __section__(".iommu_table"), \
aligned((sizeof(void *))))) \
@@ -63,10 +63,10 @@ struct iommu_table_entry {
* to stop detecting the other IOMMUs after yours has been detected.
*/
#define IOMMU_INIT_POST(_detect) \
- __IOMMU_INIT(_detect, pci_swiotlb_detect_4gb, 0, 0, 0)
+ __IOMMU_INIT(_detect, pci_swiotlb_detect_4gb, NULL, NULL, 0)
#define IOMMU_INIT_POST_FINISH(detect) \
- __IOMMU_INIT(_detect, pci_swiotlb_detect_4gb, 0, 0, 1)
+ __IOMMU_INIT(_detect, pci_swiotlb_detect_4gb, NULL, NULL, 1)
/*
* A more sophisticated version of IOMMU_INIT. This variant requires:
diff --git a/arch/x86/include/asm/iosf_mbi.h b/arch/x86/include/asm/iosf_mbi.h
new file mode 100644
index 00000000000..57995f0596a
--- /dev/null
+++ b/arch/x86/include/asm/iosf_mbi.h
@@ -0,0 +1,145 @@
+/*
+ * iosf_mbi.h: Intel OnChip System Fabric MailBox access support
+ */
+
+#ifndef IOSF_MBI_SYMS_H
+#define IOSF_MBI_SYMS_H
+
+#define MBI_MCR_OFFSET 0xD0
+#define MBI_MDR_OFFSET 0xD4
+#define MBI_MCRX_OFFSET 0xD8
+
+#define MBI_RD_MASK 0xFEFFFFFF
+#define MBI_WR_MASK 0X01000000
+
+#define MBI_MASK_HI 0xFFFFFF00
+#define MBI_MASK_LO 0x000000FF
+#define MBI_ENABLE 0xF0
+
+/* Baytrail available units */
+#define BT_MBI_UNIT_AUNIT 0x00
+#define BT_MBI_UNIT_SMC 0x01
+#define BT_MBI_UNIT_CPU 0x02
+#define BT_MBI_UNIT_BUNIT 0x03
+#define BT_MBI_UNIT_PMC 0x04
+#define BT_MBI_UNIT_GFX 0x06
+#define BT_MBI_UNIT_SMI 0x0C
+#define BT_MBI_UNIT_USB 0x43
+#define BT_MBI_UNIT_SATA 0xA3
+#define BT_MBI_UNIT_PCIE 0xA6
+
+/* Baytrail read/write opcodes */
+#define BT_MBI_AUNIT_READ 0x10
+#define BT_MBI_AUNIT_WRITE 0x11
+#define BT_MBI_SMC_READ 0x10
+#define BT_MBI_SMC_WRITE 0x11
+#define BT_MBI_CPU_READ 0x10
+#define BT_MBI_CPU_WRITE 0x11
+#define BT_MBI_BUNIT_READ 0x10
+#define BT_MBI_BUNIT_WRITE 0x11
+#define BT_MBI_PMC_READ 0x06
+#define BT_MBI_PMC_WRITE 0x07
+#define BT_MBI_GFX_READ 0x00
+#define BT_MBI_GFX_WRITE 0x01
+#define BT_MBI_SMIO_READ 0x06
+#define BT_MBI_SMIO_WRITE 0x07
+#define BT_MBI_USB_READ 0x06
+#define BT_MBI_USB_WRITE 0x07
+#define BT_MBI_SATA_READ 0x00
+#define BT_MBI_SATA_WRITE 0x01
+#define BT_MBI_PCIE_READ 0x00
+#define BT_MBI_PCIE_WRITE 0x01
+
+/* Quark available units */
+#define QRK_MBI_UNIT_HBA 0x00
+#define QRK_MBI_UNIT_HB 0x03
+#define QRK_MBI_UNIT_RMU 0x04
+#define QRK_MBI_UNIT_MM 0x05
+#define QRK_MBI_UNIT_MMESRAM 0x05
+#define QRK_MBI_UNIT_SOC 0x31
+
+/* Quark read/write opcodes */
+#define QRK_MBI_HBA_READ 0x10
+#define QRK_MBI_HBA_WRITE 0x11
+#define QRK_MBI_HB_READ 0x10
+#define QRK_MBI_HB_WRITE 0x11
+#define QRK_MBI_RMU_READ 0x10
+#define QRK_MBI_RMU_WRITE 0x11
+#define QRK_MBI_MM_READ 0x10
+#define QRK_MBI_MM_WRITE 0x11
+#define QRK_MBI_MMESRAM_READ 0x12
+#define QRK_MBI_MMESRAM_WRITE 0x13
+#define QRK_MBI_SOC_READ 0x06
+#define QRK_MBI_SOC_WRITE 0x07
+
+#if IS_ENABLED(CONFIG_IOSF_MBI)
+
+bool iosf_mbi_available(void);
+
+/**
+ * iosf_mbi_read() - MailBox Interface read command
+ * @port: port indicating subunit being accessed
+ * @opcode: port specific read or write opcode
+ * @offset: register address offset
+ * @mdr: register data to be read
+ *
+ * Locking is handled by spinlock - cannot sleep.
+ * Return: Nonzero on error
+ */
+int iosf_mbi_read(u8 port, u8 opcode, u32 offset, u32 *mdr);
+
+/**
+ * iosf_mbi_write() - MailBox unmasked write command
+ * @port: port indicating subunit being accessed
+ * @opcode: port specific read or write opcode
+ * @offset: register address offset
+ * @mdr: register data to be written
+ *
+ * Locking is handled by spinlock - cannot sleep.
+ * Return: Nonzero on error
+ */
+int iosf_mbi_write(u8 port, u8 opcode, u32 offset, u32 mdr);
+
+/**
+ * iosf_mbi_modify() - MailBox masked write command
+ * @port: port indicating subunit being accessed
+ * @opcode: port specific read or write opcode
+ * @offset: register address offset
+ * @mdr: register data being modified
+ * @mask: mask indicating bits in mdr to be modified
+ *
+ * Locking is handled by spinlock - cannot sleep.
+ * Return: Nonzero on error
+ */
+int iosf_mbi_modify(u8 port, u8 opcode, u32 offset, u32 mdr, u32 mask);
+
+#else /* CONFIG_IOSF_MBI is not enabled */
+static inline
+bool iosf_mbi_available(void)
+{
+ return false;
+}
+
+static inline
+int iosf_mbi_read(u8 port, u8 opcode, u32 offset, u32 *mdr)
+{
+ WARN(1, "IOSF_MBI driver not available");
+ return -EPERM;
+}
+
+static inline
+int iosf_mbi_write(u8 port, u8 opcode, u32 offset, u32 mdr)
+{
+ WARN(1, "IOSF_MBI driver not available");
+ return -EPERM;
+}
+
+static inline
+int iosf_mbi_modify(u8 port, u8 opcode, u32 offset, u32 mdr, u32 mask)
+{
+ WARN(1, "IOSF_MBI driver not available");
+ return -EPERM;
+}
+#endif /* CONFIG_IOSF_MBI */
+
+#endif /* IOSF_MBI_SYMS_H */
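
To show the calling convention documented above, a hedged usage sketch: a read-modify-write of a Baytrail BUNIT register. The 0x3 offset, the bit being set, and the function name are purely illustrative.

#include <linux/errno.h>
#include <linux/types.h>
#include <asm/iosf_mbi.h>

static int example_bunit_set_bit0(void)
{
	u32 val;
	int ret;

	if (!iosf_mbi_available())
		return -ENODEV;

	/* read the current register value through the mailbox */
	ret = iosf_mbi_read(BT_MBI_UNIT_BUNIT, BT_MBI_BUNIT_READ, 0x3, &val);
	if (ret)
		return ret;

	/* set bit 0 and write the full value back; iosf_mbi_modify() would
	 * be the masked-write alternative for touching only that bit */
	return iosf_mbi_write(BT_MBI_UNIT_BUNIT, BT_MBI_BUNIT_WRITE, 0x3,
			      val | 0x1);
}
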
diff --git a/arch/x86/include/asm/ipcbuf.h b/arch/x86/include/asm/ipcbuf.h
deleted file mode 100644
index 84c7e51cb6d..00000000000
--- a/arch/x86/include/asm/ipcbuf.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/ipcbuf.h>
diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h
index ba870bb6dd8..a80cbb88ea9 100644
--- a/arch/x86/include/asm/irq.h
+++ b/arch/x86/include/asm/irq.h
@@ -25,6 +25,7 @@ extern void irq_ctx_init(int cpu);
#ifdef CONFIG_HOTPLUG_CPU
#include <linux/cpumask.h>
+extern int check_irq_vectors_for_cpu_disable(void);
extern void fixup_irqs(void);
extern void irq_force_complete_move(int);
#endif
@@ -33,7 +34,7 @@ extern void (*x86_platform_ipi_callback)(void);
extern void native_init_IRQ(void);
extern bool handle_irq(unsigned irq, struct pt_regs *regs);
-extern unsigned int do_IRQ(struct pt_regs *regs);
+extern __visible unsigned int do_IRQ(struct pt_regs *regs);
/* Interrupt vector management */
extern DECLARE_BITMAP(used_vectors, NR_VECTORS);
@@ -41,4 +42,9 @@ extern int vector_used_by_percpu_irq(unsigned int vector);
extern void init_ISA_irqs(void);
+#ifdef CONFIG_X86_LOCAL_APIC
+void arch_trigger_all_cpu_backtrace(bool);
+#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
+#endif
+
#endif /* _ASM_X86_IRQ_H */
diff --git a/arch/x86/include/asm/irq_controller.h b/arch/x86/include/asm/irq_controller.h
deleted file mode 100644
index 423bbbddf36..00000000000
--- a/arch/x86/include/asm/irq_controller.h
+++ /dev/null
@@ -1,12 +0,0 @@
-#ifndef __IRQ_CONTROLLER__
-#define __IRQ_CONTROLLER__
-
-struct irq_domain {
- int (*xlate)(struct irq_domain *h, const u32 *intspec, u32 intsize,
- u32 *out_hwirq, u32 *out_type);
- void *priv;
- struct device_node *controller;
- struct list_head l;
-};
-
-#endif
diff --git a/arch/x86/include/asm/irq_regs.h b/arch/x86/include/asm/irq_regs.h
index 77843225b7e..d82250b1deb 100644
--- a/arch/x86/include/asm/irq_regs.h
+++ b/arch/x86/include/asm/irq_regs.h
@@ -15,7 +15,7 @@ DECLARE_PER_CPU(struct pt_regs *, irq_regs);
static inline struct pt_regs *get_irq_regs(void)
{
- return percpu_read(irq_regs);
+ return this_cpu_read(irq_regs);
}
static inline struct pt_regs *set_irq_regs(struct pt_regs *new_regs)
@@ -23,7 +23,7 @@ static inline struct pt_regs *set_irq_regs(struct pt_regs *new_regs)
struct pt_regs *old_regs;
old_regs = get_irq_regs();
- percpu_write(irq_regs, new_regs);
+ this_cpu_write(irq_regs, new_regs);
return old_regs;
}
diff --git a/arch/x86/include/asm/irq_remapping.h b/arch/x86/include/asm/irq_remapping.h
index 47d99934580..b7747c4c2cf 100644
--- a/arch/x86/include/asm/irq_remapping.h
+++ b/arch/x86/include/asm/irq_remapping.h
@@ -1,45 +1,109 @@
-#ifndef _ASM_X86_IRQ_REMAPPING_H
-#define _ASM_X86_IRQ_REMAPPING_H
+/*
+ * Copyright (C) 2012 Advanced Micro Devices, Inc.
+ * Author: Joerg Roedel <joerg.roedel@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * This header file contains the interface of the interrupt remapping code to
+ * the x86 interrupt management code.
+ */
-#define IRTE_DEST(dest) ((x2apic_mode) ? dest : dest << 8)
+#ifndef __X86_IRQ_REMAPPING_H
+#define __X86_IRQ_REMAPPING_H
+
+#include <asm/io_apic.h>
+
+struct IO_APIC_route_entry;
+struct io_apic_irq_attr;
+struct irq_chip;
+struct msi_msg;
+struct pci_dev;
+struct irq_cfg;
#ifdef CONFIG_IRQ_REMAP
-static void irq_remap_modify_chip_defaults(struct irq_chip *chip);
-static inline void prepare_irte(struct irte *irte, int vector,
- unsigned int dest)
+
+extern void setup_irq_remapping_ops(void);
+extern int irq_remapping_supported(void);
+extern void set_irq_remapping_broken(void);
+extern int irq_remapping_prepare(void);
+extern int irq_remapping_enable(void);
+extern void irq_remapping_disable(void);
+extern int irq_remapping_reenable(int);
+extern int irq_remap_enable_fault_handling(void);
+extern int setup_ioapic_remapped_entry(int irq,
+ struct IO_APIC_route_entry *entry,
+ unsigned int destination,
+ int vector,
+ struct io_apic_irq_attr *attr);
+extern void free_remapped_irq(int irq);
+extern void compose_remapped_msi_msg(struct pci_dev *pdev,
+ unsigned int irq, unsigned int dest,
+ struct msi_msg *msg, u8 hpet_id);
+extern int setup_hpet_msi_remapped(unsigned int irq, unsigned int id);
+extern void panic_if_irq_remap(const char *msg);
+extern bool setup_remapped_irq(int irq,
+ struct irq_cfg *cfg,
+ struct irq_chip *chip);
+
+void irq_remap_modify_chip_defaults(struct irq_chip *chip);
+
+#else /* CONFIG_IRQ_REMAP */
+
+static inline void setup_irq_remapping_ops(void) { }
+static inline int irq_remapping_supported(void) { return 0; }
+static inline void set_irq_remapping_broken(void) { }
+static inline int irq_remapping_prepare(void) { return -ENODEV; }
+static inline int irq_remapping_enable(void) { return -ENODEV; }
+static inline void irq_remapping_disable(void) { }
+static inline int irq_remapping_reenable(int eim) { return -ENODEV; }
+static inline int irq_remap_enable_fault_handling(void) { return -ENODEV; }
+static inline int setup_ioapic_remapped_entry(int irq,
+ struct IO_APIC_route_entry *entry,
+ unsigned int destination,
+ int vector,
+ struct io_apic_irq_attr *attr)
{
- memset(irte, 0, sizeof(*irte));
-
- irte->present = 1;
- irte->dst_mode = apic->irq_dest_mode;
- /*
- * Trigger mode in the IRTE will always be edge, and for IO-APIC, the
- * actual level or edge trigger will be setup in the IO-APIC
- * RTE. This will help simplify level triggered irq migration.
- * For more details, see the comments (in io_apic.c) explainig IO-APIC
- * irq migration in the presence of interrupt-remapping.
- */
- irte->trigger_mode = 0;
- irte->dlvry_mode = apic->irq_delivery_mode;
- irte->vector = vector;
- irte->dest_id = IRTE_DEST(dest);
- irte->redir_hint = 1;
+ return -ENODEV;
}
-static inline bool irq_remapped(struct irq_cfg *cfg)
+static inline void free_remapped_irq(int irq) { }
+static inline void compose_remapped_msi_msg(struct pci_dev *pdev,
+ unsigned int irq, unsigned int dest,
+ struct msi_msg *msg, u8 hpet_id)
{
- return cfg->irq_2_iommu.iommu != NULL;
}
-#else
-static void prepare_irte(struct irte *irte, int vector, unsigned int dest)
+static inline int setup_hpet_msi_remapped(unsigned int irq, unsigned int id)
{
+ return -ENODEV;
}
-static inline bool irq_remapped(struct irq_cfg *cfg)
+
+static inline void panic_if_irq_remap(const char *msg)
{
- return false;
}
+
static inline void irq_remap_modify_chip_defaults(struct irq_chip *chip)
{
}
-#endif
-#endif /* _ASM_X86_IRQ_REMAPPING_H */
+static inline bool setup_remapped_irq(int irq,
+ struct irq_cfg *cfg,
+ struct irq_chip *chip)
+{
+ return false;
+}
+#endif /* CONFIG_IRQ_REMAP */
+
+#define dmar_alloc_hwirq() irq_alloc_hwirq(-1)
+#define dmar_free_hwirq irq_free_hwirq
+
+#endif /* __X86_IRQ_REMAPPING_H */
diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h
index 4b4448761e8..5702d7e3111 100644
--- a/arch/x86/include/asm/irq_vectors.h
+++ b/arch/x86/include/asm/irq_vectors.h
@@ -102,6 +102,11 @@
*/
#define X86_PLATFORM_IPI_VECTOR 0xf7
+/* Vector for KVM to deliver posted interrupt IPI */
+#ifdef CONFIG_HAVE_KVM
+#define POSTED_INTR_VECTOR 0xf2
+#endif
+
/*
* IRQ work vector:
*/
@@ -109,8 +114,8 @@
#define UV_BAU_MESSAGE 0xf5
-/* Xen vector callback to receive events in a HVM domain */
-#define XEN_HVM_EVTCHN_CALLBACK 0xf3
+/* Vector on which hypervisor callbacks will be delivered */
+#define HYPERVISOR_CALLBACK_VECTOR 0xf3
/*
* Local APIC timer IRQ vector is on a different priority level,
@@ -119,17 +124,6 @@
*/
#define LOCAL_TIMER_VECTOR 0xef
-/* up to 32 vectors used for spreading out TLB flushes: */
-#if NR_CPUS <= 32
-# define NUM_INVALIDATE_TLB_VECTORS (NR_CPUS)
-#else
-# define NUM_INVALIDATE_TLB_VECTORS (32)
-#endif
-
-#define INVALIDATE_TLB_VECTOR_END (0xee)
-#define INVALIDATE_TLB_VECTOR_START \
- (INVALIDATE_TLB_VECTOR_END-NUM_INVALIDATE_TLB_VECTORS+1)
-
#define NR_VECTORS 256
#define FPU_IRQ 13
diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
index bba3cf88e62..0a8b519226b 100644
--- a/arch/x86/include/asm/irqflags.h
+++ b/arch/x86/include/asm/irqflags.h
@@ -129,7 +129,7 @@ static inline notrace unsigned long arch_local_irq_save(void)
#define PARAVIRT_ADJUST_EXCEPTION_FRAME /* */
-#define INTERRUPT_RETURN iretq
+#define INTERRUPT_RETURN jmp native_iret
#define USERGS_SYSRET64 \
swapgs; \
sysretq;
diff --git a/arch/x86/include/asm/ist.h b/arch/x86/include/asm/ist.h
index 7e5dff1de0e..c9803f1a203 100644
--- a/arch/x86/include/asm/ist.h
+++ b/arch/x86/include/asm/ist.h
@@ -1,6 +1,3 @@
-#ifndef _ASM_X86_IST_H
-#define _ASM_X86_IST_H
-
/*
* Include file for the interface to IST BIOS
* Copyright 2002 Andy Grover <andrew.grover@intel.com>
@@ -15,20 +12,12 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
+#ifndef _ASM_X86_IST_H
+#define _ASM_X86_IST_H
+#include <uapi/asm/ist.h>
-#include <linux/types.h>
-
-struct ist_info {
- __u32 signature;
- __u32 command;
- __u32 event;
- __u32 perf_level;
-};
-
-#ifdef __KERNEL__
extern struct ist_info ist_info;
-#endif /* __KERNEL__ */
#endif /* _ASM_X86_IST_H */
diff --git a/arch/x86/include/asm/jump_label.h b/arch/x86/include/asm/jump_label.h
index a32b18ce6ea..6a2cefb4395 100644
--- a/arch/x86/include/asm/jump_label.h
+++ b/arch/x86/include/asm/jump_label.h
@@ -3,18 +3,23 @@
#ifdef __KERNEL__
+#include <linux/stringify.h>
#include <linux/types.h>
#include <asm/nops.h>
#include <asm/asm.h>
#define JUMP_LABEL_NOP_SIZE 5
-#define JUMP_LABEL_INITIAL_NOP ".byte 0xe9 \n\t .long 0\n\t"
+#ifdef CONFIG_X86_64
+# define STATIC_KEY_INIT_NOP P6_NOP5_ATOMIC
+#else
+# define STATIC_KEY_INIT_NOP GENERIC_NOP5_ATOMIC
+#endif
-static __always_inline bool arch_static_branch(struct jump_label_key *key)
+static __always_inline bool arch_static_branch(struct static_key *key)
{
- asm goto("1:"
- JUMP_LABEL_INITIAL_NOP
+ asm_volatile_goto("1:"
+ ".byte " __stringify(STATIC_KEY_INIT_NOP) "\n\t"
".pushsection __jump_table, \"aw\" \n\t"
_ASM_ALIGN "\n\t"
_ASM_PTR "1b, %l[l_yes], %c0 \n\t"
diff --git a/arch/x86/include/asm/kbdleds.h b/arch/x86/include/asm/kbdleds.h
new file mode 100644
index 00000000000..f27ac5ff597
--- /dev/null
+++ b/arch/x86/include/asm/kbdleds.h
@@ -0,0 +1,17 @@
+#ifndef _ASM_X86_KBDLEDS_H
+#define _ASM_X86_KBDLEDS_H
+
+/*
+ * Some laptops take the 789uiojklm,. keys as number pad when NumLock is on.
+ * This seems a good reason to start with NumLock off. That's why on X86 we
+ * ask the bios for the correct state.
+ */
+
+#include <asm/setup.h>
+
+static inline int kbd_defleds(void)
+{
+ return boot_params.kbd_status & 0x20 ? (1 << VC_NUMLOCK) : 0;
+}
+
+#endif /* _ASM_X86_KBDLEDS_H */
diff --git a/arch/x86/include/asm/kdebug.h b/arch/x86/include/asm/kdebug.h
index d73f1571bde..32ce71375b2 100644
--- a/arch/x86/include/asm/kdebug.h
+++ b/arch/x86/include/asm/kdebug.h
@@ -21,10 +21,9 @@ enum die_val {
DIE_NMIUNKNOWN,
};
-extern void printk_address(unsigned long address, int reliable);
+extern void printk_address(unsigned long address);
extern void die(const char *, struct pt_regs *,long);
extern int __must_check __die(const char *, struct pt_regs *, long);
-extern void show_registers(struct pt_regs *regs);
extern void show_trace(struct task_struct *t, struct pt_regs *regs,
unsigned long *sp, unsigned long bp);
extern void __show_regs(struct pt_regs *regs, int all);
diff --git a/arch/x86/include/asm/kexec.h b/arch/x86/include/asm/kexec.h
index 317ff1703d0..17483a492f1 100644
--- a/arch/x86/include/asm/kexec.h
+++ b/arch/x86/include/asm/kexec.h
@@ -48,11 +48,11 @@
# define vmcore_elf_check_arch_cross(x) ((x)->e_machine == EM_X86_64)
#else
/* Maximum physical address we can use pages from */
-# define KEXEC_SOURCE_MEMORY_LIMIT (0xFFFFFFFFFFUL)
+# define KEXEC_SOURCE_MEMORY_LIMIT (MAXMEM-1)
/* Maximum address we can reach in physical address mode */
-# define KEXEC_DESTINATION_MEMORY_LIMIT (0xFFFFFFFFFFUL)
+# define KEXEC_DESTINATION_MEMORY_LIMIT (MAXMEM-1)
/* Maximum address we can use for the control pages */
-# define KEXEC_CONTROL_MEMORY_LIMIT (0xFFFFFFFFFFUL)
+# define KEXEC_CONTROL_MEMORY_LIMIT (MAXMEM-1)
/* Allocate one page for the pdp and the second for the code */
# define KEXEC_CONTROL_PAGE_SIZE (4096UL + 4096UL)
@@ -163,6 +163,9 @@ struct kimage_arch {
};
#endif
+typedef void crash_vmclear_fn(void);
+extern crash_vmclear_fn __rcu *crash_vmclear_loaded_vmcss;
+
#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_KEXEC_H */
diff --git a/arch/x86/include/asm/kgdb.h b/arch/x86/include/asm/kgdb.h
index 77e95f54570..332f98c9111 100644
--- a/arch/x86/include/asm/kgdb.h
+++ b/arch/x86/include/asm/kgdb.h
@@ -64,11 +64,15 @@ enum regnames {
GDB_PS, /* 17 */
GDB_CS, /* 18 */
GDB_SS, /* 19 */
+ GDB_DS, /* 20 */
+ GDB_ES, /* 21 */
+ GDB_FS, /* 22 */
+ GDB_GS, /* 23 */
};
#define GDB_ORIG_AX 57
-#define DBG_MAX_REG_NUM 20
-/* 17 64 bit regs and 3 32 bit regs */
-#define NUMREGBYTES ((17 * 8) + (3 * 4))
+#define DBG_MAX_REG_NUM 24
+/* 17 64 bit regs and 5 32 bit regs */
+#define NUMREGBYTES ((17 * 8) + (5 * 4))
#endif /* ! CONFIG_X86_32 */
static inline void arch_kgdb_breakpoint(void)
diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
index 54788253915..53cdfb2857a 100644
--- a/arch/x86/include/asm/kprobes.h
+++ b/arch/x86/include/asm/kprobes.h
@@ -27,6 +27,7 @@
#include <asm/insn.h>
#define __ARCH_WANT_KPROBES_INSN_SLOT
+#define ARCH_SUPPORTS_KPROBES_ON_FTRACE
struct pt_regs;
struct kprobe;
@@ -48,10 +49,10 @@ typedef u8 kprobe_opcode_t;
#define flush_insn_slot(p) do { } while (0)
/* optinsn template addresses */
-extern kprobe_opcode_t optprobe_template_entry;
-extern kprobe_opcode_t optprobe_template_val;
-extern kprobe_opcode_t optprobe_template_call;
-extern kprobe_opcode_t optprobe_template_end;
+extern __visible kprobe_opcode_t optprobe_template_entry;
+extern __visible kprobe_opcode_t optprobe_template_val;
+extern __visible kprobe_opcode_t optprobe_template_call;
+extern __visible kprobe_opcode_t optprobe_template_end;
#define MAX_OPTIMIZED_LENGTH (MAX_INSN_SIZE + RELATIVE_ADDR_SIZE)
#define MAX_OPTINSN_SIZE \
(((unsigned long)&optprobe_template_end - \
@@ -61,7 +62,7 @@ extern kprobe_opcode_t optprobe_template_end;
extern const int kretprobe_blacklist_size;
void arch_remove_kprobe(struct kprobe *p);
-void kretprobe_trampoline(void);
+asmlinkage void kretprobe_trampoline(void);
/* Architecture specific copy of original instruction*/
struct arch_specific_insn {
@@ -76,6 +77,7 @@ struct arch_specific_insn {
* a post_handler or break_handler).
*/
int boostable;
+ bool if_modifier;
};
struct arch_optimized_insn {
@@ -114,4 +116,6 @@ struct kprobe_ctlblk {
extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
extern int kprobe_exceptions_notify(struct notifier_block *self,
unsigned long val, void *data);
+extern int kprobe_int3_handler(struct pt_regs *regs);
+extern int kprobe_debug_handler(struct pt_regs *regs);
#endif /* _ASM_X86_KPROBES_H */
diff --git a/arch/x86/include/asm/kvm.h b/arch/x86/include/asm/kvm.h
deleted file mode 100644
index 4d8dcbdfc12..00000000000
--- a/arch/x86/include/asm/kvm.h
+++ /dev/null
@@ -1,324 +0,0 @@
-#ifndef _ASM_X86_KVM_H
-#define _ASM_X86_KVM_H
-
-/*
- * KVM x86 specific structures and definitions
- *
- */
-
-#include <linux/types.h>
-#include <linux/ioctl.h>
-
-/* Select x86 specific features in <linux/kvm.h> */
-#define __KVM_HAVE_PIT
-#define __KVM_HAVE_IOAPIC
-#define __KVM_HAVE_DEVICE_ASSIGNMENT
-#define __KVM_HAVE_MSI
-#define __KVM_HAVE_USER_NMI
-#define __KVM_HAVE_GUEST_DEBUG
-#define __KVM_HAVE_MSIX
-#define __KVM_HAVE_MCE
-#define __KVM_HAVE_PIT_STATE2
-#define __KVM_HAVE_XEN_HVM
-#define __KVM_HAVE_VCPU_EVENTS
-#define __KVM_HAVE_DEBUGREGS
-#define __KVM_HAVE_XSAVE
-#define __KVM_HAVE_XCRS
-
-/* Architectural interrupt line count. */
-#define KVM_NR_INTERRUPTS 256
-
-struct kvm_memory_alias {
- __u32 slot; /* this has a different namespace than memory slots */
- __u32 flags;
- __u64 guest_phys_addr;
- __u64 memory_size;
- __u64 target_phys_addr;
-};
-
-/* for KVM_GET_IRQCHIP and KVM_SET_IRQCHIP */
-struct kvm_pic_state {
- __u8 last_irr; /* edge detection */
- __u8 irr; /* interrupt request register */
- __u8 imr; /* interrupt mask register */
- __u8 isr; /* interrupt service register */
- __u8 priority_add; /* highest irq priority */
- __u8 irq_base;
- __u8 read_reg_select;
- __u8 poll;
- __u8 special_mask;
- __u8 init_state;
- __u8 auto_eoi;
- __u8 rotate_on_auto_eoi;
- __u8 special_fully_nested_mode;
- __u8 init4; /* true if 4 byte init */
- __u8 elcr; /* PIIX edge/trigger selection */
- __u8 elcr_mask;
-};
-
-#define KVM_IOAPIC_NUM_PINS 24
-struct kvm_ioapic_state {
- __u64 base_address;
- __u32 ioregsel;
- __u32 id;
- __u32 irr;
- __u32 pad;
- union {
- __u64 bits;
- struct {
- __u8 vector;
- __u8 delivery_mode:3;
- __u8 dest_mode:1;
- __u8 delivery_status:1;
- __u8 polarity:1;
- __u8 remote_irr:1;
- __u8 trig_mode:1;
- __u8 mask:1;
- __u8 reserve:7;
- __u8 reserved[4];
- __u8 dest_id;
- } fields;
- } redirtbl[KVM_IOAPIC_NUM_PINS];
-};
-
-#define KVM_IRQCHIP_PIC_MASTER 0
-#define KVM_IRQCHIP_PIC_SLAVE 1
-#define KVM_IRQCHIP_IOAPIC 2
-#define KVM_NR_IRQCHIPS 3
-
-/* for KVM_GET_REGS and KVM_SET_REGS */
-struct kvm_regs {
- /* out (KVM_GET_REGS) / in (KVM_SET_REGS) */
- __u64 rax, rbx, rcx, rdx;
- __u64 rsi, rdi, rsp, rbp;
- __u64 r8, r9, r10, r11;
- __u64 r12, r13, r14, r15;
- __u64 rip, rflags;
-};
-
-/* for KVM_GET_LAPIC and KVM_SET_LAPIC */
-#define KVM_APIC_REG_SIZE 0x400
-struct kvm_lapic_state {
- char regs[KVM_APIC_REG_SIZE];
-};
-
-struct kvm_segment {
- __u64 base;
- __u32 limit;
- __u16 selector;
- __u8 type;
- __u8 present, dpl, db, s, l, g, avl;
- __u8 unusable;
- __u8 padding;
-};
-
-struct kvm_dtable {
- __u64 base;
- __u16 limit;
- __u16 padding[3];
-};
-
-
-/* for KVM_GET_SREGS and KVM_SET_SREGS */
-struct kvm_sregs {
- /* out (KVM_GET_SREGS) / in (KVM_SET_SREGS) */
- struct kvm_segment cs, ds, es, fs, gs, ss;
- struct kvm_segment tr, ldt;
- struct kvm_dtable gdt, idt;
- __u64 cr0, cr2, cr3, cr4, cr8;
- __u64 efer;
- __u64 apic_base;
- __u64 interrupt_bitmap[(KVM_NR_INTERRUPTS + 63) / 64];
-};
-
-/* for KVM_GET_FPU and KVM_SET_FPU */
-struct kvm_fpu {
- __u8 fpr[8][16];
- __u16 fcw;
- __u16 fsw;
- __u8 ftwx; /* in fxsave format */
- __u8 pad1;
- __u16 last_opcode;
- __u64 last_ip;
- __u64 last_dp;
- __u8 xmm[16][16];
- __u32 mxcsr;
- __u32 pad2;
-};
-
-struct kvm_msr_entry {
- __u32 index;
- __u32 reserved;
- __u64 data;
-};
-
-/* for KVM_GET_MSRS and KVM_SET_MSRS */
-struct kvm_msrs {
- __u32 nmsrs; /* number of msrs in entries */
- __u32 pad;
-
- struct kvm_msr_entry entries[0];
-};
-
-/* for KVM_GET_MSR_INDEX_LIST */
-struct kvm_msr_list {
- __u32 nmsrs; /* number of msrs in entries */
- __u32 indices[0];
-};
-
-
-struct kvm_cpuid_entry {
- __u32 function;
- __u32 eax;
- __u32 ebx;
- __u32 ecx;
- __u32 edx;
- __u32 padding;
-};
-
-/* for KVM_SET_CPUID */
-struct kvm_cpuid {
- __u32 nent;
- __u32 padding;
- struct kvm_cpuid_entry entries[0];
-};
-
-struct kvm_cpuid_entry2 {
- __u32 function;
- __u32 index;
- __u32 flags;
- __u32 eax;
- __u32 ebx;
- __u32 ecx;
- __u32 edx;
- __u32 padding[3];
-};
-
-#define KVM_CPUID_FLAG_SIGNIFCANT_INDEX 1
-#define KVM_CPUID_FLAG_STATEFUL_FUNC 2
-#define KVM_CPUID_FLAG_STATE_READ_NEXT 4
-
-/* for KVM_SET_CPUID2 */
-struct kvm_cpuid2 {
- __u32 nent;
- __u32 padding;
- struct kvm_cpuid_entry2 entries[0];
-};
-
-/* for KVM_GET_PIT and KVM_SET_PIT */
-struct kvm_pit_channel_state {
- __u32 count; /* can be 65536 */
- __u16 latched_count;
- __u8 count_latched;
- __u8 status_latched;
- __u8 status;
- __u8 read_state;
- __u8 write_state;
- __u8 write_latch;
- __u8 rw_mode;
- __u8 mode;
- __u8 bcd;
- __u8 gate;
- __s64 count_load_time;
-};
-
-struct kvm_debug_exit_arch {
- __u32 exception;
- __u32 pad;
- __u64 pc;
- __u64 dr6;
- __u64 dr7;
-};
-
-#define KVM_GUESTDBG_USE_SW_BP 0x00010000
-#define KVM_GUESTDBG_USE_HW_BP 0x00020000
-#define KVM_GUESTDBG_INJECT_DB 0x00040000
-#define KVM_GUESTDBG_INJECT_BP 0x00080000
-
-/* for KVM_SET_GUEST_DEBUG */
-struct kvm_guest_debug_arch {
- __u64 debugreg[8];
-};
-
-struct kvm_pit_state {
- struct kvm_pit_channel_state channels[3];
-};
-
-#define KVM_PIT_FLAGS_HPET_LEGACY 0x00000001
-
-struct kvm_pit_state2 {
- struct kvm_pit_channel_state channels[3];
- __u32 flags;
- __u32 reserved[9];
-};
-
-struct kvm_reinject_control {
- __u8 pit_reinject;
- __u8 reserved[31];
-};
-
-/* When set in flags, include corresponding fields on KVM_SET_VCPU_EVENTS */
-#define KVM_VCPUEVENT_VALID_NMI_PENDING 0x00000001
-#define KVM_VCPUEVENT_VALID_SIPI_VECTOR 0x00000002
-#define KVM_VCPUEVENT_VALID_SHADOW 0x00000004
-
-/* Interrupt shadow states */
-#define KVM_X86_SHADOW_INT_MOV_SS 0x01
-#define KVM_X86_SHADOW_INT_STI 0x02
-
-/* for KVM_GET/SET_VCPU_EVENTS */
-struct kvm_vcpu_events {
- struct {
- __u8 injected;
- __u8 nr;
- __u8 has_error_code;
- __u8 pad;
- __u32 error_code;
- } exception;
- struct {
- __u8 injected;
- __u8 nr;
- __u8 soft;
- __u8 shadow;
- } interrupt;
- struct {
- __u8 injected;
- __u8 pending;
- __u8 masked;
- __u8 pad;
- } nmi;
- __u32 sipi_vector;
- __u32 flags;
- __u32 reserved[10];
-};
-
-/* for KVM_GET/SET_DEBUGREGS */
-struct kvm_debugregs {
- __u64 db[4];
- __u64 dr6;
- __u64 dr7;
- __u64 flags;
- __u64 reserved[9];
-};
-
-/* for KVM_CAP_XSAVE */
-struct kvm_xsave {
- __u32 region[1024];
-};
-
-#define KVM_MAX_XCRS 16
-
-struct kvm_xcr {
- __u32 xcr;
- __u32 reserved;
- __u64 value;
-};
-
-struct kvm_xcrs {
- __u32 nr_xcrs;
- __u32 flags;
- struct kvm_xcr xcrs[KVM_MAX_XCRS];
- __u64 padding[16];
-};
-
-#endif /* _ASM_X86_KVM_H */
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index 7b9cfc4878a..a04fe4eb237 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -86,6 +86,19 @@ struct x86_instruction_info {
struct x86_emulate_ops {
/*
+ * read_gpr: read a general purpose register (rax - r15)
+ *
+ * @reg: gpr number.
+ */
+ ulong (*read_gpr)(struct x86_emulate_ctxt *ctxt, unsigned reg);
+ /*
+ * write_gpr: write a general purpose register (rax - r15)
+ *
+ * @reg: gpr number.
+ * @val: value to write.
+ */
+ void (*write_gpr)(struct x86_emulate_ctxt *ctxt, unsigned reg, ulong val);
+ /*
* read_std: Read bytes of standard (non-emulated/special) memory.
* Used for descriptor reading.
* @addr: [IN ] Linear address from which to read.
@@ -191,16 +204,17 @@ struct x86_emulate_ops {
struct x86_instruction_info *info,
enum x86_intercept_stage stage);
- bool (*get_cpuid)(struct x86_emulate_ctxt *ctxt,
- u32 *eax, u32 *ebx, u32 *ecx, u32 *edx);
+ void (*get_cpuid)(struct x86_emulate_ctxt *ctxt,
+ u32 *eax, u32 *ebx, u32 *ecx, u32 *edx);
};
typedef u32 __attribute__((vector_size(16))) sse128_t;
/* Type, address-of, and value of an instruction's operand. */
struct operand {
- enum { OP_REG, OP_MEM, OP_IMM, OP_XMM, OP_NONE } type;
+ enum { OP_REG, OP_MEM, OP_MEM_STR, OP_IMM, OP_XMM, OP_MM, OP_NONE } type;
unsigned int bytes;
+ unsigned int count;
union {
unsigned long orig_val;
u64 orig_val64;
@@ -212,12 +226,15 @@ struct operand {
unsigned seg;
} mem;
unsigned xmm;
+ unsigned mm;
} addr;
union {
unsigned long val;
u64 val64;
char valptr[sizeof(unsigned long) + 2];
sse128_t vec_val;
+ u64 mm_val;
+ void *data;
};
};
@@ -233,27 +250,40 @@ struct read_cache {
unsigned long end;
};
+/* Execution mode, passed to the emulator. */
+enum x86emul_mode {
+ X86EMUL_MODE_REAL, /* Real mode. */
+ X86EMUL_MODE_VM86, /* Virtual 8086 mode. */
+ X86EMUL_MODE_PROT16, /* 16-bit protected mode. */
+ X86EMUL_MODE_PROT32, /* 32-bit protected mode. */
+ X86EMUL_MODE_PROT64, /* 64-bit (long) mode. */
+};
+
struct x86_emulate_ctxt {
- struct x86_emulate_ops *ops;
+ const struct x86_emulate_ops *ops;
/* Register state before/after emulation. */
unsigned long eflags;
unsigned long eip; /* eip before instruction emulation */
/* Emulated execution mode, represented by an X86EMUL_MODE value. */
- int mode;
+ enum x86emul_mode mode;
/* interruptibility state, as a result of execution of STI or MOV SS */
int interruptibility;
bool guest_mode; /* guest running a nested guest */
bool perm_ok; /* do not check permissions if true */
- bool only_vendor_specific_insn;
+ bool ud; /* inject an #UD if host doesn't support insn */
bool have_exception;
struct x86_exception exception;
- /* decode cache */
- u8 twobyte;
+ /*
+ * decode cache
+ */
+
+ /* current opcode length in bytes */
+ u8 opcode_len;
u8 b;
u8 intercept;
u8 lock_prefix;
@@ -277,9 +307,11 @@ struct x86_emulate_ctxt {
u8 modrm_seg;
bool rip_relative;
unsigned long _eip;
- /* Fields above regs are cleared together. */
- unsigned long regs[NR_VCPU_REGS];
struct operand memop;
+ u32 regs_valid; /* bitmaps of registers in _regs[] that can be read */
+ u32 regs_dirty; /* bitmaps of registers in _regs[] that have been written */
+ /* Fields above regs are cleared together. */
+ unsigned long _regs[NR_VCPU_REGS];
struct operand *memopp;
struct fetch_cache fetch;
struct read_cache io_read;
@@ -290,17 +322,6 @@ struct x86_emulate_ctxt {
#define REPE_PREFIX 0xf3
#define REPNE_PREFIX 0xf2
-/* Execution mode, passed to the emulator. */
-#define X86EMUL_MODE_REAL 0 /* Real mode. */
-#define X86EMUL_MODE_VM86 1 /* Virtual 8086 mode. */
-#define X86EMUL_MODE_PROT16 2 /* 16-bit protected mode. */
-#define X86EMUL_MODE_PROT32 4 /* 32-bit protected mode. */
-#define X86EMUL_MODE_PROT64 8 /* 64-bit (long) mode. */
-
-/* any protected mode */
-#define X86EMUL_MODE_PROT (X86EMUL_MODE_PROT16|X86EMUL_MODE_PROT32| \
- X86EMUL_MODE_PROT64)
-
/* CPUID vendors */
#define X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx 0x68747541
#define X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx 0x444d4163
@@ -388,7 +409,10 @@ bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt);
#define EMULATION_INTERCEPTED 2
int x86_emulate_insn(struct x86_emulate_ctxt *ctxt);
int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
- u16 tss_selector, int reason,
+ u16 tss_selector, int idt_index, int reason,
bool has_error_code, u32 error_code);
int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq);
+void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt);
+void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt);
+
#endif /* _ASM_X86_KVM_X86_EMULATE_H */
diff --git a/arch/x86/include/asm/kvm_guest.h b/arch/x86/include/asm/kvm_guest.h
new file mode 100644
index 00000000000..a92b1763c41
--- /dev/null
+++ b/arch/x86/include/asm/kvm_guest.h
@@ -0,0 +1,6 @@
+#ifndef _ASM_X86_KVM_GUEST_H
+#define _ASM_X86_KVM_GUEST_H
+
+int kvm_setup_vsyscall_timeinfo(void);
+
+#endif /* _ASM_X86_KVM_GUEST_H */
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 52d6640a5ca..49205d01b9a 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -22,39 +22,41 @@
#include <linux/kvm_para.h>
#include <linux/kvm_types.h>
#include <linux/perf_event.h>
+#include <linux/pvclock_gtod.h>
+#include <linux/clocksource.h>
#include <asm/pvclock-abi.h>
#include <asm/desc.h>
#include <asm/mtrr.h>
#include <asm/msr-index.h>
+#include <asm/asm.h>
-#define KVM_MAX_VCPUS 254
-#define KVM_SOFT_MAX_VCPUS 64
-#define KVM_MEMORY_SLOTS 32
-/* memory slots that does not exposed to userspace */
-#define KVM_PRIVATE_MEM_SLOTS 4
-#define KVM_MEM_SLOTS_NUM (KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
+#define KVM_MAX_VCPUS 255
+#define KVM_SOFT_MAX_VCPUS 160
+#define KVM_USER_MEM_SLOTS 125
+/* memory slots that are not exposed to userspace */
+#define KVM_PRIVATE_MEM_SLOTS 3
+#define KVM_MEM_SLOTS_NUM (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS)
#define KVM_MMIO_SIZE 16
#define KVM_PIO_PAGE_OFFSET 1
#define KVM_COALESCED_MMIO_PAGE_OFFSET 2
+#define KVM_IRQCHIP_NUM_PINS KVM_IOAPIC_NUM_PINS
+
#define CR0_RESERVED_BITS \
(~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
| X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
| X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))
-#define CR3_PAE_RESERVED_BITS ((X86_CR3_PWT | X86_CR3_PCD) - 1)
-#define CR3_NONPAE_RESERVED_BITS ((PAGE_SIZE-1) & ~(X86_CR3_PWT | X86_CR3_PCD))
-#define CR3_L_MODE_RESERVED_BITS (CR3_NONPAE_RESERVED_BITS | \
- 0xFFFFFF0000000000ULL)
+#define CR3_L_MODE_RESERVED_BITS 0xFFFFFF0000000000ULL
#define CR4_RESERVED_BITS \
(~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
| X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE \
- | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR \
- | X86_CR4_OSXSAVE | X86_CR4_SMEP | X86_CR4_RDWRGSFS \
- | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE))
+ | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR | X86_CR4_PCIDE \
+ | X86_CR4_OSXSAVE | X86_CR4_SMEP | X86_CR4_FSGSBASE \
+ | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE | X86_CR4_SMAP))
#define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)
@@ -73,21 +75,12 @@
#define KVM_HPAGE_MASK(x) (~(KVM_HPAGE_SIZE(x) - 1))
#define KVM_PAGES_PER_HPAGE(x) (KVM_HPAGE_SIZE(x) / PAGE_SIZE)
-#define DE_VECTOR 0
-#define DB_VECTOR 1
-#define BP_VECTOR 3
-#define OF_VECTOR 4
-#define BR_VECTOR 5
-#define UD_VECTOR 6
-#define NM_VECTOR 7
-#define DF_VECTOR 8
-#define TS_VECTOR 10
-#define NP_VECTOR 11
-#define SS_VECTOR 12
-#define GP_VECTOR 13
-#define PF_VECTOR 14
-#define MF_VECTOR 16
-#define MC_VECTOR 18
+static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
+{
+ /* KVM_HPAGE_GFN_SHIFT(PT_PAGE_TABLE_LEVEL) must be 0. */
+ return (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
+ (base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
+}
#define SELECTOR_TI_MASK (1 << 2)
#define SELECTOR_RPL_MASK 0x03
@@ -102,13 +95,10 @@
#define KVM_REFILL_PAGES 25
#define KVM_MAX_CPUID_ENTRIES 80
#define KVM_NR_FIXED_MTRR_REGION 88
-#define KVM_NR_VAR_MTRR 8
+#define KVM_NR_VAR_MTRR 10
#define ASYNC_PF_PER_VCPU 64
-extern raw_spinlock_t kvm_lock;
-extern struct list_head vm_list;
-
struct kvm_vcpu;
struct kvm;
struct kvm_async_pf;
@@ -140,7 +130,6 @@ enum kvm_reg_ex {
VCPU_EXREG_PDPTR = NR_VCPU_REGS,
VCPU_EXREG_CR3,
VCPU_EXREG_RFLAGS,
- VCPU_EXREG_CPL,
VCPU_EXREG_SEGMENTS,
};
@@ -172,6 +161,16 @@ enum {
#define DR7_FIXED_1 0x00000400
#define DR7_VOLATILE 0xffff23ff
+/* apic attention bits */
+#define KVM_APIC_CHECK_VAPIC 0
+/*
+ * The following bit is set with PV-EOI, unset on EOI.
+ * We detect PV-EOI changes by guest by comparing
+ * this bit with PV-EOI in guest memory.
+ * See the implementation in apic_update_pv_eoi.
+ */
+#define KVM_APIC_PV_EOI_PENDING 1
+
/*
* We don't want allocation failures within the mmu code, so we preallocate
* enough memory for a single page fault in a cache.
@@ -181,13 +180,6 @@ struct kvm_mmu_memory_cache {
void *objects[KVM_NR_MEM_OBJS];
};
-#define NR_PTE_CHAIN_ENTRIES 5
-
-struct kvm_pte_chain {
- u64 *parent_ptes[NR_PTE_CHAIN_ENTRIES];
- struct hlist_node link;
-};
-
/*
* kvm_mmu_page_role, below, is defined as:
*
@@ -228,24 +220,26 @@ struct kvm_mmu_page {
u64 *spt;
/* hold the gfn of each spte inside spt */
gfn_t *gfns;
- /*
- * One bit set per slot which has memory
- * in this shadow page.
- */
- DECLARE_BITMAP(slot_bitmap, KVM_MEM_SLOTS_NUM);
bool unsync;
int root_count; /* Currently serving as active root */
unsigned int unsync_children;
unsigned long parent_ptes; /* Reverse mapping for parent_pte */
+
+ /* The page is obsolete if mmu_valid_gen != kvm->arch.mmu_valid_gen. */
+ unsigned long mmu_valid_gen;
+
DECLARE_BITMAP(unsync_child_bitmap, 512);
#ifdef CONFIG_X86_32
+ /*
+ * Used out of the mmu-lock to avoid reading spte values while an
+ * update is in progress; see the comments in __get_spte_lockless().
+ */
int clear_spte_count;
#endif
+ /* Number of writes since the last time traversal visited this page. */
int write_flooding_count;
-
- struct rcu_head rcu;
};
struct kvm_pio_request {
@@ -261,7 +255,6 @@ struct kvm_pio_request {
* mode.
*/
struct kvm_mmu {
- void (*new_cr3)(struct kvm_vcpu *vcpu);
void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long root);
unsigned long (*get_cr3)(struct kvm_vcpu *vcpu);
u64 (*get_pdptr)(struct kvm_vcpu *vcpu, int index);
@@ -269,7 +262,6 @@ struct kvm_mmu {
bool prefault);
void (*inject_page_fault)(struct kvm_vcpu *vcpu,
struct x86_exception *fault);
- void (*free)(struct kvm_vcpu *vcpu);
gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva, u32 access,
struct x86_exception *exception);
gpa_t (*translate_gpa)(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access);
@@ -284,9 +276,24 @@ struct kvm_mmu {
union kvm_mmu_page_role base_role;
bool direct_map;
+ /*
+ * Bitmap; bit set = permission fault
+ * Byte index: page fault error code [4:1]
+ * Bit index: pte permissions in ACC_* format
+ */
+ u8 permissions[16];
+
u64 *pae_root;
u64 *lm_root;
u64 rsvd_bits_mask[2][4];
+ u64 bad_mt_xwr;
+
+ /*
+ * Bitmap: bit set = last pte in walk
+ * index[0:1]: level (zero-based)
+ * index[2]: pte.ps
+ */
+ u8 last_pte_bitmap;
bool nx;
@@ -317,13 +324,19 @@ struct kvm_pmu {
u64 global_ovf_ctrl;
u64 counter_bitmask[2];
u64 global_ctrl_mask;
+ u64 reserved_bits;
u8 version;
- struct kvm_pmc gp_counters[X86_PMC_MAX_GENERIC];
- struct kvm_pmc fixed_counters[X86_PMC_MAX_FIXED];
+ struct kvm_pmc gp_counters[INTEL_PMC_MAX_GENERIC];
+ struct kvm_pmc fixed_counters[INTEL_PMC_MAX_FIXED];
struct irq_work irq_work;
u64 reprogram_pmi;
};
+enum {
+ KVM_DEBUGREG_BP_ENABLED = 1,
+ KVM_DEBUGREG_WONT_EXIT = 2,
+};
+
struct kvm_vcpu_arch {
/*
* rip and regs accesses must go through
@@ -344,9 +357,9 @@ struct kvm_vcpu_arch {
u64 efer;
u64 apic_base;
struct kvm_lapic *apic; /* kernel irqchip context */
+ unsigned long apic_attention;
int32_t apic_arb_prio;
int mp_state;
- int sipi_vector;
u64 ia32_misc_enable_msr;
bool tpr_access_reporting;
@@ -381,6 +394,8 @@ struct kvm_vcpu_arch {
struct fpu guest_fpu;
u64 xcr0;
+ u64 guest_supported_xcr0;
+ u32 guest_xstate_size;
struct kvm_pio_request pio;
void *pio_data;
@@ -410,12 +425,15 @@ struct kvm_vcpu_arch {
struct x86_emulate_ctxt emulate_ctxt;
bool emulate_regs_need_sync_to_vcpu;
bool emulate_regs_need_sync_from_vcpu;
+ int (*complete_userspace_io)(struct kvm_vcpu *vcpu);
gpa_t time;
struct pvclock_vcpu_time_info hv_clock;
unsigned int hw_tsc_khz;
- unsigned int time_offset;
- struct page *time_page;
+ struct gfn_to_hva_cache pv_time;
+ bool pv_time_enabled;
+ /* set guest stopped flag in pvclock flags field */
+ bool pvclock_set_guest_stopped_request;
struct {
u64 msr_val;
@@ -426,26 +444,31 @@ struct kvm_vcpu_arch {
} st;
u64 last_guest_tsc;
- u64 last_kernel_ns;
- u64 last_tsc_nsec;
- u64 last_tsc_write;
- u32 virtual_tsc_khz;
+ u64 last_host_tsc;
+ u64 tsc_offset_adjustment;
+ u64 this_tsc_nsec;
+ u64 this_tsc_write;
+ u8 this_tsc_generation;
bool tsc_catchup;
- u32 tsc_catchup_mult;
- s8 tsc_catchup_shift;
+ bool tsc_always_catchup;
+ s8 virtual_tsc_shift;
+ u32 virtual_tsc_mult;
+ u32 virtual_tsc_khz;
+ s64 ia32_tsc_adjust_msr;
atomic_t nmi_queued; /* unprocessed asynchronous NMIs */
unsigned nmi_pending; /* NMI queued after currently running handler */
bool nmi_injected; /* Trying to inject an NMI this entry */
struct mtrr_state_type mtrr_state;
- u32 pat;
+ u64 pat;
- int switch_db_regs;
+ unsigned switch_db_regs;
unsigned long db[KVM_NR_DB_REGS];
unsigned long dr6;
unsigned long dr7;
unsigned long eff_db[KVM_NR_DB_REGS];
+ unsigned long guest_debug_dr7;
u64 mcg_cap;
u64 mcg_status;
@@ -478,6 +501,51 @@ struct kvm_vcpu_arch {
u32 id;
bool send_user_only;
} apf;
+
+ /* OSVW MSRs (AMD only) */
+ struct {
+ u64 length;
+ u64 status;
+ } osvw;
+
+ struct {
+ u64 msr_val;
+ struct gfn_to_hva_cache data;
+ } pv_eoi;
+
+ /*
+ * Indicates whether the access faulted on its guest page table;
+ * set when fixing a page fault and used to detect unhandleable
+ * instructions.
+ */
+ bool write_fault_to_shadow_pgtable;
+
+ /* set at EPT violation at this point */
+ unsigned long exit_qualification;
+
+ /* pv related host specific info */
+ struct {
+ bool pv_unhalted;
+ } pv;
+};
+
+struct kvm_lpage_info {
+ int write_count;
+};
+
+struct kvm_arch_memory_slot {
+ unsigned long *rmap[KVM_NR_PAGE_SIZES];
+ struct kvm_lpage_info *lpage_info[KVM_NR_PAGE_SIZES - 1];
+};
+
+struct kvm_apic_map {
+ struct rcu_head rcu;
+ u8 ldr_bits;
+ /* fields below are used to decode ldr values in different modes */
+ u32 cid_shift, cid_mask, lid_mask;
+ struct kvm_lapic *phys_map[256];
+ /* first index is the cluster id, second is the cpu id within the cluster */
+ struct kvm_lapic *logical_map[16][16];
};
struct kvm_arch {
@@ -485,18 +553,25 @@ struct kvm_arch {
unsigned int n_requested_mmu_pages;
unsigned int n_max_mmu_pages;
unsigned int indirect_shadow_pages;
+ unsigned long mmu_valid_gen;
struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
/*
* Hash table of struct kvm_mmu_page.
*/
struct list_head active_mmu_pages;
+ struct list_head zapped_obsolete_pages;
+
struct list_head assigned_dev_head;
struct iommu_domain *iommu_domain;
- int iommu_flags;
+ bool iommu_noncoherent;
+#define __KVM_HAVE_ARCH_NONCOHERENT_DMA
+ atomic_t noncoherent_dma_count;
struct kvm_pic *vpic;
struct kvm_ioapic *vioapic;
struct kvm_pit *vpit;
int vapics_in_nmi_mode;
+ struct mutex apic_map_lock;
+ struct kvm_apic_map *apic_map;
unsigned int tss_addr;
struct page *apic_access_page;
@@ -511,16 +586,27 @@ struct kvm_arch {
s64 kvmclock_offset;
raw_spinlock_t tsc_write_lock;
u64 last_tsc_nsec;
- u64 last_tsc_offset;
u64 last_tsc_write;
+ u32 last_tsc_khz;
+ u64 cur_tsc_nsec;
+ u64 cur_tsc_write;
+ u64 cur_tsc_offset;
+ u8 cur_tsc_generation;
+ int nr_vcpus_matched_tsc;
+
+ spinlock_t pvclock_gtod_sync_lock;
+ bool use_master_clock;
+ u64 master_kernel_ns;
+ cycle_t master_cycle_now;
+ struct delayed_work kvmclock_update_work;
+ struct delayed_work kvmclock_sync_work;
struct kvm_xen_hvm_config xen_hvm_config;
/* fields used by HYPER-V emulation */
u64 hv_guest_os_id;
u64 hv_hypercall;
-
- atomic_t reader_counter;
+ u64 hv_tsc_page;
#ifdef CONFIG_KVM_MMU_AUDIT
int audit_point;
@@ -568,6 +654,12 @@ struct kvm_vcpu_stat {
struct x86_instruction_info;
+struct msr_data {
+ bool host_initiated;
+ u32 index;
+ u64 data;
+};
+
struct kvm_x86_ops {
int (*cpu_has_kvm_support)(void); /* __init */
int (*disabled_by_bios)(void); /* __init */
@@ -582,16 +674,15 @@ struct kvm_x86_ops {
/* Create, but do not attach this VCPU */
struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned id);
void (*vcpu_free)(struct kvm_vcpu *vcpu);
- int (*vcpu_reset)(struct kvm_vcpu *vcpu);
+ void (*vcpu_reset)(struct kvm_vcpu *vcpu);
void (*prepare_guest_switch)(struct kvm_vcpu *vcpu);
void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
void (*vcpu_put)(struct kvm_vcpu *vcpu);
- void (*set_guest_debug)(struct kvm_vcpu *vcpu,
- struct kvm_guest_debug *dbg);
+ void (*update_db_bp_intercept)(struct kvm_vcpu *vcpu);
int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata);
- int (*set_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
+ int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr);
u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
void (*get_segment)(struct kvm_vcpu *vcpu,
struct kvm_segment *var, int seg);
@@ -610,6 +701,9 @@ struct kvm_x86_ops {
void (*set_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
void (*get_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
void (*set_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
+ u64 (*get_dr6)(struct kvm_vcpu *vcpu);
+ void (*set_dr6)(struct kvm_vcpu *vcpu, unsigned long value);
+ void (*sync_dirty_debug_regs)(struct kvm_vcpu *vcpu);
void (*set_dr7)(struct kvm_vcpu *vcpu, unsigned long value);
void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg);
unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
@@ -639,12 +733,20 @@ struct kvm_x86_ops {
void (*enable_nmi_window)(struct kvm_vcpu *vcpu);
void (*enable_irq_window)(struct kvm_vcpu *vcpu);
void (*update_cr8_intercept)(struct kvm_vcpu *vcpu, int tpr, int irr);
+ int (*vm_has_apicv)(struct kvm *kvm);
+ void (*hwapic_irr_update)(struct kvm_vcpu *vcpu, int max_irr);
+ void (*hwapic_isr_update)(struct kvm *kvm, int isr);
+ void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
+ void (*set_virtual_x2apic_mode)(struct kvm_vcpu *vcpu, bool set);
+ void (*deliver_posted_interrupt)(struct kvm_vcpu *vcpu, int vector);
+ void (*sync_pir_to_irr)(struct kvm_vcpu *vcpu);
int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
int (*get_tdp_level)(void);
u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
int (*get_lpage_level)(void);
bool (*rdtscp_supported)(void);
- void (*adjust_tsc_offset)(struct kvm_vcpu *vcpu, s64 adjustment);
+ bool (*invpcid_supported)(void);
+ void (*adjust_tsc_offset)(struct kvm_vcpu *vcpu, s64 adjustment, bool host);
void (*set_tdp_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
@@ -652,17 +754,22 @@ struct kvm_x86_ops {
bool (*has_wbinvd_exit)(void);
- void (*set_tsc_khz)(struct kvm_vcpu *vcpu, u32 user_tsc_khz);
+ void (*set_tsc_khz)(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale);
+ u64 (*read_tsc_offset)(struct kvm_vcpu *vcpu);
void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
u64 (*compute_tsc_offset)(struct kvm_vcpu *vcpu, u64 target_tsc);
- u64 (*read_l1_tsc)(struct kvm_vcpu *vcpu);
+ u64 (*read_l1_tsc)(struct kvm_vcpu *vcpu, u64 host_tsc);
void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2);
int (*check_intercept)(struct kvm_vcpu *vcpu,
struct x86_instruction_info *info,
enum x86_intercept_stage stage);
+ void (*handle_external_intr)(struct kvm_vcpu *vcpu);
+ bool (*mpx_supported)(void);
+
+ int (*check_nested_events)(struct kvm_vcpu *vcpu, bool external_intr);
};
struct kvm_arch_async_pf {
@@ -674,20 +781,33 @@ struct kvm_arch_async_pf {
extern struct kvm_x86_ops *kvm_x86_ops;
+static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu,
+ s64 adjustment)
+{
+ kvm_x86_ops->adjust_tsc_offset(vcpu, adjustment, false);
+}
+
+static inline void adjust_tsc_offset_host(struct kvm_vcpu *vcpu, s64 adjustment)
+{
+ kvm_x86_ops->adjust_tsc_offset(vcpu, adjustment, true);
+}
+
int kvm_mmu_module_init(void);
void kvm_mmu_module_exit(void);
void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
int kvm_mmu_create(struct kvm_vcpu *vcpu);
-int kvm_mmu_setup(struct kvm_vcpu *vcpu);
+void kvm_mmu_setup(struct kvm_vcpu *vcpu);
void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
u64 dirty_mask, u64 nx_mask, u64 x_mask);
-int kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
+void kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot);
-int kvm_mmu_rmap_write_protect(struct kvm *kvm, u64 gfn,
- struct kvm_memory_slot *slot);
+void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
+ struct kvm_memory_slot *slot,
+ gfn_t gfn_offset, unsigned long mask);
void kvm_mmu_zap_all(struct kvm *kvm);
+void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm);
unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);
@@ -709,8 +829,8 @@ extern u32 kvm_min_guest_tsc_khz;
extern u32 kvm_max_guest_tsc_khz;
enum emulation_result {
- EMULATE_DONE, /* no further processing */
- EMULATE_DO_MMIO, /* kvm_run filled with mmio request */
+ EMULATE_DONE, /* no further processing */
+ EMULATE_USER_EXIT, /* kvm_run ready for userspace exit */
EMULATE_FAIL, /* can't emulate this instruction */
};
@@ -718,6 +838,7 @@ enum emulation_result {
#define EMULTYPE_TRAP_UD (1 << 1)
#define EMULTYPE_SKIP (1 << 2)
#define EMULTYPE_RETRY (1 << 3)
+#define EMULTYPE_NO_REEXECUTE (1 << 4)
int x86_emulate_instruction(struct kvm_vcpu *vcpu, unsigned long cr2,
int emulation_type, void *insn, int insn_len);
@@ -728,8 +849,9 @@ static inline int emulate_instruction(struct kvm_vcpu *vcpu,
}
void kvm_enable_efer_bits(u64);
+bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer);
int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *data);
-int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
+int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr);
struct x86_emulate_ctxt;
@@ -740,9 +862,10 @@ int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu);
void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, int seg);
+void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, unsigned int vector);
-int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason,
- bool has_error_code, u32 error_code);
+int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index,
+ int reason, bool has_error_code, u32 error_code);
int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
@@ -756,7 +879,7 @@ void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l);
int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr);
int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
-int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data);
+int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr);
unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu);
void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
@@ -773,7 +896,20 @@ int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
void kvm_propagate_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault);
bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl);
-int kvm_pic_set_irq(void *opaque, int irq, int level);
+static inline int __kvm_irq_line_state(unsigned long *irq_state,
+ int irq_source_id, int level)
+{
+ /* Logical OR for level trig interrupt */
+ if (level)
+ __set_bit(irq_source_id, irq_state);
+ else
+ __clear_bit(irq_source_id, irq_state);
+
+ return !!(*irq_state);
+}
+
+int kvm_pic_set_irq(struct kvm_pic *pic, int irq, int irq_source_id, int level);
+void kvm_pic_clear_all(struct kvm_pic *pic, int irq_source_id);
void kvm_inject_nmi(struct kvm_vcpu *vcpu);
@@ -803,13 +939,11 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);
int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code,
void *insn, int insn_len);
void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);
+void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu);
void kvm_enable_tdp(void);
void kvm_disable_tdp(void);
-int complete_pio(struct kvm_vcpu *vcpu);
-bool kvm_check_iopl(struct kvm_vcpu *vcpu);
-
static inline gpa_t translate_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access)
{
return gpa;
@@ -881,7 +1015,6 @@ enum {
* Trap the fault and ignore the instruction if that happens.
*/
asmlinkage void kvm_spurious_fault(void);
-extern bool kvm_rebooting;
#define ____kvm_handle_fault_on_reboot(insn, cleanup_insn) \
"666: " insn "\n\t" \
@@ -894,22 +1027,23 @@ extern bool kvm_rebooting;
__ASM_SIZE(push) " $666b \n\t" \
"call kvm_spurious_fault \n\t" \
".popsection \n\t" \
- ".pushsection __ex_table, \"a\" \n\t" \
- _ASM_PTR " 666b, 667b \n\t" \
- ".popsection"
+ _ASM_EXTABLE(666b, 667b)
#define __kvm_handle_fault_on_reboot(insn) \
____kvm_handle_fault_on_reboot(insn, "")
#define KVM_ARCH_WANT_MMU_NOTIFIER
int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
+int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_age_hva(struct kvm *kvm, unsigned long hva);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
int cpuid_maxphyaddr(struct kvm_vcpu *vcpu);
+int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v);
int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu);
int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu);
int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
+void kvm_vcpu_reset(struct kvm_vcpu *vcpu);
void kvm_define_shared_msr(unsigned index, u32 msr);
void kvm_set_shared_msr(unsigned index, u64 val, u64 mask);
@@ -935,7 +1069,7 @@ void kvm_pmu_reset(struct kvm_vcpu *vcpu);
void kvm_pmu_cpuid_update(struct kvm_vcpu *vcpu);
bool kvm_pmu_msr(struct kvm_vcpu *vcpu, u32 msr);
int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data);
-int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data);
+int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
int kvm_pmu_read_pmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data);
void kvm_handle_pmu_event(struct kvm_vcpu *vcpu);
void kvm_deliver_pmi(struct kvm_vcpu *vcpu);
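
Illustrative sketch, not part of the patch: one way a caller might package an MSR write into the new struct msr_data before handing it to the reworked kvm_set_msr(). The helper name example_set_msr() is hypothetical.

static int example_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 value,
                           bool host_initiated)
{
        struct msr_data msr = {
                .index          = index,
                .data           = value,
                .host_initiated = host_initiated,
        };

        /* kvm_set_msr() now takes the whole descriptor, not (index, data). */
        return kvm_set_msr(vcpu, &msr);
}
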
diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h
index 734c3767cfa..c7678e43465 100644
--- a/arch/x86/include/asm/kvm_para.h
+++ b/arch/x86/include/asm/kvm_para.h
@@ -1,113 +1,33 @@
#ifndef _ASM_X86_KVM_PARA_H
#define _ASM_X86_KVM_PARA_H
-#include <linux/types.h>
-#include <asm/hyperv.h>
-
-/* This CPUID returns the signature 'KVMKVMKVM' in ebx, ecx, and edx. It
- * should be used to determine that a VM is running under KVM.
- */
-#define KVM_CPUID_SIGNATURE 0x40000000
-
-/* This CPUID returns a feature bitmap in eax. Before enabling a particular
- * paravirtualization, the appropriate feature bit should be checked.
- */
-#define KVM_CPUID_FEATURES 0x40000001
-#define KVM_FEATURE_CLOCKSOURCE 0
-#define KVM_FEATURE_NOP_IO_DELAY 1
-#define KVM_FEATURE_MMU_OP 2
-/* This indicates that the new set of kvmclock msrs
- * are available. The use of 0x11 and 0x12 is deprecated
- */
-#define KVM_FEATURE_CLOCKSOURCE2 3
-#define KVM_FEATURE_ASYNC_PF 4
-#define KVM_FEATURE_STEAL_TIME 5
-
-/* The last 8 bits are used to indicate how to interpret the flags field
- * in pvclock structure. If no bits are set, all flags are ignored.
- */
-#define KVM_FEATURE_CLOCKSOURCE_STABLE_BIT 24
-
-#define MSR_KVM_WALL_CLOCK 0x11
-#define MSR_KVM_SYSTEM_TIME 0x12
-
-#define KVM_MSR_ENABLED 1
-/* Custom MSRs falls in the range 0x4b564d00-0x4b564dff */
-#define MSR_KVM_WALL_CLOCK_NEW 0x4b564d00
-#define MSR_KVM_SYSTEM_TIME_NEW 0x4b564d01
-#define MSR_KVM_ASYNC_PF_EN 0x4b564d02
-#define MSR_KVM_STEAL_TIME 0x4b564d03
-
-struct kvm_steal_time {
- __u64 steal;
- __u32 version;
- __u32 flags;
- __u32 pad[12];
-};
-
-#define KVM_STEAL_ALIGNMENT_BITS 5
-#define KVM_STEAL_VALID_BITS ((-1ULL << (KVM_STEAL_ALIGNMENT_BITS + 1)))
-#define KVM_STEAL_RESERVED_MASK (((1 << KVM_STEAL_ALIGNMENT_BITS) - 1 ) << 1)
-
-#define KVM_MAX_MMU_OP_BATCH 32
-
-#define KVM_ASYNC_PF_ENABLED (1 << 0)
-#define KVM_ASYNC_PF_SEND_ALWAYS (1 << 1)
-
-/* Operations for KVM_HC_MMU_OP */
-#define KVM_MMU_OP_WRITE_PTE 1
-#define KVM_MMU_OP_FLUSH_TLB 2
-#define KVM_MMU_OP_RELEASE_PT 3
-
-/* Payload for KVM_HC_MMU_OP */
-struct kvm_mmu_op_header {
- __u32 op;
- __u32 pad;
-};
-
-struct kvm_mmu_op_write_pte {
- struct kvm_mmu_op_header header;
- __u64 pte_phys;
- __u64 pte_val;
-};
-
-struct kvm_mmu_op_flush_tlb {
- struct kvm_mmu_op_header header;
-};
-
-struct kvm_mmu_op_release_pt {
- struct kvm_mmu_op_header header;
- __u64 pt_phys;
-};
-
-#define KVM_PV_REASON_PAGE_NOT_PRESENT 1
-#define KVM_PV_REASON_PAGE_READY 2
-
-struct kvm_vcpu_pv_apf_data {
- __u32 reason;
- __u8 pad[60];
- __u32 enabled;
-};
-
-#ifdef __KERNEL__
#include <asm/processor.h>
+#include <uapi/asm/kvm_para.h>
extern void kvmclock_init(void);
extern int kvm_register_clock(char *txt);
+#ifdef CONFIG_KVM_GUEST
+bool kvm_check_and_clear_guest_paused(void);
+#else
+static inline bool kvm_check_and_clear_guest_paused(void)
+{
+ return false;
+}
+#endif /* CONFIG_KVM_GUEST */
/* This instruction is vmcall. On non-VT architectures, it will generate a
* trap that we will then rewrite to the appropriate instruction.
*/
#define KVM_HYPERCALL ".byte 0x0f,0x01,0xc1"
-/* For KVM hypercalls, a three-byte sequence of either the vmrun or the vmmrun
+/* For KVM hypercalls, a three-byte sequence of either the vmcall or the vmmcall
* instruction. The hypervisor may replace it with something else but only the
* instructions are guaranteed to be supported.
*
* Up to four arguments may be passed in rbx, rcx, rdx, and rsi respectively.
* The hypercall number should be placed in rax and the return value will be
- * placed in rax. No other registers will be clobbered unless explicited
+ * placed in rax. No other registers will be clobbered unless explicitly
* noted by the particular hypercall.
*/
@@ -165,38 +85,38 @@ static inline long kvm_hypercall4(unsigned int nr, unsigned long p1,
return ret;
}
-static inline int kvm_para_available(void)
-{
- unsigned int eax, ebx, ecx, edx;
- char signature[13];
+#ifdef CONFIG_KVM_GUEST
+bool kvm_para_available(void);
+unsigned int kvm_arch_para_features(void);
+void __init kvm_guest_init(void);
+void kvm_async_pf_task_wait(u32 token);
+void kvm_async_pf_task_wake(u32 token);
+u32 kvm_read_and_reset_pf_reason(void);
+extern void kvm_disable_steal_time(void);
- cpuid(KVM_CPUID_SIGNATURE, &eax, &ebx, &ecx, &edx);
- memcpy(signature + 0, &ebx, 4);
- memcpy(signature + 4, &ecx, 4);
- memcpy(signature + 8, &edx, 4);
- signature[12] = 0;
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+void __init kvm_spinlock_init(void);
+#else /* !CONFIG_PARAVIRT_SPINLOCKS */
+static inline void kvm_spinlock_init(void)
+{
+}
+#endif /* CONFIG_PARAVIRT_SPINLOCKS */
- if (strcmp(signature, "KVMKVMKVM") == 0)
- return 1;
+#else /* CONFIG_KVM_GUEST */
+#define kvm_guest_init() do {} while (0)
+#define kvm_async_pf_task_wait(T) do {} while(0)
+#define kvm_async_pf_task_wake(T) do {} while(0)
+static inline bool kvm_para_available(void)
+{
return 0;
}
static inline unsigned int kvm_arch_para_features(void)
{
- return cpuid_eax(KVM_CPUID_FEATURES);
+ return 0;
}
-#ifdef CONFIG_KVM_GUEST
-void __init kvm_guest_init(void);
-void kvm_async_pf_task_wait(u32 token);
-void kvm_async_pf_task_wake(u32 token);
-u32 kvm_read_and_reset_pf_reason(void);
-extern void kvm_disable_steal_time(void);
-#else
-#define kvm_guest_init() do { } while (0)
-#define kvm_async_pf_task_wait(T) do {} while(0)
-#define kvm_async_pf_task_wake(T) do {} while(0)
static inline u32 kvm_read_and_reset_pf_reason(void)
{
return 0;
@@ -208,6 +128,4 @@ static inline void kvm_disable_steal_time(void)
}
#endif
-#endif /* __KERNEL__ */
-
#endif /* _ASM_X86_KVM_PARA_H */
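
Illustrative sketch of the calling convention documented above (hypercall number in rax, first argument in rbx, result returned in rax); the example_kvm_hypercall1() name is hypothetical, but the real one-argument helper in this header follows the same pattern.

static inline long example_kvm_hypercall1(unsigned int nr, unsigned long p1)
{
        long ret;

        /* KVM_HYPERCALL is the vmcall byte sequence defined above. */
        asm volatile(KVM_HYPERCALL
                     : "=a"(ret)
                     : "a"(nr), "b"(p1)
                     : "memory");
        return ret;
}
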
diff --git a/arch/x86/include/asm/ldt.h b/arch/x86/include/asm/ldt.h
deleted file mode 100644
index 46727eb37bf..00000000000
--- a/arch/x86/include/asm/ldt.h
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * ldt.h
- *
- * Definitions of structures used with the modify_ldt system call.
- */
-#ifndef _ASM_X86_LDT_H
-#define _ASM_X86_LDT_H
-
-/* Maximum number of LDT entries supported. */
-#define LDT_ENTRIES 8192
-/* The size of each LDT entry. */
-#define LDT_ENTRY_SIZE 8
-
-#ifndef __ASSEMBLY__
-/*
- * Note on 64bit base and limit is ignored and you cannot set DS/ES/CS
- * not to the default values if you still want to do syscalls. This
- * call is more for 32bit mode therefore.
- */
-struct user_desc {
- unsigned int entry_number;
- unsigned int base_addr;
- unsigned int limit;
- unsigned int seg_32bit:1;
- unsigned int contents:2;
- unsigned int read_exec_only:1;
- unsigned int limit_in_pages:1;
- unsigned int seg_not_present:1;
- unsigned int useable:1;
-#ifdef __x86_64__
- unsigned int lm:1;
-#endif
-};
-
-#define MODIFY_LDT_CONTENTS_DATA 0
-#define MODIFY_LDT_CONTENTS_STACK 1
-#define MODIFY_LDT_CONTENTS_CODE 2
-
-#endif /* !__ASSEMBLY__ */
-#endif /* _ASM_X86_LDT_H */
diff --git a/arch/x86/include/asm/lguest.h b/arch/x86/include/asm/lguest.h
index 0d97deba1e3..e2d4a4afa8c 100644
--- a/arch/x86/include/asm/lguest.h
+++ b/arch/x86/include/asm/lguest.h
@@ -11,18 +11,11 @@
#define GUEST_PL 1
-/* Every guest maps the core switcher code. */
-#define SHARED_SWITCHER_PAGES \
- DIV_ROUND_UP(end_switcher_text - start_switcher_text, PAGE_SIZE)
-/* Pages for switcher itself, then two pages per cpu */
-#define TOTAL_SWITCHER_PAGES (SHARED_SWITCHER_PAGES + 2 * nr_cpu_ids)
-
-/* We map at -4M (-2M for PAE) for ease of mapping (one PTE page). */
-#ifdef CONFIG_X86_PAE
-#define SWITCHER_ADDR 0xFFE00000
-#else
-#define SWITCHER_ADDR 0xFFC00000
-#endif
+/* Page for Switcher text itself, then two pages per cpu */
+#define TOTAL_SWITCHER_PAGES (1 + 2 * nr_cpu_ids)
+
+/* Where we map the Switcher, in both Host and Guest. */
+extern unsigned long switcher_addr;
/* Found in switcher.S */
extern unsigned long default_idt_entries[];
diff --git a/arch/x86/include/asm/linkage.h b/arch/x86/include/asm/linkage.h
index 48142971b25..79327e9483a 100644
--- a/arch/x86/include/asm/linkage.h
+++ b/arch/x86/include/asm/linkage.h
@@ -27,20 +27,20 @@
#define __asmlinkage_protect0(ret) \
__asmlinkage_protect_n(ret)
#define __asmlinkage_protect1(ret, arg1) \
- __asmlinkage_protect_n(ret, "g" (arg1))
+ __asmlinkage_protect_n(ret, "m" (arg1))
#define __asmlinkage_protect2(ret, arg1, arg2) \
- __asmlinkage_protect_n(ret, "g" (arg1), "g" (arg2))
+ __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2))
#define __asmlinkage_protect3(ret, arg1, arg2, arg3) \
- __asmlinkage_protect_n(ret, "g" (arg1), "g" (arg2), "g" (arg3))
+ __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3))
#define __asmlinkage_protect4(ret, arg1, arg2, arg3, arg4) \
- __asmlinkage_protect_n(ret, "g" (arg1), "g" (arg2), "g" (arg3), \
- "g" (arg4))
+ __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \
+ "m" (arg4))
#define __asmlinkage_protect5(ret, arg1, arg2, arg3, arg4, arg5) \
- __asmlinkage_protect_n(ret, "g" (arg1), "g" (arg2), "g" (arg3), \
- "g" (arg4), "g" (arg5))
+ __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \
+ "m" (arg4), "m" (arg5))
#define __asmlinkage_protect6(ret, arg1, arg2, arg3, arg4, arg5, arg6) \
- __asmlinkage_protect_n(ret, "g" (arg1), "g" (arg2), "g" (arg3), \
- "g" (arg4), "g" (arg5), "g" (arg6))
+ __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \
+ "m" (arg4), "m" (arg5), "m" (arg6))
#endif /* CONFIG_X86_32 */
diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
index 9cdae5d47e8..4ad6560847b 100644
--- a/arch/x86/include/asm/local.h
+++ b/arch/x86/include/asm/local.h
@@ -3,7 +3,6 @@
#include <linux/percpu.h>
-#include <asm/system.h>
#include <linux/atomic.h>
#include <asm/asm.h>
@@ -53,12 +52,7 @@ static inline void local_sub(long i, local_t *l)
*/
static inline int local_sub_and_test(long i, local_t *l)
{
- unsigned char c;
-
- asm volatile(_ASM_SUB "%2,%0; sete %1"
- : "+m" (l->a.counter), "=qm" (c)
- : "ir" (i) : "memory");
- return c;
+ GEN_BINARY_RMWcc(_ASM_SUB, l->a.counter, "er", i, "%0", "e");
}
/**
@@ -71,12 +65,7 @@ static inline int local_sub_and_test(long i, local_t *l)
*/
static inline int local_dec_and_test(local_t *l)
{
- unsigned char c;
-
- asm volatile(_ASM_DEC "%0; sete %1"
- : "+m" (l->a.counter), "=qm" (c)
- : : "memory");
- return c != 0;
+ GEN_UNARY_RMWcc(_ASM_DEC, l->a.counter, "%0", "e");
}
/**
@@ -89,12 +78,7 @@ static inline int local_dec_and_test(local_t *l)
*/
static inline int local_inc_and_test(local_t *l)
{
- unsigned char c;
-
- asm volatile(_ASM_INC "%0; sete %1"
- : "+m" (l->a.counter), "=qm" (c)
- : : "memory");
- return c != 0;
+ GEN_UNARY_RMWcc(_ASM_INC, l->a.counter, "%0", "e");
}
/**
@@ -108,12 +92,7 @@ static inline int local_inc_and_test(local_t *l)
*/
static inline int local_add_negative(long i, local_t *l)
{
- unsigned char c;
-
- asm volatile(_ASM_ADD "%2,%0; sets %1"
- : "+m" (l->a.counter), "=qm" (c)
- : "ir" (i) : "memory");
- return c;
+ GEN_BINARY_RMWcc(_ASM_ADD, l->a.counter, "er", i, "%0", "s");
}
/**
@@ -125,27 +104,11 @@ static inline int local_add_negative(long i, local_t *l)
*/
static inline long local_add_return(long i, local_t *l)
{
- long __i;
-#ifdef CONFIG_M386
- unsigned long flags;
- if (unlikely(boot_cpu_data.x86 <= 3))
- goto no_xadd;
-#endif
- /* Modern 486+ processor */
- __i = i;
+ long __i = i;
asm volatile(_ASM_XADD "%0, %1;"
: "+r" (i), "+m" (l->a.counter)
: : "memory");
return i + __i;
-
-#ifdef CONFIG_M386
-no_xadd: /* Legacy 386 processor */
- local_irq_save(flags);
- __i = local_read(l);
- local_set(l, i + __i);
- local_irq_restore(flags);
- return i + __i;
-#endif
}
static inline long local_sub_return(long i, local_t *l)
diff --git a/arch/x86/include/asm/mc146818rtc.h b/arch/x86/include/asm/mc146818rtc.h
index 0e8e85bb7c5..a55c7efcc4e 100644
--- a/arch/x86/include/asm/mc146818rtc.h
+++ b/arch/x86/include/asm/mc146818rtc.h
@@ -5,7 +5,6 @@
#define _ASM_X86_MC146818RTC_H
#include <asm/io.h>
-#include <asm/system.h>
#include <asm/processor.h>
#include <linux/mc146818rtc.h>
@@ -96,8 +95,8 @@ static inline unsigned char current_lock_cmos_reg(void)
unsigned char rtc_cmos_read(unsigned char addr);
void rtc_cmos_write(unsigned char val, unsigned char addr);
-extern int mach_set_rtc_mmss(unsigned long nowtime);
-extern unsigned long mach_get_cmos_time(void);
+extern int mach_set_rtc_mmss(const struct timespec *now);
+extern void mach_get_cmos_time(struct timespec *now);
#define RTC_IRQ 8
diff --git a/arch/x86/include/asm/mca.h b/arch/x86/include/asm/mca.h
deleted file mode 100644
index eedbb6cc1ef..00000000000
--- a/arch/x86/include/asm/mca.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/* -*- mode: c; c-basic-offset: 8 -*- */
-
-/* Platform specific MCA defines */
-#ifndef _ASM_X86_MCA_H
-#define _ASM_X86_MCA_H
-
-/* Maximal number of MCA slots - actually, some machines have less, but
- * they all have sufficient number of POS registers to cover 8.
- */
-#define MCA_MAX_SLOT_NR 8
-
-/* Most machines have only one MCA bus. The only multiple bus machines
- * I know have at most two */
-#define MAX_MCA_BUSSES 2
-
-#define MCA_PRIMARY_BUS 0
-#define MCA_SECONDARY_BUS 1
-
-/* Dummy slot numbers on primary MCA for integrated functions */
-#define MCA_INTEGSCSI (MCA_MAX_SLOT_NR)
-#define MCA_INTEGVIDEO (MCA_MAX_SLOT_NR+1)
-#define MCA_MOTHERBOARD (MCA_MAX_SLOT_NR+2)
-
-/* Dummy POS values for integrated functions */
-#define MCA_DUMMY_POS_START 0x10000
-#define MCA_INTEGSCSI_POS (MCA_DUMMY_POS_START+1)
-#define MCA_INTEGVIDEO_POS (MCA_DUMMY_POS_START+2)
-#define MCA_MOTHERBOARD_POS (MCA_DUMMY_POS_START+3)
-
-/* MCA registers */
-
-#define MCA_MOTHERBOARD_SETUP_REG 0x94
-#define MCA_ADAPTER_SETUP_REG 0x96
-#define MCA_POS_REG(n) (0x100+(n))
-
-#define MCA_ENABLED 0x01 /* POS 2, set if adapter enabled */
-
-/* Max number of adapters, including both slots and various integrated
- * things.
- */
-#define MCA_NUMADAPTERS (MCA_MAX_SLOT_NR+3)
-
-#endif /* _ASM_X86_MCA_H */
diff --git a/arch/x86/include/asm/mca_dma.h b/arch/x86/include/asm/mca_dma.h
deleted file mode 100644
index 45271aef82d..00000000000
--- a/arch/x86/include/asm/mca_dma.h
+++ /dev/null
@@ -1,201 +0,0 @@
-#ifndef _ASM_X86_MCA_DMA_H
-#define _ASM_X86_MCA_DMA_H
-
-#include <asm/io.h>
-#include <linux/ioport.h>
-
-/*
- * Microchannel specific DMA stuff. DMA on an MCA machine is fairly similar to
- * standard PC dma, but it certainly has its quirks. DMA register addresses
- * are in a different place and there are some added functions. Most of this
- * should be pretty obvious on inspection. Note that the user must divide
- * count by 2 when using 16-bit dma; that is not handled by these functions.
- *
- * Ramen Noodles are yummy.
- *
- * 1998 Tymm Twillman <tymm@computer.org>
- */
-
-/*
- * Registers that are used by the DMA controller; FN is the function register
- * (tell the controller what to do) and EXE is the execution register (how
- * to do it)
- */
-
-#define MCA_DMA_REG_FN 0x18
-#define MCA_DMA_REG_EXE 0x1A
-
-/*
- * Functions that the DMA controller can do
- */
-
-#define MCA_DMA_FN_SET_IO 0x00
-#define MCA_DMA_FN_SET_ADDR 0x20
-#define MCA_DMA_FN_GET_ADDR 0x30
-#define MCA_DMA_FN_SET_COUNT 0x40
-#define MCA_DMA_FN_GET_COUNT 0x50
-#define MCA_DMA_FN_GET_STATUS 0x60
-#define MCA_DMA_FN_SET_MODE 0x70
-#define MCA_DMA_FN_SET_ARBUS 0x80
-#define MCA_DMA_FN_MASK 0x90
-#define MCA_DMA_FN_RESET_MASK 0xA0
-#define MCA_DMA_FN_MASTER_CLEAR 0xD0
-
-/*
- * Modes (used by setting MCA_DMA_FN_MODE in the function register)
- *
- * Note that the MODE_READ is read from memory (write to device), and
- * MODE_WRITE is vice-versa.
- */
-
-#define MCA_DMA_MODE_XFER 0x04 /* read by default */
-#define MCA_DMA_MODE_READ 0x04 /* same as XFER */
-#define MCA_DMA_MODE_WRITE 0x08 /* OR with MODE_XFER to use */
-#define MCA_DMA_MODE_IO 0x01 /* DMA from IO register */
-#define MCA_DMA_MODE_16 0x40 /* 16 bit xfers */
-
-
-/**
- * mca_enable_dma - channel to enable DMA on
- * @dmanr: DMA channel
- *
- * Enable the MCA bus DMA on a channel. This can be called from
- * IRQ context.
- */
-
-static inline void mca_enable_dma(unsigned int dmanr)
-{
- outb(MCA_DMA_FN_RESET_MASK | dmanr, MCA_DMA_REG_FN);
-}
-
-/**
- * mca_disble_dma - channel to disable DMA on
- * @dmanr: DMA channel
- *
- * Enable the MCA bus DMA on a channel. This can be called from
- * IRQ context.
- */
-
-static inline void mca_disable_dma(unsigned int dmanr)
-{
- outb(MCA_DMA_FN_MASK | dmanr, MCA_DMA_REG_FN);
-}
-
-/**
- * mca_set_dma_addr - load a 24bit DMA address
- * @dmanr: DMA channel
- * @a: 24bit bus address
- *
- * Load the address register in the DMA controller. This has a 24bit
- * limitation (16Mb).
- */
-
-static inline void mca_set_dma_addr(unsigned int dmanr, unsigned int a)
-{
- outb(MCA_DMA_FN_SET_ADDR | dmanr, MCA_DMA_REG_FN);
- outb(a & 0xff, MCA_DMA_REG_EXE);
- outb((a >> 8) & 0xff, MCA_DMA_REG_EXE);
- outb((a >> 16) & 0xff, MCA_DMA_REG_EXE);
-}
-
-/**
- * mca_get_dma_addr - load a 24bit DMA address
- * @dmanr: DMA channel
- *
- * Read the address register in the DMA controller. This has a 24bit
- * limitation (16Mb). The return is a bus address.
- */
-
-static inline unsigned int mca_get_dma_addr(unsigned int dmanr)
-{
- unsigned int addr;
-
- outb(MCA_DMA_FN_GET_ADDR | dmanr, MCA_DMA_REG_FN);
- addr = inb(MCA_DMA_REG_EXE);
- addr |= inb(MCA_DMA_REG_EXE) << 8;
- addr |= inb(MCA_DMA_REG_EXE) << 16;
-
- return addr;
-}
-
-/**
- * mca_set_dma_count - load a 16bit transfer count
- * @dmanr: DMA channel
- * @count: count
- *
- * Set the DMA count for this channel. This can be up to 64Kbytes.
- * Setting a count of zero will not do what you expect.
- */
-
-static inline void mca_set_dma_count(unsigned int dmanr, unsigned int count)
-{
- count--; /* transfers one more than count -- correct for this */
-
- outb(MCA_DMA_FN_SET_COUNT | dmanr, MCA_DMA_REG_FN);
- outb(count & 0xff, MCA_DMA_REG_EXE);
- outb((count >> 8) & 0xff, MCA_DMA_REG_EXE);
-}
-
-/**
- * mca_get_dma_residue - get the remaining bytes to transfer
- * @dmanr: DMA channel
- *
- * This function returns the number of bytes left to transfer
- * on this DMA channel.
- */
-
-static inline unsigned int mca_get_dma_residue(unsigned int dmanr)
-{
- unsigned short count;
-
- outb(MCA_DMA_FN_GET_COUNT | dmanr, MCA_DMA_REG_FN);
- count = 1 + inb(MCA_DMA_REG_EXE);
- count += inb(MCA_DMA_REG_EXE) << 8;
-
- return count;
-}
-
-/**
- * mca_set_dma_io - set the port for an I/O transfer
- * @dmanr: DMA channel
- * @io_addr: an I/O port number
- *
- * Unlike the ISA bus DMA controllers the DMA on MCA bus can transfer
- * with an I/O port target.
- */
-
-static inline void mca_set_dma_io(unsigned int dmanr, unsigned int io_addr)
-{
- /*
- * DMA from a port address -- set the io address
- */
-
- outb(MCA_DMA_FN_SET_IO | dmanr, MCA_DMA_REG_FN);
- outb(io_addr & 0xff, MCA_DMA_REG_EXE);
- outb((io_addr >> 8) & 0xff, MCA_DMA_REG_EXE);
-}
-
-/**
- * mca_set_dma_mode - set the DMA mode
- * @dmanr: DMA channel
- * @mode: mode to set
- *
- * The DMA controller supports several modes. The mode values you can
- * set are-
- *
- * %MCA_DMA_MODE_READ when reading from the DMA device.
- *
- * %MCA_DMA_MODE_WRITE to writing to the DMA device.
- *
- * %MCA_DMA_MODE_IO to do DMA to or from an I/O port.
- *
- * %MCA_DMA_MODE_16 to do 16bit transfers.
- */
-
-static inline void mca_set_dma_mode(unsigned int dmanr, unsigned int mode)
-{
- outb(MCA_DMA_FN_SET_MODE | dmanr, MCA_DMA_REG_FN);
- outb(mode, MCA_DMA_REG_EXE);
-}
-
-#endif /* _ASM_X86_MCA_DMA_H */
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index 6aefb14cbbc..958b90f761e 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -1,8 +1,7 @@
#ifndef _ASM_X86_MCE_H
#define _ASM_X86_MCE_H
-#include <linux/types.h>
-#include <asm/ioctls.h>
+#include <uapi/asm/mce.h>
/*
* Machine Check support for x86
@@ -16,7 +15,8 @@
#define MCG_EXT_CNT_MASK 0xff0000 /* Number of Extended registers */
#define MCG_EXT_CNT_SHIFT 16
#define MCG_EXT_CNT(c) (((c) & MCG_EXT_CNT_MASK) >> MCG_EXT_CNT_SHIFT)
-#define MCG_SER_P (1ULL<<24) /* MCA recovery/new status bits */
+#define MCG_SER_P (1ULL<<24) /* MCA recovery/new status bits */
+#define MCG_ELOG_P (1ULL<<26) /* Extended error log supported */
/* MCG_STATUS register defines */
#define MCG_STATUS_RIPV (1ULL<<0) /* restart ip valid */
@@ -34,6 +34,23 @@
#define MCI_STATUS_S (1ULL<<56) /* Signaled machine check */
#define MCI_STATUS_AR (1ULL<<55) /* Action required */
+/*
+ * Note that the full MCACOD field of IA32_MCi_STATUS MSR is
+ * bits 15:0. But bit 12 is the 'F' bit, defined for corrected
+ * errors to indicate that errors are being filtered by hardware.
+ * We should mask out bit 12 when looking for specific signatures
+ * of uncorrected errors - so the F bit is deliberately skipped
+ * in this #define.
+ */
+#define MCACOD 0xefff /* MCA Error Code */
+
+/* Architecturally defined codes from SDM Vol. 3B Chapter 15 */
+#define MCACOD_SCRUB 0x00C0 /* 0xC0-0xCF Memory Scrubbing */
+#define MCACOD_SCRUBMSK 0xeff0 /* Skip bit 12 ('F' bit) */
+#define MCACOD_L3WB 0x017A /* L3 Explicit Writeback */
+#define MCACOD_DATA 0x0134 /* Data Load */
+#define MCACOD_INSTR 0x0150 /* Instruction Fetch */
+
/* MCi_MISC register defines */
#define MCI_MISC_ADDR_LSB(m) ((m) & 0x3f)
#define MCI_MISC_ADDR_MODE(m) (((m) >> 6) & 7)
@@ -54,30 +71,17 @@
#define MCJ_CTX_IRQ 0x2 /* inject context: IRQ */
#define MCJ_NMI_BROADCAST 0x4 /* do NMI broadcasting */
#define MCJ_EXCEPTION 0x8 /* raise as exception */
-#define MCJ_IRQ_BRAODCAST 0x10 /* do IRQ broadcasting */
-
-/* Fields are zero when not available */
-struct mce {
- __u64 status;
- __u64 misc;
- __u64 addr;
- __u64 mcgstatus;
- __u64 ip;
- __u64 tsc; /* cpu time stamp counter */
- __u64 time; /* wall time_t when error was detected */
- __u8 cpuvendor; /* cpu vendor as encoded in system.h */
- __u8 inject_flags; /* software inject flags */
- __u16 pad;
- __u32 cpuid; /* CPUID 1 EAX */
- __u8 cs; /* code segment */
- __u8 bank; /* machine check bank */
- __u8 cpu; /* cpu number; obsolete; use extcpu now */
- __u8 finished; /* entry is valid */
- __u32 extcpu; /* linux cpu number that detected the error */
- __u32 socketid; /* CPU socket ID */
- __u32 apicid; /* CPU initial apic ID */
- __u64 mcgcap; /* MCGCAP MSR: machine check capabilities of CPU */
-};
+#define MCJ_IRQ_BROADCAST 0x10 /* do IRQ broadcasting */
+
+#define MCE_OVERFLOW 0 /* bit 0 in flags means overflow */
+
+/* Software defined banks */
+#define MCE_EXTENDED_BANK 128
+#define MCE_THERMAL_BANK (MCE_EXTENDED_BANK + 0)
+#define K8_MCE_THRESHOLD_BASE (MCE_EXTENDED_BANK + 1)
+
+#define MCE_LOG_LEN 32
+#define MCE_LOG_SIGNATURE "MACHINECHECK"
/*
* This structure contains all data related to the MCE log. Also
@@ -85,9 +89,6 @@ struct mce {
* debugging tools. Each entry is only valid when its finished flag
* is set.
*/
-
-#define MCE_LOG_LEN 32
-
struct mce_log {
char signature[12]; /* "MACHINECHECK" */
unsigned len; /* = MCE_LOG_LEN */
@@ -97,38 +98,28 @@ struct mce_log {
struct mce entry[MCE_LOG_LEN];
};
-#define MCE_OVERFLOW 0 /* bit 0 in flags means overflow */
-
-#define MCE_LOG_SIGNATURE "MACHINECHECK"
-
-#define MCE_GET_RECORD_LEN _IOR('M', 1, int)
-#define MCE_GET_LOG_LEN _IOR('M', 2, int)
-#define MCE_GETCLEAR_FLAGS _IOR('M', 3, int)
-
-/* Software defined banks */
-#define MCE_EXTENDED_BANK 128
-#define MCE_THERMAL_BANK MCE_EXTENDED_BANK + 0
-
-#define K8_MCE_THRESHOLD_BASE (MCE_EXTENDED_BANK + 1) /* MCE_AMD */
-#define K8_MCE_THRESHOLD_BANK_0 (MCE_THRESHOLD_BASE + 0 * 9)
-#define K8_MCE_THRESHOLD_BANK_1 (MCE_THRESHOLD_BASE + 1 * 9)
-#define K8_MCE_THRESHOLD_BANK_2 (MCE_THRESHOLD_BASE + 2 * 9)
-#define K8_MCE_THRESHOLD_BANK_3 (MCE_THRESHOLD_BASE + 3 * 9)
-#define K8_MCE_THRESHOLD_BANK_4 (MCE_THRESHOLD_BASE + 4 * 9)
-#define K8_MCE_THRESHOLD_BANK_5 (MCE_THRESHOLD_BASE + 5 * 9)
-#define K8_MCE_THRESHOLD_DRAM_ECC (MCE_THRESHOLD_BANK_4 + 0)
-
-
-#ifdef __KERNEL__
+struct mca_config {
+ bool dont_log_ce;
+ bool cmci_disabled;
+ bool ignore_ce;
+ bool disabled;
+ bool ser;
+ bool bios_cmci_threshold;
+ u8 banks;
+ s8 bootlog;
+ int tolerant;
+ int monarch_timeout;
+ int panic_timeout;
+ u32 rip_msr;
+};
+extern struct mca_config mca_cfg;
extern void mce_register_decode_chain(struct notifier_block *nb);
extern void mce_unregister_decode_chain(struct notifier_block *nb);
#include <linux/percpu.h>
-#include <linux/init.h>
#include <linux/atomic.h>
-extern int mce_disabled;
extern int mce_p5_enabled;
#ifdef CONFIG_X86_MCE
@@ -151,7 +142,7 @@ static inline void enable_p5_mce(void) {}
void mce_setup(struct mce *m);
void mce_log(struct mce *m);
-extern struct device *mce_device[CONFIG_NR_CPUS];
+DECLARE_PER_CPU(struct device *, mce_device);
/*
* Maximum banks number.
@@ -161,18 +152,16 @@ extern struct device *mce_device[CONFIG_NR_CPUS];
#define MAX_NR_BANKS 32
#ifdef CONFIG_X86_MCE_INTEL
-extern int mce_cmci_disabled;
-extern int mce_ignore_ce;
void mce_intel_feature_init(struct cpuinfo_x86 *c);
void cmci_clear(void);
void cmci_reenable(void);
-void cmci_rediscover(int dying);
+void cmci_rediscover(void);
void cmci_recheck(void);
#else
static inline void mce_intel_feature_init(struct cpuinfo_x86 *c) { }
static inline void cmci_clear(void) {}
static inline void cmci_reenable(void) {}
-static inline void cmci_rediscover(int dying) {}
+static inline void cmci_rediscover(void) {}
static inline void cmci_recheck(void) {}
#endif
@@ -187,8 +176,6 @@ int mce_available(struct cpuinfo_x86 *c);
DECLARE_PER_CPU(unsigned, mce_exception_count);
DECLARE_PER_CPU(unsigned, mce_poll_count);
-extern atomic_t mce_entry;
-
typedef DECLARE_BITMAP(mce_banks_t, MAX_NR_BANKS);
DECLARE_PER_CPU(mce_banks_t, mce_poll_banks);
@@ -208,6 +195,9 @@ extern void register_mce_write_callback(ssize_t (*)(struct file *filp,
const char __user *ubuf,
size_t usize, loff_t *off));
+/* Disable CMCI/polling for MCA bank claimed by firmware */
+extern void mce_disable_bank(int bank);
+
/*
* Exception handler
*/
@@ -234,6 +224,13 @@ void mce_log_therm_throt_event(__u64 status);
/* Interrupt Handler for core thermal thresholds */
extern int (*platform_thermal_notify)(__u64 msr_val);
+/* Interrupt Handler for package thermal thresholds */
+extern int (*platform_thermal_package_notify)(__u64 msr_val);
+
+/* Callback support of rate control, return true, if
+ * callback has rate control */
+extern bool (*platform_thermal_package_rate_control)(void);
+
#ifdef CONFIG_X86_THERMAL_VECTOR
extern void mcheck_intel_therm_init(void);
#else
@@ -248,5 +245,4 @@ struct cper_sec_mem_err;
extern void apei_mce_report_mem_error(int corrected,
struct cper_sec_mem_err *mem_err);
-#endif /* __KERNEL__ */
#endif /* _ASM_X86_MCE_H */
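
Illustrative sketch of the masking rule described in the MCACOD comment above: bit 12 (the 'F' filtering bit) is skipped when matching error-code signatures, so a memory-scrubbing error in the 0xC0-0xCF range can be recognized as follows. The function name is hypothetical.

static bool example_is_memory_scrub_error(u64 mci_status)
{
        /* Compare only the MCACOD bits, with bit 12 (the 'F' bit) masked out. */
        return (mci_status & MCACOD_SCRUBMSK) == MCACOD_SCRUB;
}
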
diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h
index 4ebe157bf73..64dc362506b 100644
--- a/arch/x86/include/asm/microcode.h
+++ b/arch/x86/include/asm/microcode.h
@@ -1,6 +1,21 @@
#ifndef _ASM_X86_MICROCODE_H
#define _ASM_X86_MICROCODE_H
+#define native_rdmsr(msr, val1, val2) \
+do { \
+ u64 __val = native_read_msr((msr)); \
+ (void)((val1) = (u32)__val); \
+ (void)((val2) = (u32)(__val >> 32)); \
+} while (0)
+
+#define native_wrmsr(msr, low, high) \
+ native_write_msr(msr, low, high)
+
+#define native_wrmsrl(msr, val) \
+ native_write_msr((msr), \
+ (u32)((u64)(val)), \
+ (u32)((u64)(val) >> 32))
+
struct cpu_signature {
unsigned int sig;
unsigned int pf;
@@ -10,13 +25,14 @@ struct cpu_signature {
struct device;
enum ucode_state { UCODE_ERROR, UCODE_OK, UCODE_NFOUND };
+extern bool dis_ucode_ldr;
struct microcode_ops {
enum ucode_state (*request_microcode_user) (int cpu,
const void __user *buf, size_t size);
- enum ucode_state (*request_microcode_fw) (int cpu,
- struct device *device);
+ enum ucode_state (*request_microcode_fw) (int cpu, struct device *,
+ bool refresh_fw);
void (*microcode_fini_cpu) (int cpu);
@@ -49,12 +65,6 @@ static inline struct microcode_ops * __init init_intel_microcode(void)
#ifdef CONFIG_MICROCODE_AMD
extern struct microcode_ops * __init init_amd_microcode(void);
extern void __exit exit_amd_microcode(void);
-
-static inline void get_ucode_data(void *to, const u8 *from, size_t n)
-{
- memcpy(to, from, n);
-}
-
#else
static inline struct microcode_ops * __init init_amd_microcode(void)
{
@@ -63,4 +73,18 @@ static inline struct microcode_ops * __init init_amd_microcode(void)
static inline void __exit exit_amd_microcode(void) {}
#endif
+#ifdef CONFIG_MICROCODE_EARLY
+#define MAX_UCODE_COUNT 128
+extern void __init load_ucode_bsp(void);
+extern void load_ucode_ap(void);
+extern int __init save_microcode_in_initrd(void);
+#else
+static inline void __init load_ucode_bsp(void) {}
+static inline void load_ucode_ap(void) {}
+static inline int __init save_microcode_in_initrd(void)
+{
+ return 0;
+}
+#endif
+
#endif /* _ASM_X86_MICROCODE_H */
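
Illustrative sketch of the native_rdmsr() helper added above, which splits a 64-bit MSR value into its 32-bit halves; MSR_IA32_UCODE_REV is used only as an example register and the function name is hypothetical.

static u32 example_read_ucode_rev(void)
{
        u32 lo, hi;

        /* native_rdmsr() splits the 64-bit MSR value into 32-bit halves. */
        native_rdmsr(MSR_IA32_UCODE_REV, lo, hi);

        return hi;      /* Intel reports the patch level in the high half */
}
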
diff --git a/arch/x86/include/asm/microcode_amd.h b/arch/x86/include/asm/microcode_amd.h
new file mode 100644
index 00000000000..b7b10b82d3e
--- /dev/null
+++ b/arch/x86/include/asm/microcode_amd.h
@@ -0,0 +1,77 @@
+#ifndef _ASM_X86_MICROCODE_AMD_H
+#define _ASM_X86_MICROCODE_AMD_H
+
+#include <asm/microcode.h>
+
+#define UCODE_MAGIC 0x00414d44
+#define UCODE_EQUIV_CPU_TABLE_TYPE 0x00000000
+#define UCODE_UCODE_TYPE 0x00000001
+
+#define SECTION_HDR_SIZE 8
+#define CONTAINER_HDR_SZ 12
+
+struct equiv_cpu_entry {
+ u32 installed_cpu;
+ u32 fixed_errata_mask;
+ u32 fixed_errata_compare;
+ u16 equiv_cpu;
+ u16 res;
+} __attribute__((packed));
+
+struct microcode_header_amd {
+ u32 data_code;
+ u32 patch_id;
+ u16 mc_patch_data_id;
+ u8 mc_patch_data_len;
+ u8 init_flag;
+ u32 mc_patch_data_checksum;
+ u32 nb_dev_id;
+ u32 sb_dev_id;
+ u16 processor_rev_id;
+ u8 nb_rev_id;
+ u8 sb_rev_id;
+ u8 bios_api_rev;
+ u8 reserved1[3];
+ u32 match_reg[8];
+} __attribute__((packed));
+
+struct microcode_amd {
+ struct microcode_header_amd hdr;
+ unsigned int mpb[0];
+};
+
+static inline u16 find_equiv_id(struct equiv_cpu_entry *equiv_cpu_table,
+ unsigned int sig)
+{
+ int i = 0;
+
+ if (!equiv_cpu_table)
+ return 0;
+
+ while (equiv_cpu_table[i].installed_cpu != 0) {
+ if (sig == equiv_cpu_table[i].installed_cpu)
+ return equiv_cpu_table[i].equiv_cpu;
+
+ i++;
+ }
+ return 0;
+}
+
+extern int __apply_microcode_amd(struct microcode_amd *mc_amd);
+extern int apply_microcode_amd(int cpu);
+extern enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size);
+
+#define PATCH_MAX_SIZE PAGE_SIZE
+extern u8 amd_ucode_patch[PATCH_MAX_SIZE];
+
+#ifdef CONFIG_MICROCODE_AMD_EARLY
+extern void __init load_ucode_amd_bsp(void);
+extern void load_ucode_amd_ap(void);
+extern int __init save_microcode_in_initrd_amd(void);
+#else
+static inline void __init load_ucode_amd_bsp(void) {}
+static inline void load_ucode_amd_ap(void) {}
+static inline int __init save_microcode_in_initrd_amd(void) { return -EINVAL; }
+#endif
+
+#endif /* _ASM_X86_MICROCODE_AMD_H */
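
Illustrative sketch, assuming the usual AMD container layout: how a loader might combine find_equiv_id() with the patch header above to decide whether a patch targets the running CPU. Everything except find_equiv_id() and the structures above is hypothetical.

static bool example_patch_matches(struct equiv_cpu_entry *equiv_table,
                                  struct microcode_amd *mc, u32 cpuid_1_eax)
{
        u16 equiv_id = find_equiv_id(equiv_table, cpuid_1_eax);

        if (!equiv_id)
                return false;   /* CPU not listed in the container's table */

        return mc->hdr.processor_rev_id == equiv_id;
}
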
diff --git a/arch/x86/include/asm/microcode_intel.h b/arch/x86/include/asm/microcode_intel.h
new file mode 100644
index 00000000000..9067166409b
--- /dev/null
+++ b/arch/x86/include/asm/microcode_intel.h
@@ -0,0 +1,87 @@
+#ifndef _ASM_X86_MICROCODE_INTEL_H
+#define _ASM_X86_MICROCODE_INTEL_H
+
+#include <asm/microcode.h>
+
+struct microcode_header_intel {
+ unsigned int hdrver;
+ unsigned int rev;
+ unsigned int date;
+ unsigned int sig;
+ unsigned int cksum;
+ unsigned int ldrver;
+ unsigned int pf;
+ unsigned int datasize;
+ unsigned int totalsize;
+ unsigned int reserved[3];
+};
+
+struct microcode_intel {
+ struct microcode_header_intel hdr;
+ unsigned int bits[0];
+};
+
+/* microcode format is extended from prescott processors */
+struct extended_signature {
+ unsigned int sig;
+ unsigned int pf;
+ unsigned int cksum;
+};
+
+struct extended_sigtable {
+ unsigned int count;
+ unsigned int cksum;
+ unsigned int reserved[3];
+ struct extended_signature sigs[0];
+};
+
+#define DEFAULT_UCODE_DATASIZE (2000)
+#define MC_HEADER_SIZE (sizeof(struct microcode_header_intel))
+#define DEFAULT_UCODE_TOTALSIZE (DEFAULT_UCODE_DATASIZE + MC_HEADER_SIZE)
+#define EXT_HEADER_SIZE (sizeof(struct extended_sigtable))
+#define EXT_SIGNATURE_SIZE (sizeof(struct extended_signature))
+#define DWSIZE (sizeof(u32))
+
+#define get_totalsize(mc) \
+ (((struct microcode_intel *)mc)->hdr.totalsize ? \
+ ((struct microcode_intel *)mc)->hdr.totalsize : \
+ DEFAULT_UCODE_TOTALSIZE)
+
+#define get_datasize(mc) \
+ (((struct microcode_intel *)mc)->hdr.datasize ? \
+ ((struct microcode_intel *)mc)->hdr.datasize : DEFAULT_UCODE_DATASIZE)
+
+#define sigmatch(s1, s2, p1, p2) \
+ (((s1) == (s2)) && (((p1) & (p2)) || (((p1) == 0) && ((p2) == 0))))
+
+#define exttable_size(et) ((et)->count * EXT_SIGNATURE_SIZE + EXT_HEADER_SIZE)
+
+extern int
+get_matching_microcode(unsigned int csig, int cpf, void *mc, int rev);
+extern int microcode_sanity_check(void *mc, int print_err);
+extern int get_matching_sig(unsigned int csig, int cpf, void *mc, int rev);
+extern int
+update_match_revision(struct microcode_header_intel *mc_header, int rev);
+
+#ifdef CONFIG_MICROCODE_INTEL_EARLY
+extern void __init load_ucode_intel_bsp(void);
+extern void load_ucode_intel_ap(void);
+extern void show_ucode_info_early(void);
+extern int __init save_microcode_in_initrd_intel(void);
+#else
+static inline __init void load_ucode_intel_bsp(void) {}
+static inline void load_ucode_intel_ap(void) {}
+static inline void show_ucode_info_early(void) {}
+static inline int __init save_microcode_in_initrd_intel(void) { return -EINVAL; }
+#endif
+
+#if defined(CONFIG_MICROCODE_INTEL_EARLY) && defined(CONFIG_HOTPLUG_CPU)
+extern int save_mc_for_early(u8 *mc);
+#else
+static inline int save_mc_for_early(u8 *mc)
+{
+ return 0;
+}
+#endif
+
+#endif /* _ASM_X86_MICROCODE_INTEL_H */
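
Illustrative sketch built on the size helpers above: a minimal consistency check for an Intel microcode blob (the declared microcode_sanity_check() performs far more validation). The function name is hypothetical.

static int example_mc_sizes_ok(void *mc)
{
        unsigned int total = get_totalsize(mc);
        unsigned int data  = get_datasize(mc);

        /* The header plus payload must fit inside the advertised total size. */
        return data + MC_HEADER_SIZE <= total;
}
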
diff --git a/arch/x86/include/asm/misc.h b/arch/x86/include/asm/misc.h
new file mode 100644
index 00000000000..475f5bbc7f5
--- /dev/null
+++ b/arch/x86/include/asm/misc.h
@@ -0,0 +1,6 @@
+#ifndef _ASM_X86_MISC_H
+#define _ASM_X86_MISC_H
+
+int num_digits(int val);
+
+#endif /* _ASM_X86_MISC_H */
diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
deleted file mode 100644
index 593e51d4643..00000000000
--- a/arch/x86/include/asm/mman.h
+++ /dev/null
@@ -1,8 +0,0 @@
-#ifndef _ASM_X86_MMAN_H
-#define _ASM_X86_MMAN_H
-
-#define MAP_32BIT 0x40 /* only give out 32bit addresses */
-
-#include <asm-generic/mman.h>
-
-#endif /* _ASM_X86_MMAN_H */
diff --git a/arch/x86/include/asm/mmconfig.h b/arch/x86/include/asm/mmconfig.h
index 9b119da1d10..04a3fed22cf 100644
--- a/arch/x86/include/asm/mmconfig.h
+++ b/arch/x86/include/asm/mmconfig.h
@@ -2,8 +2,8 @@
#define _ASM_X86_MMCONFIG_H
#ifdef CONFIG_PCI_MMCONFIG
-extern void __cpuinit fam10h_check_enable_mmcfg(void);
-extern void __cpuinit check_enable_amd_mmconf_dmi(void);
+extern void fam10h_check_enable_mmcfg(void);
+extern void check_enable_amd_mmconf_dmi(void);
#else
static inline void fam10h_check_enable_mmcfg(void) { }
static inline void check_enable_amd_mmconf_dmi(void) { }
diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
index 5f55e696276..876e74e8eec 100644
--- a/arch/x86/include/asm/mmu.h
+++ b/arch/x86/include/asm/mmu.h
@@ -18,7 +18,7 @@ typedef struct {
#endif
struct mutex lock;
- void *vdso;
+ void __user *vdso;
} mm_context_t;
#ifdef CONFIG_SMP
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index 69021528b43..be12c534fd5 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -25,8 +25,8 @@ void destroy_context(struct mm_struct *mm);
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
#ifdef CONFIG_SMP
- if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
- percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
+ if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
+ this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
#endif
}
@@ -37,30 +37,36 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
if (likely(prev != next)) {
#ifdef CONFIG_SMP
- percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
- percpu_write(cpu_tlbstate.active_mm, next);
+ this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
+ this_cpu_write(cpu_tlbstate.active_mm, next);
#endif
cpumask_set_cpu(cpu, mm_cpumask(next));
/* Re-load page tables */
load_cr3(next->pgd);
- /* stop flush ipis for the previous mm */
+ /* Stop flush ipis for the previous mm */
cpumask_clear_cpu(cpu, mm_cpumask(prev));
- /*
- * load the LDT, if the LDT is different:
- */
+ /* Load the LDT, if the LDT is different: */
if (unlikely(prev->context.ldt != next->context.ldt))
load_LDT_nolock(&next->context);
}
#ifdef CONFIG_SMP
- else {
- percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
- BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
+ else {
+ this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
+ BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);
- if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next))) {
- /* We were in lazy tlb mode and leave_mm disabled
+ if (!cpumask_test_cpu(cpu, mm_cpumask(next))) {
+ /*
+ * On established mms, the mm_cpumask is only changed
+ * from irq context, from ptep_clear_flush() while in
+ * lazy tlb mode, and here. Irqs are blocked during
+ * schedule, protecting us from simultaneous changes.
+ */
+ cpumask_set_cpu(cpu, mm_cpumask(next));
+ /*
+ * We were in lazy tlb mode and leave_mm disabled
* tlb flush IPI delivery. We must reload CR3
* to make sure to use no freed page tables.
*/
diff --git a/arch/x86/include/asm/mmzone.h b/arch/x86/include/asm/mmzone.h
index 64217ea16a3..d497bc425ca 100644
--- a/arch/x86/include/asm/mmzone.h
+++ b/arch/x86/include/asm/mmzone.h
@@ -1,5 +1,5 @@
#ifdef CONFIG_X86_32
-# include "mmzone_32.h"
+# include <asm/mmzone_32.h>
#else
-# include "mmzone_64.h"
+# include <asm/mmzone_64.h>
#endif
diff --git a/arch/x86/include/asm/mmzone_32.h b/arch/x86/include/asm/mmzone_32.h
index 55728e12147..1ec990bd7dc 100644
--- a/arch/x86/include/asm/mmzone_32.h
+++ b/arch/x86/include/asm/mmzone_32.h
@@ -11,15 +11,6 @@
#ifdef CONFIG_NUMA
extern struct pglist_data *node_data[];
#define NODE_DATA(nid) (node_data[nid])
-
-#include <asm/numaq.h>
-
-extern void resume_map_numa_kva(pgd_t *pgd);
-
-#else /* !CONFIG_NUMA */
-
-static inline void resume_map_numa_kva(pgd_t *pgd) {}
-
#endif /* CONFIG_NUMA */
#ifdef CONFIG_DISCONTIGMEM
@@ -61,10 +52,4 @@ static inline int pfn_valid(int pfn)
#endif /* CONFIG_DISCONTIGMEM */
-#ifdef CONFIG_NEED_MULTIPLE_NODES
-/* always use node 0 for bootmem on this numa platform */
-#define bootmem_arch_preferred_node(__bdata, size, align, goal, limit) \
- (NODE_DATA(0)->bdata)
-#endif /* CONFIG_NEED_MULTIPLE_NODES */
-
#endif /* _ASM_X86_MMZONE_32_H */
diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
index 9eae7752ae9..e3b7819caee 100644
--- a/arch/x86/include/asm/module.h
+++ b/arch/x86/include/asm/module.h
@@ -5,8 +5,6 @@
#ifdef CONFIG_X86_64
/* X86_64 does not define MODULE_PROC_FAMILY */
-#elif defined CONFIG_M386
-#define MODULE_PROC_FAMILY "386 "
#elif defined CONFIG_M486
#define MODULE_PROC_FAMILY "486 "
#elif defined CONFIG_M586
diff --git a/arch/x86/include/asm/mpspec.h b/arch/x86/include/asm/mpspec.h
index 9c7d95f6174..f5a61795673 100644
--- a/arch/x86/include/asm/mpspec.h
+++ b/arch/x86/include/asm/mpspec.h
@@ -1,7 +1,6 @@
#ifndef _ASM_X86_MPSPEC_H
#define _ASM_X86_MPSPEC_H
-#include <linux/init.h>
#include <asm/mpspec_def.h>
#include <asm/x86_init.h>
@@ -26,12 +25,6 @@ extern int pic_mode;
extern unsigned int def_to_bigsmp;
-#ifdef CONFIG_X86_NUMAQ
-extern int mp_bus_id_to_node[MAX_MP_BUSSES];
-extern int mp_bus_id_to_local[MAX_MP_BUSSES];
-extern int quad_local_to_mp_bus_id [NR_CPUS/4][4];
-#endif
-
#else /* CONFIG_X86_64: */
#define MAX_MP_BUSSES 256
@@ -40,7 +33,7 @@ extern int quad_local_to_mp_bus_id [NR_CPUS/4][4];
#endif /* CONFIG_X86_64 */
-#if defined(CONFIG_MCA) || defined(CONFIG_EISA)
+#ifdef CONFIG_EISA
extern int mp_bus_id_to_type[MAX_MP_BUSSES];
#endif
@@ -94,7 +87,7 @@ static inline void early_reserve_e820_mpc_new(void) { }
#define default_get_smp_config x86_init_uint_noop
#endif
-void __cpuinit generic_processor_info(int apicid, int version);
+int generic_processor_info(int apicid, int version);
#ifdef CONFIG_ACPI
extern void mp_register_ioapic(int id, u32 address, u32 gsi_base);
extern void mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger,
diff --git a/arch/x86/include/asm/mpspec_def.h b/arch/x86/include/asm/mpspec_def.h
index c0a955a9a08..b31f8c09827 100644
--- a/arch/x86/include/asm/mpspec_def.h
+++ b/arch/x86/include/asm/mpspec_def.h
@@ -84,7 +84,7 @@ struct mpc_bus {
#define BUSTYPE_EISA "EISA"
#define BUSTYPE_ISA "ISA"
#define BUSTYPE_INTERN "INTERN" /* Internal BUS */
-#define BUSTYPE_MCA "MCA"
+#define BUSTYPE_MCA "MCA" /* Obsolete */
#define BUSTYPE_VL "VL" /* Local bus */
#define BUSTYPE_PCI "PCI"
#define BUSTYPE_PCMCIA "PCMCIA"
@@ -169,6 +169,5 @@ enum mp_bustype {
MP_BUS_ISA = 1,
MP_BUS_EISA,
MP_BUS_PCI,
- MP_BUS_MCA,
};
#endif /* _ASM_X86_MPSPEC_DEF_H */
diff --git a/arch/x86/include/asm/mrst-vrtc.h b/arch/x86/include/asm/mrst-vrtc.h
deleted file mode 100644
index 73668abdbed..00000000000
--- a/arch/x86/include/asm/mrst-vrtc.h
+++ /dev/null
@@ -1,9 +0,0 @@
-#ifndef _MRST_VRTC_H
-#define _MRST_VRTC_H
-
-extern unsigned char vrtc_cmos_read(unsigned char reg);
-extern void vrtc_cmos_write(unsigned char val, unsigned char reg);
-extern unsigned long vrtc_get_time(void);
-extern int vrtc_set_mmss(unsigned long nowtime);
-
-#endif
diff --git a/arch/x86/include/asm/mrst.h b/arch/x86/include/asm/mrst.h
deleted file mode 100644
index 0a0a9546043..00000000000
--- a/arch/x86/include/asm/mrst.h
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * mrst.h: Intel Moorestown platform specific setup code
- *
- * (C) Copyright 2009 Intel Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; version 2
- * of the License.
- */
-#ifndef _ASM_X86_MRST_H
-#define _ASM_X86_MRST_H
-
-#include <linux/sfi.h>
-
-extern int pci_mrst_init(void);
-extern int __init sfi_parse_mrtc(struct sfi_table_header *table);
-extern int sfi_mrtc_num;
-extern struct sfi_rtc_table_entry sfi_mrtc_array[];
-
-/*
- * Medfield is the follow-up of Moorestown, it combines two chip solution into
- * one. Other than that it also added always-on and constant tsc and lapic
- * timers. Medfield is the platform name, and the chip name is called Penwell
- * we treat Medfield/Penwell as a variant of Moorestown. Penwell can be
- * identified via MSRs.
- */
-enum mrst_cpu_type {
- MRST_CPU_CHIP_LINCROFT = 1,
- MRST_CPU_CHIP_PENWELL,
-};
-
-extern enum mrst_cpu_type __mrst_cpu_chip;
-
-#ifdef CONFIG_X86_INTEL_MID
-
-static inline enum mrst_cpu_type mrst_identify_cpu(void)
-{
- return __mrst_cpu_chip;
-}
-
-#else /* !CONFIG_X86_INTEL_MID */
-
-#define mrst_identify_cpu() (0)
-
-#endif /* !CONFIG_X86_INTEL_MID */
-
-enum mrst_timer_options {
- MRST_TIMER_DEFAULT,
- MRST_TIMER_APBT_ONLY,
- MRST_TIMER_LAPIC_APBT,
-};
-
-extern enum mrst_timer_options mrst_timer_options;
-
-/*
- * Penwell uses spread spectrum clock, so the freq number is not exactly
- * the same as reported by MSR based on SDM.
- */
-#define PENWELL_FSB_FREQ_83SKU 83200
-#define PENWELL_FSB_FREQ_100SKU 99840
-
-#define SFI_MTMR_MAX_NUM 8
-#define SFI_MRTC_MAX 8
-
-extern struct console early_mrst_console;
-extern void mrst_early_console_init(void);
-
-extern struct console early_hsu_console;
-extern void hsu_early_console_init(const char *);
-
-extern void intel_scu_devices_create(void);
-extern void intel_scu_devices_destroy(void);
-
-/* VRTC timer */
-#define MRST_VRTC_MAP_SZ (1024)
-/*#define MRST_VRTC_PGOFFSET (0xc00) */
-
-extern void mrst_rtc_init(void);
-
-#endif /* _ASM_X86_MRST_H */
diff --git a/arch/x86/include/asm/msgbuf.h b/arch/x86/include/asm/msgbuf.h
deleted file mode 100644
index 809134c644a..00000000000
--- a/arch/x86/include/asm/msgbuf.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/msgbuf.h>
diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h
index 79ce5685ab6..c163215abb9 100644
--- a/arch/x86/include/asm/mshyperv.h
+++ b/arch/x86/include/asm/mshyperv.h
@@ -2,6 +2,7 @@
#define _ASM_X86_MSHYPER_H
#include <linux/types.h>
+#include <linux/interrupt.h>
#include <asm/hyperv.h>
struct ms_hyperv_info {
@@ -11,4 +12,12 @@ struct ms_hyperv_info {
extern struct ms_hyperv_info ms_hyperv;
+void hyperv_callback_vector(void);
+#ifdef CONFIG_TRACING
+#define trace_hyperv_callback_vector hyperv_callback_vector
+#endif
+void hyperv_vector_handler(struct pt_regs *regs);
+void hv_setup_vmbus_irq(void (*handler)(void));
+void hv_remove_vmbus_irq(void);
+
#endif
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
deleted file mode 100644
index a6962d9161a..00000000000
--- a/arch/x86/include/asm/msr-index.h
+++ /dev/null
@@ -1,465 +0,0 @@
-#ifndef _ASM_X86_MSR_INDEX_H
-#define _ASM_X86_MSR_INDEX_H
-
-/* CPU model specific register (MSR) numbers */
-
-/* x86-64 specific MSRs */
-#define MSR_EFER 0xc0000080 /* extended feature register */
-#define MSR_STAR 0xc0000081 /* legacy mode SYSCALL target */
-#define MSR_LSTAR 0xc0000082 /* long mode SYSCALL target */
-#define MSR_CSTAR 0xc0000083 /* compat mode SYSCALL target */
-#define MSR_SYSCALL_MASK 0xc0000084 /* EFLAGS mask for syscall */
-#define MSR_FS_BASE 0xc0000100 /* 64bit FS base */
-#define MSR_GS_BASE 0xc0000101 /* 64bit GS base */
-#define MSR_KERNEL_GS_BASE 0xc0000102 /* SwapGS GS shadow */
-#define MSR_TSC_AUX 0xc0000103 /* Auxiliary TSC */
-
-/* EFER bits: */
-#define _EFER_SCE 0 /* SYSCALL/SYSRET */
-#define _EFER_LME 8 /* Long mode enable */
-#define _EFER_LMA 10 /* Long mode active (read-only) */
-#define _EFER_NX 11 /* No execute enable */
-#define _EFER_SVME 12 /* Enable virtualization */
-#define _EFER_LMSLE 13 /* Long Mode Segment Limit Enable */
-#define _EFER_FFXSR 14 /* Enable Fast FXSAVE/FXRSTOR */
-
-#define EFER_SCE (1<<_EFER_SCE)
-#define EFER_LME (1<<_EFER_LME)
-#define EFER_LMA (1<<_EFER_LMA)
-#define EFER_NX (1<<_EFER_NX)
-#define EFER_SVME (1<<_EFER_SVME)
-#define EFER_LMSLE (1<<_EFER_LMSLE)
-#define EFER_FFXSR (1<<_EFER_FFXSR)
-
-/* Intel MSRs. Some also available on other CPUs */
-#define MSR_IA32_PERFCTR0 0x000000c1
-#define MSR_IA32_PERFCTR1 0x000000c2
-#define MSR_FSB_FREQ 0x000000cd
-
-#define MSR_NHM_SNB_PKG_CST_CFG_CTL 0x000000e2
-#define NHM_C3_AUTO_DEMOTE (1UL << 25)
-#define NHM_C1_AUTO_DEMOTE (1UL << 26)
-#define ATM_LNC_C6_AUTO_DEMOTE (1UL << 25)
-
-#define MSR_MTRRcap 0x000000fe
-#define MSR_IA32_BBL_CR_CTL 0x00000119
-#define MSR_IA32_BBL_CR_CTL3 0x0000011e
-
-#define MSR_IA32_SYSENTER_CS 0x00000174
-#define MSR_IA32_SYSENTER_ESP 0x00000175
-#define MSR_IA32_SYSENTER_EIP 0x00000176
-
-#define MSR_IA32_MCG_CAP 0x00000179
-#define MSR_IA32_MCG_STATUS 0x0000017a
-#define MSR_IA32_MCG_CTL 0x0000017b
-
-#define MSR_OFFCORE_RSP_0 0x000001a6
-#define MSR_OFFCORE_RSP_1 0x000001a7
-
-#define MSR_IA32_PEBS_ENABLE 0x000003f1
-#define MSR_IA32_DS_AREA 0x00000600
-#define MSR_IA32_PERF_CAPABILITIES 0x00000345
-
-#define MSR_MTRRfix64K_00000 0x00000250
-#define MSR_MTRRfix16K_80000 0x00000258
-#define MSR_MTRRfix16K_A0000 0x00000259
-#define MSR_MTRRfix4K_C0000 0x00000268
-#define MSR_MTRRfix4K_C8000 0x00000269
-#define MSR_MTRRfix4K_D0000 0x0000026a
-#define MSR_MTRRfix4K_D8000 0x0000026b
-#define MSR_MTRRfix4K_E0000 0x0000026c
-#define MSR_MTRRfix4K_E8000 0x0000026d
-#define MSR_MTRRfix4K_F0000 0x0000026e
-#define MSR_MTRRfix4K_F8000 0x0000026f
-#define MSR_MTRRdefType 0x000002ff
-
-#define MSR_IA32_CR_PAT 0x00000277
-
-#define MSR_IA32_DEBUGCTLMSR 0x000001d9
-#define MSR_IA32_LASTBRANCHFROMIP 0x000001db
-#define MSR_IA32_LASTBRANCHTOIP 0x000001dc
-#define MSR_IA32_LASTINTFROMIP 0x000001dd
-#define MSR_IA32_LASTINTTOIP 0x000001de
-
-/* DEBUGCTLMSR bits (others vary by model): */
-#define DEBUGCTLMSR_LBR (1UL << 0) /* last branch recording */
-#define DEBUGCTLMSR_BTF (1UL << 1) /* single-step on branches */
-#define DEBUGCTLMSR_TR (1UL << 6)
-#define DEBUGCTLMSR_BTS (1UL << 7)
-#define DEBUGCTLMSR_BTINT (1UL << 8)
-#define DEBUGCTLMSR_BTS_OFF_OS (1UL << 9)
-#define DEBUGCTLMSR_BTS_OFF_USR (1UL << 10)
-#define DEBUGCTLMSR_FREEZE_LBRS_ON_PMI (1UL << 11)
-
-#define MSR_IA32_MC0_CTL 0x00000400
-#define MSR_IA32_MC0_STATUS 0x00000401
-#define MSR_IA32_MC0_ADDR 0x00000402
-#define MSR_IA32_MC0_MISC 0x00000403
-
-#define MSR_AMD64_MC0_MASK 0xc0010044
-
-#define MSR_IA32_MCx_CTL(x) (MSR_IA32_MC0_CTL + 4*(x))
-#define MSR_IA32_MCx_STATUS(x) (MSR_IA32_MC0_STATUS + 4*(x))
-#define MSR_IA32_MCx_ADDR(x) (MSR_IA32_MC0_ADDR + 4*(x))
-#define MSR_IA32_MCx_MISC(x) (MSR_IA32_MC0_MISC + 4*(x))
-
-#define MSR_AMD64_MCx_MASK(x) (MSR_AMD64_MC0_MASK + (x))
-
-/* These are consecutive and not in the normal 4er MCE bank block */
-#define MSR_IA32_MC0_CTL2 0x00000280
-#define MSR_IA32_MCx_CTL2(x) (MSR_IA32_MC0_CTL2 + (x))
-
-#define MSR_P6_PERFCTR0 0x000000c1
-#define MSR_P6_PERFCTR1 0x000000c2
-#define MSR_P6_EVNTSEL0 0x00000186
-#define MSR_P6_EVNTSEL1 0x00000187
-
-/* AMD64 MSRs. Not complete. See the architecture manual for a more
- complete list. */
-
-#define MSR_AMD64_PATCH_LEVEL 0x0000008b
-#define MSR_AMD64_TSC_RATIO 0xc0000104
-#define MSR_AMD64_NB_CFG 0xc001001f
-#define MSR_AMD64_PATCH_LOADER 0xc0010020
-#define MSR_AMD64_OSVW_ID_LENGTH 0xc0010140
-#define MSR_AMD64_OSVW_STATUS 0xc0010141
-#define MSR_AMD64_DC_CFG 0xc0011022
-#define MSR_AMD64_IBSFETCHCTL 0xc0011030
-#define MSR_AMD64_IBSFETCHLINAD 0xc0011031
-#define MSR_AMD64_IBSFETCHPHYSAD 0xc0011032
-#define MSR_AMD64_IBSOPCTL 0xc0011033
-#define MSR_AMD64_IBSOPRIP 0xc0011034
-#define MSR_AMD64_IBSOPDATA 0xc0011035
-#define MSR_AMD64_IBSOPDATA2 0xc0011036
-#define MSR_AMD64_IBSOPDATA3 0xc0011037
-#define MSR_AMD64_IBSDCLINAD 0xc0011038
-#define MSR_AMD64_IBSDCPHYSAD 0xc0011039
-#define MSR_AMD64_IBSCTL 0xc001103a
-#define MSR_AMD64_IBSBRTARGET 0xc001103b
-
-/* Fam 15h MSRs */
-#define MSR_F15H_PERF_CTL 0xc0010200
-#define MSR_F15H_PERF_CTR 0xc0010201
-
-/* Fam 10h MSRs */
-#define MSR_FAM10H_MMIO_CONF_BASE 0xc0010058
-#define FAM10H_MMIO_CONF_ENABLE (1<<0)
-#define FAM10H_MMIO_CONF_BUSRANGE_MASK 0xf
-#define FAM10H_MMIO_CONF_BUSRANGE_SHIFT 2
-#define FAM10H_MMIO_CONF_BASE_MASK 0xfffffffULL
-#define FAM10H_MMIO_CONF_BASE_SHIFT 20
-#define MSR_FAM10H_NODE_ID 0xc001100c
-
-/* K8 MSRs */
-#define MSR_K8_TOP_MEM1 0xc001001a
-#define MSR_K8_TOP_MEM2 0xc001001d
-#define MSR_K8_SYSCFG 0xc0010010
-#define MSR_K8_INT_PENDING_MSG 0xc0010055
-/* C1E active bits in int pending message */
-#define K8_INTP_C1E_ACTIVE_MASK 0x18000000
-#define MSR_K8_TSEG_ADDR 0xc0010112
-#define K8_MTRRFIXRANGE_DRAM_ENABLE 0x00040000 /* MtrrFixDramEn bit */
-#define K8_MTRRFIXRANGE_DRAM_MODIFY 0x00080000 /* MtrrFixDramModEn bit */
-#define K8_MTRR_RDMEM_WRMEM_MASK 0x18181818 /* Mask: RdMem|WrMem */
-
-/* K7 MSRs */
-#define MSR_K7_EVNTSEL0 0xc0010000
-#define MSR_K7_PERFCTR0 0xc0010004
-#define MSR_K7_EVNTSEL1 0xc0010001
-#define MSR_K7_PERFCTR1 0xc0010005
-#define MSR_K7_EVNTSEL2 0xc0010002
-#define MSR_K7_PERFCTR2 0xc0010006
-#define MSR_K7_EVNTSEL3 0xc0010003
-#define MSR_K7_PERFCTR3 0xc0010007
-#define MSR_K7_CLK_CTL 0xc001001b
-#define MSR_K7_HWCR 0xc0010015
-#define MSR_K7_FID_VID_CTL 0xc0010041
-#define MSR_K7_FID_VID_STATUS 0xc0010042
-
-/* K6 MSRs */
-#define MSR_K6_WHCR 0xc0000082
-#define MSR_K6_UWCCR 0xc0000085
-#define MSR_K6_EPMR 0xc0000086
-#define MSR_K6_PSOR 0xc0000087
-#define MSR_K6_PFIR 0xc0000088
-
-/* Centaur-Hauls/IDT defined MSRs. */
-#define MSR_IDT_FCR1 0x00000107
-#define MSR_IDT_FCR2 0x00000108
-#define MSR_IDT_FCR3 0x00000109
-#define MSR_IDT_FCR4 0x0000010a
-
-#define MSR_IDT_MCR0 0x00000110
-#define MSR_IDT_MCR1 0x00000111
-#define MSR_IDT_MCR2 0x00000112
-#define MSR_IDT_MCR3 0x00000113
-#define MSR_IDT_MCR4 0x00000114
-#define MSR_IDT_MCR5 0x00000115
-#define MSR_IDT_MCR6 0x00000116
-#define MSR_IDT_MCR7 0x00000117
-#define MSR_IDT_MCR_CTRL 0x00000120
-
-/* VIA Cyrix defined MSRs*/
-#define MSR_VIA_FCR 0x00001107
-#define MSR_VIA_LONGHAUL 0x0000110a
-#define MSR_VIA_RNG 0x0000110b
-#define MSR_VIA_BCR2 0x00001147
-
-/* Transmeta defined MSRs */
-#define MSR_TMTA_LONGRUN_CTRL 0x80868010
-#define MSR_TMTA_LONGRUN_FLAGS 0x80868011
-#define MSR_TMTA_LRTI_READOUT 0x80868018
-#define MSR_TMTA_LRTI_VOLT_MHZ 0x8086801a
-
-/* Intel defined MSRs. */
-#define MSR_IA32_P5_MC_ADDR 0x00000000
-#define MSR_IA32_P5_MC_TYPE 0x00000001
-#define MSR_IA32_TSC 0x00000010
-#define MSR_IA32_PLATFORM_ID 0x00000017
-#define MSR_IA32_EBL_CR_POWERON 0x0000002a
-#define MSR_EBC_FREQUENCY_ID 0x0000002c
-#define MSR_IA32_FEATURE_CONTROL 0x0000003a
-
-#define FEATURE_CONTROL_LOCKED (1<<0)
-#define FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX (1<<1)
-#define FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX (1<<2)
-
-#define MSR_IA32_APICBASE 0x0000001b
-#define MSR_IA32_APICBASE_BSP (1<<8)
-#define MSR_IA32_APICBASE_ENABLE (1<<11)
-#define MSR_IA32_APICBASE_BASE (0xfffff<<12)
-
-#define MSR_IA32_TSCDEADLINE 0x000006e0
-
-#define MSR_IA32_UCODE_WRITE 0x00000079
-#define MSR_IA32_UCODE_REV 0x0000008b
-
-#define MSR_IA32_PERF_STATUS 0x00000198
-#define MSR_IA32_PERF_CTL 0x00000199
-
-#define MSR_IA32_MPERF 0x000000e7
-#define MSR_IA32_APERF 0x000000e8
-
-#define MSR_IA32_THERM_CONTROL 0x0000019a
-#define MSR_IA32_THERM_INTERRUPT 0x0000019b
-
-#define THERM_INT_HIGH_ENABLE (1 << 0)
-#define THERM_INT_LOW_ENABLE (1 << 1)
-#define THERM_INT_PLN_ENABLE (1 << 24)
-
-#define MSR_IA32_THERM_STATUS 0x0000019c
-
-#define THERM_STATUS_PROCHOT (1 << 0)
-#define THERM_STATUS_POWER_LIMIT (1 << 10)
-
-#define MSR_THERM2_CTL 0x0000019d
-
-#define MSR_THERM2_CTL_TM_SELECT (1ULL << 16)
-
-#define MSR_IA32_MISC_ENABLE 0x000001a0
-
-#define MSR_IA32_TEMPERATURE_TARGET 0x000001a2
-
-#define MSR_IA32_ENERGY_PERF_BIAS 0x000001b0
-#define ENERGY_PERF_BIAS_PERFORMANCE 0
-#define ENERGY_PERF_BIAS_NORMAL 6
-#define ENERGY_PERF_BIAS_POWERSAVE 15
-
-#define MSR_IA32_PACKAGE_THERM_STATUS 0x000001b1
-
-#define PACKAGE_THERM_STATUS_PROCHOT (1 << 0)
-#define PACKAGE_THERM_STATUS_POWER_LIMIT (1 << 10)
-
-#define MSR_IA32_PACKAGE_THERM_INTERRUPT 0x000001b2
-
-#define PACKAGE_THERM_INT_HIGH_ENABLE (1 << 0)
-#define PACKAGE_THERM_INT_LOW_ENABLE (1 << 1)
-#define PACKAGE_THERM_INT_PLN_ENABLE (1 << 24)
-
-/* Thermal Thresholds Support */
-#define THERM_INT_THRESHOLD0_ENABLE (1 << 15)
-#define THERM_SHIFT_THRESHOLD0 8
-#define THERM_MASK_THRESHOLD0 (0x7f << THERM_SHIFT_THRESHOLD0)
-#define THERM_INT_THRESHOLD1_ENABLE (1 << 23)
-#define THERM_SHIFT_THRESHOLD1 16
-#define THERM_MASK_THRESHOLD1 (0x7f << THERM_SHIFT_THRESHOLD1)
-#define THERM_STATUS_THRESHOLD0 (1 << 6)
-#define THERM_LOG_THRESHOLD0 (1 << 7)
-#define THERM_STATUS_THRESHOLD1 (1 << 8)
-#define THERM_LOG_THRESHOLD1 (1 << 9)
-
-/* MISC_ENABLE bits: architectural */
-#define MSR_IA32_MISC_ENABLE_FAST_STRING (1ULL << 0)
-#define MSR_IA32_MISC_ENABLE_TCC (1ULL << 1)
-#define MSR_IA32_MISC_ENABLE_EMON (1ULL << 7)
-#define MSR_IA32_MISC_ENABLE_BTS_UNAVAIL (1ULL << 11)
-#define MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL (1ULL << 12)
-#define MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP (1ULL << 16)
-#define MSR_IA32_MISC_ENABLE_MWAIT (1ULL << 18)
-#define MSR_IA32_MISC_ENABLE_LIMIT_CPUID (1ULL << 22)
-#define MSR_IA32_MISC_ENABLE_XTPR_DISABLE (1ULL << 23)
-#define MSR_IA32_MISC_ENABLE_XD_DISABLE (1ULL << 34)
-
-/* MISC_ENABLE bits: model-specific, meaning may vary from core to core */
-#define MSR_IA32_MISC_ENABLE_X87_COMPAT (1ULL << 2)
-#define MSR_IA32_MISC_ENABLE_TM1 (1ULL << 3)
-#define MSR_IA32_MISC_ENABLE_SPLIT_LOCK_DISABLE (1ULL << 4)
-#define MSR_IA32_MISC_ENABLE_L3CACHE_DISABLE (1ULL << 6)
-#define MSR_IA32_MISC_ENABLE_SUPPRESS_LOCK (1ULL << 8)
-#define MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE (1ULL << 9)
-#define MSR_IA32_MISC_ENABLE_FERR (1ULL << 10)
-#define MSR_IA32_MISC_ENABLE_FERR_MULTIPLEX (1ULL << 10)
-#define MSR_IA32_MISC_ENABLE_TM2 (1ULL << 13)
-#define MSR_IA32_MISC_ENABLE_ADJ_PREF_DISABLE (1ULL << 19)
-#define MSR_IA32_MISC_ENABLE_SPEEDSTEP_LOCK (1ULL << 20)
-#define MSR_IA32_MISC_ENABLE_L1D_CONTEXT (1ULL << 24)
-#define MSR_IA32_MISC_ENABLE_DCU_PREF_DISABLE (1ULL << 37)
-#define MSR_IA32_MISC_ENABLE_TURBO_DISABLE (1ULL << 38)
-#define MSR_IA32_MISC_ENABLE_IP_PREF_DISABLE (1ULL << 39)
-
-/* P4/Xeon+ specific */
-#define MSR_IA32_MCG_EAX 0x00000180
-#define MSR_IA32_MCG_EBX 0x00000181
-#define MSR_IA32_MCG_ECX 0x00000182
-#define MSR_IA32_MCG_EDX 0x00000183
-#define MSR_IA32_MCG_ESI 0x00000184
-#define MSR_IA32_MCG_EDI 0x00000185
-#define MSR_IA32_MCG_EBP 0x00000186
-#define MSR_IA32_MCG_ESP 0x00000187
-#define MSR_IA32_MCG_EFLAGS 0x00000188
-#define MSR_IA32_MCG_EIP 0x00000189
-#define MSR_IA32_MCG_RESERVED 0x0000018a
-
-/* Pentium IV performance counter MSRs */
-#define MSR_P4_BPU_PERFCTR0 0x00000300
-#define MSR_P4_BPU_PERFCTR1 0x00000301
-#define MSR_P4_BPU_PERFCTR2 0x00000302
-#define MSR_P4_BPU_PERFCTR3 0x00000303
-#define MSR_P4_MS_PERFCTR0 0x00000304
-#define MSR_P4_MS_PERFCTR1 0x00000305
-#define MSR_P4_MS_PERFCTR2 0x00000306
-#define MSR_P4_MS_PERFCTR3 0x00000307
-#define MSR_P4_FLAME_PERFCTR0 0x00000308
-#define MSR_P4_FLAME_PERFCTR1 0x00000309
-#define MSR_P4_FLAME_PERFCTR2 0x0000030a
-#define MSR_P4_FLAME_PERFCTR3 0x0000030b
-#define MSR_P4_IQ_PERFCTR0 0x0000030c
-#define MSR_P4_IQ_PERFCTR1 0x0000030d
-#define MSR_P4_IQ_PERFCTR2 0x0000030e
-#define MSR_P4_IQ_PERFCTR3 0x0000030f
-#define MSR_P4_IQ_PERFCTR4 0x00000310
-#define MSR_P4_IQ_PERFCTR5 0x00000311
-#define MSR_P4_BPU_CCCR0 0x00000360
-#define MSR_P4_BPU_CCCR1 0x00000361
-#define MSR_P4_BPU_CCCR2 0x00000362
-#define MSR_P4_BPU_CCCR3 0x00000363
-#define MSR_P4_MS_CCCR0 0x00000364
-#define MSR_P4_MS_CCCR1 0x00000365
-#define MSR_P4_MS_CCCR2 0x00000366
-#define MSR_P4_MS_CCCR3 0x00000367
-#define MSR_P4_FLAME_CCCR0 0x00000368
-#define MSR_P4_FLAME_CCCR1 0x00000369
-#define MSR_P4_FLAME_CCCR2 0x0000036a
-#define MSR_P4_FLAME_CCCR3 0x0000036b
-#define MSR_P4_IQ_CCCR0 0x0000036c
-#define MSR_P4_IQ_CCCR1 0x0000036d
-#define MSR_P4_IQ_CCCR2 0x0000036e
-#define MSR_P4_IQ_CCCR3 0x0000036f
-#define MSR_P4_IQ_CCCR4 0x00000370
-#define MSR_P4_IQ_CCCR5 0x00000371
-#define MSR_P4_ALF_ESCR0 0x000003ca
-#define MSR_P4_ALF_ESCR1 0x000003cb
-#define MSR_P4_BPU_ESCR0 0x000003b2
-#define MSR_P4_BPU_ESCR1 0x000003b3
-#define MSR_P4_BSU_ESCR0 0x000003a0
-#define MSR_P4_BSU_ESCR1 0x000003a1
-#define MSR_P4_CRU_ESCR0 0x000003b8
-#define MSR_P4_CRU_ESCR1 0x000003b9
-#define MSR_P4_CRU_ESCR2 0x000003cc
-#define MSR_P4_CRU_ESCR3 0x000003cd
-#define MSR_P4_CRU_ESCR4 0x000003e0
-#define MSR_P4_CRU_ESCR5 0x000003e1
-#define MSR_P4_DAC_ESCR0 0x000003a8
-#define MSR_P4_DAC_ESCR1 0x000003a9
-#define MSR_P4_FIRM_ESCR0 0x000003a4
-#define MSR_P4_FIRM_ESCR1 0x000003a5
-#define MSR_P4_FLAME_ESCR0 0x000003a6
-#define MSR_P4_FLAME_ESCR1 0x000003a7
-#define MSR_P4_FSB_ESCR0 0x000003a2
-#define MSR_P4_FSB_ESCR1 0x000003a3
-#define MSR_P4_IQ_ESCR0 0x000003ba
-#define MSR_P4_IQ_ESCR1 0x000003bb
-#define MSR_P4_IS_ESCR0 0x000003b4
-#define MSR_P4_IS_ESCR1 0x000003b5
-#define MSR_P4_ITLB_ESCR0 0x000003b6
-#define MSR_P4_ITLB_ESCR1 0x000003b7
-#define MSR_P4_IX_ESCR0 0x000003c8
-#define MSR_P4_IX_ESCR1 0x000003c9
-#define MSR_P4_MOB_ESCR0 0x000003aa
-#define MSR_P4_MOB_ESCR1 0x000003ab
-#define MSR_P4_MS_ESCR0 0x000003c0
-#define MSR_P4_MS_ESCR1 0x000003c1
-#define MSR_P4_PMH_ESCR0 0x000003ac
-#define MSR_P4_PMH_ESCR1 0x000003ad
-#define MSR_P4_RAT_ESCR0 0x000003bc
-#define MSR_P4_RAT_ESCR1 0x000003bd
-#define MSR_P4_SAAT_ESCR0 0x000003ae
-#define MSR_P4_SAAT_ESCR1 0x000003af
-#define MSR_P4_SSU_ESCR0 0x000003be
-#define MSR_P4_SSU_ESCR1 0x000003bf /* guess: not in manual */
-
-#define MSR_P4_TBPU_ESCR0 0x000003c2
-#define MSR_P4_TBPU_ESCR1 0x000003c3
-#define MSR_P4_TC_ESCR0 0x000003c4
-#define MSR_P4_TC_ESCR1 0x000003c5
-#define MSR_P4_U2L_ESCR0 0x000003b0
-#define MSR_P4_U2L_ESCR1 0x000003b1
-
-#define MSR_P4_PEBS_MATRIX_VERT 0x000003f2
-
-/* Intel Core-based CPU performance counters */
-#define MSR_CORE_PERF_FIXED_CTR0 0x00000309
-#define MSR_CORE_PERF_FIXED_CTR1 0x0000030a
-#define MSR_CORE_PERF_FIXED_CTR2 0x0000030b
-#define MSR_CORE_PERF_FIXED_CTR_CTRL 0x0000038d
-#define MSR_CORE_PERF_GLOBAL_STATUS 0x0000038e
-#define MSR_CORE_PERF_GLOBAL_CTRL 0x0000038f
-#define MSR_CORE_PERF_GLOBAL_OVF_CTRL 0x00000390
-
-/* Geode defined MSRs */
-#define MSR_GEODE_BUSCONT_CONF0 0x00001900
-
-/* Intel VT MSRs */
-#define MSR_IA32_VMX_BASIC 0x00000480
-#define MSR_IA32_VMX_PINBASED_CTLS 0x00000481
-#define MSR_IA32_VMX_PROCBASED_CTLS 0x00000482
-#define MSR_IA32_VMX_EXIT_CTLS 0x00000483
-#define MSR_IA32_VMX_ENTRY_CTLS 0x00000484
-#define MSR_IA32_VMX_MISC 0x00000485
-#define MSR_IA32_VMX_CR0_FIXED0 0x00000486
-#define MSR_IA32_VMX_CR0_FIXED1 0x00000487
-#define MSR_IA32_VMX_CR4_FIXED0 0x00000488
-#define MSR_IA32_VMX_CR4_FIXED1 0x00000489
-#define MSR_IA32_VMX_VMCS_ENUM 0x0000048a
-#define MSR_IA32_VMX_PROCBASED_CTLS2 0x0000048b
-#define MSR_IA32_VMX_EPT_VPID_CAP 0x0000048c
-#define MSR_IA32_VMX_TRUE_PINBASED_CTLS 0x0000048d
-#define MSR_IA32_VMX_TRUE_PROCBASED_CTLS 0x0000048e
-#define MSR_IA32_VMX_TRUE_EXIT_CTLS 0x0000048f
-#define MSR_IA32_VMX_TRUE_ENTRY_CTLS 0x00000490
-
-/* VMX_BASIC bits and bitmasks */
-#define VMX_BASIC_VMCS_SIZE_SHIFT 32
-#define VMX_BASIC_64 0x0001000000000000LLU
-#define VMX_BASIC_MEM_TYPE_SHIFT 50
-#define VMX_BASIC_MEM_TYPE_MASK 0x003c000000000000LLU
-#define VMX_BASIC_MEM_TYPE_WB 6LLU
-#define VMX_BASIC_INOUT 0x0040000000000000LLU
-
-/* AMD-V MSRs */
-
-#define MSR_VM_CR 0xc0010114
-#define MSR_VM_IGNNE 0xc0010115
-#define MSR_VM_HSAVE_PA 0xc0010117
-
-#endif /* _ASM_X86_MSR_INDEX_H */
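For reference, the constants above keep their names as they move; they are consumed through the msr.h accessors that follow. A minimal sketch (not part of this patch) that reads IA32_MISC_ENABLE via rdmsrl() and tests one of the architectural bits defined above:

#include <linux/types.h>
#include <asm/msr.h>		/* rdmsrl()                       */
#include <asm/msr-index.h>	/* MSR_IA32_MISC_ENABLE and bits  */

static bool xd_disable_is_set(void)
{
	u64 misc;

	/* Read IA32_MISC_ENABLE and test the XD-disable bit (bit 34). */
	rdmsrl(MSR_IA32_MISC_ENABLE, misc);
	return misc & MSR_IA32_MISC_ENABLE_XD_DISABLE;
}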
diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index 95203d40ffd..de36f22eb0b 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -1,18 +1,10 @@
#ifndef _ASM_X86_MSR_H
#define _ASM_X86_MSR_H
-#include <asm/msr-index.h>
+#include <uapi/asm/msr.h>
#ifndef __ASSEMBLY__
-#include <linux/types.h>
-#include <linux/ioctl.h>
-
-#define X86_IOC_RDMSR_REGS _IOWR('c', 0xA0, __u32[8])
-#define X86_IOC_WRMSR_REGS _IOWR('c', 0xA1, __u32[8])
-
-#ifdef __KERNEL__
-
#include <asm/asm.h>
#include <asm/errno.h>
#include <asm/cpumask.h>
@@ -115,8 +107,8 @@ notrace static inline int native_write_msr_safe(unsigned int msr,
extern unsigned long long native_read_tsc(void);
-extern int native_rdmsr_safe_regs(u32 regs[8]);
-extern int native_wrmsr_safe_regs(u32 regs[8]);
+extern int rdmsr_safe_regs(u32 regs[8]);
+extern int wrmsr_safe_regs(u32 regs[8]);
static __always_inline unsigned long long __native_read_tsc(void)
{
@@ -145,11 +137,11 @@ static inline unsigned long long native_read_pmc(int counter)
* pointer indirection), this allows gcc to optimize better
*/
-#define rdmsr(msr, val1, val2) \
+#define rdmsr(msr, low, high) \
do { \
u64 __val = native_read_msr((msr)); \
- (void)((val1) = (u32)__val); \
- (void)((val2) = (u32)(__val >> 32)); \
+ (void)((low) = (u32)__val); \
+ (void)((high) = (u32)(__val >> 32)); \
} while (0)
static inline void wrmsr(unsigned msr, unsigned low, unsigned high)
@@ -169,20 +161,13 @@ static inline int wrmsr_safe(unsigned msr, unsigned low, unsigned high)
return native_write_msr_safe(msr, low, high);
}
-/*
- * rdmsr with exception handling.
- *
- * Please note that the exception handling works only after we've
- * switched to the "smart" #GP handler in trap_init() which knows about
- * exception tables - using this macro earlier than that causes machine
- * hangs on boxes which do not implement the @msr in the first argument.
- */
-#define rdmsr_safe(msr, p1, p2) \
+/* rdmsr with exception handling */
+#define rdmsr_safe(msr, low, high) \
({ \
int __err; \
u64 __val = native_read_msr_safe((msr), &__err); \
- (*p1) = (u32)__val; \
- (*p2) = (u32)(__val >> 32); \
+ (*low) = (u32)__val; \
+ (*high) = (u32)(__val >> 32); \
__err; \
})
@@ -194,43 +179,6 @@ static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
return err;
}
-static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
-{
- u32 gprs[8] = { 0 };
- int err;
-
- gprs[1] = msr;
- gprs[7] = 0x9c5a203a;
-
- err = native_rdmsr_safe_regs(gprs);
-
- *p = gprs[0] | ((u64)gprs[2] << 32);
-
- return err;
-}
-
-static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
-{
- u32 gprs[8] = { 0 };
-
- gprs[0] = (u32)val;
- gprs[1] = msr;
- gprs[2] = val >> 32;
- gprs[7] = 0x9c5a203a;
-
- return native_wrmsr_safe_regs(gprs);
-}
-
-static inline int rdmsr_safe_regs(u32 regs[8])
-{
- return native_rdmsr_safe_regs(regs);
-}
-
-static inline int wrmsr_safe_regs(u32 regs[8])
-{
- return native_wrmsr_safe_regs(regs);
-}
-
#define rdtscl(low) \
((low) = (u32)__native_read_tsc())
@@ -244,6 +192,8 @@ do { \
(high) = (u32)(_l >> 32); \
} while (0)
+#define rdpmcl(counter, val) ((val) = native_read_pmc(counter))
+
#define rdtscp(low, high, aux) \
do { \
unsigned long long _val = native_read_tscp(&(aux)); \
@@ -255,24 +205,29 @@ do { \
#endif /* !CONFIG_PARAVIRT */
-
-#define checking_wrmsrl(msr, val) wrmsr_safe((msr), (u32)(val), \
+#define wrmsrl_safe(msr, val) wrmsr_safe((msr), (u32)(val), \
(u32)((val) >> 32))
-#define write_tsc(val1, val2) wrmsr(MSR_IA32_TSC, (val1), (val2))
+#define write_tsc(low, high) wrmsr(MSR_IA32_TSC, (low), (high))
#define write_rdtscp_aux(val) wrmsr(MSR_TSC_AUX, (val), 0)
struct msr *msrs_alloc(void);
void msrs_free(struct msr *msrs);
+int msr_set_bit(u32 msr, u8 bit);
+int msr_clear_bit(u32 msr, u8 bit);
#ifdef CONFIG_SMP
int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
+int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
+int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs);
void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs);
int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
+int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
+int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
#else /* CONFIG_SMP */
@@ -286,6 +241,16 @@ static inline int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
wrmsr(msr_no, l, h);
return 0;
}
+static inline int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
+{
+ rdmsrl(msr_no, *q);
+ return 0;
+}
+static inline int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
+{
+ wrmsrl(msr_no, q);
+ return 0;
+}
static inline void rdmsr_on_cpus(const struct cpumask *m, u32 msr_no,
struct msr *msrs)
{
@@ -305,6 +270,14 @@ static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
return wrmsr_safe(msr_no, l, h);
}
+static inline int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
+{
+ return rdmsrl_safe(msr_no, q);
+}
+static inline int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
+{
+ return wrmsrl_safe(msr_no, q);
+}
static inline int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
{
return rdmsr_safe_regs(regs);
@@ -314,6 +287,5 @@ static inline int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
return wrmsr_safe_regs(regs);
}
#endif /* CONFIG_SMP */
-#endif /* __KERNEL__ */
#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_MSR_H */
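The renamed wrmsrl_safe() and the new per-CPU u64 helpers combine naturally, and msr_set_bit()/msr_clear_bit() cover the common single-bit read-modify-write on the local CPU. A sketch (assuming MSR_IA32_ENERGY_PERF_BIAS from msr-index.h, with the bias in bits 3:0) of updating one CPU's energy/performance bias while tolerating a #GP on CPUs that lack the MSR:

#include <linux/errno.h>
#include <asm/msr.h>

static int set_epb_on_cpu(unsigned int cpu, u64 epb)
{
	u64 val;
	int err;

	/* The *_safe_on_cpu() helpers return non-zero if the MSR faults. */
	err = rdmsrl_safe_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, &val);
	if (err)
		return err;

	val = (val & ~0xfULL) | (epb & 0xf);
	return wrmsrl_safe_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, val);
}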
diff --git a/arch/x86/include/asm/mtrr.h b/arch/x86/include/asm/mtrr.h
index 4365ffdb461..f768f629841 100644
--- a/arch/x86/include/asm/mtrr.h
+++ b/arch/x86/include/asm/mtrr.h
@@ -23,91 +23,13 @@
#ifndef _ASM_X86_MTRR_H
#define _ASM_X86_MTRR_H
-#include <linux/types.h>
-#include <linux/ioctl.h>
-#include <linux/errno.h>
+#include <uapi/asm/mtrr.h>
-#define MTRR_IOCTL_BASE 'M'
-struct mtrr_sentry {
- unsigned long base; /* Base address */
- unsigned int size; /* Size of region */
- unsigned int type; /* Type of region */
-};
-
-/* Warning: this structure has a different order from i386
- on x86-64. The 32bit emulation code takes care of that.
- But you need to use this for 64bit, otherwise your X server
- will break. */
-
-#ifdef __i386__
-struct mtrr_gentry {
- unsigned int regnum; /* Register number */
- unsigned long base; /* Base address */
- unsigned int size; /* Size of region */
- unsigned int type; /* Type of region */
-};
-
-#else /* __i386__ */
-
-struct mtrr_gentry {
- unsigned long base; /* Base address */
- unsigned int size; /* Size of region */
- unsigned int regnum; /* Register number */
- unsigned int type; /* Type of region */
-};
-#endif /* !__i386__ */
-
-struct mtrr_var_range {
- __u32 base_lo;
- __u32 base_hi;
- __u32 mask_lo;
- __u32 mask_hi;
-};
-
-/* In the Intel processor's MTRR interface, the MTRR type is always held in
- an 8 bit field: */
-typedef __u8 mtrr_type;
-
-#define MTRR_NUM_FIXED_RANGES 88
-#define MTRR_MAX_VAR_RANGES 256
-
-struct mtrr_state_type {
- struct mtrr_var_range var_ranges[MTRR_MAX_VAR_RANGES];
- mtrr_type fixed_ranges[MTRR_NUM_FIXED_RANGES];
- unsigned char enabled;
- unsigned char have_fixed;
- mtrr_type def_type;
-};
-
-#define MTRRphysBase_MSR(reg) (0x200 + 2 * (reg))
-#define MTRRphysMask_MSR(reg) (0x200 + 2 * (reg) + 1)
-
-/* These are the various ioctls */
-#define MTRRIOC_ADD_ENTRY _IOW(MTRR_IOCTL_BASE, 0, struct mtrr_sentry)
-#define MTRRIOC_SET_ENTRY _IOW(MTRR_IOCTL_BASE, 1, struct mtrr_sentry)
-#define MTRRIOC_DEL_ENTRY _IOW(MTRR_IOCTL_BASE, 2, struct mtrr_sentry)
-#define MTRRIOC_GET_ENTRY _IOWR(MTRR_IOCTL_BASE, 3, struct mtrr_gentry)
-#define MTRRIOC_KILL_ENTRY _IOW(MTRR_IOCTL_BASE, 4, struct mtrr_sentry)
-#define MTRRIOC_ADD_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 5, struct mtrr_sentry)
-#define MTRRIOC_SET_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 6, struct mtrr_sentry)
-#define MTRRIOC_DEL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 7, struct mtrr_sentry)
-#define MTRRIOC_GET_PAGE_ENTRY _IOWR(MTRR_IOCTL_BASE, 8, struct mtrr_gentry)
-#define MTRRIOC_KILL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 9, struct mtrr_sentry)
-
-/* These are the region types */
-#define MTRR_TYPE_UNCACHABLE 0
-#define MTRR_TYPE_WRCOMB 1
-/*#define MTRR_TYPE_ 2*/
-/*#define MTRR_TYPE_ 3*/
-#define MTRR_TYPE_WRTHROUGH 4
-#define MTRR_TYPE_WRPROT 5
-#define MTRR_TYPE_WRBACK 6
-#define MTRR_NUM_TYPES 7
-
-#ifdef __KERNEL__
-
-/* The following functions are for use by other drivers */
+/*
+ * The following functions are for use by other drivers that cannot use
+ * arch_phys_wc_add and arch_phys_wc_del.
+ */
# ifdef CONFIG_MTRR
extern u8 mtrr_type_lookup(u64 addr, u64 end);
extern void mtrr_save_fixed_ranges(void *);
@@ -126,6 +48,7 @@ extern void mtrr_aps_init(void);
extern void mtrr_bp_restore(void);
extern int mtrr_trim_uncached_memory(unsigned long end_pfn);
extern int amd_special_default_mtrr(void);
+extern int phys_wc_to_mtrr_index(int handle);
# else
static inline u8 mtrr_type_lookup(u64 addr, u64 end)
{
@@ -161,6 +84,10 @@ static inline int mtrr_trim_uncached_memory(unsigned long end_pfn)
static inline void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi)
{
}
+static inline int phys_wc_to_mtrr_index(int handle)
+{
+ return -1;
+}
#define mtrr_ap_init() do {} while (0)
#define mtrr_bp_init() do {} while (0)
@@ -200,6 +127,4 @@ struct mtrr_gentry32 {
_IOW(MTRR_IOCTL_BASE, 9, struct mtrr_sentry32)
#endif /* CONFIG_COMPAT */
-#endif /* __KERNEL__ */
-
#endif /* _ASM_X86_MTRR_H */
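The replacement comment points drivers at the generic write-combining helpers rather than raw MTRR manipulation. A hedged sketch for a framebuffer aperture, assuming the arch_phys_wc_add()/arch_phys_wc_del() prototypes exported from <linux/io.h>:

#include <linux/io.h>

static int fb_wc_handle;

static void fb_request_wc(unsigned long base, unsigned long size)
{
	/*
	 * Returns a handle to pass back to arch_phys_wc_del(); on systems
	 * without usable MTRR/PAT support it degrades gracefully.
	 */
	fb_wc_handle = arch_phys_wc_add(base, size);
}

static void fb_release_wc(void)
{
	arch_phys_wc_del(fb_wc_handle);
}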
diff --git a/arch/x86/include/asm/mutex.h b/arch/x86/include/asm/mutex.h
index a731b9c573a..7d3a4827539 100644
--- a/arch/x86/include/asm/mutex.h
+++ b/arch/x86/include/asm/mutex.h
@@ -1,5 +1,5 @@
#ifdef CONFIG_X86_32
-# include "mutex_32.h"
+# include <asm/mutex_32.h>
#else
-# include "mutex_64.h"
+# include <asm/mutex_64.h>
#endif
diff --git a/arch/x86/include/asm/mutex_32.h b/arch/x86/include/asm/mutex_32.h
index 03f90c8a5a7..0208c3c2cbc 100644
--- a/arch/x86/include/asm/mutex_32.h
+++ b/arch/x86/include/asm/mutex_32.h
@@ -42,17 +42,14 @@ do { \
* __mutex_fastpath_lock_retval - try to take the lock by moving the count
* from 1 to a 0 value
* @count: pointer of type atomic_t
- * @fail_fn: function to call if the original value was not 1
*
- * Change the count from 1 to a value lower than 1, and call <fail_fn> if it
- * wasn't 1 originally. This function returns 0 if the fastpath succeeds,
- * or anything the slow path function returns
+ * Change the count from 1 to a value lower than 1. This function returns 0
+ * if the fastpath succeeds, or -1 otherwise.
*/
-static inline int __mutex_fastpath_lock_retval(atomic_t *count,
- int (*fail_fn)(atomic_t *))
+static inline int __mutex_fastpath_lock_retval(atomic_t *count)
{
if (unlikely(atomic_dec_return(count) < 0))
- return fail_fn(count);
+ return -1;
else
return 0;
}
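With the fail_fn argument gone, choosing the slow path is now the caller's job. A sketch of the expected calling convention (the slow-path symbol below is a stand-in for the real one in kernel/mutex.c):

#include <linux/kernel.h>
#include <linux/mutex.h>
#include <asm/mutex.h>

/* Stand-in for the real interruptible slow path in kernel/mutex.c. */
extern int my_mutex_lock_interruptible_slowpath(struct mutex *lock);

static int mutex_lock_interruptible_sketch(struct mutex *lock)
{
	might_sleep();
	/* -1 from the fastpath means "contended": let the slow path decide. */
	if (__mutex_fastpath_lock_retval(&lock->count) < 0)
		return my_mutex_lock_interruptible_slowpath(lock);
	return 0;
}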
diff --git a/arch/x86/include/asm/mutex_64.h b/arch/x86/include/asm/mutex_64.h
index 68a87b0f8e2..07537a44216 100644
--- a/arch/x86/include/asm/mutex_64.h
+++ b/arch/x86/include/asm/mutex_64.h
@@ -16,6 +16,20 @@
*
* Atomically decrements @v and calls <fail_fn> if the result is negative.
*/
+#ifdef CC_HAVE_ASM_GOTO
+static inline void __mutex_fastpath_lock(atomic_t *v,
+ void (*fail_fn)(atomic_t *))
+{
+ asm_volatile_goto(LOCK_PREFIX " decl %0\n"
+ " jns %l[exit]\n"
+ : : "m" (v->counter)
+ : "memory", "cc"
+ : exit);
+ fail_fn(v);
+exit:
+ return;
+}
+#else
#define __mutex_fastpath_lock(v, fail_fn) \
do { \
unsigned long dummy; \
@@ -32,22 +46,20 @@ do { \
: "rax", "rsi", "rdx", "rcx", \
"r8", "r9", "r10", "r11", "memory"); \
} while (0)
+#endif
/**
* __mutex_fastpath_lock_retval - try to take the lock by moving the count
* from 1 to a 0 value
* @count: pointer of type atomic_t
- * @fail_fn: function to call if the original value was not 1
*
- * Change the count from 1 to a value lower than 1, and call <fail_fn> if
- * it wasn't 1 originally. This function returns 0 if the fastpath succeeds,
- * or anything the slow path function returns
+ * Change the count from 1 to a value lower than 1. This function returns 0
+ * if the fastpath succeeds, or -1 otherwise.
*/
-static inline int __mutex_fastpath_lock_retval(atomic_t *count,
- int (*fail_fn)(atomic_t *))
+static inline int __mutex_fastpath_lock_retval(atomic_t *count)
{
if (unlikely(atomic_dec_return(count) < 0))
- return fail_fn(count);
+ return -1;
else
return 0;
}
@@ -59,6 +71,20 @@ static inline int __mutex_fastpath_lock_retval(atomic_t *count,
*
* Atomically increments @v and calls <fail_fn> if the result is nonpositive.
*/
+#ifdef CC_HAVE_ASM_GOTO
+static inline void __mutex_fastpath_unlock(atomic_t *v,
+ void (*fail_fn)(atomic_t *))
+{
+ asm_volatile_goto(LOCK_PREFIX " incl %0\n"
+ " jg %l[exit]\n"
+ : : "m" (v->counter)
+ : "memory", "cc"
+ : exit);
+ fail_fn(v);
+exit:
+ return;
+}
+#else
#define __mutex_fastpath_unlock(v, fail_fn) \
do { \
unsigned long dummy; \
@@ -75,6 +101,7 @@ do { \
: "rax", "rsi", "rdx", "rcx", \
"r8", "r9", "r10", "r11", "memory"); \
} while (0)
+#endif
#define __mutex_slowpath_needs_to_unlock() 1
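The asm-goto variants branch past the slow-path call while still inside the asm, so the uncontended case needs neither the large clobber list nor a CALL; the call site in the generic mutex code is the same either way. A sketch of that call site (the slow-path symbol is a stand-in for the real one in kernel/mutex.c):

#include <linux/mutex.h>
#include <asm/mutex.h>

/* Stand-in for the real __mutex_lock_slowpath() in kernel/mutex.c. */
extern void my_mutex_lock_slowpath(atomic_t *lock_count);

static void mutex_lock_sketch(struct mutex *lock)
{
	/*
	 * Uncontended: LOCK DECL leaves the count non-negative, and either
	 * the asm-goto jump or the out-of-line fallback skips the slow path.
	 */
	__mutex_fastpath_lock(&lock->count, my_mutex_lock_slowpath);
}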
diff --git a/arch/x86/include/asm/mwait.h b/arch/x86/include/asm/mwait.h
index bcdff997668..1da25a5f96f 100644
--- a/arch/x86/include/asm/mwait.h
+++ b/arch/x86/include/asm/mwait.h
@@ -1,10 +1,13 @@
#ifndef _ASM_X86_MWAIT_H
#define _ASM_X86_MWAIT_H
+#include <linux/sched.h>
+
#define MWAIT_SUBSTATE_MASK 0xf
#define MWAIT_CSTATE_MASK 0xf
#define MWAIT_SUBSTATE_SIZE 4
-#define MWAIT_MAX_NUM_CSTATES 8
+#define MWAIT_HINT2CSTATE(hint) (((hint) >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK)
+#define MWAIT_HINT2SUBSTATE(hint) ((hint) & MWAIT_CSTATE_MASK)
#define CPUID_MWAIT_LEAF 5
#define CPUID5_ECX_EXTENSIONS_SUPPORTED 0x1
@@ -12,4 +15,45 @@
#define MWAIT_ECX_INTERRUPT_BREAK 0x1
+static inline void __monitor(const void *eax, unsigned long ecx,
+ unsigned long edx)
+{
+ /* "monitor %eax, %ecx, %edx;" */
+ asm volatile(".byte 0x0f, 0x01, 0xc8;"
+ :: "a" (eax), "c" (ecx), "d"(edx));
+}
+
+static inline void __mwait(unsigned long eax, unsigned long ecx)
+{
+ /* "mwait %eax, %ecx;" */
+ asm volatile(".byte 0x0f, 0x01, 0xc9;"
+ :: "a" (eax), "c" (ecx));
+}
+
+/*
+ * This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
+ * which can obviate IPI to trigger checking of need_resched.
+ * We execute MONITOR against need_resched and enter optimized wait state
+ * through MWAIT. Whenever someone changes need_resched, we would be woken
+ * up from MWAIT (without an IPI).
+ *
+ * New with Core Duo processors, MWAIT can take some hints based on CPU
+ * capability.
+ */
+static inline void mwait_idle_with_hints(unsigned long eax, unsigned long ecx)
+{
+ if (!current_set_polling_and_test()) {
+ if (static_cpu_has(X86_FEATURE_CLFLUSH_MONITOR)) {
+ mb();
+ clflush((void *)&current_thread_info()->flags);
+ mb();
+ }
+
+ __monitor((void *)&current_thread_info()->flags, 0, 0);
+ if (!need_resched())
+ __mwait(eax, ecx);
+ }
+ current_clr_polling();
+}
+
#endif /* _ASM_X86_MWAIT_H */
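mwait_idle_with_hints() takes the raw MWAIT hint in eax (the encoding the HINT2* macros above decode: C-state index in the upper nibble, sub-state in the lower) and the break-on-interrupt flag in ecx. A sketch of an idle-driver caller, assuming MWAIT support has already been validated via CPUID leaf 5:

#include <asm/mwait.h>

static void enter_mwait_cstate(unsigned int cstate, unsigned int substate)
{
	unsigned long hint = (cstate << MWAIT_SUBSTATE_SIZE) |
			     (substate & MWAIT_SUBSTATE_MASK);

	/* Wake on interrupt even with IRQs masked, as intel_idle does. */
	mwait_idle_with_hints(hint, MWAIT_ECX_INTERRUPT_BREAK);
}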
diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h
index fd3f9f18cf3..5f2fc4441b1 100644
--- a/arch/x86/include/asm/nmi.h
+++ b/arch/x86/include/asm/nmi.h
@@ -1,6 +1,7 @@
#ifndef _ASM_X86_NMI_H
#define _ASM_X86_NMI_H
+#include <linux/irq_work.h>
#include <linux/pm.h>
#include <asm/irq.h>
#include <asm/io.h>
@@ -18,15 +19,15 @@ extern int proc_nmi_enabled(struct ctl_table *, int ,
void __user *, size_t *, loff_t *);
extern int unknown_nmi_panic;
-void arch_trigger_all_cpu_backtrace(void);
-#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
-#endif
+#endif /* CONFIG_X86_LOCAL_APIC */
#define NMI_FLAG_FIRST 1
enum {
NMI_LOCAL=0,
NMI_UNKNOWN,
+ NMI_SERR,
+ NMI_IO_CHECK,
NMI_MAX
};
@@ -35,8 +36,26 @@ enum {
typedef int (*nmi_handler_t)(unsigned int, struct pt_regs *);
-int register_nmi_handler(unsigned int, nmi_handler_t, unsigned long,
- const char *);
+struct nmiaction {
+ struct list_head list;
+ nmi_handler_t handler;
+ u64 max_duration;
+ struct irq_work irq_work;
+ unsigned long flags;
+ const char *name;
+};
+
+#define register_nmi_handler(t, fn, fg, n, init...) \
+({ \
+ static struct nmiaction init fn##_na = { \
+ .handler = (fn), \
+ .name = (n), \
+ .flags = (fg), \
+ }; \
+ __register_nmi_handler((t), &fn##_na); \
+})
+
+int __register_nmi_handler(unsigned int, struct nmiaction *);
void unregister_nmi_handler(unsigned int, const char *);
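register_nmi_handler() now instantiates the struct nmiaction statically at the call site, so registration involves no allocation. A sketch of a module hooking the local-NMI chain first (return convention assumed: 0 = not ours, 1 = handled, matching NMI_DONE/NMI_HANDLED in this era's headers):

#include <linux/module.h>
#include <asm/nmi.h>

static unsigned long my_nmi_count;

static int my_nmi_handler(unsigned int type, struct pt_regs *regs)
{
	my_nmi_count++;
	return 0;	/* not ours: let the rest of the chain run */
}

static int __init my_nmi_init(void)
{
	/* Expands to a static struct nmiaction named my_nmi_handler_na. */
	return register_nmi_handler(NMI_LOCAL, my_nmi_handler,
				    NMI_FLAG_FIRST, "my_nmi");
}

static void __exit my_nmi_exit(void)
{
	unregister_nmi_handler(NMI_LOCAL, "my_nmi");
}

module_init(my_nmi_init);
module_exit(my_nmi_exit);
MODULE_LICENSE("GPL");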
diff --git a/arch/x86/include/asm/nops.h b/arch/x86/include/asm/nops.h
index 405b4032a60..aff2b335610 100644
--- a/arch/x86/include/asm/nops.h
+++ b/arch/x86/include/asm/nops.h
@@ -87,7 +87,11 @@
#define P6_NOP8 0x0f,0x1f,0x84,0x00,0,0,0,0
#define P6_NOP5_ATOMIC P6_NOP5
+#ifdef __ASSEMBLY__
+#define _ASM_MK_NOP(x) .byte x
+#else
#define _ASM_MK_NOP(x) ".byte " __stringify(x) "\n"
+#endif
#if defined(CONFIG_MK7)
#define ASM_NOP1 _ASM_MK_NOP(K7_NOP1)
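With the __ASSEMBLY__ branch, the same NOP tables now serve both .S files (bare .byte directives) and C (string literals for inline asm). A one-line consumer on the C side, for illustration only:

#include <asm/nops.h>

static inline void one_ideal_nop(void)
{
	/* ASM_NOP4 expands to ".byte ..." here, or to a bare .byte in .S files. */
	asm volatile(ASM_NOP4);
}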
diff --git a/arch/x86/include/asm/numa.h b/arch/x86/include/asm/numa.h
index bfacd2ccf65..4064acae625 100644
--- a/arch/x86/include/asm/numa.h
+++ b/arch/x86/include/asm/numa.h
@@ -39,7 +39,7 @@ static inline void set_apicid_to_node(int apicid, s16 node)
__apicid_to_node[apicid] = node;
}
-extern int __cpuinit numa_cpu_node(int cpu);
+extern int numa_cpu_node(int cpu);
#else /* CONFIG_NUMA */
static inline void set_apicid_to_node(int apicid, s16 node)
@@ -53,17 +53,15 @@ static inline int numa_cpu_node(int cpu)
#endif /* CONFIG_NUMA */
#ifdef CONFIG_X86_32
-# include "numa_32.h"
-#else
-# include "numa_64.h"
+# include <asm/numa_32.h>
#endif
#ifdef CONFIG_NUMA
-extern void __cpuinit numa_set_node(int cpu, int node);
-extern void __cpuinit numa_clear_node(int cpu);
+extern void numa_set_node(int cpu, int node);
+extern void numa_clear_node(int cpu);
extern void __init init_cpu_to_node(void);
-extern void __cpuinit numa_add_cpu(int cpu);
-extern void __cpuinit numa_remove_cpu(int cpu);
+extern void numa_add_cpu(int cpu);
+extern void numa_remove_cpu(int cpu);
#else /* CONFIG_NUMA */
static inline void numa_set_node(int cpu, int node) { }
static inline void numa_clear_node(int cpu) { }
diff --git a/arch/x86/include/asm/numa_64.h b/arch/x86/include/asm/numa_64.h
deleted file mode 100644
index 0c05f7ae46e..00000000000
--- a/arch/x86/include/asm/numa_64.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _ASM_X86_NUMA_64_H
-#define _ASM_X86_NUMA_64_H
-
-extern unsigned long numa_free_all_bootmem(void);
-
-#endif /* _ASM_X86_NUMA_64_H */
diff --git a/arch/x86/include/asm/numachip/numachip.h b/arch/x86/include/asm/numachip/numachip.h
new file mode 100644
index 00000000000..1c6f7f6212c
--- /dev/null
+++ b/arch/x86/include/asm/numachip/numachip.h
@@ -0,0 +1,19 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Numascale NumaConnect-specific header file
+ *
+ * Copyright (C) 2012 Numascale AS. All rights reserved.
+ *
+ * Send feedback to <support@numascale.com>
+ *
+ */
+
+#ifndef _ASM_X86_NUMACHIP_NUMACHIP_H
+#define _ASM_X86_NUMACHIP_NUMACHIP_H
+
+extern int __init pci_numachip_init(void);
+
+#endif /* _ASM_X86_NUMACHIP_NUMACHIP_H */
diff --git a/arch/x86/include/asm/numaq.h b/arch/x86/include/asm/numaq.h
deleted file mode 100644
index c3b3c322fd8..00000000000
--- a/arch/x86/include/asm/numaq.h
+++ /dev/null
@@ -1,171 +0,0 @@
-/*
- * Written by: Patricia Gaughen, IBM Corporation
- *
- * Copyright (C) 2002, IBM Corp.
- *
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- * Send feedback to <gone@us.ibm.com>
- */
-
-#ifndef _ASM_X86_NUMAQ_H
-#define _ASM_X86_NUMAQ_H
-
-#ifdef CONFIG_X86_NUMAQ
-
-extern int found_numaq;
-extern int numaq_numa_init(void);
-extern int pci_numaq_init(void);
-
-extern void *xquad_portio;
-
-#define XQUAD_PORTIO_BASE 0xfe400000
-#define XQUAD_PORTIO_QUAD 0x40000 /* 256k per quad. */
-#define XQUAD_PORT_ADDR(port, quad) (xquad_portio + (XQUAD_PORTIO_QUAD*quad) + port)
-
-/*
- * SYS_CFG_DATA_PRIV_ADDR, struct eachquadmem, and struct sys_cfg_data are the
- */
-#define SYS_CFG_DATA_PRIV_ADDR 0x0009d000 /* place for scd in private
- quad space */
-
-/*
- * Communication area for each processor on lynxer-processor tests.
- *
- * NOTE: If you change the size of this eachproc structure you need
- * to change the definition for EACH_QUAD_SIZE.
- */
-struct eachquadmem {
- unsigned int priv_mem_start; /* Starting address of this */
- /* quad's private memory. */
- /* This is always 0. */
- /* In MB. */
- unsigned int priv_mem_size; /* Size of this quad's */
- /* private memory. */
- /* In MB. */
- unsigned int low_shrd_mem_strp_start;/* Starting address of this */
- /* quad's low shared block */
- /* (untranslated). */
- /* In MB. */
- unsigned int low_shrd_mem_start; /* Starting address of this */
- /* quad's low shared memory */
- /* (untranslated). */
- /* In MB. */
- unsigned int low_shrd_mem_size; /* Size of this quad's low */
- /* shared memory. */
- /* In MB. */
- unsigned int lmmio_copb_start; /* Starting address of this */
- /* quad's local memory */
- /* mapped I/O in the */
- /* compatibility OPB. */
- /* In MB. */
- unsigned int lmmio_copb_size; /* Size of this quad's local */
- /* memory mapped I/O in the */
- /* compatibility OPB. */
- /* In MB. */
- unsigned int lmmio_nopb_start; /* Starting address of this */
- /* quad's local memory */
- /* mapped I/O in the */
- /* non-compatibility OPB. */
- /* In MB. */
- unsigned int lmmio_nopb_size; /* Size of this quad's local */
- /* memory mapped I/O in the */
- /* non-compatibility OPB. */
- /* In MB. */
- unsigned int io_apic_0_start; /* Starting address of I/O */
- /* APIC 0. */
- unsigned int io_apic_0_sz; /* Size I/O APIC 0. */
- unsigned int io_apic_1_start; /* Starting address of I/O */
- /* APIC 1. */
- unsigned int io_apic_1_sz; /* Size I/O APIC 1. */
- unsigned int hi_shrd_mem_start; /* Starting address of this */
- /* quad's high shared memory.*/
- /* In MB. */
- unsigned int hi_shrd_mem_size; /* Size of this quad's high */
- /* shared memory. */
- /* In MB. */
- unsigned int mps_table_addr; /* Address of this quad's */
- /* MPS tables from BIOS, */
- /* in system space.*/
- unsigned int lcl_MDC_pio_addr; /* Port-I/O address for */
- /* local access of MDC. */
- unsigned int rmt_MDC_mmpio_addr; /* MM-Port-I/O address for */
- /* remote access of MDC. */
- unsigned int mm_port_io_start; /* Starting address of this */
- /* quad's memory mapped Port */
- /* I/O space. */
- unsigned int mm_port_io_size; /* Size of this quad's memory*/
- /* mapped Port I/O space. */
- unsigned int mm_rmt_io_apic_start; /* Starting address of this */
- /* quad's memory mapped */
- /* remote I/O APIC space. */
- unsigned int mm_rmt_io_apic_size; /* Size of this quad's memory*/
- /* mapped remote I/O APIC */
- /* space. */
- unsigned int mm_isa_start; /* Starting address of this */
- /* quad's memory mapped ISA */
- /* space (contains MDC */
- /* memory space). */
- unsigned int mm_isa_size; /* Size of this quad's memory*/
- /* mapped ISA space (contains*/
- /* MDC memory space). */
- unsigned int rmt_qmi_addr; /* Remote addr to access QMI.*/
- unsigned int lcl_qmi_addr; /* Local addr to access QMI. */
-};
-
-/*
- * Note: This structure must be NOT be changed unless the multiproc and
- * OS are changed to reflect the new structure.
- */
-struct sys_cfg_data {
- unsigned int quad_id;
- unsigned int bsp_proc_id; /* Boot Strap Processor in this quad. */
- unsigned int scd_version; /* Version number of this table. */
- unsigned int first_quad_id;
- unsigned int quads_present31_0; /* 1 bit for each quad */
- unsigned int quads_present63_32; /* 1 bit for each quad */
- unsigned int config_flags;
- unsigned int boot_flags;
- unsigned int csr_start_addr; /* Absolute value (not in MB) */
- unsigned int csr_size; /* Absolute value (not in MB) */
- unsigned int lcl_apic_start_addr; /* Absolute value (not in MB) */
- unsigned int lcl_apic_size; /* Absolute value (not in MB) */
- unsigned int low_shrd_mem_base; /* 0 or 512MB or 1GB */
- unsigned int low_shrd_mem_quad_offset; /* 0,128M,256M,512M,1G */
- /* may not be totally populated */
- unsigned int split_mem_enbl; /* 0 for no low shared memory */
- unsigned int mmio_sz; /* Size of total system memory mapped I/O */
- /* (in MB). */
- unsigned int quad_spin_lock; /* Spare location used for quad */
- /* bringup. */
- unsigned int nonzero55; /* For checksumming. */
- unsigned int nonzeroaa; /* For checksumming. */
- unsigned int scd_magic_number;
- unsigned int system_type;
- unsigned int checksum;
- /*
- * memory configuration area for each quad
- */
- struct eachquadmem eq[MAX_NUMNODES]; /* indexed by quad id */
-};
-
-void numaq_tsc_disable(void);
-
-#endif /* CONFIG_X86_NUMAQ */
-#endif /* _ASM_X86_NUMAQ_H */
-
diff --git a/arch/x86/include/asm/olpc.h b/arch/x86/include/asm/olpc.h
index 87bdbca72f9..72f9adf6eca 100644
--- a/arch/x86/include/asm/olpc.h
+++ b/arch/x86/include/asm/olpc.h
@@ -100,25 +100,6 @@ extern void olpc_xo1_pm_wakeup_clear(u16 value);
extern int pci_olpc_init(void);
-/* EC related functions */
-
-extern int olpc_ec_cmd(unsigned char cmd, unsigned char *inbuf, size_t inlen,
- unsigned char *outbuf, size_t outlen);
-
-/* EC commands */
-
-#define EC_FIRMWARE_REV 0x08
-#define EC_WRITE_SCI_MASK 0x1b
-#define EC_WAKE_UP_WLAN 0x24
-#define EC_WLAN_LEAVE_RESET 0x25
-#define EC_READ_EB_MODE 0x2a
-#define EC_SET_SCI_INHIBIT 0x32
-#define EC_SET_SCI_INHIBIT_RELEASE 0x34
-#define EC_WLAN_ENTER_RESET 0x35
-#define EC_WRITE_EXT_SCI_MASK 0x38
-#define EC_SCI_QUERY 0x84
-#define EC_EXT_SCI_QUERY 0x85
-
/* SCI source values */
#define EC_SCI_SRC_EMPTY 0x00
diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h
index 8ca82839288..775873d3be5 100644
--- a/arch/x86/include/asm/page.h
+++ b/arch/x86/include/asm/page.h
@@ -17,6 +17,10 @@
struct page;
+#include <linux/range.h>
+extern struct range pfn_mapped[];
+extern int nr_pfn_mapped;
+
static inline void clear_user_page(void *page, unsigned long vaddr,
struct page *pg)
{
@@ -44,7 +48,8 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
* case properly. Once all supported versions of gcc understand it, we can
* remove this Voodoo magic stuff. (i.e. once gcc3.x is deprecated)
*/
-#define __pa_symbol(x) __pa(__phys_reloc_hide((unsigned long)(x)))
+#define __pa_symbol(x) \
+ __phys_addr_symbol(__phys_reloc_hide((unsigned long)(x)))
#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
@@ -66,6 +71,7 @@ extern bool __virt_addr_valid(unsigned long kaddr);
#include <asm-generic/getorder.h>
#define __HAVE_ARCH_GATE_AREA 1
+#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
#endif /* __KERNEL__ */
#endif /* _ASM_X86_PAGE_H */
diff --git a/arch/x86/include/asm/page_32.h b/arch/x86/include/asm/page_32.h
index da4e762406f..904f528cc8e 100644
--- a/arch/x86/include/asm/page_32.h
+++ b/arch/x86/include/asm/page_32.h
@@ -5,16 +5,13 @@
#ifndef __ASSEMBLY__
-#ifdef CONFIG_HUGETLB_PAGE
-#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
-#endif
-
#define __phys_addr_nodebug(x) ((x) - PAGE_OFFSET)
#ifdef CONFIG_DEBUG_VIRTUAL
extern unsigned long __phys_addr(unsigned long);
#else
#define __phys_addr(x) __phys_addr_nodebug(x)
#endif
+#define __phys_addr_symbol(x) __phys_addr(x)
#define __phys_reloc_hide(x) RELOC_HIDE((x), 0)
#ifdef CONFIG_FLATMEM
diff --git a/arch/x86/include/asm/page_32_types.h b/arch/x86/include/asm/page_32_types.h
index ade619ff9e2..f48b17df422 100644
--- a/arch/x86/include/asm/page_32_types.h
+++ b/arch/x86/include/asm/page_32_types.h
@@ -15,8 +15,10 @@
*/
#define __PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL)
-#define THREAD_ORDER 1
-#define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER)
+#define __START_KERNEL_map __PAGE_OFFSET
+
+#define THREAD_SIZE_ORDER 1
+#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
#define STACKFAULT_STACK 0
#define DOUBLEFAULT_STACK 1
diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h
index 072694ed81a..0f1ddee6a0c 100644
--- a/arch/x86/include/asm/page_64.h
+++ b/arch/x86/include/asm/page_64.h
@@ -3,4 +3,40 @@
#include <asm/page_64_types.h>
+#ifndef __ASSEMBLY__
+
+/* duplicated to the one in bootmem.h */
+extern unsigned long max_pfn;
+extern unsigned long phys_base;
+
+static inline unsigned long __phys_addr_nodebug(unsigned long x)
+{
+ unsigned long y = x - __START_KERNEL_map;
+
+ /* use the carry flag to determine if x was < __START_KERNEL_map */
+ x = y + ((x > y) ? phys_base : (__START_KERNEL_map - PAGE_OFFSET));
+
+ return x;
+}
+
+#ifdef CONFIG_DEBUG_VIRTUAL
+extern unsigned long __phys_addr(unsigned long);
+extern unsigned long __phys_addr_symbol(unsigned long);
+#else
+#define __phys_addr(x) __phys_addr_nodebug(x)
+#define __phys_addr_symbol(x) \
+ ((unsigned long)(x) - __START_KERNEL_map + phys_base)
+#endif
+
+#define __phys_reloc_hide(x) (x)
+
+#ifdef CONFIG_FLATMEM
+#define pfn_valid(pfn) ((pfn) < max_pfn)
+#endif
+
+void clear_page(void *page);
+void copy_page(void *to, void *from);
+
+#endif /* !__ASSEMBLY__ */
+
#endif /* _ASM_X86_PAGE_64_H */
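The carry-flag trick in __phys_addr_nodebug() is worth a worked example; the numbers below are illustrative only (they assume the default __START_KERNEL_map/PAGE_OFFSET values from page_64_types.h and a phys_base of 16MB):

/*
 * Worked example of __phys_addr_nodebug():
 *   __START_KERNEL_map = 0xffffffff80000000
 *   PAGE_OFFSET        = 0xffff880000000000
 *   phys_base          = 0x0000000001000000  (kernel loaded 16MB up)
 *
 * 1) Kernel-text address x = 0xffffffff80100000:
 *      y = x - __START_KERNEL_map = 0x100000   (no borrow, so x > y)
 *      phys = y + phys_base       = 0x1100000
 *
 * 2) Direct-map address x = 0xffff880012345000:
 *      y = x - __START_KERNEL_map wraps around, so x > y is false
 *      phys = y + (__START_KERNEL_map - PAGE_OFFSET)
 *           = x - PAGE_OFFSET     = 0x12345000
 *
 * The (x > y) comparison reuses the borrow from the subtraction to decide
 * which offset applies, avoiding an explicit compare against the 64-bit
 * __START_KERNEL_map constant.
 */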
diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
index 7639dbf5d22..678205195ae 100644
--- a/arch/x86/include/asm/page_64_types.h
+++ b/arch/x86/include/asm/page_64_types.h
@@ -1,8 +1,8 @@
#ifndef _ASM_X86_PAGE_64_DEFS_H
#define _ASM_X86_PAGE_64_DEFS_H
-#define THREAD_ORDER 1
-#define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER)
+#define THREAD_SIZE_ORDER 2
+#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
#define CURRENT_MASK (~(THREAD_SIZE - 1))
#define EXCEPTION_STACK_ORDER 0
@@ -32,11 +32,6 @@
*/
#define __PAGE_OFFSET _AC(0xffff880000000000, UL)
-#define __PHYSICAL_START ((CONFIG_PHYSICAL_START + \
- (CONFIG_PHYSICAL_ALIGN - 1)) & \
- ~(CONFIG_PHYSICAL_ALIGN - 1))
-
-#define __START_KERNEL (__START_KERNEL_map + __PHYSICAL_START)
#define __START_KERNEL_map _AC(0xffffffff80000000, UL)
/* See Documentation/x86/x86_64/mm.txt for a description of the memory map. */
@@ -44,32 +39,18 @@
#define __VIRTUAL_MASK_SHIFT 47
/*
- * Kernel image size is limited to 512 MB (see level2_kernel_pgt in
- * arch/x86/kernel/head_64.S), and it is mapped here:
+ * Kernel image size is limited to 1GiB due to the fixmap living in the
+ * next 1GiB (see level2_kernel_pgt in arch/x86/kernel/head_64.S). Use
+ * 512MiB by default, leaving 1.5GiB for modules once the page tables
+ * are fully set up. If kernel ASLR is configured, it can extend the
+ * kernel page table mapping, reducing the size of the modules area.
*/
-#define KERNEL_IMAGE_SIZE (512 * 1024 * 1024)
-#define KERNEL_IMAGE_START _AC(0xffffffff80000000, UL)
-
-#ifndef __ASSEMBLY__
-void clear_page(void *page);
-void copy_page(void *to, void *from);
-
-/* duplicated to the one in bootmem.h */
-extern unsigned long max_pfn;
-extern unsigned long phys_base;
-
-extern unsigned long __phys_addr(unsigned long);
-#define __phys_reloc_hide(x) (x)
-
-#define vmemmap ((struct page *)VMEMMAP_START)
-
-extern void init_extra_mapping_uc(unsigned long phys, unsigned long size);
-extern void init_extra_mapping_wb(unsigned long phys, unsigned long size);
-
-#endif /* !__ASSEMBLY__ */
-
-#ifdef CONFIG_FLATMEM
-#define pfn_valid(pfn) ((pfn) < max_pfn)
+#define KERNEL_IMAGE_SIZE_DEFAULT (512 * 1024 * 1024)
+#if defined(CONFIG_RANDOMIZE_BASE) && \
+ CONFIG_RANDOMIZE_BASE_MAX_OFFSET > KERNEL_IMAGE_SIZE_DEFAULT
+#define KERNEL_IMAGE_SIZE CONFIG_RANDOMIZE_BASE_MAX_OFFSET
+#else
+#define KERNEL_IMAGE_SIZE KERNEL_IMAGE_SIZE_DEFAULT
#endif
#endif /* _ASM_X86_PAGE_64_DEFS_H */
diff --git a/arch/x86/include/asm/page_types.h b/arch/x86/include/asm/page_types.h
index bce688d54c1..f97fbe3abb6 100644
--- a/arch/x86/include/asm/page_types.h
+++ b/arch/x86/include/asm/page_types.h
@@ -33,6 +33,11 @@
(((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \
VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+#define __PHYSICAL_START ALIGN(CONFIG_PHYSICAL_START, \
+ CONFIG_PHYSICAL_ALIGN)
+
+#define __START_KERNEL (__START_KERNEL_map + __PHYSICAL_START)
+
#ifdef CONFIG_X86_64
#include <asm/page_64_types.h>
#else
@@ -51,11 +56,12 @@ static inline phys_addr_t get_max_mapped(void)
return (phys_addr_t)max_pfn_mapped << PAGE_SHIFT;
}
+bool pfn_range_is_mapped(unsigned long start_pfn, unsigned long end_pfn);
+
extern unsigned long init_memory_mapping(unsigned long start,
unsigned long end);
extern void initmem_init(void);
-extern void free_initmem(void);
#endif /* !__ASSEMBLY__ */
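Replacing the open-coded mask arithmetic with ALIGN() yields the same value; a quick worked example with illustrative config choices:

/*
 * ALIGN(x, a) rounds x up to a multiple of a, so for example:
 *
 *   CONFIG_PHYSICAL_START = 0x1000000, CONFIG_PHYSICAL_ALIGN = 0x200000
 *     __PHYSICAL_START = ALIGN(0x1000000, 0x200000) = 0x1000000
 *
 *   CONFIG_PHYSICAL_START = 0x1234000, CONFIG_PHYSICAL_ALIGN = 0x200000
 *     __PHYSICAL_START = ALIGN(0x1234000, 0x200000) = 0x1400000
 *
 * and __START_KERNEL = __START_KERNEL_map + __PHYSICAL_START, as before.
 */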
diff --git a/arch/x86/include/asm/param.h b/arch/x86/include/asm/param.h
deleted file mode 100644
index 965d4542797..00000000000
--- a/arch/x86/include/asm/param.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/param.h>
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index a7d2db9a74f..cd6e1610e29 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -10,6 +10,7 @@
#include <asm/paravirt_types.h>
#ifndef __ASSEMBLY__
+#include <linux/bug.h>
#include <linux/types.h>
#include <linux/cpumask.h>
@@ -127,21 +128,11 @@ static inline u64 paravirt_read_msr(unsigned msr, int *err)
return PVOP_CALL2(u64, pv_cpu_ops.read_msr, msr, err);
}
-static inline int paravirt_rdmsr_regs(u32 *regs)
-{
- return PVOP_CALL1(int, pv_cpu_ops.rdmsr_regs, regs);
-}
-
static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high)
{
return PVOP_CALL3(int, pv_cpu_ops.write_msr, msr, low, high);
}
-static inline int paravirt_wrmsr_regs(u32 *regs)
-{
- return PVOP_CALL1(int, pv_cpu_ops.wrmsr_regs, regs);
-}
-
/* These should all do BUG_ON(_err), but our headers are too tangled. */
#define rdmsr(msr, val1, val2) \
do { \
@@ -175,9 +166,6 @@ do { \
_err; \
})
-#define rdmsr_safe_regs(regs) paravirt_rdmsr_regs(regs)
-#define wrmsr_safe_regs(regs) paravirt_wrmsr_regs(regs)
-
static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
{
int err;
@@ -185,32 +173,6 @@ static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
*p = paravirt_read_msr(msr, &err);
return err;
}
-static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
-{
- u32 gprs[8] = { 0 };
- int err;
-
- gprs[1] = msr;
- gprs[7] = 0x9c5a203a;
-
- err = paravirt_rdmsr_regs(gprs);
-
- *p = gprs[0] | ((u64)gprs[2] << 32);
-
- return err;
-}
-
-static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
-{
- u32 gprs[8] = { 0 };
-
- gprs[0] = (u32)val;
- gprs[1] = msr;
- gprs[2] = val >> 32;
- gprs[7] = 0x9c5a203a;
-
- return paravirt_wrmsr_regs(gprs);
-}
static inline u64 paravirt_read_tsc(void)
{
@@ -230,9 +192,9 @@ static inline unsigned long long paravirt_sched_clock(void)
return PVOP_CALL0(unsigned long long, pv_time_ops.sched_clock);
}
-struct jump_label_key;
-extern struct jump_label_key paravirt_steal_enabled;
-extern struct jump_label_key paravirt_steal_rq_enabled;
+struct static_key;
+extern struct static_key paravirt_steal_enabled;
+extern struct static_key paravirt_steal_rq_enabled;
static inline u64 paravirt_steal_clock(int cpu)
{
@@ -251,6 +213,8 @@ do { \
high = _l >> 32; \
} while (0)
+#define rdpmcl(counter, val) ((val) = paravirt_read_pmc(counter))
+
static inline unsigned long long paravirt_rdtscp(unsigned int *aux)
{
return PVOP_CALL1(u64, pv_cpu_ops.read_tscp, aux);
@@ -298,10 +262,6 @@ static inline void set_ldt(const void *addr, unsigned entries)
{
PVOP_VCALL2(pv_cpu_ops.set_ldt, addr, entries);
}
-static inline void store_gdt(struct desc_ptr *dtr)
-{
- PVOP_VCALL1(pv_cpu_ops.store_gdt, dtr);
-}
static inline void store_idt(struct desc_ptr *dtr)
{
PVOP_VCALL1(pv_cpu_ops.store_idt, dtr);
@@ -396,9 +356,10 @@ static inline void __flush_tlb_single(unsigned long addr)
static inline void flush_tlb_others(const struct cpumask *cpumask,
struct mm_struct *mm,
- unsigned long va)
+ unsigned long start,
+ unsigned long end)
{
- PVOP_VCALL3(pv_mmu_ops.flush_tlb_others, cpumask, mm, va);
+ PVOP_VCALL4(pv_mmu_ops.flush_tlb_others, cpumask, mm, start, end);
}
static inline int paravirt_pgd_alloc(struct mm_struct *mm)
@@ -563,7 +524,6 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
PVOP_VCALL4(pv_mmu_ops.set_pte_at, mm, addr, ptep, pte.pte);
}
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp, pmd_t pmd)
{
@@ -574,7 +534,6 @@ static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
PVOP_VCALL4(pv_mmu_ops.set_pmd_at, mm, addr, pmdp,
native_pmd_val(pmd));
}
-#endif
static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
@@ -740,7 +699,10 @@ static inline void arch_leave_lazy_mmu_mode(void)
PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave);
}
-void arch_flush_lazy_mmu_mode(void);
+static inline void arch_flush_lazy_mmu_mode(void)
+{
+ PVOP_VCALL0(pv_mmu_ops.lazy_mode.flush);
+}
static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
phys_addr_t phys, pgprot_t flags)
@@ -750,36 +712,16 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
-static inline int arch_spin_is_locked(struct arch_spinlock *lock)
-{
- return PVOP_CALL1(int, pv_lock_ops.spin_is_locked, lock);
-}
-
-static inline int arch_spin_is_contended(struct arch_spinlock *lock)
-{
- return PVOP_CALL1(int, pv_lock_ops.spin_is_contended, lock);
-}
-#define arch_spin_is_contended arch_spin_is_contended
-
-static __always_inline void arch_spin_lock(struct arch_spinlock *lock)
-{
- PVOP_VCALL1(pv_lock_ops.spin_lock, lock);
-}
-
-static __always_inline void arch_spin_lock_flags(struct arch_spinlock *lock,
- unsigned long flags)
-{
- PVOP_VCALL2(pv_lock_ops.spin_lock_flags, lock, flags);
-}
-
-static __always_inline int arch_spin_trylock(struct arch_spinlock *lock)
+static __always_inline void __ticket_lock_spinning(struct arch_spinlock *lock,
+ __ticket_t ticket)
{
- return PVOP_CALL1(int, pv_lock_ops.spin_trylock, lock);
+ PVOP_VCALLEE2(pv_lock_ops.lock_spinning, lock, ticket);
}
-static __always_inline void arch_spin_unlock(struct arch_spinlock *lock)
+static __always_inline void __ticket_unlock_kick(struct arch_spinlock *lock,
+ __ticket_t ticket)
{
- PVOP_VCALL1(pv_lock_ops.spin_unlock, lock);
+ PVOP_VCALL2(pv_lock_ops.unlock_kick, lock, ticket);
}
#endif
@@ -839,9 +781,9 @@ static __always_inline void arch_spin_unlock(struct arch_spinlock *lock)
*/
#define PV_CALLEE_SAVE_REGS_THUNK(func) \
extern typeof(func) __raw_callee_save_##func; \
- static void *__##func##__ __used = func; \
\
asm(".pushsection .text;" \
+ ".globl __raw_callee_save_" #func " ; " \
"__raw_callee_save_" #func ": " \
PV_SAVE_ALL_CALLER_REGS \
"call " #func ";" \
@@ -1022,10 +964,8 @@ extern void default_banner(void);
call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs) \
)
-#define GET_CR2_INTO_RCX \
- call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr2); \
- movq %rax, %rcx; \
- xorq %rax, %rax;
+#define GET_CR2_INTO_RAX \
+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr2)
#define PARAVIRT_ADJUST_EXCEPTION_FRAME \
PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_adjust_exception_frame), \
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 8e8b9a4987e..7549b8b369e 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -91,6 +91,7 @@ struct pv_lazy_ops {
/* Set deferred update mode, used for batching operations. */
void (*enter)(void);
void (*leave)(void);
+ void (*flush)(void);
};
struct pv_time_ops {
@@ -122,7 +123,7 @@ struct pv_cpu_ops {
void (*load_tr_desc)(void);
void (*load_gdt)(const struct desc_ptr *);
void (*load_idt)(const struct desc_ptr *);
- void (*store_gdt)(struct desc_ptr *);
+ /* store_gdt has been removed. */
void (*store_idt)(struct desc_ptr *);
void (*set_ldt)(const void *desc, unsigned entries);
unsigned long (*store_tr)(void);
@@ -153,9 +154,7 @@ struct pv_cpu_ops {
/* MSR, PMC and TSR operations.
err = 0/-EFAULT. wrmsr returns 0/-EFAULT. */
u64 (*read_msr)(unsigned int msr, int *err);
- int (*rdmsr_regs)(u32 *regs);
int (*write_msr)(unsigned int msr, unsigned low, unsigned high);
- int (*wrmsr_regs)(u32 *regs);
u64 (*read_tsc)(void);
u64 (*read_pmc)(int counter);
@@ -250,7 +249,8 @@ struct pv_mmu_ops {
void (*flush_tlb_single)(unsigned long addr);
void (*flush_tlb_others)(const struct cpumask *cpus,
struct mm_struct *mm,
- unsigned long va);
+ unsigned long start,
+ unsigned long end);
/* Hooks for allocating and freeing a pagetable top-level */
int (*pgd_alloc)(struct mm_struct *mm);
@@ -327,13 +327,15 @@ struct pv_mmu_ops {
};
struct arch_spinlock;
+#ifdef CONFIG_SMP
+#include <asm/spinlock_types.h>
+#else
+typedef u16 __ticket_t;
+#endif
+
struct pv_lock_ops {
- int (*spin_is_locked)(struct arch_spinlock *lock);
- int (*spin_is_contended)(struct arch_spinlock *lock);
- void (*spin_lock)(struct arch_spinlock *lock);
- void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
- int (*spin_trylock)(struct arch_spinlock *lock);
- void (*spin_unlock)(struct arch_spinlock *lock);
+ struct paravirt_callee_save lock_spinning;
+ void (*unlock_kick)(struct arch_spinlock *lock, __ticket_t ticket);
};
/* This contains all the paravirt structures: we get a convenient
@@ -386,9 +388,11 @@ extern struct pv_lock_ops pv_lock_ops;
_paravirt_alt(insn_string, "%c[paravirt_typenum]", "%c[paravirt_clobber]")
/* Simple instruction patching code. */
-#define DEF_NATIVE(ops, name, code) \
- extern const char start_##ops##_##name[], end_##ops##_##name[]; \
- asm("start_" #ops "_" #name ": " code "; end_" #ops "_" #name ":")
+#define NATIVE_LABEL(a,x,b) "\n\t.globl " a #x "_" #b "\n" a #x "_" #b ":\n\t"
+
+#define DEF_NATIVE(ops, name, code) \
+ __visible extern const char start_##ops##_##name[], end_##ops##_##name[]; \
+ asm(NATIVE_LABEL("start_", ops, name) code NATIVE_LABEL("end_", ops, name))
unsigned paravirt_patch_nop(void);
unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len);
@@ -680,6 +684,7 @@ void paravirt_end_context_switch(struct task_struct *next);
void paravirt_enter_lazy_mmu(void);
void paravirt_leave_lazy_mmu(void);
+void paravirt_flush_lazy_mmu(void);
void _paravirt_nop(void);
u32 _paravirt_ident_32(u32);
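DEF_NATIVE() with the new NATIVE_LABEL() emits globally visible start_/end_ symbols around a literal native instruction sequence. A sketch of how the patching code consumes them (modelled on arch/x86/kernel/paravirt_patch_64.c; the real call sites there may differ):

#include <asm/paravirt_types.h>

/* Emit the native byte sequences plus their start_/end_ labels. */
DEF_NATIVE(pv_irq_ops, irq_disable, "cli");
DEF_NATIVE(pv_irq_ops, irq_enable, "sti");

/* Patch one call site: copy the native bytes over it if they fit. */
static unsigned patch_irq_disable(void *insnbuf, unsigned len)
{
	return paravirt_patch_insns(insnbuf, len,
				    start_pv_irq_ops_irq_disable,
				    end_pv_irq_ops_irq_disable);
}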
diff --git a/arch/x86/include/asm/parport.h b/arch/x86/include/asm/parport.h
index 3c4ffeb467e..0d2d3b29118 100644
--- a/arch/x86/include/asm/parport.h
+++ b/arch/x86/include/asm/parport.h
@@ -1,8 +1,8 @@
#ifndef _ASM_X86_PARPORT_H
#define _ASM_X86_PARPORT_H
-static int __devinit parport_pc_find_isa_ports(int autoirq, int autodma);
-static int __devinit parport_pc_find_nonpci_ports(int autoirq, int autodma)
+static int parport_pc_find_isa_ports(int autoirq, int autodma);
+static int parport_pc_find_nonpci_ports(int autoirq, int autodma)
{
return parport_pc_find_isa_ports(autoirq, autodma);
}
diff --git a/arch/x86/include/asm/pci.h b/arch/x86/include/asm/pci.h
index df75d07571c..0892ea0e683 100644
--- a/arch/x86/include/asm/pci.h
+++ b/arch/x86/include/asm/pci.h
@@ -14,6 +14,9 @@
struct pci_sysdata {
int domain; /* PCI domain */
int node; /* NUMA node */
+#ifdef CONFIG_ACPI
+ struct acpi_device *companion; /* ACPI companion device */
+#endif
#ifdef CONFIG_X86_64
void *iommu; /* IOMMU private data */
#endif
@@ -23,11 +26,6 @@ extern int pci_routeirq;
extern int noioapicquirk;
extern int noioapicreroute;
-/* scan a bus after allocating a pci_sysdata for it */
-extern struct pci_bus *pci_scan_bus_on_node(int busno, struct pci_ops *ops,
- int node);
-extern struct pci_bus *pci_scan_bus_with_sysdata(int busno);
-
#ifdef CONFIG_PCI
#ifdef CONFIG_PCI_DOMAINS
@@ -67,10 +65,9 @@ extern unsigned long pci_mem_start;
extern int pcibios_enabled;
void pcibios_config_init(void);
-struct pci_bus *pcibios_scan_root(int bus);
+void pcibios_scan_root(int bus);
void pcibios_set_master(struct pci_dev *dev);
-void pcibios_penalize_isa_irq(int irq, int active);
struct irq_routing_table *pcibios_get_irq_routing_table(void);
int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq);
@@ -97,43 +94,16 @@ static inline void early_quirks(void) { }
extern void pci_iommu_alloc(void);
#ifdef CONFIG_PCI_MSI
-/* MSI arch specific hooks */
-static inline int x86_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
-{
- return x86_msi.setup_msi_irqs(dev, nvec, type);
-}
-
-static inline void x86_teardown_msi_irqs(struct pci_dev *dev)
-{
- x86_msi.teardown_msi_irqs(dev);
-}
-
-static inline void x86_teardown_msi_irq(unsigned int irq)
-{
- x86_msi.teardown_msi_irq(irq);
-}
-static inline void x86_restore_msi_irqs(struct pci_dev *dev, int irq)
-{
- x86_msi.restore_msi_irqs(dev, irq);
-}
-#define arch_setup_msi_irqs x86_setup_msi_irqs
-#define arch_teardown_msi_irqs x86_teardown_msi_irqs
-#define arch_teardown_msi_irq x86_teardown_msi_irq
-#define arch_restore_msi_irqs x86_restore_msi_irqs
/* implemented in arch/x86/kernel/apic/io_apic. */
+struct msi_desc;
int native_setup_msi_irqs(struct pci_dev *dev, int nvec, int type);
void native_teardown_msi_irq(unsigned int irq);
-void native_restore_msi_irqs(struct pci_dev *dev, int irq);
-/* default to the implementation in drivers/lib/msi.c */
-#define HAVE_DEFAULT_MSI_TEARDOWN_IRQS
-#define HAVE_DEFAULT_MSI_RESTORE_IRQS
-void default_teardown_msi_irqs(struct pci_dev *dev);
-void default_restore_msi_irqs(struct pci_dev *dev, int irq);
+void native_restore_msi_irqs(struct pci_dev *dev);
+int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc,
+ unsigned int irq_base, unsigned int irq_offset);
#else
#define native_setup_msi_irqs NULL
#define native_teardown_msi_irq NULL
-#define default_teardown_msi_irqs NULL
-#define default_restore_msi_irqs NULL
#endif
#define PCI_DMA_BUS_IS_PHYS (dma_ops->is_phys)
@@ -141,7 +111,7 @@ void default_restore_msi_irqs(struct pci_dev *dev, int irq);
#endif /* __KERNEL__ */
#ifdef CONFIG_X86_64
-#include "pci_64.h"
+#include <asm/pci_64.h>
#endif
/* implement the pci_ DMA API in terms of the generic device dma_ one */
@@ -149,7 +119,6 @@ void default_restore_msi_irqs(struct pci_dev *dev, int irq);
/* generic pci stuff */
#include <asm-generic/pci.h>
-#define PCIBIOS_MAX_MEM_32 0xffffffff
#ifdef CONFIG_NUMA
/* Returns the node based on pci bus */
@@ -171,4 +140,16 @@ cpumask_of_pcibus(const struct pci_bus *bus)
}
#endif
+struct pci_setup_rom {
+ struct setup_data data;
+ uint16_t vendor;
+ uint16_t devid;
+ uint64_t pcilen;
+ unsigned long segment;
+ unsigned long bus;
+ unsigned long device;
+ unsigned long function;
+ uint8_t romdata[0];
+};
+
#endif /* _ASM_X86_PCI_H */
diff --git a/arch/x86/include/asm/pci_x86.h b/arch/x86/include/asm/pci_x86.h
index b3a53174602..fa1195dae42 100644
--- a/arch/x86/include/asm/pci_x86.h
+++ b/arch/x86/include/asm/pci_x86.h
@@ -7,9 +7,13 @@
#undef DEBUG
#ifdef DEBUG
-#define DBG(x...) printk(x)
+#define DBG(fmt, ...) printk(fmt, ##__VA_ARGS__)
#else
-#define DBG(x...)
+#define DBG(fmt, ...) \
+do { \
+ if (0) \
+ printk(fmt, ##__VA_ARGS__); \
+} while (0)
#endif
#define PCI_PROBE_BIOS 0x0001
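
A hedged user-space sketch of the new DBG() shape follows, with printf standing in for printk and an invented DEBUG_ON switch; the point of the "if (0)" body is that the arguments keep being format-checked (and the macro still swallows a trailing semicolon) while generating no code.

#include <stdio.h>

#define DEBUG_ON 0

#if DEBUG_ON
#define DBG(fmt, ...) printf(fmt, ##__VA_ARGS__)
#else
#define DBG(fmt, ...)                                   \
do {                                                    \
        if (0)                                          \
                printf(fmt, ##__VA_ARGS__);             \
} while (0)
#endif

int main(void)
{
        int bus = 0;

        /* With DEBUG_ON == 0 this emits no code, but a mismatched format
         * string (e.g. %s with an int) would still produce a warning. */
        DBG("scanning bus %d\n", bus);
        return 0;
}
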
@@ -50,7 +54,6 @@ void pcibios_set_cache_line_size(void);
/* pci-pc.c */
extern int pcibios_last_bus;
-extern struct pci_bus *pci_root_bus;
extern struct pci_ops pci_root_ops;
void pcibios_scan_specific_bus(int busn);
@@ -100,6 +103,7 @@ struct pci_raw_ops {
extern const struct pci_raw_ops *raw_pci_ops;
extern const struct pci_raw_ops *raw_pci_ext_ops;
+extern const struct pci_raw_ops pci_mmcfg;
extern const struct pci_raw_ops pci_direct_conf1;
extern bool port_cf9_safe;
@@ -135,6 +139,11 @@ struct pci_mmcfg_region {
extern int __init pci_mmcfg_arch_init(void);
extern void __init pci_mmcfg_arch_free(void);
+extern int pci_mmcfg_arch_map(struct pci_mmcfg_region *cfg);
+extern void pci_mmcfg_arch_unmap(struct pci_mmcfg_region *cfg);
+extern int pci_mmconfig_insert(struct device *dev, u16 seg, u8 start, u8 end,
+ phys_addr_t addr);
+extern int pci_mmconfig_delete(u16 seg, u8 start, u8 end);
extern struct pci_mmcfg_region *pci_mmconfig_lookup(int segment, int bus);
extern struct list_head pci_mmcfg_list;
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index 7a11910a63c..851bcdc5db0 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -46,13 +46,13 @@
#ifdef CONFIG_SMP
#define __percpu_prefix "%%"__stringify(__percpu_seg)":"
-#define __my_cpu_offset percpu_read(this_cpu_off)
+#define __my_cpu_offset this_cpu_read(this_cpu_off)
/*
* Compared to the generic __my_cpu_offset version, the following
* saves one instruction and avoids clobbering a temp register.
*/
-#define __this_cpu_ptr(ptr) \
+#define raw_cpu_ptr(ptr) \
({ \
unsigned long tcp_ptr__; \
__verify_pcpu_ptr(ptr); \
@@ -128,7 +128,8 @@ do { \
do { \
typedef typeof(var) pao_T__; \
const int pao_ID__ = (__builtin_constant_p(val) && \
- ((val) == 1 || (val) == -1)) ? (val) : 0; \
+ ((val) == 1 || (val) == -1)) ? \
+ (int)(val) : 0; \
if (0) { \
pao_T__ pao_tmp__; \
pao_tmp__ = (val); \
@@ -351,46 +352,35 @@ do { \
})
/*
- * percpu_read() makes gcc load the percpu variable every time it is
- * accessed while percpu_read_stable() allows the value to be cached.
- * percpu_read_stable() is more efficient and can be used if its value
+ * this_cpu_read() makes gcc load the percpu variable every time it is
+ * accessed, while this_cpu_read_stable() allows the value to be cached.
+ * this_cpu_read_stable() is more efficient and can be used if its value
* is guaranteed to be valid across cpus. The current users include
* get_current() and get_thread_info() both of which are actually
* per-thread variables implemented as per-cpu variables and thus
* stable for the duration of the respective task.
*/
-#define percpu_read(var) percpu_from_op("mov", var, "m" (var))
-#define percpu_read_stable(var) percpu_from_op("mov", var, "p" (&(var)))
-#define percpu_write(var, val) percpu_to_op("mov", var, val)
-#define percpu_add(var, val) percpu_add_op(var, val)
-#define percpu_sub(var, val) percpu_add_op(var, -(val))
-#define percpu_and(var, val) percpu_to_op("and", var, val)
-#define percpu_or(var, val) percpu_to_op("or", var, val)
-#define percpu_xor(var, val) percpu_to_op("xor", var, val)
-#define percpu_inc(var) percpu_unary_op("inc", var)
-
-#define __this_cpu_read_1(pcp) percpu_from_op("mov", (pcp), "m"(pcp))
-#define __this_cpu_read_2(pcp) percpu_from_op("mov", (pcp), "m"(pcp))
-#define __this_cpu_read_4(pcp) percpu_from_op("mov", (pcp), "m"(pcp))
-
-#define __this_cpu_write_1(pcp, val) percpu_to_op("mov", (pcp), val)
-#define __this_cpu_write_2(pcp, val) percpu_to_op("mov", (pcp), val)
-#define __this_cpu_write_4(pcp, val) percpu_to_op("mov", (pcp), val)
-#define __this_cpu_add_1(pcp, val) percpu_add_op((pcp), val)
-#define __this_cpu_add_2(pcp, val) percpu_add_op((pcp), val)
-#define __this_cpu_add_4(pcp, val) percpu_add_op((pcp), val)
-#define __this_cpu_and_1(pcp, val) percpu_to_op("and", (pcp), val)
-#define __this_cpu_and_2(pcp, val) percpu_to_op("and", (pcp), val)
-#define __this_cpu_and_4(pcp, val) percpu_to_op("and", (pcp), val)
-#define __this_cpu_or_1(pcp, val) percpu_to_op("or", (pcp), val)
-#define __this_cpu_or_2(pcp, val) percpu_to_op("or", (pcp), val)
-#define __this_cpu_or_4(pcp, val) percpu_to_op("or", (pcp), val)
-#define __this_cpu_xor_1(pcp, val) percpu_to_op("xor", (pcp), val)
-#define __this_cpu_xor_2(pcp, val) percpu_to_op("xor", (pcp), val)
-#define __this_cpu_xor_4(pcp, val) percpu_to_op("xor", (pcp), val)
-#define __this_cpu_xchg_1(pcp, val) percpu_xchg_op(pcp, val)
-#define __this_cpu_xchg_2(pcp, val) percpu_xchg_op(pcp, val)
-#define __this_cpu_xchg_4(pcp, val) percpu_xchg_op(pcp, val)
+#define this_cpu_read_stable(var) percpu_from_op("mov", var, "p" (&(var)))
+
+#define raw_cpu_read_1(pcp) percpu_from_op("mov", (pcp), "m"(pcp))
+#define raw_cpu_read_2(pcp) percpu_from_op("mov", (pcp), "m"(pcp))
+#define raw_cpu_read_4(pcp) percpu_from_op("mov", (pcp), "m"(pcp))
+
+#define raw_cpu_write_1(pcp, val) percpu_to_op("mov", (pcp), val)
+#define raw_cpu_write_2(pcp, val) percpu_to_op("mov", (pcp), val)
+#define raw_cpu_write_4(pcp, val) percpu_to_op("mov", (pcp), val)
+#define raw_cpu_add_1(pcp, val) percpu_add_op((pcp), val)
+#define raw_cpu_add_2(pcp, val) percpu_add_op((pcp), val)
+#define raw_cpu_add_4(pcp, val) percpu_add_op((pcp), val)
+#define raw_cpu_and_1(pcp, val) percpu_to_op("and", (pcp), val)
+#define raw_cpu_and_2(pcp, val) percpu_to_op("and", (pcp), val)
+#define raw_cpu_and_4(pcp, val) percpu_to_op("and", (pcp), val)
+#define raw_cpu_or_1(pcp, val) percpu_to_op("or", (pcp), val)
+#define raw_cpu_or_2(pcp, val) percpu_to_op("or", (pcp), val)
+#define raw_cpu_or_4(pcp, val) percpu_to_op("or", (pcp), val)
+#define raw_cpu_xchg_1(pcp, val) percpu_xchg_op(pcp, val)
+#define raw_cpu_xchg_2(pcp, val) percpu_xchg_op(pcp, val)
+#define raw_cpu_xchg_4(pcp, val) percpu_xchg_op(pcp, val)
#define this_cpu_read_1(pcp) percpu_from_op("mov", (pcp), "m"(pcp))
#define this_cpu_read_2(pcp) percpu_from_op("mov", (pcp), "m"(pcp))
@@ -407,30 +397,24 @@ do { \
#define this_cpu_or_1(pcp, val) percpu_to_op("or", (pcp), val)
#define this_cpu_or_2(pcp, val) percpu_to_op("or", (pcp), val)
#define this_cpu_or_4(pcp, val) percpu_to_op("or", (pcp), val)
-#define this_cpu_xor_1(pcp, val) percpu_to_op("xor", (pcp), val)
-#define this_cpu_xor_2(pcp, val) percpu_to_op("xor", (pcp), val)
-#define this_cpu_xor_4(pcp, val) percpu_to_op("xor", (pcp), val)
#define this_cpu_xchg_1(pcp, nval) percpu_xchg_op(pcp, nval)
#define this_cpu_xchg_2(pcp, nval) percpu_xchg_op(pcp, nval)
#define this_cpu_xchg_4(pcp, nval) percpu_xchg_op(pcp, nval)
-#ifndef CONFIG_M386
-#define __this_cpu_add_return_1(pcp, val) percpu_add_return_op(pcp, val)
-#define __this_cpu_add_return_2(pcp, val) percpu_add_return_op(pcp, val)
-#define __this_cpu_add_return_4(pcp, val) percpu_add_return_op(pcp, val)
-#define __this_cpu_cmpxchg_1(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval)
-#define __this_cpu_cmpxchg_2(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval)
-#define __this_cpu_cmpxchg_4(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval)
-
-#define this_cpu_add_return_1(pcp, val) percpu_add_return_op(pcp, val)
-#define this_cpu_add_return_2(pcp, val) percpu_add_return_op(pcp, val)
-#define this_cpu_add_return_4(pcp, val) percpu_add_return_op(pcp, val)
+#define raw_cpu_add_return_1(pcp, val) percpu_add_return_op(pcp, val)
+#define raw_cpu_add_return_2(pcp, val) percpu_add_return_op(pcp, val)
+#define raw_cpu_add_return_4(pcp, val) percpu_add_return_op(pcp, val)
+#define raw_cpu_cmpxchg_1(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval)
+#define raw_cpu_cmpxchg_2(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval)
+#define raw_cpu_cmpxchg_4(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval)
+
+#define this_cpu_add_return_1(pcp, val) percpu_add_return_op(pcp, val)
+#define this_cpu_add_return_2(pcp, val) percpu_add_return_op(pcp, val)
+#define this_cpu_add_return_4(pcp, val) percpu_add_return_op(pcp, val)
#define this_cpu_cmpxchg_1(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval)
#define this_cpu_cmpxchg_2(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval)
#define this_cpu_cmpxchg_4(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval)
-#endif /* !CONFIG_M386 */
-
#ifdef CONFIG_X86_CMPXCHG64
#define percpu_cmpxchg8b_double(pcp1, pcp2, o1, o2, n1, n2) \
({ \
@@ -443,7 +427,7 @@ do { \
__ret; \
})
-#define __this_cpu_cmpxchg_double_4 percpu_cmpxchg8b_double
+#define raw_cpu_cmpxchg_double_4 percpu_cmpxchg8b_double
#define this_cpu_cmpxchg_double_4 percpu_cmpxchg8b_double
#endif /* CONFIG_X86_CMPXCHG64 */
@@ -452,24 +436,22 @@ do { \
* 32 bit must fall back to generic operations.
*/
#ifdef CONFIG_X86_64
-#define __this_cpu_read_8(pcp) percpu_from_op("mov", (pcp), "m"(pcp))
-#define __this_cpu_write_8(pcp, val) percpu_to_op("mov", (pcp), val)
-#define __this_cpu_add_8(pcp, val) percpu_add_op((pcp), val)
-#define __this_cpu_and_8(pcp, val) percpu_to_op("and", (pcp), val)
-#define __this_cpu_or_8(pcp, val) percpu_to_op("or", (pcp), val)
-#define __this_cpu_xor_8(pcp, val) percpu_to_op("xor", (pcp), val)
-#define __this_cpu_add_return_8(pcp, val) percpu_add_return_op(pcp, val)
-#define __this_cpu_xchg_8(pcp, nval) percpu_xchg_op(pcp, nval)
-#define __this_cpu_cmpxchg_8(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval)
-
-#define this_cpu_read_8(pcp) percpu_from_op("mov", (pcp), "m"(pcp))
-#define this_cpu_write_8(pcp, val) percpu_to_op("mov", (pcp), val)
-#define this_cpu_add_8(pcp, val) percpu_add_op((pcp), val)
-#define this_cpu_and_8(pcp, val) percpu_to_op("and", (pcp), val)
-#define this_cpu_or_8(pcp, val) percpu_to_op("or", (pcp), val)
-#define this_cpu_xor_8(pcp, val) percpu_to_op("xor", (pcp), val)
-#define this_cpu_add_return_8(pcp, val) percpu_add_return_op(pcp, val)
-#define this_cpu_xchg_8(pcp, nval) percpu_xchg_op(pcp, nval)
+#define raw_cpu_read_8(pcp) percpu_from_op("mov", (pcp), "m"(pcp))
+#define raw_cpu_write_8(pcp, val) percpu_to_op("mov", (pcp), val)
+#define raw_cpu_add_8(pcp, val) percpu_add_op((pcp), val)
+#define raw_cpu_and_8(pcp, val) percpu_to_op("and", (pcp), val)
+#define raw_cpu_or_8(pcp, val) percpu_to_op("or", (pcp), val)
+#define raw_cpu_add_return_8(pcp, val) percpu_add_return_op(pcp, val)
+#define raw_cpu_xchg_8(pcp, nval) percpu_xchg_op(pcp, nval)
+#define raw_cpu_cmpxchg_8(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval)
+
+#define this_cpu_read_8(pcp) percpu_from_op("mov", (pcp), "m"(pcp))
+#define this_cpu_write_8(pcp, val) percpu_to_op("mov", (pcp), val)
+#define this_cpu_add_8(pcp, val) percpu_add_op((pcp), val)
+#define this_cpu_and_8(pcp, val) percpu_to_op("and", (pcp), val)
+#define this_cpu_or_8(pcp, val) percpu_to_op("or", (pcp), val)
+#define this_cpu_add_return_8(pcp, val) percpu_add_return_op(pcp, val)
+#define this_cpu_xchg_8(pcp, nval) percpu_xchg_op(pcp, nval)
#define this_cpu_cmpxchg_8(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval)
/*
@@ -492,7 +474,7 @@ do { \
__ret; \
})
-#define __this_cpu_cmpxchg_double_8 percpu_cmpxchg16b_double
+#define raw_cpu_cmpxchg_double_8 percpu_cmpxchg16b_double
#define this_cpu_cmpxchg_double_8 percpu_cmpxchg16b_double
#endif
@@ -512,7 +494,11 @@ static __always_inline int x86_this_cpu_constant_test_bit(unsigned int nr,
{
unsigned long __percpu *a = (unsigned long *)addr + nr / BITS_PER_LONG;
- return ((1UL << (nr % BITS_PER_LONG)) & percpu_read(*a)) != 0;
+#ifdef CONFIG_X86_64
+ return ((1UL << (nr % BITS_PER_LONG)) & raw_cpu_read_8(*a)) != 0;
+#else
+ return ((1UL << (nr % BITS_PER_LONG)) & raw_cpu_read_4(*a)) != 0;
+#endif
}
static inline int x86_this_cpu_variable_test_bit(int nr,
@@ -555,6 +541,12 @@ DECLARE_PER_CPU(unsigned long, this_cpu_off);
{ [0 ... NR_CPUS-1] = _initvalue }; \
__typeof__(_type) *_name##_early_ptr __refdata = _name##_early_map
+#define DEFINE_EARLY_PER_CPU_READ_MOSTLY(_type, _name, _initvalue) \
+ DEFINE_PER_CPU_READ_MOSTLY(_type, _name) = _initvalue; \
+ __typeof__(_type) _name##_early_map[NR_CPUS] __initdata = \
+ { [0 ... NR_CPUS-1] = _initvalue }; \
+ __typeof__(_type) *_name##_early_ptr __refdata = _name##_early_map
+
#define EXPORT_EARLY_PER_CPU_SYMBOL(_name) \
EXPORT_PER_CPU_SYMBOL(_name)
@@ -563,6 +555,11 @@ DECLARE_PER_CPU(unsigned long, this_cpu_off);
extern __typeof__(_type) *_name##_early_ptr; \
extern __typeof__(_type) _name##_early_map[]
+#define DECLARE_EARLY_PER_CPU_READ_MOSTLY(_type, _name) \
+ DECLARE_PER_CPU_READ_MOSTLY(_type, _name); \
+ extern __typeof__(_type) *_name##_early_ptr; \
+ extern __typeof__(_type) _name##_early_map[]
+
#define early_per_cpu_ptr(_name) (_name##_early_ptr)
#define early_per_cpu_map(_name, _idx) (_name##_early_map[_idx])
#define early_per_cpu(_name, _cpu) \
@@ -574,12 +571,18 @@ DECLARE_PER_CPU(unsigned long, this_cpu_off);
#define DEFINE_EARLY_PER_CPU(_type, _name, _initvalue) \
DEFINE_PER_CPU(_type, _name) = _initvalue
+#define DEFINE_EARLY_PER_CPU_READ_MOSTLY(_type, _name, _initvalue) \
+ DEFINE_PER_CPU_READ_MOSTLY(_type, _name) = _initvalue
+
#define EXPORT_EARLY_PER_CPU_SYMBOL(_name) \
EXPORT_PER_CPU_SYMBOL(_name)
#define DECLARE_EARLY_PER_CPU(_type, _name) \
DECLARE_PER_CPU(_type, _name)
+#define DECLARE_EARLY_PER_CPU_READ_MOSTLY(_type, _name) \
+ DECLARE_PER_CPU_READ_MOSTLY(_type, _name)
+
#define early_per_cpu(_name, _cpu) per_cpu(_name, _cpu)
#define early_per_cpu_ptr(_name) NULL
/* no early_per_cpu_map() */
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index 096c975e099..8249df45d2f 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -5,11 +5,10 @@
* Performance event hw details:
*/
-#define X86_PMC_MAX_GENERIC 32
-#define X86_PMC_MAX_FIXED 3
+#define INTEL_PMC_MAX_GENERIC 32
+#define INTEL_PMC_MAX_FIXED 3
+#define INTEL_PMC_IDX_FIXED 32
-#define X86_PMC_IDX_GENERIC 0
-#define X86_PMC_IDX_FIXED 32
#define X86_PMC_IDX_MAX 64
#define MSR_ARCH_PERFMON_PERFCTR0 0xc1
@@ -23,14 +22,23 @@
#define ARCH_PERFMON_EVENTSEL_USR (1ULL << 16)
#define ARCH_PERFMON_EVENTSEL_OS (1ULL << 17)
#define ARCH_PERFMON_EVENTSEL_EDGE (1ULL << 18)
+#define ARCH_PERFMON_EVENTSEL_PIN_CONTROL (1ULL << 19)
#define ARCH_PERFMON_EVENTSEL_INT (1ULL << 20)
#define ARCH_PERFMON_EVENTSEL_ANY (1ULL << 21)
#define ARCH_PERFMON_EVENTSEL_ENABLE (1ULL << 22)
#define ARCH_PERFMON_EVENTSEL_INV (1ULL << 23)
#define ARCH_PERFMON_EVENTSEL_CMASK 0xFF000000ULL
-#define AMD_PERFMON_EVENTSEL_GUESTONLY (1ULL << 40)
-#define AMD_PERFMON_EVENTSEL_HOSTONLY (1ULL << 41)
+#define HSW_IN_TX (1ULL << 32)
+#define HSW_IN_TX_CHECKPOINTED (1ULL << 33)
+
+#define AMD64_EVENTSEL_INT_CORE_ENABLE (1ULL << 36)
+#define AMD64_EVENTSEL_GUESTONLY (1ULL << 40)
+#define AMD64_EVENTSEL_HOSTONLY (1ULL << 41)
+
+#define AMD64_EVENTSEL_INT_CORE_SEL_SHIFT 37
+#define AMD64_EVENTSEL_INT_CORE_SEL_MASK \
+ (0xFULL << AMD64_EVENTSEL_INT_CORE_SEL_SHIFT)
#define AMD64_EVENTSEL_EVENT \
(ARCH_PERFMON_EVENTSEL_EVENT | (0x0FULL << 32))
@@ -46,9 +54,12 @@
#define AMD64_RAW_EVENT_MASK \
(X86_RAW_EVENT_MASK | \
AMD64_EVENTSEL_EVENT)
+#define AMD64_RAW_EVENT_MASK_NB \
+ (AMD64_EVENTSEL_EVENT | \
+ ARCH_PERFMON_EVENTSEL_UMASK)
#define AMD64_NUM_COUNTERS 4
-#define AMD64_NUM_COUNTERS_F15H 6
-#define AMD64_NUM_COUNTERS_MAX AMD64_NUM_COUNTERS_F15H
+#define AMD64_NUM_COUNTERS_CORE 6
+#define AMD64_NUM_COUNTERS_NB 4
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL 0x3c
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK (0x00 << 8)
@@ -120,16 +131,16 @@ struct x86_pmu_capability {
/* Instr_Retired.Any: */
#define MSR_ARCH_PERFMON_FIXED_CTR0 0x309
-#define X86_PMC_IDX_FIXED_INSTRUCTIONS (X86_PMC_IDX_FIXED + 0)
+#define INTEL_PMC_IDX_FIXED_INSTRUCTIONS (INTEL_PMC_IDX_FIXED + 0)
/* CPU_CLK_Unhalted.Core: */
#define MSR_ARCH_PERFMON_FIXED_CTR1 0x30a
-#define X86_PMC_IDX_FIXED_CPU_CYCLES (X86_PMC_IDX_FIXED + 1)
+#define INTEL_PMC_IDX_FIXED_CPU_CYCLES (INTEL_PMC_IDX_FIXED + 1)
/* CPU_CLK_Unhalted.Ref: */
#define MSR_ARCH_PERFMON_FIXED_CTR2 0x30b
-#define X86_PMC_IDX_FIXED_REF_CYCLES (X86_PMC_IDX_FIXED + 2)
-#define X86_PMC_MSK_FIXED_REF_CYCLES (1ULL << X86_PMC_IDX_FIXED_REF_CYCLES)
+#define INTEL_PMC_IDX_FIXED_REF_CYCLES (INTEL_PMC_IDX_FIXED + 2)
+#define INTEL_PMC_MSK_FIXED_REF_CYCLES (1ULL << INTEL_PMC_IDX_FIXED_REF_CYCLES)
/*
* We model BTS tracing as another fixed-mode PMC.
@@ -138,7 +149,7 @@ struct x86_pmu_capability {
* values are used by actual fixed events and higher values are used
* to indicate other overflow conditions in the PERF_GLOBAL_STATUS msr.
*/
-#define X86_PMC_IDX_FIXED_BTS (X86_PMC_IDX_FIXED + 16)
+#define INTEL_PMC_IDX_FIXED_BTS (INTEL_PMC_IDX_FIXED + 16)
/*
* IBS cpuid feature detection
@@ -157,6 +168,7 @@ struct x86_pmu_capability {
#define IBS_CAPS_OPCNT (1U<<4)
#define IBS_CAPS_BRNTRGT (1U<<5)
#define IBS_CAPS_OPCNTEXT (1U<<6)
+#define IBS_CAPS_RIPINVALIDCHK (1U<<7)
#define IBS_CAPS_DEFAULT (IBS_CAPS_AVAIL \
| IBS_CAPS_FETCHSAM \
@@ -169,33 +181,43 @@ struct x86_pmu_capability {
#define IBSCTL_LVT_OFFSET_VALID (1ULL<<8)
#define IBSCTL_LVT_OFFSET_MASK 0x0F
-/* IbsFetchCtl bits/masks */
+/* ibs fetch bits/masks */
#define IBS_FETCH_RAND_EN (1ULL<<57)
#define IBS_FETCH_VAL (1ULL<<49)
#define IBS_FETCH_ENABLE (1ULL<<48)
#define IBS_FETCH_CNT 0xFFFF0000ULL
#define IBS_FETCH_MAX_CNT 0x0000FFFFULL
-/* IbsOpCtl bits */
+/* ibs op bits/masks */
+/* lower 4 bits of the current count are ignored: */
+#define IBS_OP_CUR_CNT (0xFFFF0ULL<<32)
#define IBS_OP_CNT_CTL (1ULL<<19)
#define IBS_OP_VAL (1ULL<<18)
#define IBS_OP_ENABLE (1ULL<<17)
#define IBS_OP_MAX_CNT 0x0000FFFFULL
#define IBS_OP_MAX_CNT_EXT 0x007FFFFFULL /* not a register bit mask */
+#define IBS_RIP_INVALID (1ULL<<38)
+#ifdef CONFIG_X86_LOCAL_APIC
extern u32 get_ibs_caps(void);
+#else
+static inline u32 get_ibs_caps(void) { return 0; }
+#endif
#ifdef CONFIG_PERF_EVENTS
extern void perf_events_lapic_init(void);
-#define PERF_EVENT_INDEX_OFFSET 0
-
/*
- * Abuse bit 3 of the cpu eflags register to indicate proper PEBS IP fixups.
- * This flag is otherwise unused and ABI specified to be 0, so nobody should
- * care what we do with it.
+ * Abuse bits {3,5} of the cpu eflags register. These flags are otherwise
+ * unused and ABI specified to be 0, so nobody should care what we do with
+ * them.
+ *
+ * EXACT - the IP points to the exact instruction that triggered the
+ * event (HW bugs exempt).
+ * VM - original X86_VM_MASK; see set_linear_ip().
*/
#define PERF_EFLAGS_EXACT (1UL << 3)
+#define PERF_EFLAGS_VM (1UL << 5)
struct pt_regs;
extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
@@ -227,8 +249,9 @@ struct perf_guest_switch_msr {
extern struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr);
extern void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap);
+extern void perf_check_microcode(void);
#else
-static inline perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
+static inline struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
{
*nr = 0;
return NULL;
@@ -240,6 +263,17 @@ static inline void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
}
static inline void perf_events_lapic_init(void) { }
+static inline void perf_check_microcode(void) { }
#endif
+#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)
+ extern void amd_pmu_enable_virt(void);
+ extern void amd_pmu_disable_virt(void);
+#else
+ static inline void amd_pmu_enable_virt(void) { }
+ static inline void amd_pmu_disable_virt(void) { }
+#endif
+
+#define arch_perf_out_copy_user copy_from_user_nmi
+
#endif /* _ASM_X86_PERF_EVENT_H */
diff --git a/arch/x86/include/asm/perf_event_p4.h b/arch/x86/include/asm/perf_event_p4.h
index 4f7e67e2345..85e13ccf15c 100644
--- a/arch/x86/include/asm/perf_event_p4.h
+++ b/arch/x86/include/asm/perf_event_p4.h
@@ -24,45 +24,45 @@
#define ARCH_P4_CNTRVAL_MASK ((1ULL << ARCH_P4_CNTRVAL_BITS) - 1)
#define ARCH_P4_UNFLAGGED_BIT ((1ULL) << (ARCH_P4_CNTRVAL_BITS - 1))
-#define P4_ESCR_EVENT_MASK 0x7e000000U
+#define P4_ESCR_EVENT_MASK 0x7e000000ULL
#define P4_ESCR_EVENT_SHIFT 25
-#define P4_ESCR_EVENTMASK_MASK 0x01fffe00U
+#define P4_ESCR_EVENTMASK_MASK 0x01fffe00ULL
#define P4_ESCR_EVENTMASK_SHIFT 9
-#define P4_ESCR_TAG_MASK 0x000001e0U
+#define P4_ESCR_TAG_MASK 0x000001e0ULL
#define P4_ESCR_TAG_SHIFT 5
-#define P4_ESCR_TAG_ENABLE 0x00000010U
-#define P4_ESCR_T0_OS 0x00000008U
-#define P4_ESCR_T0_USR 0x00000004U
-#define P4_ESCR_T1_OS 0x00000002U
-#define P4_ESCR_T1_USR 0x00000001U
+#define P4_ESCR_TAG_ENABLE 0x00000010ULL
+#define P4_ESCR_T0_OS 0x00000008ULL
+#define P4_ESCR_T0_USR 0x00000004ULL
+#define P4_ESCR_T1_OS 0x00000002ULL
+#define P4_ESCR_T1_USR 0x00000001ULL
#define P4_ESCR_EVENT(v) ((v) << P4_ESCR_EVENT_SHIFT)
#define P4_ESCR_EMASK(v) ((v) << P4_ESCR_EVENTMASK_SHIFT)
#define P4_ESCR_TAG(v) ((v) << P4_ESCR_TAG_SHIFT)
-#define P4_CCCR_OVF 0x80000000U
-#define P4_CCCR_CASCADE 0x40000000U
-#define P4_CCCR_OVF_PMI_T0 0x04000000U
-#define P4_CCCR_OVF_PMI_T1 0x08000000U
-#define P4_CCCR_FORCE_OVF 0x02000000U
-#define P4_CCCR_EDGE 0x01000000U
-#define P4_CCCR_THRESHOLD_MASK 0x00f00000U
+#define P4_CCCR_OVF 0x80000000ULL
+#define P4_CCCR_CASCADE 0x40000000ULL
+#define P4_CCCR_OVF_PMI_T0 0x04000000ULL
+#define P4_CCCR_OVF_PMI_T1 0x08000000ULL
+#define P4_CCCR_FORCE_OVF 0x02000000ULL
+#define P4_CCCR_EDGE 0x01000000ULL
+#define P4_CCCR_THRESHOLD_MASK 0x00f00000ULL
#define P4_CCCR_THRESHOLD_SHIFT 20
-#define P4_CCCR_COMPLEMENT 0x00080000U
-#define P4_CCCR_COMPARE 0x00040000U
-#define P4_CCCR_ESCR_SELECT_MASK 0x0000e000U
+#define P4_CCCR_COMPLEMENT 0x00080000ULL
+#define P4_CCCR_COMPARE 0x00040000ULL
+#define P4_CCCR_ESCR_SELECT_MASK 0x0000e000ULL
#define P4_CCCR_ESCR_SELECT_SHIFT 13
-#define P4_CCCR_ENABLE 0x00001000U
-#define P4_CCCR_THREAD_SINGLE 0x00010000U
-#define P4_CCCR_THREAD_BOTH 0x00020000U
-#define P4_CCCR_THREAD_ANY 0x00030000U
-#define P4_CCCR_RESERVED 0x00000fffU
+#define P4_CCCR_ENABLE 0x00001000ULL
+#define P4_CCCR_THREAD_SINGLE 0x00010000ULL
+#define P4_CCCR_THREAD_BOTH 0x00020000ULL
+#define P4_CCCR_THREAD_ANY 0x00030000ULL
+#define P4_CCCR_RESERVED 0x00000fffULL
#define P4_CCCR_THRESHOLD(v) ((v) << P4_CCCR_THRESHOLD_SHIFT)
#define P4_CCCR_ESEL(v) ((v) << P4_CCCR_ESCR_SELECT_SHIFT)
#define P4_GEN_ESCR_EMASK(class, name, bit) \
- class##__##name = ((1 << bit) << P4_ESCR_EVENTMASK_SHIFT)
+ class##__##name = ((1ULL << bit) << P4_ESCR_EVENTMASK_SHIFT)
#define P4_ESCR_EMASK_BIT(class, name) class##__##name
/*
@@ -107,7 +107,7 @@
* P4_PEBS_CONFIG_MASK and related bits on
* modification.)
*/
-#define P4_CONFIG_ALIASABLE (1 << 9)
+#define P4_CONFIG_ALIASABLE (1ULL << 9)
/*
* The bits we allow to pass for RAW events
@@ -784,17 +784,17 @@ enum P4_ESCR_EMASKS {
* Note we have UOP and PEBS bits reserved for now
* just in case if we will need them once
*/
-#define P4_PEBS_CONFIG_ENABLE (1 << 7)
-#define P4_PEBS_CONFIG_UOP_TAG (1 << 8)
-#define P4_PEBS_CONFIG_METRIC_MASK 0x3f
-#define P4_PEBS_CONFIG_MASK 0xff
+#define P4_PEBS_CONFIG_ENABLE (1ULL << 7)
+#define P4_PEBS_CONFIG_UOP_TAG (1ULL << 8)
+#define P4_PEBS_CONFIG_METRIC_MASK 0x3FLL
+#define P4_PEBS_CONFIG_MASK 0xFFLL
/*
* mem: Only counters MSR_IQ_COUNTER4 (16) and
* MSR_IQ_COUNTER5 (17) are allowed for PEBS sampling
*/
-#define P4_PEBS_ENABLE 0x02000000U
-#define P4_PEBS_ENABLE_UOP_TAG 0x01000000U
+#define P4_PEBS_ENABLE 0x02000000ULL
+#define P4_PEBS_ENABLE_UOP_TAG 0x01000000ULL
#define p4_config_unpack_metric(v) (((u64)(v)) & P4_PEBS_CONFIG_METRIC_MASK)
#define p4_config_unpack_pebs(v) (((u64)(v)) & P4_PEBS_CONFIG_MASK)
diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
index b4389a468fb..c4412e972bb 100644
--- a/arch/x86/include/asm/pgalloc.h
+++ b/arch/x86/include/asm/pgalloc.h
@@ -80,12 +80,21 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
#if PAGETABLE_LEVELS > 2
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
- return (pmd_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
+ struct page *page;
+ page = alloc_pages(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO, 0);
+ if (!page)
+ return NULL;
+ if (!pgtable_pmd_page_ctor(page)) {
+ __free_pages(page, 0);
+ return NULL;
+ }
+ return (pmd_t *)page_address(page);
}
static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
+ pgtable_pmd_page_dtor(virt_to_page(pmd));
free_page((unsigned long)pmd);
}
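
The pmd_alloc_one() change above follows the usual allocate/construct/undo-on-failure pattern; a minimal user-space sketch of that pattern (names invented, not kernel APIs) is:

#include <stdlib.h>
#include <string.h>

struct table { unsigned long entries[512]; };

static int table_ctor(struct table *t)
{
        /* Stand-in for pgtable_pmd_page_ctor(); pretend it can fail. */
        memset(t->entries, 0, sizeof(t->entries));
        return 1;
}

static struct table *table_alloc(void)
{
        struct table *t = calloc(1, sizeof(*t));

        if (!t)
                return NULL;
        if (!table_ctor(t)) {
                free(t);        /* undo the allocation on ctor failure */
                return NULL;
        }
        return t;
}

int main(void)
{
        struct table *t = table_alloc();

        free(t);
        return 0;
}
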
diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
index 98391db840c..206a87fdd22 100644
--- a/arch/x86/include/asm/pgtable-2level.h
+++ b/arch/x86/include/asm/pgtable-2level.h
@@ -2,9 +2,9 @@
#define _ASM_X86_PGTABLE_2LEVEL_H
#define pte_ERROR(e) \
- printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, (e).pte_low)
+ pr_err("%s:%d: bad pte %08lx\n", __FILE__, __LINE__, (e).pte_low)
#define pgd_ERROR(e) \
- printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
+ pr_err("%s:%d: bad pgd %08lx\n", __FILE__, __LINE__, pgd_val(e))
/*
* Certain architectures need to do special things when PTEs
@@ -55,47 +55,52 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
#define native_pmdp_get_and_clear(xp) native_local_pmdp_get_and_clear(xp)
#endif
+/* Bit manipulation helper on pte/pgoff entry */
+static inline unsigned long pte_bitop(unsigned long value, unsigned int rightshift,
+ unsigned long mask, unsigned int leftshift)
+{
+ return ((value >> rightshift) & mask) << leftshift;
+}
+
/*
* Bits _PAGE_BIT_PRESENT, _PAGE_BIT_FILE and _PAGE_BIT_PROTNONE are taken,
- * split up the 29 bits of offset into this range:
+ * split up the 29 bits of offset into this range.
*/
#define PTE_FILE_MAX_BITS 29
#define PTE_FILE_SHIFT1 (_PAGE_BIT_PRESENT + 1)
-#if _PAGE_BIT_FILE < _PAGE_BIT_PROTNONE
#define PTE_FILE_SHIFT2 (_PAGE_BIT_FILE + 1)
#define PTE_FILE_SHIFT3 (_PAGE_BIT_PROTNONE + 1)
-#else
-#define PTE_FILE_SHIFT2 (_PAGE_BIT_PROTNONE + 1)
-#define PTE_FILE_SHIFT3 (_PAGE_BIT_FILE + 1)
-#endif
#define PTE_FILE_BITS1 (PTE_FILE_SHIFT2 - PTE_FILE_SHIFT1 - 1)
#define PTE_FILE_BITS2 (PTE_FILE_SHIFT3 - PTE_FILE_SHIFT2 - 1)
-#define pte_to_pgoff(pte) \
- ((((pte).pte_low >> PTE_FILE_SHIFT1) \
- & ((1U << PTE_FILE_BITS1) - 1)) \
- + ((((pte).pte_low >> PTE_FILE_SHIFT2) \
- & ((1U << PTE_FILE_BITS2) - 1)) << PTE_FILE_BITS1) \
- + (((pte).pte_low >> PTE_FILE_SHIFT3) \
- << (PTE_FILE_BITS1 + PTE_FILE_BITS2)))
-
-#define pgoff_to_pte(off) \
- ((pte_t) { .pte_low = \
- (((off) & ((1U << PTE_FILE_BITS1) - 1)) << PTE_FILE_SHIFT1) \
- + ((((off) >> PTE_FILE_BITS1) & ((1U << PTE_FILE_BITS2) - 1)) \
- << PTE_FILE_SHIFT2) \
- + (((off) >> (PTE_FILE_BITS1 + PTE_FILE_BITS2)) \
- << PTE_FILE_SHIFT3) \
- + _PAGE_FILE })
+#define PTE_FILE_MASK1 ((1U << PTE_FILE_BITS1) - 1)
+#define PTE_FILE_MASK2 ((1U << PTE_FILE_BITS2) - 1)
+
+#define PTE_FILE_LSHIFT2 (PTE_FILE_BITS1)
+#define PTE_FILE_LSHIFT3 (PTE_FILE_BITS1 + PTE_FILE_BITS2)
+
+static __always_inline pgoff_t pte_to_pgoff(pte_t pte)
+{
+ return (pgoff_t)
+ (pte_bitop(pte.pte_low, PTE_FILE_SHIFT1, PTE_FILE_MASK1, 0) +
+ pte_bitop(pte.pte_low, PTE_FILE_SHIFT2, PTE_FILE_MASK2, PTE_FILE_LSHIFT2) +
+ pte_bitop(pte.pte_low, PTE_FILE_SHIFT3, -1UL, PTE_FILE_LSHIFT3));
+}
+
+static __always_inline pte_t pgoff_to_pte(pgoff_t off)
+{
+ return (pte_t){
+ .pte_low =
+ pte_bitop(off, 0, PTE_FILE_MASK1, PTE_FILE_SHIFT1) +
+ pte_bitop(off, PTE_FILE_LSHIFT2, PTE_FILE_MASK2, PTE_FILE_SHIFT2) +
+ pte_bitop(off, PTE_FILE_LSHIFT3, -1UL, PTE_FILE_SHIFT3) +
+ _PAGE_FILE,
+ };
+}
/* Encode and de-code a swap entry */
-#if _PAGE_BIT_FILE < _PAGE_BIT_PROTNONE
#define SWP_TYPE_BITS (_PAGE_BIT_FILE - _PAGE_BIT_PRESENT - 1)
#define SWP_OFFSET_SHIFT (_PAGE_BIT_PROTNONE + 1)
-#else
-#define SWP_TYPE_BITS (_PAGE_BIT_PROTNONE - _PAGE_BIT_PRESENT - 1)
-#define SWP_OFFSET_SHIFT (_PAGE_BIT_FILE + 1)
-#endif
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS)
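
A small user-space sketch of the pte_bitop() packing introduced above; the shift and mask values are illustrative only and do not reproduce the real 2-level PTE layout.

#include <stdio.h>

static unsigned long pte_bitop(unsigned long value, unsigned int rightshift,
                               unsigned long mask, unsigned int leftshift)
{
        return ((value >> rightshift) & mask) << leftshift;
}

int main(void)
{
        /* Pack an offset into two example fields: bits 0-4 go to bits 1-5,
         * the remainder goes to bit 9 and up. */
        unsigned long off  = 0x12345;
        unsigned long pte  = pte_bitop(off, 0, 0x1f, 1) |
                             pte_bitop(off, 5, -1UL, 9);

        /* Unpack by reversing each shift/mask pair. */
        unsigned long back = pte_bitop(pte, 1, 0x1f, 0) |
                             pte_bitop(pte, 9, -1UL, 5);

        printf("off=%#lx back=%#lx\n", off, back);      /* both 0x12345 */
        return 0;
}
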
diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
index effff47a3c8..81bb91b49a8 100644
--- a/arch/x86/include/asm/pgtable-3level.h
+++ b/arch/x86/include/asm/pgtable-3level.h
@@ -9,13 +9,13 @@
*/
#define pte_ERROR(e) \
- printk("%s:%d: bad pte %p(%08lx%08lx).\n", \
+ pr_err("%s:%d: bad pte %p(%08lx%08lx)\n", \
__FILE__, __LINE__, &(e), (e).pte_high, (e).pte_low)
#define pmd_ERROR(e) \
- printk("%s:%d: bad pmd %p(%016Lx).\n", \
+ pr_err("%s:%d: bad pmd %p(%016Lx)\n", \
__FILE__, __LINE__, &(e), pmd_val(e))
#define pgd_ERROR(e) \
- printk("%s:%d: bad pgd %p(%016Lx).\n", \
+ pr_err("%s:%d: bad pgd %p(%016Lx)\n", \
__FILE__, __LINE__, &(e), pgd_val(e))
/* Rules for using set_pte: the pte being assigned *must* be
@@ -31,6 +31,60 @@ static inline void native_set_pte(pte_t *ptep, pte_t pte)
ptep->pte_low = pte.pte_low;
}
+#define pmd_read_atomic pmd_read_atomic
+/*
+ * pte_offset_map_lock on 32bit PAE kernels was reading the pmd_t with
+ * a "*pmdp" dereference done by gcc. Problem is, in certain places
+ * where pte_offset_map_lock is called, concurrent page faults are
+ * allowed, if the mmap_sem is held for reading. An example is mincore
+ * vs page faults vs MADV_DONTNEED. On the page fault side
+ * pmd_populate rightfully does a set_64bit, but if we're reading the
+ * pmd_t with a "*pmdp" on the mincore side, a SMP race can happen
+ * because gcc will not read the 64bit of the pmd atomically. To fix
+ * this all places running pmd_offset_map_lock() while holding the
+ * mmap_sem in read mode, shall read the pmdp pointer using this
+ * function to know if the pmd is null nor not, and in turn to know if
+ * they can run pmd_offset_map_lock or pmd_trans_huge or other pmd
+ * operations.
+ *
+ * Without THP if the mmap_sem is held for reading, the pmd can only
+ * transition from null to not null while pmd_read_atomic runs. So
+ * we can always return atomic pmd values with this function.
+ *
+ * With THP if the mmap_sem is held for reading, the pmd can become
+ * trans_huge or none or point to a pte (and in turn become "stable")
+ * at any time under pmd_read_atomic. We could read it really
+ * atomically here with an atomic64_read for the THP enabled case (and
+ * it would be a whole lot simpler), but to avoid using cmpxchg8b we
+ * only return an atomic pmdval if the low part of the pmdval is later
+ * found stable (i.e. pointing to a pte). And we're returning a none
+ * pmdval if the low part of the pmd is none. In some cases the high
+ * and low part of the pmdval returned may not be consistent if THP is
+ * enabled (the low part may point to a previously mapped hugepage,
+ * while the high part may point to a more recently mapped hugepage),
+ * but pmd_none_or_trans_huge_or_clear_bad() only needs the low part
+ * of the pmd to be read atomically to decide if the pmd is unstable
+ * or not, with the only exception of when the low part of the pmd is
+ * zero in which case we return a none pmd.
+ */
+static inline pmd_t pmd_read_atomic(pmd_t *pmdp)
+{
+ pmdval_t ret;
+ u32 *tmp = (u32 *)pmdp;
+
+ ret = (pmdval_t) (*tmp);
+ if (ret) {
+ /*
+ * If the low part is null, we must not read the high part
+ * or we can end up with a partial pmd.
+ */
+ smp_rmb();
+ ret |= ((pmdval_t)*(tmp + 1)) << 32;
+ }
+
+ return (pmd_t) { ret };
+}
+
static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
{
set_64bit((unsigned long long *)(ptep), native_pte_val(pte));
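
A hedged user-space sketch of the low-half-first read described in the pmd_read_atomic() comment above, with a C11 acquire fence standing in for smp_rmb(); the types and values are illustrative only.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t read_entry_lowfirst(const uint32_t *halves)
{
        uint64_t ret = halves[0];

        if (ret) {
                /* The low half is set: only now is it safe to pick up the
                 * high half, so a "none" entry is never combined with a
                 * stale high word. */
                atomic_thread_fence(memory_order_acquire);
                ret |= (uint64_t)halves[1] << 32;
        }
        return ret;
}

int main(void)
{
        uint32_t entry[2] = { 0x00000067u, 0x00000001u };

        printf("entry = %#llx\n",
               (unsigned long long)read_entry_lowfirst(entry));
        return 0;
}
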
@@ -125,6 +179,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *pmdp)
/*
* Bits 0, 6 and 7 are taken in the low part of the pte,
* put the 32 bits of offset into the high part.
+ *
+ * For soft-dirty tracking 11 bit is taken from
+ * the low part of pte as well.
*/
#define pte_to_pgoff(pte) ((pte).pte_high)
#define pgoff_to_pte(off) \
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 49afb3f41eb..0ec05601261 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -15,14 +15,16 @@
: (prot))
#ifndef __ASSEMBLY__
-
#include <asm/x86_init.h>
+void ptdump_walk_pgd_level(struct seq_file *m, pgd_t *pgd);
+
/*
* ZERO_PAGE is a global shared page that is always zero: used
* for zero-mapped memory areas etc..
*/
-extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
+extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
+ __visible;
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
extern spinlock_t pgd_lock;
@@ -129,7 +131,8 @@ static inline int pte_exec(pte_t pte)
static inline int pte_special(pte_t pte)
{
- return pte_flags(pte) & _PAGE_SPECIAL;
+ return (pte_flags(pte) & (_PAGE_PRESENT|_PAGE_SPECIAL)) ==
+ (_PAGE_PRESENT|_PAGE_SPECIAL);
}
static inline unsigned long pte_pfn(pte_t pte)
@@ -142,12 +145,16 @@ static inline unsigned long pmd_pfn(pmd_t pmd)
return (pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT;
}
+static inline unsigned long pud_pfn(pud_t pud)
+{
+ return (pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT;
+}
+
#define pte_page(pte) pfn_to_page(pte_pfn(pte))
static inline int pmd_large(pmd_t pte)
{
- return (pmd_flags(pte) & (_PAGE_PSE | _PAGE_PRESENT)) ==
- (_PAGE_PSE | _PAGE_PRESENT);
+ return pmd_flags(pte) & _PAGE_PSE;
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -203,7 +210,7 @@ static inline pte_t pte_mkexec(pte_t pte)
static inline pte_t pte_mkdirty(pte_t pte)
{
- return pte_set_flags(pte, _PAGE_DIRTY);
+ return pte_set_flags(pte, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}
static inline pte_t pte_mkyoung(pte_t pte)
@@ -267,7 +274,7 @@ static inline pmd_t pmd_wrprotect(pmd_t pmd)
static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
- return pmd_set_flags(pmd, _PAGE_DIRTY);
+ return pmd_set_flags(pmd, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}
static inline pmd_t pmd_mkhuge(pmd_t pmd)
@@ -290,6 +297,44 @@ static inline pmd_t pmd_mknotpresent(pmd_t pmd)
return pmd_clear_flags(pmd, _PAGE_PRESENT);
}
+#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
+static inline int pte_soft_dirty(pte_t pte)
+{
+ return pte_flags(pte) & _PAGE_SOFT_DIRTY;
+}
+
+static inline int pmd_soft_dirty(pmd_t pmd)
+{
+ return pmd_flags(pmd) & _PAGE_SOFT_DIRTY;
+}
+
+static inline pte_t pte_mksoft_dirty(pte_t pte)
+{
+ return pte_set_flags(pte, _PAGE_SOFT_DIRTY);
+}
+
+static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
+{
+ return pmd_set_flags(pmd, _PAGE_SOFT_DIRTY);
+}
+
+static inline pte_t pte_file_clear_soft_dirty(pte_t pte)
+{
+ return pte_clear_flags(pte, _PAGE_SOFT_DIRTY);
+}
+
+static inline pte_t pte_file_mksoft_dirty(pte_t pte)
+{
+ return pte_set_flags(pte, _PAGE_SOFT_DIRTY);
+}
+
+static inline int pte_file_soft_dirty(pte_t pte)
+{
+ return pte_flags(pte) & _PAGE_SOFT_DIRTY;
+}
+
+#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */
+
/*
* Mask out unsupported bits in a present pgprot. Non-present pgprots
* can use those bits for other purposes, so leave them be.
@@ -384,13 +429,15 @@ pte_t *populate_extra_pte(unsigned long vaddr);
#endif /* __ASSEMBLY__ */
#ifdef CONFIG_X86_32
-# include "pgtable_32.h"
+# include <asm/pgtable_32.h>
#else
-# include "pgtable_64.h"
+# include <asm/pgtable_64.h>
#endif
#ifndef __ASSEMBLY__
#include <linux/mm_types.h>
+#include <linux/mmdebug.h>
+#include <linux/log2.h>
static inline int pte_none(pte_t pte)
{
@@ -405,9 +452,29 @@ static inline int pte_same(pte_t a, pte_t b)
static inline int pte_present(pte_t a)
{
+ return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE |
+ _PAGE_NUMA);
+}
+
+#define pte_present_nonuma pte_present_nonuma
+static inline int pte_present_nonuma(pte_t a)
+{
return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE);
}
+#define pte_accessible pte_accessible
+static inline bool pte_accessible(struct mm_struct *mm, pte_t a)
+{
+ if (pte_flags(a) & _PAGE_PRESENT)
+ return true;
+
+ if ((pte_flags(a) & (_PAGE_PROTNONE | _PAGE_NUMA)) &&
+ mm_tlb_flush_pending(mm))
+ return true;
+
+ return false;
+}
+
static inline int pte_hidden(pte_t pte)
{
return pte_flags(pte) & _PAGE_HIDDEN;
@@ -415,7 +482,14 @@ static inline int pte_hidden(pte_t pte)
static inline int pmd_present(pmd_t pmd)
{
- return pmd_flags(pmd) & _PAGE_PRESENT;
+ /*
+ * Checking for _PAGE_PSE is needed too because
+ * split_huge_page will temporarily clear the present bit (but
+ * the _PAGE_PSE flag will remain set at all times while the
+ * _PAGE_PRESENT bit is clear).
+ */
+ return pmd_flags(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE |
+ _PAGE_NUMA);
}
static inline int pmd_none(pmd_t pmd)
@@ -474,6 +548,11 @@ static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
static inline int pmd_bad(pmd_t pmd)
{
+#ifdef CONFIG_NUMA_BALANCING
+ /* pmd_numa check */
+ if ((pmd_flags(pmd) & (_PAGE_NUMA|_PAGE_PRESENT)) == _PAGE_NUMA)
+ return 0;
+#endif
return (pmd_flags(pmd) & ~_PAGE_USER) != _KERNPG_TABLE;
}
@@ -482,9 +561,6 @@ static inline unsigned long pages_to_mb(unsigned long npg)
return npg >> (20 - PAGE_SHIFT);
}
-#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
- remap_pfn_range(vma, vaddr, pfn, size, prot)
-
#if PAGETABLE_LEVELS > 2
static inline int pud_none(pud_t pud)
{
@@ -597,6 +673,8 @@ static inline int pgd_none(pgd_t pgd)
#ifndef __ASSEMBLY__
extern int direct_gbpages;
+void init_mem_mapping(void);
+void early_alloc_pgt_buf(void);
/* local pte updates need not use xchg for locking */
static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
@@ -763,6 +841,52 @@ static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
memcpy(dst, src, count * sizeof(pgd_t));
}
+#define PTE_SHIFT ilog2(PTRS_PER_PTE)
+static inline int page_level_shift(enum pg_level level)
+{
+ return (PAGE_SHIFT - PTE_SHIFT) + level * PTE_SHIFT;
+}
+static inline unsigned long page_level_size(enum pg_level level)
+{
+ return 1UL << page_level_shift(level);
+}
+static inline unsigned long page_level_mask(enum pg_level level)
+{
+ return ~(page_level_size(level) - 1);
+}
+
+/*
+ * The x86 doesn't have any external MMU info: the kernel page
+ * tables contain all the necessary information.
+ */
+static inline void update_mmu_cache(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep)
+{
+}
+static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
+ unsigned long addr, pmd_t *pmd)
+{
+}
+
+#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
+static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
+{
+ VM_BUG_ON(pte_present_nonuma(pte));
+ return pte_set_flags(pte, _PAGE_SWP_SOFT_DIRTY);
+}
+
+static inline int pte_swp_soft_dirty(pte_t pte)
+{
+ VM_BUG_ON(pte_present_nonuma(pte));
+ return pte_flags(pte) & _PAGE_SWP_SOFT_DIRTY;
+}
+
+static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
+{
+ VM_BUG_ON(pte_present_nonuma(pte));
+ return pte_clear_flags(pte, _PAGE_SWP_SOFT_DIRTY);
+}
+#endif
#include <asm-generic/pgtable.h>
#endif /* __ASSEMBLY__ */
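
The page_level_{shift,size,mask}() helpers added above are plain arithmetic; a user-space sketch (assuming 4 KiB pages and 512-entry tables, i.e. x86-64 values) shows the numbers they produce.

#include <stdio.h>

#define PAGE_SHIFT      12
#define PTE_SHIFT       9       /* ilog2(512 entries per table) */

enum pg_level { PG_LEVEL_NONE, PG_LEVEL_4K, PG_LEVEL_2M, PG_LEVEL_1G };

static int page_level_shift(enum pg_level level)
{
        return (PAGE_SHIFT - PTE_SHIFT) + level * PTE_SHIFT;
}

static unsigned long page_level_size(enum pg_level level)
{
        return 1UL << page_level_shift(level);
}

static unsigned long page_level_mask(enum pg_level level)
{
        return ~(page_level_size(level) - 1);
}

int main(void)
{
        /* 4K -> shift 12, 2M -> shift 21, 1G -> shift 30. */
        printf("2M size %#lx mask %#lx\n",
               page_level_size(PG_LEVEL_2M), page_level_mask(PG_LEVEL_2M));
        return 0;
}
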
diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
index 0c92113c4cb..9ee322103c6 100644
--- a/arch/x86/include/asm/pgtable_32.h
+++ b/arch/x86/include/asm/pgtable_32.h
@@ -66,12 +66,6 @@ do { \
__flush_tlb_one((vaddr)); \
} while (0)
-/*
- * The i386 doesn't have any external MMU info: the kernel page
- * tables contain all the necessary information.
- */
-#define update_mmu_cache(vma, address, ptep) do { } while (0)
-
#endif /* !__ASSEMBLY__ */
/*
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
index 975f709e09a..5be9063545d 100644
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
@@ -26,16 +26,16 @@ extern pgd_t init_level4_pgt[];
extern void paging_init(void);
#define pte_ERROR(e) \
- printk("%s:%d: bad pte %p(%016lx).\n", \
+ pr_err("%s:%d: bad pte %p(%016lx)\n", \
__FILE__, __LINE__, &(e), pte_val(e))
#define pmd_ERROR(e) \
- printk("%s:%d: bad pmd %p(%016lx).\n", \
+ pr_err("%s:%d: bad pmd %p(%016lx)\n", \
__FILE__, __LINE__, &(e), pmd_val(e))
#define pud_ERROR(e) \
- printk("%s:%d: bad pud %p(%016lx).\n", \
+ pr_err("%s:%d: bad pud %p(%016lx)\n", \
__FILE__, __LINE__, &(e), pud_val(e))
#define pgd_ERROR(e) \
- printk("%s:%d: bad pgd %p(%016lx).\n", \
+ pr_err("%s:%d: bad pgd %p(%016lx)\n", \
__FILE__, __LINE__, &(e), pgd_val(e))
struct mm_struct;
@@ -142,15 +142,13 @@ static inline int pgd_large(pgd_t pgd) { return 0; }
#define pte_offset_map(dir, address) pte_offset_kernel((dir), (address))
#define pte_unmap(pte) ((void)(pte))/* NOP */
-#define update_mmu_cache(vma, address, ptep) do { } while (0)
-
/* Encode and de-code a swap entry */
-#if _PAGE_BIT_FILE < _PAGE_BIT_PROTNONE
#define SWP_TYPE_BITS (_PAGE_BIT_FILE - _PAGE_BIT_PRESENT - 1)
-#define SWP_OFFSET_SHIFT (_PAGE_BIT_PROTNONE + 1)
+#ifdef CONFIG_NUMA_BALANCING
+/* Automatic NUMA balancing needs to be distinguishable from swap entries */
+#define SWP_OFFSET_SHIFT (_PAGE_BIT_PROTNONE + 2)
#else
-#define SWP_TYPE_BITS (_PAGE_BIT_PROTNONE - _PAGE_BIT_PRESENT - 1)
-#define SWP_OFFSET_SHIFT (_PAGE_BIT_FILE + 1)
+#define SWP_OFFSET_SHIFT (_PAGE_BIT_PROTNONE + 1)
#endif
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS)
@@ -182,6 +180,11 @@ extern void cleanup_highmap(void);
#define __HAVE_ARCH_PTE_SAME
+#define vmemmap ((struct page *)VMEMMAP_START)
+
+extern void init_extra_mapping_uc(unsigned long phys, unsigned long size);
+extern void init_extra_mapping_wb(unsigned long phys, unsigned long size);
+
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_X86_PGTABLE_64_H */
diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
index 766ea16fbbb..7166e25ecb5 100644
--- a/arch/x86/include/asm/pgtable_64_types.h
+++ b/arch/x86/include/asm/pgtable_64_types.h
@@ -1,6 +1,8 @@
#ifndef _ASM_X86_PGTABLE_64_DEFS_H
#define _ASM_X86_PGTABLE_64_DEFS_H
+#include <asm/sparsemem.h>
+
#ifndef __ASSEMBLY__
#include <linux/types.h>
@@ -56,8 +58,12 @@ typedef struct { pteval_t pte; } pte_t;
#define VMALLOC_START _AC(0xffffc90000000000, UL)
#define VMALLOC_END _AC(0xffffe8ffffffffff, UL)
#define VMEMMAP_START _AC(0xffffea0000000000, UL)
-#define MODULES_VADDR _AC(0xffffffffa0000000, UL)
+#define MODULES_VADDR (__START_KERNEL_map + KERNEL_IMAGE_SIZE)
#define MODULES_END _AC(0xffffffffff000000, UL)
#define MODULES_LEN (MODULES_END - MODULES_VADDR)
+#define ESPFIX_PGD_ENTRY _AC(-2, UL)
+#define ESPFIX_BASE_ADDR (ESPFIX_PGD_ENTRY << PGDIR_SHIFT)
+
+#define EARLY_DYNAMIC_PAGE_TABLES 64
#endif /* _ASM_X86_PGTABLE_64_DEFS_H */
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index 013286a10c2..f216963760e 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -16,15 +16,26 @@
#define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
#define _PAGE_BIT_PAT 7 /* on 4KB pages */
#define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
-#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
-#define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
-#define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
+#define _PAGE_BIT_SOFTW1 9 /* available for programmer */
+#define _PAGE_BIT_SOFTW2 10 /* " */
+#define _PAGE_BIT_SOFTW3 11 /* " */
#define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
-#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
-#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
-#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
+#define _PAGE_BIT_SPECIAL _PAGE_BIT_SOFTW1
+#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SOFTW1
+#define _PAGE_BIT_SPLITTING _PAGE_BIT_SOFTW2 /* only valid on a PSE pmd */
+#define _PAGE_BIT_IOMAP _PAGE_BIT_SOFTW2 /* flag used to indicate IO mapping */
+#define _PAGE_BIT_HIDDEN _PAGE_BIT_SOFTW3 /* hidden by kmemcheck */
+#define _PAGE_BIT_SOFT_DIRTY _PAGE_BIT_SOFTW3 /* software dirty tracking */
#define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
+/*
+ * Swap offsets on configurations that allow automatic NUMA balancing use the
+ * bits after _PAGE_BIT_GLOBAL. To uniquely distinguish NUMA hinting PTEs from
+ * swap entries, we use the first bit after _PAGE_BIT_GLOBAL and shrink the
+ * maximum possible swap space from 16TB to 8TB.
+ */
+#define _PAGE_BIT_NUMA (_PAGE_BIT_GLOBAL+1)
+
/* If _PAGE_BIT_PRESENT is clear, we use these: */
/* - if the user mapped it with PROT_NONE; pte_present gives true */
#define _PAGE_BIT_PROTNONE _PAGE_BIT_GLOBAL
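
The 16TB-to-8TB figure in the comment above is simply the loss of one swap-offset bit; a small sketch of the arithmetic follows (bit widths assumed for x86-64 with 4 KiB pages, not taken from this header).

#include <stdio.h>

int main(void)
{
        unsigned int page_shift  = 12;  /* 4 KiB pages */
        unsigned int offset_bits = 32;  /* offset field width before NUMA */

        unsigned long long before = (1ULL << offset_bits) << page_shift;
        unsigned long long after  = (1ULL << (offset_bits - 1)) << page_shift;

        /* Each bit taken from the offset halves the addressable swap. */
        printf("max swap: %llu TiB -> %llu TiB\n",
               before >> 40, after >> 40);      /* 16 -> 8 */
        return 0;
}
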
@@ -40,7 +51,7 @@
#define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
#define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
#define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
-#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
+#define _PAGE_SOFTW1 (_AT(pteval_t, 1) << _PAGE_BIT_SOFTW1)
#define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
#define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
#define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
@@ -55,6 +66,49 @@
#define _PAGE_HIDDEN (_AT(pteval_t, 0))
#endif
+/*
+ * The same hidden bit is used by kmemcheck, but since kmemcheck
+ * works on kernel pages while the soft-dirty engine works on
+ * user-space pages, they do not conflict with each other.
+ */
+
+#ifdef CONFIG_MEM_SOFT_DIRTY
+#define _PAGE_SOFT_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_SOFT_DIRTY)
+#else
+#define _PAGE_SOFT_DIRTY (_AT(pteval_t, 0))
+#endif
+
+/*
+ * _PAGE_NUMA distinguishes between a numa hinting minor fault and a page
+ * that is not present. The hinting fault gathers numa placement statistics
+ * (see pte_numa()). The bit is always zero when the PTE is not present.
+ *
+ * The bit picked must always be zero both when the pmd is present and
+ * when it is not present, so that we don't lose information when we
+ * set it while atomically clearing the present bit.
+ */
+#ifdef CONFIG_NUMA_BALANCING
+#define _PAGE_NUMA (_AT(pteval_t, 1) << _PAGE_BIT_NUMA)
+#else
+#define _PAGE_NUMA (_AT(pteval_t, 0))
+#endif
+
+/*
+ * Tracking the soft dirty bit when a page goes to swap is tricky.
+ * We need a bit which can be stored in the pte _and_ does not conflict
+ * with the swap entry format. On x86 bits 6 and 7 are *not* involved
+ * in swap entry computation, but bit 6 is used for nonlinear
+ * file mapping, so we borrow bit 7 for soft dirty tracking.
+ *
+ * Please note that this bit must be treated as a swap dirty page
+ * marker if and only if the PTE has the present bit clear!
+ */
+#ifdef CONFIG_MEM_SOFT_DIRTY
+#define _PAGE_SWP_SOFT_DIRTY _PAGE_PSE
+#else
+#define _PAGE_SWP_SOFT_DIRTY (_AT(pteval_t, 0))
+#endif
+
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
#else
@@ -71,8 +125,9 @@
/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK (PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT | \
- _PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY)
-#define _HPAGE_CHG_MASK (_PAGE_CHG_MASK | _PAGE_PSE)
+ _PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY | \
+ _PAGE_SOFT_DIRTY | _PAGE_NUMA)
+#define _HPAGE_CHG_MASK (_PAGE_CHG_MASK | _PAGE_PSE | _PAGE_NUMA)
#define _PAGE_CACHE_MASK (_PAGE_PCD | _PAGE_PWT)
#define _PAGE_CACHE_WB (0)
@@ -163,20 +218,15 @@
#ifdef CONFIG_X86_64
#define __PAGE_KERNEL_IDENT_LARGE_EXEC __PAGE_KERNEL_LARGE_EXEC
#else
-/*
- * For PDE_IDENT_ATTR include USER bit. As the PDE and PTE protection
- * bits are combined, this will alow user to access the high address mapped
- * VDSO in the presence of CONFIG_COMPAT_VDSO
- */
#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
-#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
+#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
#define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
#endif
#ifdef CONFIG_X86_32
-# include "pgtable_32_types.h"
+# include <asm/pgtable_32_types.h>
#else
-# include "pgtable_64_types.h"
+# include <asm/pgtable_64_types.h>
#endif
#ifndef __ASSEMBLY__
@@ -301,19 +351,16 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
/* Install a pte for a particular vaddr in kernel space. */
void set_pte_vaddr(unsigned long vaddr, pte_t pte);
-extern void native_pagetable_reserve(u64 start, u64 end);
#ifdef CONFIG_X86_32
-extern void native_pagetable_setup_start(pgd_t *base);
-extern void native_pagetable_setup_done(pgd_t *base);
+extern void native_pagetable_init(void);
#else
-#define native_pagetable_setup_start x86_init_pgd_noop
-#define native_pagetable_setup_done x86_init_pgd_noop
+#define native_pagetable_init paging_init
#endif
struct seq_file;
extern void arch_report_meminfo(struct seq_file *m);
-enum {
+enum pg_level {
PG_LEVEL_NONE,
PG_LEVEL_4K,
PG_LEVEL_2M,
@@ -334,7 +381,13 @@ static inline void update_page_count(int level, unsigned long pages) { }
* as a pte too.
*/
extern pte_t *lookup_address(unsigned long address, unsigned int *level);
-
+extern pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
+ unsigned int *level);
+extern phys_addr_t slow_virt_to_phys(void *__address);
+extern int kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address,
+ unsigned numpages, unsigned long page_flags);
+void kernel_unmap_pages_in_pgd(pgd_t *root, unsigned long address,
+ unsigned numpages);
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_X86_PGTABLE_DEFS_H */
diff --git a/arch/x86/include/asm/poll.h b/arch/x86/include/asm/poll.h
deleted file mode 100644
index c98509d3149..00000000000
--- a/arch/x86/include/asm/poll.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/poll.h>
diff --git a/arch/x86/include/asm/posix_types.h b/arch/x86/include/asm/posix_types.h
index bb7133dc155..f565f6dd59d 100644
--- a/arch/x86/include/asm/posix_types.h
+++ b/arch/x86/include/asm/posix_types.h
@@ -1,13 +1,5 @@
-#ifdef __KERNEL__
# ifdef CONFIG_X86_32
-# include "posix_types_32.h"
+# include <asm/posix_types_32.h>
# else
-# include "posix_types_64.h"
+# include <asm/posix_types_64.h>
# endif
-#else
-# ifdef __i386__
-# include "posix_types_32.h"
-# else
-# include "posix_types_64.h"
-# endif
-#endif
diff --git a/arch/x86/include/asm/posix_types_32.h b/arch/x86/include/asm/posix_types_32.h
deleted file mode 100644
index f7d9adf82e5..00000000000
--- a/arch/x86/include/asm/posix_types_32.h
+++ /dev/null
@@ -1,85 +0,0 @@
-#ifndef _ASM_X86_POSIX_TYPES_32_H
-#define _ASM_X86_POSIX_TYPES_32_H
-
-/*
- * This file is generally used by user-level software, so you need to
- * be a little careful about namespace pollution etc. Also, we cannot
- * assume GCC is being used.
- */
-
-typedef unsigned long __kernel_ino_t;
-typedef unsigned short __kernel_mode_t;
-typedef unsigned short __kernel_nlink_t;
-typedef long __kernel_off_t;
-typedef int __kernel_pid_t;
-typedef unsigned short __kernel_ipc_pid_t;
-typedef unsigned short __kernel_uid_t;
-typedef unsigned short __kernel_gid_t;
-typedef unsigned int __kernel_size_t;
-typedef int __kernel_ssize_t;
-typedef int __kernel_ptrdiff_t;
-typedef long __kernel_time_t;
-typedef long __kernel_suseconds_t;
-typedef long __kernel_clock_t;
-typedef int __kernel_timer_t;
-typedef int __kernel_clockid_t;
-typedef int __kernel_daddr_t;
-typedef char * __kernel_caddr_t;
-typedef unsigned short __kernel_uid16_t;
-typedef unsigned short __kernel_gid16_t;
-typedef unsigned int __kernel_uid32_t;
-typedef unsigned int __kernel_gid32_t;
-
-typedef unsigned short __kernel_old_uid_t;
-typedef unsigned short __kernel_old_gid_t;
-typedef unsigned short __kernel_old_dev_t;
-
-#ifdef __GNUC__
-typedef long long __kernel_loff_t;
-#endif
-
-typedef struct {
- int val[2];
-} __kernel_fsid_t;
-
-#if defined(__KERNEL__)
-
-#undef __FD_SET
-#define __FD_SET(fd,fdsetp) \
- asm volatile("btsl %1,%0": \
- "+m" (*(__kernel_fd_set *)(fdsetp)) \
- : "r" ((int)(fd)))
-
-#undef __FD_CLR
-#define __FD_CLR(fd,fdsetp) \
- asm volatile("btrl %1,%0": \
- "+m" (*(__kernel_fd_set *)(fdsetp)) \
- : "r" ((int) (fd)))
-
-#undef __FD_ISSET
-#define __FD_ISSET(fd,fdsetp) \
- (__extension__ \
- ({ \
- unsigned char __result; \
- asm volatile("btl %1,%2 ; setb %0" \
- : "=q" (__result) \
- : "r" ((int)(fd)), \
- "m" (*(__kernel_fd_set *)(fdsetp))); \
- __result; \
-}))
-
-#undef __FD_ZERO
-#define __FD_ZERO(fdsetp) \
-do { \
- int __d0, __d1; \
- asm volatile("cld ; rep ; stosl" \
- : "=m" (*(__kernel_fd_set *)(fdsetp)), \
- "=&c" (__d0), "=&D" (__d1) \
- : "a" (0), "1" (__FDSET_LONGS), \
- "2" ((__kernel_fd_set *)(fdsetp)) \
- : "memory"); \
-} while (0)
-
-#endif /* defined(__KERNEL__) */
-
-#endif /* _ASM_X86_POSIX_TYPES_32_H */
diff --git a/arch/x86/include/asm/posix_types_64.h b/arch/x86/include/asm/posix_types_64.h
deleted file mode 100644
index eb8d2d92b63..00000000000
--- a/arch/x86/include/asm/posix_types_64.h
+++ /dev/null
@@ -1,119 +0,0 @@
-#ifndef _ASM_X86_POSIX_TYPES_64_H
-#define _ASM_X86_POSIX_TYPES_64_H
-
-/*
- * This file is generally used by user-level software, so you need to
- * be a little careful about namespace pollution etc. Also, we cannot
- * assume GCC is being used.
- */
-
-typedef unsigned long __kernel_ino_t;
-typedef unsigned int __kernel_mode_t;
-typedef unsigned long __kernel_nlink_t;
-typedef long __kernel_off_t;
-typedef int __kernel_pid_t;
-typedef int __kernel_ipc_pid_t;
-typedef unsigned int __kernel_uid_t;
-typedef unsigned int __kernel_gid_t;
-typedef unsigned long __kernel_size_t;
-typedef long __kernel_ssize_t;
-typedef long __kernel_ptrdiff_t;
-typedef long __kernel_time_t;
-typedef long __kernel_suseconds_t;
-typedef long __kernel_clock_t;
-typedef int __kernel_timer_t;
-typedef int __kernel_clockid_t;
-typedef int __kernel_daddr_t;
-typedef char * __kernel_caddr_t;
-typedef unsigned short __kernel_uid16_t;
-typedef unsigned short __kernel_gid16_t;
-
-#ifdef __GNUC__
-typedef long long __kernel_loff_t;
-#endif
-
-typedef struct {
- int val[2];
-} __kernel_fsid_t;
-
-typedef unsigned short __kernel_old_uid_t;
-typedef unsigned short __kernel_old_gid_t;
-typedef __kernel_uid_t __kernel_uid32_t;
-typedef __kernel_gid_t __kernel_gid32_t;
-
-typedef unsigned long __kernel_old_dev_t;
-
-#ifdef __KERNEL__
-
-#undef __FD_SET
-static inline void __FD_SET(unsigned long fd, __kernel_fd_set *fdsetp)
-{
- unsigned long _tmp = fd / __NFDBITS;
- unsigned long _rem = fd % __NFDBITS;
- fdsetp->fds_bits[_tmp] |= (1UL<<_rem);
-}
-
-#undef __FD_CLR
-static inline void __FD_CLR(unsigned long fd, __kernel_fd_set *fdsetp)
-{
- unsigned long _tmp = fd / __NFDBITS;
- unsigned long _rem = fd % __NFDBITS;
- fdsetp->fds_bits[_tmp] &= ~(1UL<<_rem);
-}
-
-#undef __FD_ISSET
-static inline int __FD_ISSET(unsigned long fd, __const__ __kernel_fd_set *p)
-{
- unsigned long _tmp = fd / __NFDBITS;
- unsigned long _rem = fd % __NFDBITS;
- return (p->fds_bits[_tmp] & (1UL<<_rem)) != 0;
-}
-
-/*
- * This will unroll the loop for the normal constant cases (8 or 32 longs,
- * for 256 and 1024-bit fd_sets respectively)
- */
-#undef __FD_ZERO
-static inline void __FD_ZERO(__kernel_fd_set *p)
-{
- unsigned long *tmp = p->fds_bits;
- int i;
-
- if (__builtin_constant_p(__FDSET_LONGS)) {
- switch (__FDSET_LONGS) {
- case 32:
- tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
- tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
- tmp[ 8] = 0; tmp[ 9] = 0; tmp[10] = 0; tmp[11] = 0;
- tmp[12] = 0; tmp[13] = 0; tmp[14] = 0; tmp[15] = 0;
- tmp[16] = 0; tmp[17] = 0; tmp[18] = 0; tmp[19] = 0;
- tmp[20] = 0; tmp[21] = 0; tmp[22] = 0; tmp[23] = 0;
- tmp[24] = 0; tmp[25] = 0; tmp[26] = 0; tmp[27] = 0;
- tmp[28] = 0; tmp[29] = 0; tmp[30] = 0; tmp[31] = 0;
- return;
- case 16:
- tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
- tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
- tmp[ 8] = 0; tmp[ 9] = 0; tmp[10] = 0; tmp[11] = 0;
- tmp[12] = 0; tmp[13] = 0; tmp[14] = 0; tmp[15] = 0;
- return;
- case 8:
- tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
- tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
- return;
- case 4:
- tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
- return;
- }
- }
- i = __FDSET_LONGS;
- while (i) {
- i--;
- *tmp = 0;
- tmp++;
- }
-}
-
-#endif /* defined(__KERNEL__) */
-
-#endif /* _ASM_X86_POSIX_TYPES_64_H */
diff --git a/arch/x86/include/asm/prctl.h b/arch/x86/include/asm/prctl.h
deleted file mode 100644
index 3ac5032fae0..00000000000
--- a/arch/x86/include/asm/prctl.h
+++ /dev/null
@@ -1,9 +0,0 @@
-#ifndef _ASM_X86_PRCTL_H
-#define _ASM_X86_PRCTL_H
-
-#define ARCH_SET_GS 0x1001
-#define ARCH_SET_FS 0x1002
-#define ARCH_GET_FS 0x1003
-#define ARCH_GET_GS 0x1004
-
-#endif /* _ASM_X86_PRCTL_H */
diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
new file mode 100644
index 00000000000..7024c12f7bf
--- /dev/null
+++ b/arch/x86/include/asm/preempt.h
@@ -0,0 +1,111 @@
+#ifndef __ASM_PREEMPT_H
+#define __ASM_PREEMPT_H
+
+#include <asm/rmwcc.h>
+#include <asm/percpu.h>
+#include <linux/thread_info.h>
+
+DECLARE_PER_CPU(int, __preempt_count);
+
+/*
+ * We use the PREEMPT_NEED_RESCHED bit as an inverted NEED_RESCHED such
+ * that a decrement hitting 0 means we can and should reschedule.
+ */
+#define PREEMPT_ENABLED (0 + PREEMPT_NEED_RESCHED)
+
+/*
+ * We mask the PREEMPT_NEED_RESCHED bit so as not to confuse all current users
+ * that think a non-zero value indicates we cannot preempt.
+ */
+static __always_inline int preempt_count(void)
+{
+ return raw_cpu_read_4(__preempt_count) & ~PREEMPT_NEED_RESCHED;
+}
+
+static __always_inline void preempt_count_set(int pc)
+{
+ raw_cpu_write_4(__preempt_count, pc);
+}
+
+/*
+ * must be macros to avoid header recursion hell
+ */
+#define task_preempt_count(p) \
+ (task_thread_info(p)->saved_preempt_count & ~PREEMPT_NEED_RESCHED)
+
+#define init_task_preempt_count(p) do { \
+ task_thread_info(p)->saved_preempt_count = PREEMPT_DISABLED; \
+} while (0)
+
+#define init_idle_preempt_count(p, cpu) do { \
+ task_thread_info(p)->saved_preempt_count = PREEMPT_ENABLED; \
+ per_cpu(__preempt_count, (cpu)) = PREEMPT_ENABLED; \
+} while (0)
+
+/*
+ * We fold the NEED_RESCHED bit into the preempt count such that
+ * preempt_enable() can decrement and test for needing to reschedule with a
+ * single instruction.
+ *
+ * We invert the actual bit, so that when the decrement hits 0 we know we both
+ * need to resched (the bit is cleared) and can resched (no preempt count).
+ */
+
+static __always_inline void set_preempt_need_resched(void)
+{
+ raw_cpu_and_4(__preempt_count, ~PREEMPT_NEED_RESCHED);
+}
+
+static __always_inline void clear_preempt_need_resched(void)
+{
+ raw_cpu_or_4(__preempt_count, PREEMPT_NEED_RESCHED);
+}
+
+static __always_inline bool test_preempt_need_resched(void)
+{
+ return !(raw_cpu_read_4(__preempt_count) & PREEMPT_NEED_RESCHED);
+}
+
+/*
+ * The various preempt_count add/sub methods
+ */
+
+static __always_inline void __preempt_count_add(int val)
+{
+ raw_cpu_add_4(__preempt_count, val);
+}
+
+static __always_inline void __preempt_count_sub(int val)
+{
+ raw_cpu_add_4(__preempt_count, -val);
+}
+
+/*
+ * Because we keep PREEMPT_NEED_RESCHED set when we do _not_ need to
+ * reschedule, a decrement which hits zero means we have no preempt_count
+ * and should reschedule.
+ */
+static __always_inline bool __preempt_count_dec_and_test(void)
+{
+ GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), "e");
+}
+
+/*
+ * Returns true when we need to resched and can (barring IRQ state).
+ */
+static __always_inline bool should_resched(void)
+{
+ return unlikely(!raw_cpu_read_4(__preempt_count));
+}
+
+#ifdef CONFIG_PREEMPT
+ extern asmlinkage void ___preempt_schedule(void);
+# define __preempt_schedule() asm ("call ___preempt_schedule")
+ extern asmlinkage void preempt_schedule(void);
+# ifdef CONFIG_CONTEXT_TRACKING
+ extern asmlinkage void ___preempt_schedule_context(void);
+# define __preempt_schedule_context() asm ("call ___preempt_schedule_context")
+# endif
+#endif
+
+#endif /* __ASM_PREEMPT_H */
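A stand-alone sketch of the inverted PREEMPT_NEED_RESCHED trick introduced by this header may help. The user-space C below simulates the combined word; the bit position, variable names and the dec_and_test() helper are illustrative only, not the kernel's per-cpu implementation. It shows why a single decrement reaching zero means "preemption is allowed and a reschedule is pending".

/*
 * Stand-alone sketch of the inverted PREEMPT_NEED_RESCHED trick
 * (user-space simulation; bit position and names are illustrative).
 */
#include <stdio.h>

#define NEED_RESCHED_BIT 0x80000000u	/* inverted: set = no resched needed */

/* one preempt_disable() level, no reschedule requested */
static unsigned int count = 1 + NEED_RESCHED_BIT;

static void set_need_resched(void)	{ count &= ~NEED_RESCHED_BIT; }
static void clear_need_resched(void)	{ count |= NEED_RESCHED_BIT; }

static int dec_and_test(void)
{
	/*
	 * A single decrement reaches zero only when the count part is 1
	 * *and* the inverted bit is clear, i.e. preemption is allowed and
	 * a reschedule is pending -- the property the kernel exploits with
	 * one "decl; je" on the per-cpu __preempt_count word.
	 */
	return --count == 0;
}

int main(void)
{
	set_need_resched();
	printf("should reschedule: %d\n", dec_and_test());	/* prints 1 */
	clear_need_resched();
	return 0;
}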
diff --git a/arch/x86/include/asm/processor-flags.h b/arch/x86/include/asm/processor-flags.h
index f8ab3eaad12..39fb618e221 100644
--- a/arch/x86/include/asm/processor-flags.h
+++ b/arch/x86/include/asm/processor-flags.h
@@ -1,103 +1,11 @@
#ifndef _ASM_X86_PROCESSOR_FLAGS_H
#define _ASM_X86_PROCESSOR_FLAGS_H
-/* Various flags defined: can be included from assembler. */
-/*
- * EFLAGS bits
- */
-#define X86_EFLAGS_CF 0x00000001 /* Carry Flag */
-#define X86_EFLAGS_BIT1 0x00000002 /* Bit 1 - always on */
-#define X86_EFLAGS_PF 0x00000004 /* Parity Flag */
-#define X86_EFLAGS_AF 0x00000010 /* Auxiliary carry Flag */
-#define X86_EFLAGS_ZF 0x00000040 /* Zero Flag */
-#define X86_EFLAGS_SF 0x00000080 /* Sign Flag */
-#define X86_EFLAGS_TF 0x00000100 /* Trap Flag */
-#define X86_EFLAGS_IF 0x00000200 /* Interrupt Flag */
-#define X86_EFLAGS_DF 0x00000400 /* Direction Flag */
-#define X86_EFLAGS_OF 0x00000800 /* Overflow Flag */
-#define X86_EFLAGS_IOPL 0x00003000 /* IOPL mask */
-#define X86_EFLAGS_NT 0x00004000 /* Nested Task */
-#define X86_EFLAGS_RF 0x00010000 /* Resume Flag */
-#define X86_EFLAGS_VM 0x00020000 /* Virtual Mode */
-#define X86_EFLAGS_AC 0x00040000 /* Alignment Check */
-#define X86_EFLAGS_VIF 0x00080000 /* Virtual Interrupt Flag */
-#define X86_EFLAGS_VIP 0x00100000 /* Virtual Interrupt Pending */
-#define X86_EFLAGS_ID 0x00200000 /* CPUID detection flag */
+#include <uapi/asm/processor-flags.h>
-/*
- * Basic CPU control in CR0
- */
-#define X86_CR0_PE 0x00000001 /* Protection Enable */
-#define X86_CR0_MP 0x00000002 /* Monitor Coprocessor */
-#define X86_CR0_EM 0x00000004 /* Emulation */
-#define X86_CR0_TS 0x00000008 /* Task Switched */
-#define X86_CR0_ET 0x00000010 /* Extension Type */
-#define X86_CR0_NE 0x00000020 /* Numeric Error */
-#define X86_CR0_WP 0x00010000 /* Write Protect */
-#define X86_CR0_AM 0x00040000 /* Alignment Mask */
-#define X86_CR0_NW 0x20000000 /* Not Write-through */
-#define X86_CR0_CD 0x40000000 /* Cache Disable */
-#define X86_CR0_PG 0x80000000 /* Paging */
-
-/*
- * Paging options in CR3
- */
-#define X86_CR3_PWT 0x00000008 /* Page Write Through */
-#define X86_CR3_PCD 0x00000010 /* Page Cache Disable */
-
-/*
- * Intel CPU features in CR4
- */
-#define X86_CR4_VME 0x00000001 /* enable vm86 extensions */
-#define X86_CR4_PVI 0x00000002 /* virtual interrupts flag enable */
-#define X86_CR4_TSD 0x00000004 /* disable time stamp at ipl 3 */
-#define X86_CR4_DE 0x00000008 /* enable debugging extensions */
-#define X86_CR4_PSE 0x00000010 /* enable page size extensions */
-#define X86_CR4_PAE 0x00000020 /* enable physical address extensions */
-#define X86_CR4_MCE 0x00000040 /* Machine check enable */
-#define X86_CR4_PGE 0x00000080 /* enable global pages */
-#define X86_CR4_PCE 0x00000100 /* enable performance counters at ipl 3 */
-#define X86_CR4_OSFXSR 0x00000200 /* enable fast FPU save and restore */
-#define X86_CR4_OSXMMEXCPT 0x00000400 /* enable unmasked SSE exceptions */
-#define X86_CR4_VMXE 0x00002000 /* enable VMX virtualization */
-#define X86_CR4_RDWRGSFS 0x00010000 /* enable RDWRGSFS support */
-#define X86_CR4_OSXSAVE 0x00040000 /* enable xsave and xrestore */
-#define X86_CR4_SMEP 0x00100000 /* enable SMEP support */
-
-/*
- * x86-64 Task Priority Register, CR8
- */
-#define X86_CR8_TPR 0x0000000F /* task priority register */
-
-/*
- * AMD and Transmeta use MSRs for configuration; see <asm/msr-index.h>
- */
-
-/*
- * NSC/Cyrix CPU configuration register indexes
- */
-#define CX86_PCR0 0x20
-#define CX86_GCR 0xb8
-#define CX86_CCR0 0xc0
-#define CX86_CCR1 0xc1
-#define CX86_CCR2 0xc2
-#define CX86_CCR3 0xc3
-#define CX86_CCR4 0xe8
-#define CX86_CCR5 0xe9
-#define CX86_CCR6 0xea
-#define CX86_CCR7 0xeb
-#define CX86_PCR1 0xf0
-#define CX86_DIR0 0xfe
-#define CX86_DIR1 0xff
-#define CX86_ARR_BASE 0xc4
-#define CX86_RCR_BASE 0xdc
-
-#ifdef __KERNEL__
#ifdef CONFIG_VM86
#define X86_VM_MASK X86_EFLAGS_VM
#else
#define X86_VM_MASK 0 /* No VM86 support */
#endif
-#endif
-
#endif /* _ASM_X86_PROCESSOR_FLAGS_H */
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index f7c89e231c6..a4ea02351f4 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -14,21 +14,29 @@ struct mm_struct;
#include <asm/sigcontext.h>
#include <asm/current.h>
#include <asm/cpufeature.h>
-#include <asm/system.h>
#include <asm/page.h>
#include <asm/pgtable_types.h>
#include <asm/percpu.h>
#include <asm/msr.h>
#include <asm/desc_defs.h>
#include <asm/nops.h>
+#include <asm/special_insns.h>
#include <linux/personality.h>
#include <linux/cpumask.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/math64.h>
-#include <linux/init.h>
#include <linux/err.h>
+#include <linux/irqflags.h>
+
+/*
+ * We handle most unaligned accesses in hardware. On the other hand
+ * unaligned DMA can be quite expensive on some Nehalem processors.
+ *
+ * Based on this we disable the IP header alignment in network drivers.
+ */
+#define NET_IP_ALIGN 0
#define HBP_NUM 4
/*
@@ -52,6 +60,20 @@ static inline void *current_text_addr(void)
# define ARCH_MIN_MMSTRUCT_ALIGN 0
#endif
+enum tlb_infos {
+ ENTRIES,
+ NR_INFO
+};
+
+extern u16 __read_mostly tlb_lli_4k[NR_INFO];
+extern u16 __read_mostly tlb_lli_2m[NR_INFO];
+extern u16 __read_mostly tlb_lli_4m[NR_INFO];
+extern u16 __read_mostly tlb_lld_4k[NR_INFO];
+extern u16 __read_mostly tlb_lld_2m[NR_INFO];
+extern u16 __read_mostly tlb_lld_4m[NR_INFO];
+extern u16 __read_mostly tlb_lld_1g[NR_INFO];
+extern s8 __read_mostly tlb_flushall_shift;
+
/*
* CPU type and hardware bug flags. Kept separately for each CPU.
* Members of this structure are referenced in head.S, so think twice
@@ -67,13 +89,9 @@ struct cpuinfo_x86 {
char wp_works_ok; /* It doesn't on 386's */
/* Problems on some 486Dx4's and old 386's: */
- char hlt_works_ok;
- char hard_math;
char rfu;
- char fdiv_bug;
- char f00f_bug;
- char coma_bug;
char pad0;
+ char pad1;
#else
/* Number of 4K pages in DTLB/ITLB combined(in pages): */
int x86_tlbsize;
@@ -86,7 +104,7 @@ struct cpuinfo_x86 {
__u32 extended_cpuid_level;
/* Maximum supported CPUID level, -1=no CPUID: */
int cpuid_level;
- __u32 x86_capability[NCAPINTS];
+ __u32 x86_capability[NCAPINTS + NBUGINTS];
char x86_vendor_id[16];
char x86_model_id[64];
/* in KB - valid for CPUS which support this call: */
@@ -143,32 +161,31 @@ DECLARE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
extern const struct seq_operations cpuinfo_op;
-static inline int hlt_works(int cpu)
-{
-#ifdef CONFIG_X86_32
- return cpu_data(cpu).hlt_works_ok;
-#else
- return 1;
-#endif
-}
-
#define cache_line_size() (boot_cpu_data.x86_cache_alignment)
extern void cpu_detect(struct cpuinfo_x86 *c);
-
-extern struct pt_regs *idle_regs(struct pt_regs *);
+extern void fpu_detect(struct cpuinfo_x86 *c);
extern void early_cpu_init(void);
extern void identify_boot_cpu(void);
extern void identify_secondary_cpu(struct cpuinfo_x86 *);
extern void print_cpu_info(struct cpuinfo_x86 *);
+void print_cpu_msr(struct cpuinfo_x86 *);
extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c);
extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
-extern unsigned short num_cache_leaves;
+extern void init_amd_cacheinfo(struct cpuinfo_x86 *c);
extern void detect_extended_topology(struct cpuinfo_x86 *c);
extern void detect_ht(struct cpuinfo_x86 *c);
+#ifdef CONFIG_X86_32
+extern int have_cpuid_p(void);
+#else
+static inline int have_cpuid_p(void)
+{
+ return 1;
+}
+#endif
static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
unsigned int *ecx, unsigned int *edx)
{
@@ -353,6 +370,20 @@ struct ymmh_struct {
u32 ymmh_space[64];
};
+/* We don't support LWP yet: */
+struct lwp_struct {
+ u8 reserved[128];
+};
+
+struct bndregs_struct {
+ u64 bndregs[8];
+} __packed;
+
+struct bndcsr_struct {
+ u64 cfg_reg_u;
+ u64 status_reg;
+} __packed;
+
struct xsave_hdr_struct {
u64 xstate_bv;
u64 reserved1[2];
@@ -363,6 +394,9 @@ struct xsave_struct {
struct i387_fxsave_struct i387;
struct xsave_hdr_struct xsave_hdr;
struct ymmh_struct ymmh;
+ struct lwp_struct lwp;
+ struct bndregs_struct bndregs;
+ struct bndcsr_struct bndcsr;
/* new processor state extensions will go here */
} __attribute__ ((packed, aligned (64)));
@@ -374,6 +408,8 @@ union thread_xstate {
};
struct fpu {
+ unsigned int last_cpu;
+ unsigned int has_fpu;
union thread_xstate *state;
};
@@ -393,12 +429,11 @@ union irq_stack_union {
};
};
-DECLARE_PER_CPU_FIRST(union irq_stack_union, irq_stack_union);
+DECLARE_PER_CPU_FIRST(union irq_stack_union, irq_stack_union) __visible;
DECLARE_INIT_PER_CPU(irq_stack_union);
DECLARE_PER_CPU(char *, irq_stack_ptr);
DECLARE_PER_CPU(unsigned int, irq_count);
-extern unsigned long kernel_eflags;
extern asmlinkage void ignore_sysret(void);
#else /* X86_64 */
#ifdef CONFIG_CC_STACKPROTECTOR
@@ -414,6 +449,15 @@ struct stack_canary {
};
DECLARE_PER_CPU_ALIGNED(struct stack_canary, stack_canary);
#endif
+/*
+ * per-CPU IRQ handling stacks
+ */
+struct irq_stack {
+ u32 stack[THREAD_SIZE/sizeof(u32)];
+} __aligned(THREAD_SIZE);
+
+DECLARE_PER_CPU(struct irq_stack *, hardirq_stack);
+DECLARE_PER_CPU(struct irq_stack *, softirq_stack);
#endif /* X86_64 */
extern unsigned int xstate_size;
@@ -451,10 +495,9 @@ struct thread_struct {
unsigned long ptrace_dr7;
/* Fault info: */
unsigned long cr2;
- unsigned long trap_no;
+ unsigned long trap_nr;
unsigned long error_code;
/* floating point and extended processor state */
- unsigned long has_fpu;
struct fpu fpu;
#ifdef CONFIG_X86_32
/* Virtual 86 mode info */
@@ -471,63 +514,17 @@ struct thread_struct {
unsigned long iopl;
/* Max allowed port in the bitmap, in bytes: */
unsigned io_bitmap_max;
+ /*
+ * fpu_counter contains the number of consecutive context switches
+ * during which the FPU is used. If this is over a threshold, the lazy
+ * FPU saving becomes unlazy to avoid the trap overhead. This is an
+ * unsigned char so that after 256 switches the counter wraps and the
+ * behavior turns lazy again; this deals with bursty apps that only
+ * use the FPU for a short time.
+ */
+ unsigned char fpu_counter;
};
-static inline unsigned long native_get_debugreg(int regno)
-{
- unsigned long val = 0; /* Damn you, gcc! */
-
- switch (regno) {
- case 0:
- asm("mov %%db0, %0" :"=r" (val));
- break;
- case 1:
- asm("mov %%db1, %0" :"=r" (val));
- break;
- case 2:
- asm("mov %%db2, %0" :"=r" (val));
- break;
- case 3:
- asm("mov %%db3, %0" :"=r" (val));
- break;
- case 6:
- asm("mov %%db6, %0" :"=r" (val));
- break;
- case 7:
- asm("mov %%db7, %0" :"=r" (val));
- break;
- default:
- BUG();
- }
- return val;
-}
-
-static inline void native_set_debugreg(int regno, unsigned long value)
-{
- switch (regno) {
- case 0:
- asm("mov %0, %%db0" ::"r" (value));
- break;
- case 1:
- asm("mov %0, %%db1" ::"r" (value));
- break;
- case 2:
- asm("mov %0, %%db2" ::"r" (value));
- break;
- case 3:
- asm("mov %0, %%db3" ::"r" (value));
- break;
- case 6:
- asm("mov %0, %%db6" ::"r" (value));
- break;
- case 7:
- asm("mov %0, %%db7" ::"r" (value));
- break;
- default:
- BUG();
- }
-}
-
/*
* Set IOPL bits in EFLAGS from given mask
*/
@@ -573,14 +570,6 @@ static inline void native_swapgs(void)
#define __cpuid native_cpuid
#define paravirt_enabled() 0
-/*
- * These special macros can be used to get or set a debugging register
- */
-#define get_debugreg(var, register) \
- (var) = native_get_debugreg(register)
-#define set_debugreg(value, register) \
- native_set_debugreg(register, value)
-
static inline void load_sp0(struct tss_struct *tss,
struct thread_struct *thread)
{
@@ -596,13 +585,16 @@ static inline void load_sp0(struct tss_struct *tss,
* enable), so that any CPU's that boot up
* after us can get the correct flags.
*/
-extern unsigned long mmu_cr4_features;
+extern unsigned long mmu_cr4_features;
+extern u32 *trampoline_cr4_features;
static inline void set_in_cr4(unsigned long mask)
{
unsigned long cr4;
mmu_cr4_features |= mask;
+ if (trampoline_cr4_features)
+ *trampoline_cr4_features = mmu_cr4_features;
cr4 = read_cr4();
cr4 |= mask;
write_cr4(cr4);
@@ -613,6 +605,8 @@ static inline void clear_in_cr4(unsigned long mask)
unsigned long cr4;
mmu_cr4_features &= ~mask;
+ if (trampoline_cr4_features)
+ *trampoline_cr4_features = mmu_cr4_features;
cr4 = read_cr4();
cr4 &= ~mask;
write_cr4(cr4);
@@ -623,17 +617,9 @@ typedef struct {
} mm_segment_t;
-/*
- * create a kernel thread without removing it from tasklists
- */
-extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
-
/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);
-/* Prepare to copy thread state - unlazy all lazy state */
-extern void prepare_to_copy(struct task_struct *tsk);
-
unsigned long get_wchan(struct task_struct *p);
/*
@@ -715,41 +701,29 @@ static inline void sync_core(void)
{
int tmp;
-#if defined(CONFIG_M386) || defined(CONFIG_M486)
- if (boot_cpu_data.x86 < 5)
- /* There is no speculative execution.
- * jmp is a barrier to prefetching. */
- asm volatile("jmp 1f\n1:\n" ::: "memory");
- else
+#ifdef CONFIG_M486
+ /*
+ * Do a CPUID if available, otherwise do a jump. The jump
+ * can conveniently enough be the jump around CPUID.
+ */
+ asm volatile("cmpl %2,%1\n\t"
+ "jl 1f\n\t"
+ "cpuid\n"
+ "1:"
+ : "=a" (tmp)
+ : "rm" (boot_cpu_data.cpuid_level), "ri" (0), "0" (1)
+ : "ebx", "ecx", "edx", "memory");
+#else
+ /*
+ * CPUID is a barrier to speculative execution.
+ * Prefetched instructions are automatically
+ * invalidated when modified.
+ */
+ asm volatile("cpuid"
+ : "=a" (tmp)
+ : "0" (1)
+ : "ebx", "ecx", "edx", "memory");
#endif
- /* cpuid is a barrier to speculative execution.
- * Prefetched instructions are automatically
- * invalidated when modified. */
- asm volatile("cpuid" : "=a" (tmp) : "0" (1)
- : "ebx", "ecx", "edx", "memory");
-}
-
-static inline void __monitor(const void *eax, unsigned long ecx,
- unsigned long edx)
-{
- /* "monitor %eax, %ecx, %edx;" */
- asm volatile(".byte 0x0f, 0x01, 0xc8;"
- :: "a" (eax), "c" (ecx), "d"(edx));
-}
-
-static inline void __mwait(unsigned long eax, unsigned long ecx)
-{
- /* "mwait %eax, %ecx;" */
- asm volatile(".byte 0x0f, 0x01, 0xc9;"
- :: "a" (eax), "c" (ecx));
-}
-
-static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
-{
- trace_hardirqs_on();
- /* "mwait %eax, %ecx;" */
- asm volatile("sti; .byte 0x0f, 0x01, 0xc9;"
- :: "a" (eax), "c" (ecx));
}
extern void select_idle_routine(const struct cpuinfo_x86 *c);
@@ -759,12 +733,13 @@ extern unsigned long boot_option_idle_override;
extern bool amd_e400_c1e_detected;
enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_NOMWAIT,
- IDLE_POLL, IDLE_FORCE_MWAIT};
+ IDLE_POLL};
extern void enable_sep_cpu(void);
extern int sysenter_setup(void);
extern void early_trap_init(void);
+void early_trap_pf_init(void);
/* Defined in head.S */
extern struct desc_ptr early_gdt_descr;
@@ -796,6 +771,8 @@ static inline void update_debugctlmsr(unsigned long debugctlmsr)
wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);
}
+extern void set_task_blockstep(struct task_struct *task, bool on);
+
/*
* from system description table in BIOS. Mostly for MCA use, but
* others may find it useful:
@@ -925,9 +902,9 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
#define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
0xc0000000 : 0xFFFFe000)
-#define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
+#define TASK_SIZE (test_thread_flag(TIF_ADDR32) ? \
IA32_PAGE_OFFSET : TASK_SIZE_MAX)
-#define TASK_SIZE_OF(child) ((test_tsk_thread_flag(child, TIF_IA32)) ? \
+#define TASK_SIZE_OF(child) ((test_tsk_thread_flag(child, TIF_ADDR32)) ? \
IA32_PAGE_OFFSET : TASK_SIZE_MAX)
#define STACK_TOP TASK_SIZE
@@ -949,6 +926,12 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
#define task_pt_regs(tsk) ((struct pt_regs *)(tsk)->thread.sp0 - 1)
extern unsigned long KSTK_ESP(struct task_struct *task);
+
+/*
+ * User space RSP while inside the SYSCALL fast path
+ */
+DECLARE_PER_CPU(unsigned long, old_rsp);
+
#endif /* CONFIG_X86_64 */
extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
@@ -969,55 +952,33 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
extern int get_tsc_mode(unsigned long adr);
extern int set_tsc_mode(unsigned int val);
-extern int amd_get_nb_id(int cpu);
-
-struct aperfmperf {
- u64 aperf, mperf;
-};
+extern u16 amd_get_nb_id(int cpu);
-static inline void get_aperfmperf(struct aperfmperf *am)
+static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves)
{
- WARN_ON_ONCE(!boot_cpu_has(X86_FEATURE_APERFMPERF));
-
- rdmsrl(MSR_IA32_APERF, am->aperf);
- rdmsrl(MSR_IA32_MPERF, am->mperf);
-}
+ uint32_t base, eax, signature[3];
-#define APERFMPERF_SHIFT 10
-
-static inline
-unsigned long calc_aperfmperf_ratio(struct aperfmperf *old,
- struct aperfmperf *new)
-{
- u64 aperf = new->aperf - old->aperf;
- u64 mperf = new->mperf - old->mperf;
- unsigned long ratio = aperf;
+ for (base = 0x40000000; base < 0x40010000; base += 0x100) {
+ cpuid(base, &eax, &signature[0], &signature[1], &signature[2]);
- mperf >>= APERFMPERF_SHIFT;
- if (mperf)
- ratio = div64_u64(aperf, mperf);
+ if (!memcmp(sig, signature, 12) &&
+ (leaves == 0 || ((eax - base) >= leaves)))
+ return base;
+ }
- return ratio;
+ return 0;
}
-/*
- * AMD errata checking
- */
-#ifdef CONFIG_CPU_SUP_AMD
-extern const int amd_erratum_383[];
-extern const int amd_erratum_400[];
-extern bool cpu_has_amd_erratum(const int *);
-
-#define AMD_LEGACY_ERRATUM(...) { -1, __VA_ARGS__, 0 }
-#define AMD_OSVW_ERRATUM(osvw_id, ...) { osvw_id, __VA_ARGS__, 0 }
-#define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \
- ((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end))
-#define AMD_MODEL_RANGE_FAMILY(range) (((range) >> 24) & 0xff)
-#define AMD_MODEL_RANGE_START(range) (((range) >> 12) & 0xfff)
-#define AMD_MODEL_RANGE_END(range) ((range) & 0xfff)
+extern unsigned long arch_align_stack(unsigned long sp);
+extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
+void default_idle(void);
+#ifdef CONFIG_XEN
+bool xen_set_default_idle(void);
#else
-#define cpu_has_amd_erratum(x) (false)
-#endif /* CONFIG_CPU_SUP_AMD */
+#define xen_set_default_idle 0
+#endif
+void stop_this_cpu(void *dummy);
+void df_debug(struct pt_regs *regs, long error_code);
#endif /* _ASM_X86_PROCESSOR_H */
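The new hypervisor_cpuid_base() helper scans the hypervisor CPUID range 0x40000000-0x4000ffff in 0x100 steps and compares the 12-byte vendor signature returned in EBX/ECX/EDX. A user-space sketch of the same scan is below; the local cpuid() wrapper and the main() probe are illustrative (the "KVMKVMKVM\0\0\0" string is the published KVM signature), not kernel code.

/*
 * User-space sketch of the CPUID 0x40000000 signature scan performed by
 * hypervisor_cpuid_base().  The cpuid() wrapper and the KVM probe in
 * main() are illustrative, not kernel code.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void cpuid(uint32_t leaf, uint32_t *a, uint32_t *b,
		  uint32_t *c, uint32_t *d)
{
	__asm__ volatile("cpuid"
			 : "=a"(*a), "=b"(*b), "=c"(*c), "=d"(*d)
			 : "0"(leaf));
}

static uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves)
{
	uint32_t base, eax, signature[3];

	for (base = 0x40000000; base < 0x40010000; base += 0x100) {
		cpuid(base, &eax, &signature[0], &signature[1], &signature[2]);

		/* EBX/ECX/EDX carry the 12-byte vendor signature */
		if (!memcmp(sig, signature, 12) &&
		    (leaves == 0 || ((eax - base) >= leaves)))
			return base;
	}
	return 0;
}

int main(void)
{
	/* "KVMKVMKVM\0\0\0" is the published KVM signature */
	uint32_t base = hypervisor_cpuid_base("KVMKVMKVM\0\0\0", 0);

	printf("KVM CPUID base: %#x\n", base);	/* 0 when not running on KVM */
	return 0;
}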
diff --git a/arch/x86/include/asm/prom.h b/arch/x86/include/asm/prom.h
index 644dd885f05..fbeb06ed0ea 100644
--- a/arch/x86/include/asm/prom.h
+++ b/arch/x86/include/asm/prom.h
@@ -21,14 +21,13 @@
#include <asm/irq.h>
#include <linux/atomic.h>
#include <asm/setup.h>
-#include <asm/irq_controller.h>
#ifdef CONFIG_OF
extern int of_ioapic;
extern u64 initial_dtb;
extern void add_dtb(u64 data);
extern void x86_add_irq_domains(void);
-void __cpuinit x86_of_pci_init(void);
+void x86_of_pci_init(void);
void x86_dtb_init(void);
#else
static inline void add_dtb(u64 data) { }
@@ -40,19 +39,5 @@ static inline void x86_dtb_init(void) { }
extern char cmd_line[COMMAND_LINE_SIZE];
-#define pci_address_to_pio pci_address_to_pio
-unsigned long pci_address_to_pio(phys_addr_t addr);
-
-/**
- * irq_dispose_mapping - Unmap an interrupt
- * @virq: linux virq number of the interrupt to unmap
- *
- * FIXME: We really should implement proper virq handling like power,
- * but that's going to be major surgery.
- */
-static inline void irq_dispose_mapping(unsigned int virq) { }
-
-#define HAVE_ARCH_DEVTREE_FIXUPS
-
#endif /* __ASSEMBLY__ */
#endif
diff --git a/arch/x86/include/asm/proto.h b/arch/x86/include/asm/proto.h
index 6f414ed8862..a90f8972dad 100644
--- a/arch/x86/include/asm/proto.h
+++ b/arch/x86/include/asm/proto.h
@@ -5,8 +5,6 @@
/* misc architecture specific prototypes */
-void early_idt_handler(void);
-
void system_call(void);
void syscall_init(void);
@@ -14,8 +12,6 @@ void ia32_syscall(void);
void ia32_cstar_target(void);
void ia32_sysenter_target(void);
-void syscall32_cpu_init(void);
-
void x86_configure_nx(void);
void x86_report_nx(void);
diff --git a/arch/x86/include/asm/ptrace-abi.h b/arch/x86/include/asm/ptrace-abi.h
deleted file mode 100644
index 7b0a55a8885..00000000000
--- a/arch/x86/include/asm/ptrace-abi.h
+++ /dev/null
@@ -1,87 +0,0 @@
-#ifndef _ASM_X86_PTRACE_ABI_H
-#define _ASM_X86_PTRACE_ABI_H
-
-#ifdef __i386__
-
-#define EBX 0
-#define ECX 1
-#define EDX 2
-#define ESI 3
-#define EDI 4
-#define EBP 5
-#define EAX 6
-#define DS 7
-#define ES 8
-#define FS 9
-#define GS 10
-#define ORIG_EAX 11
-#define EIP 12
-#define CS 13
-#define EFL 14
-#define UESP 15
-#define SS 16
-#define FRAME_SIZE 17
-
-#else /* __i386__ */
-
-#if defined(__ASSEMBLY__) || defined(__FRAME_OFFSETS)
-#define R15 0
-#define R14 8
-#define R13 16
-#define R12 24
-#define RBP 32
-#define RBX 40
-/* arguments: interrupts/non tracing syscalls only save up to here*/
-#define R11 48
-#define R10 56
-#define R9 64
-#define R8 72
-#define RAX 80
-#define RCX 88
-#define RDX 96
-#define RSI 104
-#define RDI 112
-#define ORIG_RAX 120 /* = ERROR */
-/* end of arguments */
-/* cpu exception frame or undefined in case of fast syscall. */
-#define RIP 128
-#define CS 136
-#define EFLAGS 144
-#define RSP 152
-#define SS 160
-#define ARGOFFSET R11
-#endif /* __ASSEMBLY__ */
-
-/* top of stack page */
-#define FRAME_SIZE 168
-
-#endif /* !__i386__ */
-
-/* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */
-#define PTRACE_GETREGS 12
-#define PTRACE_SETREGS 13
-#define PTRACE_GETFPREGS 14
-#define PTRACE_SETFPREGS 15
-#define PTRACE_GETFPXREGS 18
-#define PTRACE_SETFPXREGS 19
-
-#define PTRACE_OLDSETOPTIONS 21
-
-/* only useful for access 32bit programs / kernels */
-#define PTRACE_GET_THREAD_AREA 25
-#define PTRACE_SET_THREAD_AREA 26
-
-#ifdef __x86_64__
-# define PTRACE_ARCH_PRCTL 30
-#endif
-
-#define PTRACE_SYSEMU 31
-#define PTRACE_SYSEMU_SINGLESTEP 32
-
-#define PTRACE_SINGLEBLOCK 33 /* resume execution until next branch */
-
-#ifndef __ASSEMBLY__
-#include <linux/types.h>
-#endif
-
-#endif /* _ASM_X86_PTRACE_ABI_H */
diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
index 35664547125..6205f0c434d 100644
--- a/arch/x86/include/asm/ptrace.h
+++ b/arch/x86/include/asm/ptrace.h
@@ -1,44 +1,12 @@
#ifndef _ASM_X86_PTRACE_H
#define _ASM_X86_PTRACE_H
-#include <linux/compiler.h> /* For __user */
-#include <asm/ptrace-abi.h>
-#include <asm/processor-flags.h>
-
-#ifdef __KERNEL__
#include <asm/segment.h>
#include <asm/page_types.h>
-#endif
+#include <uapi/asm/ptrace.h>
#ifndef __ASSEMBLY__
-
#ifdef __i386__
-/* this struct defines the way the registers are stored on the
- stack during a system call. */
-
-#ifndef __KERNEL__
-
-struct pt_regs {
- long ebx;
- long ecx;
- long edx;
- long esi;
- long edi;
- long ebp;
- long eax;
- int xds;
- int xes;
- int xfs;
- int xgs;
- long orig_eax;
- long eip;
- int xcs;
- long eflags;
- long esp;
- int xss;
-};
-
-#else /* __KERNEL__ */
struct pt_regs {
unsigned long bx;
@@ -60,42 +28,8 @@ struct pt_regs {
unsigned long ss;
};
-#endif /* __KERNEL__ */
-
#else /* __i386__ */
-#ifndef __KERNEL__
-
-struct pt_regs {
- unsigned long r15;
- unsigned long r14;
- unsigned long r13;
- unsigned long r12;
- unsigned long rbp;
- unsigned long rbx;
-/* arguments: non interrupts/non tracing syscalls only save up to here*/
- unsigned long r11;
- unsigned long r10;
- unsigned long r9;
- unsigned long r8;
- unsigned long rax;
- unsigned long rcx;
- unsigned long rdx;
- unsigned long rsi;
- unsigned long rdi;
- unsigned long orig_rax;
-/* end of arguments */
-/* cpu exception frame or undefined */
- unsigned long rip;
- unsigned long cs;
- unsigned long eflags;
- unsigned long rsp;
- unsigned long ss;
-/* top of stack page */
-};
-
-#else /* __KERNEL__ */
-
struct pt_regs {
unsigned long r15;
unsigned long r14;
@@ -124,13 +58,8 @@ struct pt_regs {
/* top of stack page */
};
-#endif /* __KERNEL__ */
#endif /* !__i386__ */
-
-#ifdef __KERNEL__
-
-#include <linux/init.h>
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt_types.h>
#endif
@@ -145,7 +74,6 @@ extern unsigned long
convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs);
extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
int error_code, int si_code);
-void signal_fault(struct pt_regs *regs, void __user *frame, char *where);
extern long syscall_trace_enter(struct pt_regs *);
extern void syscall_trace_leave(struct pt_regs *);
@@ -204,23 +132,23 @@ static inline bool user_64bit_mode(struct pt_regs *regs)
return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
#endif
}
+
+#define current_user_stack_pointer() this_cpu_read(old_rsp)
+/* ia32 vs. x32 difference */
+#define compat_user_stack_pointer() \
+ (test_thread_flag(TIF_IA32) \
+ ? current_pt_regs()->sp \
+ : this_cpu_read(old_rsp))
#endif
-/*
- * X86_32 CPUs don't save ss and esp if the CPU is already in kernel mode
- * when it traps. The previous stack will be directly underneath the saved
- * registers, and 'sp/ss' won't even have been saved. Thus the '&regs->sp'.
- *
- * This is valid only for kernel mode traps.
- */
-static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
-{
#ifdef CONFIG_X86_32
- return (unsigned long)(&regs->sp);
+extern unsigned long kernel_stack_pointer(struct pt_regs *regs);
#else
+static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
+{
return regs->sp;
-#endif
}
+#endif
#define GET_IP(regs) ((regs)->ip)
#define GET_FP(regs) ((regs)->bp)
@@ -247,6 +175,15 @@ static inline unsigned long regs_get_register(struct pt_regs *regs,
{
if (unlikely(offset > MAX_REG_OFFSET))
return 0;
+#ifdef CONFIG_X86_32
+ /*
+ * Traps from the kernel do not save sp and ss.
+ * Use the helper function to retrieve sp.
+ */
+ if (offset == offsetof(struct pt_regs, sp) &&
+ regs->cs == __KERNEL_CS)
+ return kernel_stack_pointer(regs);
+#endif
return *(unsigned long *)((unsigned long)regs + offset);
}
@@ -294,14 +231,27 @@ static inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
#define ARCH_HAS_USER_SINGLE_STEP_INFO
+/*
+ * When hitting ptrace_stop(), we cannot return using SYSRET because
+ * that does not restore the full CPU state, only a minimal set. The
+ * ptracer can change arbitrary register values, which is usually okay
+ * because the usual ptrace stops run off the signal delivery path which
+ * forces IRET; however, ptrace_event() stops happen in arbitrary places
+ * in the kernel and don't force IRET path.
+ *
+ * So force IRET path after a ptrace stop.
+ */
+#define arch_ptrace_stop_needed(code, info) \
+({ \
+ set_thread_flag(TIF_NOTIFY_RESUME); \
+ false; \
+})
+
struct user_desc;
extern int do_get_thread_area(struct task_struct *p, int idx,
struct user_desc __user *info);
extern int do_set_thread_area(struct task_struct *p, int idx,
struct user_desc __user *info, int can_allocate);
-#endif /* __KERNEL__ */
-
#endif /* !__ASSEMBLY__ */
-
#endif /* _ASM_X86_PTRACE_H */
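The new CONFIG_X86_32 branch in regs_get_register() matters for callers such as kprobes fetch handlers that ask for the stack pointer: on 32-bit kernel-mode traps sp is not saved, so the helper now routes through kernel_stack_pointer(). A minimal hedged usage sketch (the probe_sp() name is illustrative, not kernel code):

/* Sketch: fetching the stack pointer the way a kprobes/ftrace handler
 * might; on 32-bit kernel-mode traps this now goes through
 * kernel_stack_pointer() instead of reading the unsaved sp slot. */
static unsigned long probe_sp(struct pt_regs *regs)
{
	return regs_get_register(regs, offsetof(struct pt_regs, sp));
}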
diff --git a/arch/x86/include/asm/pvclock-abi.h b/arch/x86/include/asm/pvclock-abi.h
index 35f2d1948ad..6167fd79818 100644
--- a/arch/x86/include/asm/pvclock-abi.h
+++ b/arch/x86/include/asm/pvclock-abi.h
@@ -40,5 +40,6 @@ struct pvclock_wall_clock {
} __attribute__((__packed__));
#define PVCLOCK_TSC_STABLE_BIT (1 << 0)
+#define PVCLOCK_GUEST_STOPPED (1 << 1)
#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_PVCLOCK_ABI_H */
diff --git a/arch/x86/include/asm/pvclock.h b/arch/x86/include/asm/pvclock.h
index c59cc97fe6c..d6b078e9fa2 100644
--- a/arch/x86/include/asm/pvclock.h
+++ b/arch/x86/include/asm/pvclock.h
@@ -6,6 +6,7 @@
/* some helper functions for xen and kvm pv clock sources */
cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src);
+u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src);
void pvclock_set_flags(u8 flags);
unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src);
void pvclock_read_wallclock(struct pvclock_wall_clock *wall,
@@ -13,6 +14,8 @@ void pvclock_read_wallclock(struct pvclock_wall_clock *wall,
struct timespec *ts);
void pvclock_resume(void);
+void pvclock_touch_watchdogs(void);
+
/*
* Scale a 64-bit delta by scaling and multiplying by a 32-bit fraction,
* yielding a 64-bit result.
@@ -56,4 +59,49 @@ static inline u64 pvclock_scale_delta(u64 delta, u32 mul_frac, int shift)
return product;
}
+static __always_inline
+u64 pvclock_get_nsec_offset(const struct pvclock_vcpu_time_info *src)
+{
+ u64 delta = __native_read_tsc() - src->tsc_timestamp;
+ return pvclock_scale_delta(delta, src->tsc_to_system_mul,
+ src->tsc_shift);
+}
+
+static __always_inline
+unsigned __pvclock_read_cycles(const struct pvclock_vcpu_time_info *src,
+ cycle_t *cycles, u8 *flags)
+{
+ unsigned version;
+ cycle_t ret, offset;
+ u8 ret_flags;
+
+ version = src->version;
+ /* Note: emulated platforms which do not advertise SSE2 support
+ * result in kvmclock not using the necessary RDTSC barriers.
+ * Without barriers, it is possible that the RDTSC instruction reads
+ * from the time stamp counter outside the rdtsc_barrier()-protected
+ * section below, resulting in a violation of monotonicity.
+ */
+ rdtsc_barrier();
+ offset = pvclock_get_nsec_offset(src);
+ ret = src->system_time + offset;
+ ret_flags = src->flags;
+ rdtsc_barrier();
+
+ *cycles = ret;
+ *flags = ret_flags;
+ return version;
+}
+
+struct pvclock_vsyscall_time_info {
+ struct pvclock_vcpu_time_info pvti;
+} __attribute__((__aligned__(SMP_CACHE_BYTES)));
+
+#define PVTI_SIZE sizeof(struct pvclock_vsyscall_time_info)
+#define PVCLOCK_VSYSCALL_NR_PAGES (((NR_CPUS-1)/(PAGE_SIZE/PVTI_SIZE))+1)
+
+int __init pvclock_init_vsyscall(struct pvclock_vsyscall_time_info *i,
+ int size);
+struct pvclock_vcpu_time_info *pvclock_get_vsyscall_time_info(int cpu);
+
#endif /* _ASM_X86_PVCLOCK_H */
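__pvclock_read_cycles() returns the version it sampled; callers are expected to retry while an update is in flight (odd version) or the version changed across the read. A hedged sketch of such a caller loop follows; it is not the exact pvclock_clocksource_read() implementation, though the PVCLOCK_GUEST_STOPPED handling mirrors the watchdog hook declared above.

/*
 * Sketch of the retry loop a caller of __pvclock_read_cycles() is expected
 * to run: re-read while the host update is in flight (odd version) or the
 * version changed underneath us.  Not the exact kernel implementation.
 */
static cycle_t pvclock_read(struct pvclock_vcpu_time_info *src)
{
	unsigned version;
	cycle_t ret;
	u8 flags;

	do {
		version = __pvclock_read_cycles(src, &ret, &flags);
	} while ((src->version & 1) || version != src->version);

	if (unlikely(flags & PVCLOCK_GUEST_STOPPED))
		pvclock_touch_watchdogs();

	return ret;
}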
diff --git a/arch/x86/include/asm/qrwlock.h b/arch/x86/include/asm/qrwlock.h
new file mode 100644
index 00000000000..70f46f07f94
--- /dev/null
+++ b/arch/x86/include/asm/qrwlock.h
@@ -0,0 +1,17 @@
+#ifndef _ASM_X86_QRWLOCK_H
+#define _ASM_X86_QRWLOCK_H
+
+#include <asm-generic/qrwlock_types.h>
+
+#if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE)
+#define queue_write_unlock queue_write_unlock
+static inline void queue_write_unlock(struct qrwlock *lock)
+{
+ barrier();
+ ACCESS_ONCE(*(u8 *)&lock->cnts) = 0;
+}
+#endif
+
+#include <asm-generic/qrwlock.h>
+
+#endif /* _ASM_X86_QRWLOCK_H */
diff --git a/arch/x86/include/asm/realmode.h b/arch/x86/include/asm/realmode.h
new file mode 100644
index 00000000000..9c6b890d5e7
--- /dev/null
+++ b/arch/x86/include/asm/realmode.h
@@ -0,0 +1,64 @@
+#ifndef _ARCH_X86_REALMODE_H
+#define _ARCH_X86_REALMODE_H
+
+#include <linux/types.h>
+#include <asm/io.h>
+
+/* This must match data at realmode.S */
+struct real_mode_header {
+ u32 text_start;
+ u32 ro_end;
+ /* SMP trampoline */
+ u32 trampoline_start;
+ u32 trampoline_status;
+ u32 trampoline_header;
+#ifdef CONFIG_X86_64
+ u32 trampoline_pgd;
+#endif
+ /* ACPI S3 wakeup */
+#ifdef CONFIG_ACPI_SLEEP
+ u32 wakeup_start;
+ u32 wakeup_header;
+#endif
+ /* APM/BIOS reboot */
+ u32 machine_real_restart_asm;
+#ifdef CONFIG_X86_64
+ u32 machine_real_restart_seg;
+#endif
+};
+
+/* This must match data at trampoline_32/64.S */
+struct trampoline_header {
+#ifdef CONFIG_X86_32
+ u32 start;
+ u16 gdt_pad;
+ u16 gdt_limit;
+ u32 gdt_base;
+#else
+ u64 start;
+ u64 efer;
+ u32 cr4;
+#endif
+};
+
+extern struct real_mode_header *real_mode_header;
+extern unsigned char real_mode_blob_end[];
+
+extern unsigned long init_rsp;
+extern unsigned long initial_code;
+extern unsigned long initial_gs;
+
+extern unsigned char real_mode_blob[];
+extern unsigned char real_mode_relocs[];
+
+#ifdef CONFIG_X86_32
+extern unsigned char startup_32_smp[];
+extern unsigned char boot_gdt[];
+#else
+extern unsigned char secondary_startup_64[];
+#endif
+
+void reserve_real_mode(void);
+void setup_real_mode(void);
+
+#endif /* _ARCH_X86_REALMODE_H */
diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
index 92f297069e8..a82c4f1b4d8 100644
--- a/arch/x86/include/asm/reboot.h
+++ b/arch/x86/include/asm/reboot.h
@@ -18,8 +18,8 @@ extern struct machine_ops machine_ops;
void native_machine_crash_shutdown(struct pt_regs *regs);
void native_machine_shutdown(void);
-void machine_real_restart(unsigned int type);
-/* These must match dispatch_table in reboot_32.S */
+void __noreturn machine_real_restart(unsigned int type);
+/* These must match dispatch in arch/x86/realmode/rm/reboot.S */
#define MRR_BIOS 0
#define MRR_APM 1
diff --git a/arch/x86/include/asm/required-features.h b/arch/x86/include/asm/required-features.h
index 6c7fc25f2c3..5c6e4fb370f 100644
--- a/arch/x86/include/asm/required-features.h
+++ b/arch/x86/include/asm/required-features.h
@@ -47,6 +47,12 @@
# define NEED_NOPL 0
#endif
+#ifdef CONFIG_MATOM
+# define NEED_MOVBE (1<<(X86_FEATURE_MOVBE & 31))
+#else
+# define NEED_MOVBE 0
+#endif
+
#ifdef CONFIG_X86_64
#ifdef CONFIG_PARAVIRT
/* Paravirtualized systems may not have PSE or PGE available */
@@ -80,7 +86,7 @@
#define REQUIRED_MASK2 0
#define REQUIRED_MASK3 (NEED_NOPL)
-#define REQUIRED_MASK4 0
+#define REQUIRED_MASK4 (NEED_MOVBE)
#define REQUIRED_MASK5 0
#define REQUIRED_MASK6 0
#define REQUIRED_MASK7 0
diff --git a/arch/x86/include/asm/resource.h b/arch/x86/include/asm/resource.h
deleted file mode 100644
index 04bc4db8921..00000000000
--- a/arch/x86/include/asm/resource.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/resource.h>
diff --git a/arch/x86/include/asm/rmwcc.h b/arch/x86/include/asm/rmwcc.h
new file mode 100644
index 00000000000..8f7866a5b9a
--- /dev/null
+++ b/arch/x86/include/asm/rmwcc.h
@@ -0,0 +1,41 @@
+#ifndef _ASM_X86_RMWcc
+#define _ASM_X86_RMWcc
+
+#ifdef CC_HAVE_ASM_GOTO
+
+#define __GEN_RMWcc(fullop, var, cc, ...) \
+do { \
+ asm_volatile_goto (fullop "; j" cc " %l[cc_label]" \
+ : : "m" (var), ## __VA_ARGS__ \
+ : "memory" : cc_label); \
+ return 0; \
+cc_label: \
+ return 1; \
+} while (0)
+
+#define GEN_UNARY_RMWcc(op, var, arg0, cc) \
+ __GEN_RMWcc(op " " arg0, var, cc)
+
+#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \
+ __GEN_RMWcc(op " %1, " arg0, var, cc, vcon (val))
+
+#else /* !CC_HAVE_ASM_GOTO */
+
+#define __GEN_RMWcc(fullop, var, cc, ...) \
+do { \
+ char c; \
+ asm volatile (fullop "; set" cc " %1" \
+ : "+m" (var), "=qm" (c) \
+ : __VA_ARGS__ : "memory"); \
+ return c != 0; \
+} while (0)
+
+#define GEN_UNARY_RMWcc(op, var, arg0, cc) \
+ __GEN_RMWcc(op " " arg0, var, cc)
+
+#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \
+ __GEN_RMWcc(op " %2, " arg0, var, cc, vcon (val))
+
+#endif /* CC_HAVE_ASM_GOTO */
+
+#endif /* _ASM_X86_RMWcc */
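The two GEN_UNARY_RMWcc() branches above differ in how the condition code reaches C: with asm goto the branch target is handed straight to the compiler, without it the flag is materialized with setcc and tested in C. The stand-alone program below illustrates both expansions for a "decl; test ZF" on an ordinary variable; the __GCC_ASM_GOTO_WORKS feature test and the counter variable are illustrative only (the kernel keys this off CC_HAVE_ASM_GOTO and applies it to per-cpu words such as __preempt_count).

/*
 * Stand-alone illustration of the two GEN_UNARY_RMWcc() expansions above.
 * __GCC_ASM_GOTO_WORKS is a hypothetical feature test for this sketch.
 */
#include <stdbool.h>
#include <stdio.h>

static int counter = 1;

#ifdef __GCC_ASM_GOTO_WORKS
static bool dec_and_test(void)
{
	/* asm goto form: the flag never reaches a register, the branch
	 * target is handed straight to the compiler */
	asm goto("decl %0; je %l[hit_zero]"
		 : : "m"(counter) : "memory", "cc" : hit_zero);
	return false;
hit_zero:
	return true;
}
#else
static bool dec_and_test(void)
{
	/* setcc fallback: materialize ZF into a byte and test it in C */
	char c;

	asm volatile("decl %0; sete %1"
		     : "+m"(counter), "=qm"(c) : : "memory");
	return c != 0;
}
#endif

int main(void)
{
	printf("hit zero: %d\n", dec_and_test());	/* prints 1 */
	return 0;
}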
diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
index 2dbe4a721ce..cad82c9c2fd 100644
--- a/arch/x86/include/asm/rwsem.h
+++ b/arch/x86/include/asm/rwsem.h
@@ -105,8 +105,8 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
asm volatile("# beginning down_write\n\t"
LOCK_PREFIX " xadd %1,(%2)\n\t"
/* adds 0xffff0001, returns the old value */
- " test %1,%1\n\t"
- /* was the count 0 before? */
+ " test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t"
+ /* was the active mask 0 before? */
" jz 1f\n"
" call call_rwsem_down_write_failed\n"
"1:\n"
@@ -126,11 +126,25 @@ static inline void __down_write(struct rw_semaphore *sem)
*/
static inline int __down_write_trylock(struct rw_semaphore *sem)
{
- long ret = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
- RWSEM_ACTIVE_WRITE_BIAS);
- if (ret == RWSEM_UNLOCKED_VALUE)
- return 1;
- return 0;
+ long result, tmp;
+ asm volatile("# beginning __down_write_trylock\n\t"
+ " mov %0,%1\n\t"
+ "1:\n\t"
+ " test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t"
+ /* was the active mask 0 before? */
+ " jnz 2f\n\t"
+ " mov %1,%2\n\t"
+ " add %3,%2\n\t"
+ LOCK_PREFIX " cmpxchg %2,%0\n\t"
+ " jnz 1b\n\t"
+ "2:\n\t"
+ " sete %b1\n\t"
+ " movzbl %b1, %k1\n\t"
+ "# ending __down_write_trylock\n\t"
+ : "+m" (sem->count), "=&a" (result), "=&r" (tmp)
+ : "er" (RWSEM_ACTIVE_WRITE_BIAS)
+ : "memory", "cc");
+ return result;
}
/*
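A rough C rendering of the new __down_write_trylock() asm may clarify the change: instead of one unconditional cmpxchg against RWSEM_UNLOCKED_VALUE, it only attempts the cmpxchg while the active part of sem->count is zero and retries with the freshly observed value when the cmpxchg loses a race. The sketch below assumes the 64-bit layout in which the active mask is the low 32 bits (what the __ASM_SEL(%w1,%k1) test selects there); it is not the actual implementation.

/*
 * Rough C rendering of the __down_write_trylock() asm above (sketch only).
 */
static inline int __down_write_trylock_sketch(struct rw_semaphore *sem)
{
	long old = sem->count, prev;

	for (;;) {
		if ((int)old != 0)	/* active readers or a writer present */
			return 0;
		prev = cmpxchg(&sem->count, old, old + RWSEM_ACTIVE_WRITE_BIAS);
		if (prev == old)
			return 1;	/* we installed the write bias */
		old = prev;		/* lost a race; re-check the new value */
	}
}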
diff --git a/arch/x86/include/asm/seccomp.h b/arch/x86/include/asm/seccomp.h
index c62e58a5a90..0f3d7f09922 100644
--- a/arch/x86/include/asm/seccomp.h
+++ b/arch/x86/include/asm/seccomp.h
@@ -1,5 +1,5 @@
#ifdef CONFIG_X86_32
-# include "seccomp_32.h"
+# include <asm/seccomp_32.h>
#else
-# include "seccomp_64.h"
+# include <asm/seccomp_64.h>
#endif
diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
index 5e641715c3f..6f1c3a8a33a 100644
--- a/arch/x86/include/asm/segment.h
+++ b/arch/x86/include/asm/segment.h
@@ -205,14 +205,73 @@
#define IDT_ENTRIES 256
#define NUM_EXCEPTION_VECTORS 32
+/* Bitmask of exception vectors which push an error code on the stack */
+#define EXCEPTION_ERRCODE_MASK 0x00027d00
#define GDT_SIZE (GDT_ENTRIES * 8)
#define GDT_ENTRY_TLS_ENTRIES 3
#define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES * 8)
#ifdef __KERNEL__
#ifndef __ASSEMBLY__
-extern const char early_idt_handlers[NUM_EXCEPTION_VECTORS][10];
-#endif
+extern const char early_idt_handlers[NUM_EXCEPTION_VECTORS][2+2+5];
+#ifdef CONFIG_TRACING
+#define trace_early_idt_handlers early_idt_handlers
#endif
+/*
+ * Load a segment. Fall back on loading the zero
+ * segment if something goes wrong..
+ */
+#define loadsegment(seg, value) \
+do { \
+ unsigned short __val = (value); \
+ \
+ asm volatile(" \n" \
+ "1: movl %k0,%%" #seg " \n" \
+ \
+ ".section .fixup,\"ax\" \n" \
+ "2: xorl %k0,%k0 \n" \
+ " jmp 1b \n" \
+ ".previous \n" \
+ \
+ _ASM_EXTABLE(1b, 2b) \
+ \
+ : "+r" (__val) : : "memory"); \
+} while (0)
+
+/*
+ * Save a segment register away
+ */
+#define savesegment(seg, value) \
+ asm("mov %%" #seg ",%0":"=r" (value) : : "memory")
+
+/*
+ * x86_32 user gs accessors.
+ */
+#ifdef CONFIG_X86_32
+#ifdef CONFIG_X86_32_LAZY_GS
+#define get_user_gs(regs) (u16)({unsigned long v; savesegment(gs, v); v;})
+#define set_user_gs(regs, v) loadsegment(gs, (unsigned long)(v))
+#define task_user_gs(tsk) ((tsk)->thread.gs)
+#define lazy_save_gs(v) savesegment(gs, (v))
+#define lazy_load_gs(v) loadsegment(gs, (v))
+#else /* X86_32_LAZY_GS */
+#define get_user_gs(regs) (u16)((regs)->gs)
+#define set_user_gs(regs, v) do { (regs)->gs = (v); } while (0)
+#define task_user_gs(tsk) (task_pt_regs(tsk)->gs)
+#define lazy_save_gs(v) do { } while (0)
+#define lazy_load_gs(v) do { } while (0)
+#endif /* X86_32_LAZY_GS */
+#endif /* X86_32 */
+
+static inline unsigned long get_limit(unsigned long segment)
+{
+ unsigned long __limit;
+ asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
+ return __limit + 1;
+}
+
+#endif /* !__ASSEMBLY__ */
+#endif /* __KERNEL__ */
+
#endif /* _ASM_X86_SEGMENT_H */
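get_limit() relies on LSL being usable from any privilege level. The small user-space program below demonstrates the same idea on the caller's own %cs; the 32-bit operand types and the seg_limit() name are local simplifications for this sketch, not the kernel helper.

/*
 * User-space demonstration of the get_limit() idea above: LSL is an
 * unprivileged instruction, so we can query the limit of our own %cs.
 */
#include <stdio.h>

static unsigned int seg_limit(unsigned int selector)
{
	unsigned int limit = 0;

	/* on an invalid selector ZF is cleared and limit stays 0 */
	asm("lsll %1,%0" : "+r"(limit) : "r"(selector));
	return limit;
}

int main(void)
{
	unsigned int cs;

	asm("mov %%cs,%0" : "=r"(cs));
	printf("CS selector %#x, raw limit %#x\n", cs, seg_limit(cs));
	return 0;
}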
diff --git a/arch/x86/include/asm/sembuf.h b/arch/x86/include/asm/sembuf.h
deleted file mode 100644
index ee50c801f7b..00000000000
--- a/arch/x86/include/asm/sembuf.h
+++ /dev/null
@@ -1,24 +0,0 @@
-#ifndef _ASM_X86_SEMBUF_H
-#define _ASM_X86_SEMBUF_H
-
-/*
- * The semid64_ds structure for x86 architecture.
- * Note extra padding because this structure is passed back and forth
- * between kernel and user space.
- *
- * Pad space is left for:
- * - 64-bit time_t to solve y2038 problem
- * - 2 miscellaneous 32-bit values
- */
-struct semid64_ds {
- struct ipc64_perm sem_perm; /* permissions .. see ipc.h */
- __kernel_time_t sem_otime; /* last semop time */
- unsigned long __unused1;
- __kernel_time_t sem_ctime; /* last change time */
- unsigned long __unused2;
- unsigned long sem_nsems; /* no. of semaphores in array */
- unsigned long __unused3;
- unsigned long __unused4;
-};
-
-#endif /* _ASM_X86_SEMBUF_H */
diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h
index d0f19f9fb84..ff4e7b236e2 100644
--- a/arch/x86/include/asm/setup.h
+++ b/arch/x86/include/asm/setup.h
@@ -1,10 +1,12 @@
#ifndef _ASM_X86_SETUP_H
#define _ASM_X86_SETUP_H
-#ifdef __KERNEL__
+#include <uapi/asm/setup.h>
#define COMMAND_LINE_SIZE 2048
+#include <linux/linkage.h>
+
#ifdef __i386__
#include <linux/pfn.h>
@@ -26,6 +28,8 @@
#include <asm/bootparam.h>
#include <asm/x86_init.h>
+extern u64 relocated_ramdisk;
+
/* Interrupt control for vSMPowered x86_64 systems */
#ifdef CONFIG_X86_64
void vsmp_init(void);
@@ -35,12 +39,6 @@ static inline void vsmp_init(void) { }
void setup_bios_corruption_check(void);
-#ifdef CONFIG_X86_VISWS
-extern void visws_early_detect(void);
-#else
-static inline void visws_early_detect(void) { }
-#endif
-
extern unsigned long saved_video_mode;
extern void reserve_standard_io_resources(void);
@@ -48,9 +46,9 @@ extern void i386_reserve_resources(void);
extern void setup_default_timer_irq(void);
#ifdef CONFIG_X86_INTEL_MID
-extern void x86_mrst_early_setup(void);
+extern void x86_intel_mid_early_setup(void);
#else
-static inline void x86_mrst_early_setup(void) { }
+static inline void x86_intel_mid_early_setup(void) { }
#endif
#ifdef CONFIG_X86_INTEL_CE
@@ -61,6 +59,8 @@ static inline void x86_ce4100_early_setup(void) { }
#ifndef _SETUP
+#include <asm/espfix.h>
+
/*
* This is set up by the setup-routine at boot-time
*/
@@ -107,11 +107,11 @@ void *extend_brk(size_t size, size_t align);
extern void probe_roms(void);
#ifdef __i386__
-void __init i386_start_kernel(void);
+asmlinkage void __init i386_start_kernel(void);
#else
-void __init x86_64_start_kernel(char *real_mode);
-void __init x86_64_start_reservations(char *real_mode_data);
+asmlinkage void __init x86_64_start_kernel(char *real_mode);
+asmlinkage void __init x86_64_start_reservations(char *real_mode_data);
#endif /* __i386__ */
#endif /* _SETUP */
@@ -123,6 +123,4 @@ void __init x86_64_start_reservations(char *real_mode_data);
.size .brk.name,.-1b; \
.popsection
#endif /* __ASSEMBLY__ */
-#endif /* __KERNEL__ */
-
#endif /* _ASM_X86_SETUP_H */
diff --git a/arch/x86/include/asm/shmbuf.h b/arch/x86/include/asm/shmbuf.h
deleted file mode 100644
index 83c05fc2de3..00000000000
--- a/arch/x86/include/asm/shmbuf.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/shmbuf.h>
diff --git a/arch/x86/include/asm/sigcontext.h b/arch/x86/include/asm/sigcontext.h
index 04459d25e66..9dfce4e0417 100644
--- a/arch/x86/include/asm/sigcontext.h
+++ b/arch/x86/include/asm/sigcontext.h
@@ -1,104 +1,9 @@
#ifndef _ASM_X86_SIGCONTEXT_H
#define _ASM_X86_SIGCONTEXT_H
-#include <linux/compiler.h>
-#include <linux/types.h>
-
-#define FP_XSTATE_MAGIC1 0x46505853U
-#define FP_XSTATE_MAGIC2 0x46505845U
-#define FP_XSTATE_MAGIC2_SIZE sizeof(FP_XSTATE_MAGIC2)
-
-/*
- * bytes 464..511 in the current 512byte layout of fxsave/fxrstor frame
- * are reserved for SW usage. On cpu's supporting xsave/xrstor, these bytes
- * are used to extended the fpstate pointer in the sigcontext, which now
- * includes the extended state information along with fpstate information.
- *
- * Presence of FP_XSTATE_MAGIC1 at the beginning of this SW reserved
- * area and FP_XSTATE_MAGIC2 at the end of memory layout
- * (extended_size - FP_XSTATE_MAGIC2_SIZE) indicates the presence of the
- * extended state information in the memory layout pointed by the fpstate
- * pointer in sigcontext.
- */
-struct _fpx_sw_bytes {
- __u32 magic1; /* FP_XSTATE_MAGIC1 */
- __u32 extended_size; /* total size of the layout referred by
- * fpstate pointer in the sigcontext.
- */
- __u64 xstate_bv;
- /* feature bit mask (including fp/sse/extended
- * state) that is present in the memory
- * layout.
- */
- __u32 xstate_size; /* actual xsave state size, based on the
- * features saved in the layout.
- * 'extended_size' will be greater than
- * 'xstate_size'.
- */
- __u32 padding[7]; /* for future use. */
-};
+#include <uapi/asm/sigcontext.h>
#ifdef __i386__
-/*
- * As documented in the iBCS2 standard..
- *
- * The first part of "struct _fpstate" is just the normal i387
- * hardware setup, the extra "status" word is used to save the
- * coprocessor status word before entering the handler.
- *
- * Pentium III FXSR, SSE support
- * Gareth Hughes <gareth@valinux.com>, May 2000
- *
- * The FPU state data structure has had to grow to accommodate the
- * extended FPU state required by the Streaming SIMD Extensions.
- * There is no documented standard to accomplish this at the moment.
- */
-struct _fpreg {
- unsigned short significand[4];
- unsigned short exponent;
-};
-
-struct _fpxreg {
- unsigned short significand[4];
- unsigned short exponent;
- unsigned short padding[3];
-};
-
-struct _xmmreg {
- unsigned long element[4];
-};
-
-struct _fpstate {
- /* Regular FPU environment */
- unsigned long cw;
- unsigned long sw;
- unsigned long tag;
- unsigned long ipoff;
- unsigned long cssel;
- unsigned long dataoff;
- unsigned long datasel;
- struct _fpreg _st[8];
- unsigned short status;
- unsigned short magic; /* 0xffff = regular FPU data only */
-
- /* FXSR FPU environment */
- unsigned long _fxsr_env[6]; /* FXSR FPU env is ignored */
- unsigned long mxcsr;
- unsigned long reserved;
- struct _fpxreg _fxsr_st[8]; /* FXSR FPU reg data is ignored */
- struct _xmmreg _xmm[8];
- unsigned long padding1[44];
-
- union {
- unsigned long padding2[12];
- struct _fpx_sw_bytes sw_reserved; /* represents the extended
- * state info */
- };
-};
-
-#define X86_FXSR_MAGIC 0x0000
-
-#ifdef __KERNEL__
struct sigcontext {
unsigned short gs, __gsh;
unsigned short fs, __fsh;
@@ -131,62 +36,7 @@ struct sigcontext {
unsigned long oldmask;
unsigned long cr2;
};
-#else /* __KERNEL__ */
-/*
- * User-space might still rely on the old definition:
- */
-struct sigcontext {
- unsigned short gs, __gsh;
- unsigned short fs, __fsh;
- unsigned short es, __esh;
- unsigned short ds, __dsh;
- unsigned long edi;
- unsigned long esi;
- unsigned long ebp;
- unsigned long esp;
- unsigned long ebx;
- unsigned long edx;
- unsigned long ecx;
- unsigned long eax;
- unsigned long trapno;
- unsigned long err;
- unsigned long eip;
- unsigned short cs, __csh;
- unsigned long eflags;
- unsigned long esp_at_signal;
- unsigned short ss, __ssh;
- struct _fpstate __user *fpstate;
- unsigned long oldmask;
- unsigned long cr2;
-};
-#endif /* !__KERNEL__ */
-
#else /* __i386__ */
-
-/* FXSAVE frame */
-/* Note: reserved1/2 may someday contain valuable data. Always save/restore
- them when you change signal frames. */
-struct _fpstate {
- __u16 cwd;
- __u16 swd;
- __u16 twd; /* Note this is not the same as the
- 32bit/x87/FSAVE twd */
- __u16 fop;
- __u64 rip;
- __u64 rdp;
- __u32 mxcsr;
- __u32 mxcsr_mask;
- __u32 st_space[32]; /* 8*16 bytes for each FP-reg */
- __u32 xmm_space[64]; /* 16*16 bytes for each XMM-reg */
- __u32 reserved2[12];
- union {
- __u32 reserved3[12];
- struct _fpx_sw_bytes sw_reserved; /* represents the extended
- * state information */
- };
-};
-
-#ifdef __KERNEL__
struct sigcontext {
unsigned long r8;
unsigned long r9;
@@ -225,66 +75,5 @@ struct sigcontext {
void __user *fpstate; /* zero when no FPU/extended context */
unsigned long reserved1[8];
};
-#else /* __KERNEL__ */
-/*
- * User-space might still rely on the old definition:
- */
-struct sigcontext {
- unsigned long r8;
- unsigned long r9;
- unsigned long r10;
- unsigned long r11;
- unsigned long r12;
- unsigned long r13;
- unsigned long r14;
- unsigned long r15;
- unsigned long rdi;
- unsigned long rsi;
- unsigned long rbp;
- unsigned long rbx;
- unsigned long rdx;
- unsigned long rax;
- unsigned long rcx;
- unsigned long rsp;
- unsigned long rip;
- unsigned long eflags; /* RFLAGS */
- unsigned short cs;
- unsigned short gs;
- unsigned short fs;
- unsigned short __pad0;
- unsigned long err;
- unsigned long trapno;
- unsigned long oldmask;
- unsigned long cr2;
- struct _fpstate __user *fpstate; /* zero when no FPU context */
- unsigned long reserved1[8];
-};
-#endif /* !__KERNEL__ */
-
#endif /* !__i386__ */
-
-struct _xsave_hdr {
- __u64 xstate_bv;
- __u64 reserved1[2];
- __u64 reserved2[5];
-};
-
-struct _ymmh_state {
- /* 16 * 16 bytes for each YMMH-reg */
- __u32 ymmh_space[64];
-};
-
-/*
- * Extended state pointed by the fpstate pointer in the sigcontext.
- * In addition to the fpstate, information encoded in the xstate_hdr
- * indicates the presence of other extended state information
- * supported by the processor and OS.
- */
-struct _xstate {
- struct _fpstate fpstate;
- struct _xsave_hdr xstate_hdr;
- struct _ymmh_state ymmh;
- /* new processor state extensions go here */
-};
-
#endif /* _ASM_X86_SIGCONTEXT_H */
diff --git a/arch/x86/include/asm/sigcontext32.h b/arch/x86/include/asm/sigcontext32.h
deleted file mode 100644
index ad1478c4ae1..00000000000
--- a/arch/x86/include/asm/sigcontext32.h
+++ /dev/null
@@ -1,77 +0,0 @@
-#ifndef _ASM_X86_SIGCONTEXT32_H
-#define _ASM_X86_SIGCONTEXT32_H
-
-#include <linux/types.h>
-
-/* signal context for 32bit programs. */
-
-#define X86_FXSR_MAGIC 0x0000
-
-struct _fpreg {
- unsigned short significand[4];
- unsigned short exponent;
-};
-
-struct _fpxreg {
- unsigned short significand[4];
- unsigned short exponent;
- unsigned short padding[3];
-};
-
-struct _xmmreg {
- __u32 element[4];
-};
-
-/* FSAVE frame with extensions */
-struct _fpstate_ia32 {
- /* Regular FPU environment */
- __u32 cw;
- __u32 sw;
- __u32 tag; /* not compatible to 64bit twd */
- __u32 ipoff;
- __u32 cssel;
- __u32 dataoff;
- __u32 datasel;
- struct _fpreg _st[8];
- unsigned short status;
- unsigned short magic; /* 0xffff = regular FPU data only */
-
- /* FXSR FPU environment */
- __u32 _fxsr_env[6];
- __u32 mxcsr;
- __u32 reserved;
- struct _fpxreg _fxsr_st[8];
- struct _xmmreg _xmm[8]; /* It's actually 16 */
- __u32 padding[44];
- union {
- __u32 padding2[12];
- struct _fpx_sw_bytes sw_reserved;
- };
-};
-
-struct sigcontext_ia32 {
- unsigned short gs, __gsh;
- unsigned short fs, __fsh;
- unsigned short es, __esh;
- unsigned short ds, __dsh;
- unsigned int di;
- unsigned int si;
- unsigned int bp;
- unsigned int sp;
- unsigned int bx;
- unsigned int dx;
- unsigned int cx;
- unsigned int ax;
- unsigned int trapno;
- unsigned int err;
- unsigned int ip;
- unsigned short cs, __csh;
- unsigned int flags;
- unsigned int sp_at_signal;
- unsigned short ss, __ssh;
- unsigned int fpstate; /* really (struct _fpstate_ia32 *) */
- unsigned int oldmask;
- unsigned int cr2;
-};
-
-#endif /* _ASM_X86_SIGCONTEXT32_H */
diff --git a/arch/x86/include/asm/sigframe.h b/arch/x86/include/asm/sigframe.h
index 4e0fe26d27d..7c7c27c97da 100644
--- a/arch/x86/include/asm/sigframe.h
+++ b/arch/x86/include/asm/sigframe.h
@@ -59,12 +59,25 @@ struct rt_sigframe_ia32 {
#endif /* defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION) */
#ifdef CONFIG_X86_64
+
struct rt_sigframe {
char __user *pretcode;
struct ucontext uc;
struct siginfo info;
/* fp state follows here */
};
+
+#ifdef CONFIG_X86_X32_ABI
+
+struct rt_sigframe_x32 {
+ u64 pretcode;
+ struct ucontext_x32 uc;
+ compat_siginfo_t info;
+ /* fp state follows here */
+};
+
+#endif /* CONFIG_X86_X32_ABI */
+
#endif /* CONFIG_X86_64 */
#endif /* _ASM_X86_SIGFRAME_H */
diff --git a/arch/x86/include/asm/sighandling.h b/arch/x86/include/asm/sighandling.h
new file mode 100644
index 00000000000..7a958164088
--- /dev/null
+++ b/arch/x86/include/asm/sighandling.h
@@ -0,0 +1,22 @@
+#ifndef _ASM_X86_SIGHANDLING_H
+#define _ASM_X86_SIGHANDLING_H
+
+#include <linux/compiler.h>
+#include <linux/ptrace.h>
+#include <linux/signal.h>
+
+#include <asm/processor-flags.h>
+
+#define FIX_EFLAGS (X86_EFLAGS_AC | X86_EFLAGS_OF | \
+ X86_EFLAGS_DF | X86_EFLAGS_TF | X86_EFLAGS_SF | \
+ X86_EFLAGS_ZF | X86_EFLAGS_AF | X86_EFLAGS_PF | \
+ X86_EFLAGS_CF | X86_EFLAGS_RF)
+
+void signal_fault(struct pt_regs *regs, void __user *frame, char *where);
+
+int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
+ unsigned long *pax);
+int setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate,
+ struct pt_regs *regs, unsigned long mask);
+
+#endif /* _ASM_X86_SIGHANDLING_H */
diff --git a/arch/x86/include/asm/siginfo.h b/arch/x86/include/asm/siginfo.h
deleted file mode 100644
index fc1aa553564..00000000000
--- a/arch/x86/include/asm/siginfo.h
+++ /dev/null
@@ -1,10 +0,0 @@
-#ifndef _ASM_X86_SIGINFO_H
-#define _ASM_X86_SIGINFO_H
-
-#ifdef __x86_64__
-# define __ARCH_SI_PREAMBLE_SIZE (4 * sizeof(int))
-#endif
-
-#include <asm-generic/siginfo.h>
-
-#endif /* _ASM_X86_SIGINFO_H */
diff --git a/arch/x86/include/asm/signal.h b/arch/x86/include/asm/signal.h
index 598457cbd0f..31eab867e6d 100644
--- a/arch/x86/include/asm/signal.h
+++ b/arch/x86/include/asm/signal.h
@@ -2,14 +2,6 @@
#define _ASM_X86_SIGNAL_H
#ifndef __ASSEMBLY__
-#include <linux/types.h>
-#include <linux/time.h>
-#include <linux/compiler.h>
-
-/* Avoid too many header ordering problems. */
-struct siginfo;
-
-#ifdef __KERNEL__
#include <linux/linkage.h>
/* Most things should be clean enough to redefine this at will, if care
@@ -31,159 +23,17 @@ typedef struct {
unsigned long sig[_NSIG_WORDS];
} sigset_t;
-#else
-/* Here we must cater to libcs that poke about in kernel headers. */
-
-#define NSIG 32
-typedef unsigned long sigset_t;
+#ifndef CONFIG_COMPAT
+typedef sigset_t compat_sigset_t;
+#endif
-#endif /* __KERNEL__ */
#endif /* __ASSEMBLY__ */
-
-#define SIGHUP 1
-#define SIGINT 2
-#define SIGQUIT 3
-#define SIGILL 4
-#define SIGTRAP 5
-#define SIGABRT 6
-#define SIGIOT 6
-#define SIGBUS 7
-#define SIGFPE 8
-#define SIGKILL 9
-#define SIGUSR1 10
-#define SIGSEGV 11
-#define SIGUSR2 12
-#define SIGPIPE 13
-#define SIGALRM 14
-#define SIGTERM 15
-#define SIGSTKFLT 16
-#define SIGCHLD 17
-#define SIGCONT 18
-#define SIGSTOP 19
-#define SIGTSTP 20
-#define SIGTTIN 21
-#define SIGTTOU 22
-#define SIGURG 23
-#define SIGXCPU 24
-#define SIGXFSZ 25
-#define SIGVTALRM 26
-#define SIGPROF 27
-#define SIGWINCH 28
-#define SIGIO 29
-#define SIGPOLL SIGIO
-/*
-#define SIGLOST 29
-*/
-#define SIGPWR 30
-#define SIGSYS 31
-#define SIGUNUSED 31
-
-/* These should not be considered constants from userland. */
-#define SIGRTMIN 32
-#define SIGRTMAX _NSIG
-
-/*
- * SA_FLAGS values:
- *
- * SA_ONSTACK indicates that a registered stack_t will be used.
- * SA_RESTART flag to get restarting signals (which were the default long ago)
- * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop.
- * SA_RESETHAND clears the handler when the signal is delivered.
- * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies.
- * SA_NODEFER prevents the current signal from being masked in the handler.
- *
- * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
- * Unix names RESETHAND and NODEFER respectively.
- */
-#define SA_NOCLDSTOP 0x00000001u
-#define SA_NOCLDWAIT 0x00000002u
-#define SA_SIGINFO 0x00000004u
-#define SA_ONSTACK 0x08000000u
-#define SA_RESTART 0x10000000u
-#define SA_NODEFER 0x40000000u
-#define SA_RESETHAND 0x80000000u
-
-#define SA_NOMASK SA_NODEFER
-#define SA_ONESHOT SA_RESETHAND
-
-#define SA_RESTORER 0x04000000
-
-/*
- * sigaltstack controls
- */
-#define SS_ONSTACK 1
-#define SS_DISABLE 2
-
-#define MINSIGSTKSZ 2048
-#define SIGSTKSZ 8192
-
-#include <asm-generic/signal-defs.h>
-
+#include <uapi/asm/signal.h>
#ifndef __ASSEMBLY__
-
-# ifdef __KERNEL__
extern void do_notify_resume(struct pt_regs *, void *, __u32);
-# endif /* __KERNEL__ */
-
-#ifdef __i386__
-# ifdef __KERNEL__
-struct old_sigaction {
- __sighandler_t sa_handler;
- old_sigset_t sa_mask;
- unsigned long sa_flags;
- __sigrestore_t sa_restorer;
-};
-struct sigaction {
- __sighandler_t sa_handler;
- unsigned long sa_flags;
- __sigrestore_t sa_restorer;
- sigset_t sa_mask; /* mask last for extensibility */
-};
+#define __ARCH_HAS_SA_RESTORER
-struct k_sigaction {
- struct sigaction sa;
-};
-
-# else /* __KERNEL__ */
-/* Here we must cater to libcs that poke about in kernel headers. */
-
-struct sigaction {
- union {
- __sighandler_t _sa_handler;
- void (*_sa_sigaction)(int, struct siginfo *, void *);
- } _u;
- sigset_t sa_mask;
- unsigned long sa_flags;
- void (*sa_restorer)(void);
-};
-
-#define sa_handler _u._sa_handler
-#define sa_sigaction _u._sa_sigaction
-
-# endif /* ! __KERNEL__ */
-#else /* __i386__ */
-
-struct sigaction {
- __sighandler_t sa_handler;
- unsigned long sa_flags;
- __sigrestore_t sa_restorer;
- sigset_t sa_mask; /* mask last for extensibility */
-};
-
-struct k_sigaction {
- struct sigaction sa;
-};
-
-#endif /* !__i386__ */
-
-typedef struct sigaltstack {
- void __user *ss_sp;
- int ss_flags;
- size_t ss_size;
-} stack_t;
-
-#ifdef __KERNEL__
#include <asm/sigcontext.h>
#ifdef __i386__
@@ -242,12 +92,6 @@ static inline int __gen_sigismember(sigset_t *set, int _sig)
? __const_sigismember((set), (sig)) \
: __gen_sigismember((set), (sig)))
-static inline int sigfindinword(unsigned long word)
-{
- asm("bsfl %1,%0" : "=r"(word) : "rm"(word) : "cc");
- return word;
-}
-
struct pt_regs;
#else /* __i386__ */
@@ -256,9 +100,5 @@ struct pt_regs;
#endif /* !__i386__ */
-#define ptrace_signal_deliver(regs, cookie) do { } while (0)
-
-#endif /* __KERNEL__ */
#endif /* __ASSEMBLY__ */
-
#endif /* _ASM_X86_SIGNAL_H */
diff --git a/arch/x86/include/asm/simd.h b/arch/x86/include/asm/simd.h
new file mode 100644
index 00000000000..ee80b92f009
--- /dev/null
+++ b/arch/x86/include/asm/simd.h
@@ -0,0 +1,11 @@
+
+#include <asm/i387.h>
+
+/*
+ * may_use_simd - whether it is allowable at this time to issue SIMD
+ * instructions or access the SIMD register file
+ */
+static __must_check inline bool may_use_simd(void)
+{
+ return irq_fpu_usable();
+}
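
Illustration (not part of the patch): may_use_simd() is meant to gate a SIMD fast path, since the FPU/SIMD register file may not be usable in every context (interrupts, in particular). A hypothetical caller with a scalar fallback:

    #include <linux/string.h>
    #include <asm/i387.h>   /* kernel_fpu_begin()/kernel_fpu_end() */
    #include <asm/simd.h>

    static void copy_block(void *dst, const void *src, size_t len)
    {
        if (may_use_simd()) {
            kernel_fpu_begin();     /* claim the FPU/SIMD state for this CPU */
            /* ... an SSE/AVX-accelerated copy would go here ... */
            kernel_fpu_end();
        } else {
            memcpy(dst, src, len);  /* always-safe scalar path */
        }
    }
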
diff --git a/arch/x86/include/asm/smap.h b/arch/x86/include/asm/smap.h
new file mode 100644
index 00000000000..8d3120f4e27
--- /dev/null
+++ b/arch/x86/include/asm/smap.h
@@ -0,0 +1,91 @@
+/*
+ * Supervisor Mode Access Prevention support
+ *
+ * Copyright (C) 2012 Intel Corporation
+ * Author: H. Peter Anvin <hpa@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#ifndef _ASM_X86_SMAP_H
+#define _ASM_X86_SMAP_H
+
+#include <linux/stringify.h>
+#include <asm/nops.h>
+#include <asm/cpufeature.h>
+
+/* "Raw" instruction opcodes */
+#define __ASM_CLAC .byte 0x0f,0x01,0xca
+#define __ASM_STAC .byte 0x0f,0x01,0xcb
+
+#ifdef __ASSEMBLY__
+
+#include <asm/alternative-asm.h>
+
+#ifdef CONFIG_X86_SMAP
+
+#define ASM_CLAC \
+ 661: ASM_NOP3 ; \
+ .pushsection .altinstr_replacement, "ax" ; \
+ 662: __ASM_CLAC ; \
+ .popsection ; \
+ .pushsection .altinstructions, "a" ; \
+ altinstruction_entry 661b, 662b, X86_FEATURE_SMAP, 3, 3 ; \
+ .popsection
+
+#define ASM_STAC \
+ 661: ASM_NOP3 ; \
+ .pushsection .altinstr_replacement, "ax" ; \
+ 662: __ASM_STAC ; \
+ .popsection ; \
+ .pushsection .altinstructions, "a" ; \
+ altinstruction_entry 661b, 662b, X86_FEATURE_SMAP, 3, 3 ; \
+ .popsection
+
+#else /* CONFIG_X86_SMAP */
+
+#define ASM_CLAC
+#define ASM_STAC
+
+#endif /* CONFIG_X86_SMAP */
+
+#else /* __ASSEMBLY__ */
+
+#include <asm/alternative.h>
+
+#ifdef CONFIG_X86_SMAP
+
+static __always_inline void clac(void)
+{
+ /* Note: a barrier is implicit in alternative() */
+ alternative(ASM_NOP3, __stringify(__ASM_CLAC), X86_FEATURE_SMAP);
+}
+
+static __always_inline void stac(void)
+{
+ /* Note: a barrier is implicit in alternative() */
+ alternative(ASM_NOP3, __stringify(__ASM_STAC), X86_FEATURE_SMAP);
+}
+
+/* These macros can be used in asm() statements */
+#define ASM_CLAC \
+ ALTERNATIVE(ASM_NOP3, __stringify(__ASM_CLAC), X86_FEATURE_SMAP)
+#define ASM_STAC \
+ ALTERNATIVE(ASM_NOP3, __stringify(__ASM_STAC), X86_FEATURE_SMAP)
+
+#else /* CONFIG_X86_SMAP */
+
+static inline void clac(void) { }
+static inline void stac(void) { }
+
+#define ASM_CLAC
+#define ASM_STAC
+
+#endif /* CONFIG_X86_SMAP */
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_X86_SMAP_H */
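
Illustration (not part of the patch): with SMAP enabled, any open-coded user-space access has to be bracketed by stac()/clac() so the kernel is temporarily allowed to touch user pages. A minimal sketch; the helper and the access body are placeholders:

    #include <linux/types.h>
    #include <asm/smap.h>

    static void touch_user_buffer(void __user *dst, size_t len)
    {
        stac();     /* open the user-access window (STAC, patched to a NOP without SMAP) */
        /* ... exception-handled loads/stores to *dst would go here ... */
        clac();     /* close the window again before returning */
    }
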
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index 0434c400287..8cd27e08e23 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -2,7 +2,6 @@
#define _ASM_X86_SMP_H
#ifndef __ASSEMBLY__
#include <linux/cpumask.h>
-#include <linux/init.h>
#include <asm/percpu.h>
/*
@@ -31,12 +30,12 @@ static inline bool cpu_has_ht_siblings(void)
return has_siblings;
}
-DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_map);
-DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
+DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_sibling_map);
+DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
/* cpus sharing the last level cache: */
-DECLARE_PER_CPU(cpumask_var_t, cpu_llc_shared_map);
-DECLARE_PER_CPU(u16, cpu_llc_id);
-DECLARE_PER_CPU(int, cpu_number);
+DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
+DECLARE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id);
+DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);
static inline struct cpumask *cpu_sibling_mask(int cpu)
{
@@ -53,15 +52,17 @@ static inline struct cpumask *cpu_llc_shared_mask(int cpu)
return per_cpu(cpu_llc_shared_map, cpu);
}
-DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_apicid);
-DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);
+DECLARE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_cpu_to_apicid);
+DECLARE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_bios_cpu_apicid);
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32)
-DECLARE_EARLY_PER_CPU(int, x86_cpu_to_logical_apicid);
+DECLARE_EARLY_PER_CPU_READ_MOSTLY(int, x86_cpu_to_logical_apicid);
#endif
/* Static state in head.S used to set up a CPU */
extern unsigned long stack_start; /* Initial stack pointer address */
+struct task_struct;
+
struct smp_ops {
void (*smp_prepare_boot_cpu)(void);
void (*smp_prepare_cpus)(unsigned max_cpus);
@@ -70,7 +71,7 @@ struct smp_ops {
void (*stop_other_cpus)(int wait);
void (*smp_send_reschedule)(int cpu);
- int (*cpu_up)(unsigned cpu);
+ int (*cpu_up)(unsigned cpu, struct task_struct *tidle);
int (*cpu_disable)(void);
void (*cpu_die)(unsigned int cpu);
void (*play_dead)(void);
@@ -113,9 +114,9 @@ static inline void smp_cpus_done(unsigned int max_cpus)
smp_ops.smp_cpus_done(max_cpus);
}
-static inline int __cpu_up(unsigned int cpu)
+static inline int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
- return smp_ops.cpu_up(cpu);
+ return smp_ops.cpu_up(cpu, tidle);
}
static inline int __cpu_disable(void)
@@ -152,7 +153,7 @@ void cpu_disable_common(void);
void native_smp_prepare_boot_cpu(void);
void native_smp_prepare_cpus(unsigned int max_cpus);
void native_smp_cpus_done(unsigned int max_cpus);
-int native_cpu_up(unsigned int cpunum);
+int native_cpu_up(unsigned int cpunum, struct task_struct *tidle);
int native_cpu_disable(void);
void native_cpu_die(unsigned int cpu);
void native_play_dead(void);
@@ -162,15 +163,12 @@ int wbinvd_on_all_cpus(void);
void native_send_call_func_ipi(const struct cpumask *mask);
void native_send_call_func_single_ipi(int cpu);
+void x86_idle_thread_init(unsigned int cpu, struct task_struct *idle);
+void smp_store_boot_cpu_info(void);
void smp_store_cpu_info(int id);
#define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu)
-/* We don't mark CPUs online until __cpu_up(), so we need another measure */
-static inline int num_booting_cpus(void)
-{
- return cpumask_weight(cpu_callout_mask);
-}
#else /* !CONFIG_SMP */
#define wbinvd_on_cpu(cpu) wbinvd()
static inline int wbinvd_on_all_cpus(void)
@@ -180,7 +178,7 @@ static inline int wbinvd_on_all_cpus(void)
}
#endif /* CONFIG_SMP */
-extern unsigned disabled_cpus __cpuinitdata;
+extern unsigned disabled_cpus;
#ifdef CONFIG_X86_32_SMP
/*
@@ -188,11 +186,11 @@ extern unsigned disabled_cpus __cpuinitdata;
* from the initial startup. We map APIC_BASE very early in page_setup(),
* so this is correct in the x86 case.
*/
-#define raw_smp_processor_id() (percpu_read(cpu_number))
+#define raw_smp_processor_id() (this_cpu_read(cpu_number))
extern int safe_smp_processor_id(void);
#elif defined(CONFIG_X86_64_SMP)
-#define raw_smp_processor_id() (percpu_read(cpu_number))
+#define raw_smp_processor_id() (this_cpu_read(cpu_number))
#define stack_smp_processor_id() \
({ \
diff --git a/arch/x86/include/asm/socket.h b/arch/x86/include/asm/socket.h
deleted file mode 100644
index 6b71384b9d8..00000000000
--- a/arch/x86/include/asm/socket.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/socket.h>
diff --git a/arch/x86/include/asm/sockios.h b/arch/x86/include/asm/sockios.h
deleted file mode 100644
index def6d4746ee..00000000000
--- a/arch/x86/include/asm/sockios.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/sockios.h>
diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h
new file mode 100644
index 00000000000..e820c080a4e
--- /dev/null
+++ b/arch/x86/include/asm/special_insns.h
@@ -0,0 +1,207 @@
+#ifndef _ASM_X86_SPECIAL_INSNS_H
+#define _ASM_X86_SPECIAL_INSNS_H
+
+
+#ifdef __KERNEL__
+
+static inline void native_clts(void)
+{
+ asm volatile("clts");
+}
+
+/*
+ * Volatile isn't enough to prevent the compiler from reordering the
+ * read/write functions for the control registers and messing everything up.
+ * A memory clobber would solve the problem, but would also prevent reordering
+ * of all loads/stores around it, which can hurt performance. The solution is
+ * to use a dummy variable and mimic reads and writes to it to enforce
+ * serialization of the control-register accesses.
+ */
+extern unsigned long __force_order;
+
+static inline unsigned long native_read_cr0(void)
+{
+ unsigned long val;
+ asm volatile("mov %%cr0,%0\n\t" : "=r" (val), "=m" (__force_order));
+ return val;
+}
+
+static inline void native_write_cr0(unsigned long val)
+{
+ asm volatile("mov %0,%%cr0": : "r" (val), "m" (__force_order));
+}
+
+static inline unsigned long native_read_cr2(void)
+{
+ unsigned long val;
+ asm volatile("mov %%cr2,%0\n\t" : "=r" (val), "=m" (__force_order));
+ return val;
+}
+
+static inline void native_write_cr2(unsigned long val)
+{
+ asm volatile("mov %0,%%cr2": : "r" (val), "m" (__force_order));
+}
+
+static inline unsigned long native_read_cr3(void)
+{
+ unsigned long val;
+ asm volatile("mov %%cr3,%0\n\t" : "=r" (val), "=m" (__force_order));
+ return val;
+}
+
+static inline void native_write_cr3(unsigned long val)
+{
+ asm volatile("mov %0,%%cr3": : "r" (val), "m" (__force_order));
+}
+
+static inline unsigned long native_read_cr4(void)
+{
+ unsigned long val;
+ asm volatile("mov %%cr4,%0\n\t" : "=r" (val), "=m" (__force_order));
+ return val;
+}
+
+static inline unsigned long native_read_cr4_safe(void)
+{
+	unsigned long val;
+	/* This could fault if %cr4 does not exist. On x86_64, CR4 always
+	 * exists, so the read will never fault. */
+#ifdef CONFIG_X86_32
+ asm volatile("1: mov %%cr4, %0\n"
+ "2:\n"
+ _ASM_EXTABLE(1b, 2b)
+ : "=r" (val), "=m" (__force_order) : "0" (0));
+#else
+ val = native_read_cr4();
+#endif
+ return val;
+}
+
+static inline void native_write_cr4(unsigned long val)
+{
+ asm volatile("mov %0,%%cr4": : "r" (val), "m" (__force_order));
+}
+
+#ifdef CONFIG_X86_64
+static inline unsigned long native_read_cr8(void)
+{
+ unsigned long cr8;
+ asm volatile("movq %%cr8,%0" : "=r" (cr8));
+ return cr8;
+}
+
+static inline void native_write_cr8(unsigned long val)
+{
+ asm volatile("movq %0,%%cr8" :: "r" (val) : "memory");
+}
+#endif
+
+static inline void native_wbinvd(void)
+{
+ asm volatile("wbinvd": : :"memory");
+}
+
+extern asmlinkage void native_load_gs_index(unsigned);
+
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#else
+
+static inline unsigned long read_cr0(void)
+{
+ return native_read_cr0();
+}
+
+static inline void write_cr0(unsigned long x)
+{
+ native_write_cr0(x);
+}
+
+static inline unsigned long read_cr2(void)
+{
+ return native_read_cr2();
+}
+
+static inline void write_cr2(unsigned long x)
+{
+ native_write_cr2(x);
+}
+
+static inline unsigned long read_cr3(void)
+{
+ return native_read_cr3();
+}
+
+static inline void write_cr3(unsigned long x)
+{
+ native_write_cr3(x);
+}
+
+static inline unsigned long read_cr4(void)
+{
+ return native_read_cr4();
+}
+
+static inline unsigned long read_cr4_safe(void)
+{
+ return native_read_cr4_safe();
+}
+
+static inline void write_cr4(unsigned long x)
+{
+ native_write_cr4(x);
+}
+
+static inline void wbinvd(void)
+{
+ native_wbinvd();
+}
+
+#ifdef CONFIG_X86_64
+
+static inline unsigned long read_cr8(void)
+{
+ return native_read_cr8();
+}
+
+static inline void write_cr8(unsigned long x)
+{
+ native_write_cr8(x);
+}
+
+static inline void load_gs_index(unsigned selector)
+{
+ native_load_gs_index(selector);
+}
+
+#endif
+
+/* Clear the 'TS' bit */
+static inline void clts(void)
+{
+ native_clts();
+}
+
+#endif/* CONFIG_PARAVIRT */
+
+#define stts() write_cr0(read_cr0() | X86_CR0_TS)
+
+static inline void clflush(volatile void *__p)
+{
+ asm volatile("clflush %0" : "+m" (*(volatile char __force *)__p));
+}
+
+static inline void clflushopt(volatile void *__p)
+{
+ alternative_io(".byte " __stringify(NOP_DS_PREFIX) "; clflush %P0",
+ ".byte 0x66; clflush %P0",
+ X86_FEATURE_CLFLUSHOPT,
+ "+m" (*(volatile char __force *)__p));
+}
+
+#define nop() asm volatile ("nop")
+
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_X86_SPECIAL_INSNS_H */
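
Illustration (not part of the patch): these accessors sit behind the read_cr*()/write_cr*() wrappers; a typical caller does a read-modify-write of a single control-register bit, e.g. (hypothetically) setting OSFXSR in CR4:

    #include <asm/processor-flags.h>    /* X86_CR4_OSFXSR */
    #include <asm/special_insns.h>

    static void enable_fxsave(void)
    {
        unsigned long cr4 = read_cr4();

        if (!(cr4 & X86_CR4_OSFXSR))
            write_cr4(cr4 | X86_CR4_OSFXSR);    /* __force_order keeps the two CR4 accesses ordered */
    }
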
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index a82c2bf504b..54f1c8068c0 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -1,35 +1,34 @@
#ifndef _ASM_X86_SPINLOCK_H
#define _ASM_X86_SPINLOCK_H
+#include <linux/jump_label.h>
#include <linux/atomic.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <linux/compiler.h>
#include <asm/paravirt.h>
+#include <asm/bitops.h>
+
/*
* Your basic SMP spinlocks, allowing only a single CPU anywhere
*
* Simple spin lock operations. There are two variants, one clears IRQ's
* on the local processor, one does not.
*
- * These are fair FIFO ticket locks, which are currently limited to 256
- * CPUs.
+ * These are fair FIFO ticket locks, which support up to 2^16 CPUs.
*
* (the type definitions are in asm/spinlock_types.h)
*/
#ifdef CONFIG_X86_32
# define LOCK_PTR_REG "a"
-# define REG_PTR_MODE "k"
#else
# define LOCK_PTR_REG "D"
-# define REG_PTR_MODE "q"
#endif
-#if defined(CONFIG_X86_32) && \
- (defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE))
+#if defined(CONFIG_X86_32) && (defined(CONFIG_X86_PPRO_FENCE))
/*
- * On PPro SMP or if we are using OOSTORE, we use a locked operation to unlock
+ * On PPro SMP, we use a locked operation to unlock
* (PPro errata 66, 92)
*/
# define UNLOCK_LOCK_PREFIX LOCK_PREFIX
@@ -37,6 +36,36 @@
# define UNLOCK_LOCK_PREFIX
#endif
+/* How long a lock should spin before we consider blocking */
+#define SPIN_THRESHOLD (1 << 15)
+
+extern struct static_key paravirt_ticketlocks_enabled;
+static __always_inline bool static_key_false(struct static_key *key);
+
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+
+static inline void __ticket_enter_slowpath(arch_spinlock_t *lock)
+{
+ set_bit(0, (volatile unsigned long *)&lock->tickets.tail);
+}
+
+#else /* !CONFIG_PARAVIRT_SPINLOCKS */
+static __always_inline void __ticket_lock_spinning(arch_spinlock_t *lock,
+ __ticket_t ticket)
+{
+}
+static inline void __ticket_unlock_kick(arch_spinlock_t *lock,
+ __ticket_t ticket)
+{
+}
+
+#endif /* CONFIG_PARAVIRT_SPINLOCKS */
+
+static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
+{
+ return lock.tickets.head == lock.tickets.tail;
+}
+
/*
* Ticket locks are conceptually two parts, one indicating the current head of
* the queue, and the other indicating the current tail. The lock is acquired
@@ -50,81 +79,101 @@
* in the high part, because a wide xadd increment of the low part would carry
* up and contaminate the high part.
*/
-static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
+static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
{
- register struct __raw_tickets inc = { .tail = 1 };
+ register struct __raw_tickets inc = { .tail = TICKET_LOCK_INC };
inc = xadd(&lock->tickets, inc);
+ if (likely(inc.head == inc.tail))
+ goto out;
+ inc.tail &= ~TICKET_SLOWPATH_FLAG;
for (;;) {
- if (inc.head == inc.tail)
- break;
- cpu_relax();
- inc.head = ACCESS_ONCE(lock->tickets.head);
+ unsigned count = SPIN_THRESHOLD;
+
+ do {
+ if (ACCESS_ONCE(lock->tickets.head) == inc.tail)
+ goto out;
+ cpu_relax();
+ } while (--count);
+ __ticket_lock_spinning(lock, inc.tail);
}
- barrier(); /* make sure nothing creeps before the lock is taken */
+out: barrier(); /* make sure nothing creeps before the lock is taken */
}
-static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
+static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
{
arch_spinlock_t old, new;
old.tickets = ACCESS_ONCE(lock->tickets);
- if (old.tickets.head != old.tickets.tail)
+ if (old.tickets.head != (old.tickets.tail & ~TICKET_SLOWPATH_FLAG))
return 0;
- new.head_tail = old.head_tail + (1 << TICKET_SHIFT);
+ new.head_tail = old.head_tail + (TICKET_LOCK_INC << TICKET_SHIFT);
/* cmpxchg is a full barrier, so nothing can move before it */
return cmpxchg(&lock->head_tail, old.head_tail, new.head_tail) == old.head_tail;
}
-static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
+static inline void __ticket_unlock_slowpath(arch_spinlock_t *lock,
+ arch_spinlock_t old)
{
- __add(&lock->tickets.head, 1, UNLOCK_LOCK_PREFIX);
+ arch_spinlock_t new;
+
+ BUILD_BUG_ON(((__ticket_t)NR_CPUS) != NR_CPUS);
+
+ /* Perform the unlock on the "before" copy */
+ old.tickets.head += TICKET_LOCK_INC;
+
+ /* Clear the slowpath flag */
+ new.head_tail = old.head_tail & ~(TICKET_SLOWPATH_FLAG << TICKET_SHIFT);
+
+ /*
+ * If the lock is uncontended, clear the flag - use cmpxchg in
+ * case it changes behind our back though.
+ */
+ if (new.tickets.head != new.tickets.tail ||
+ cmpxchg(&lock->head_tail, old.head_tail,
+ new.head_tail) != old.head_tail) {
+ /*
+ * Lock still has someone queued for it, so wake up an
+ * appropriate waiter.
+ */
+ __ticket_unlock_kick(lock, old.tickets.head);
+ }
}
-static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
+static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
{
- struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);
+ if (TICKET_SLOWPATH_FLAG &&
+ static_key_false(&paravirt_ticketlocks_enabled)) {
+ arch_spinlock_t prev;
- return !!(tmp.tail ^ tmp.head);
-}
+ prev = *lock;
+ add_smp(&lock->tickets.head, TICKET_LOCK_INC);
-static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
-{
- struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);
+ /* add_smp() is a full mb() */
- return ((tmp.tail - tmp.head) & TICKET_MASK) > 1;
+ if (unlikely(lock->tickets.tail & TICKET_SLOWPATH_FLAG))
+ __ticket_unlock_slowpath(lock, prev);
+ } else
+ __add(&lock->tickets.head, TICKET_LOCK_INC, UNLOCK_LOCK_PREFIX);
}
-#ifndef CONFIG_PARAVIRT_SPINLOCKS
-
static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
- return __ticket_spin_is_locked(lock);
-}
-
-static inline int arch_spin_is_contended(arch_spinlock_t *lock)
-{
- return __ticket_spin_is_contended(lock);
-}
-#define arch_spin_is_contended arch_spin_is_contended
+ struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);
-static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
-{
- __ticket_spin_lock(lock);
+ return tmp.tail != tmp.head;
}
-static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
+static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
- return __ticket_spin_trylock(lock);
-}
+ struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);
-static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
-{
- __ticket_spin_unlock(lock);
+ return (__ticket_t)(tmp.tail - tmp.head) > TICKET_LOCK_INC;
}
+#define arch_spin_is_contended arch_spin_is_contended
static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
unsigned long flags)
@@ -132,14 +181,13 @@ static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
arch_spin_lock(lock);
}
-#endif /* CONFIG_PARAVIRT_SPINLOCKS */
-
static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
while (arch_spin_is_locked(lock))
cpu_relax();
}
+#ifndef CONFIG_QUEUE_RWLOCK
/*
* Read-write spinlocks, allowing multiple readers
* but only one writer.
@@ -222,6 +270,9 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
: "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
}
+#else
+#include <asm/qrwlock.h>
+#endif /* CONFIG_QUEUE_RWLOCK */
#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
@@ -236,8 +287,4 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
#define arch_read_relax(lock) cpu_relax()
#define arch_write_relax(lock) cpu_relax()
-/* The {read|write|spin}_lock() on x86 are full memory barriers. */
-static inline void smp_mb__after_lock(void) { }
-#define ARCH_HAS_SMP_MB_AFTER_LOCK
-
#endif /* _ASM_X86_SPINLOCK_H */
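
Illustration (not part of the patch): a toy user-space model of the ticket scheme above, without the paravirt slowpath flag. The xadd hands each locker a ticket from the tail; whoever's ticket equals the head owns the lock, and unlock advances the head:

    #include <stdatomic.h>
    #include <stdint.h>

    struct toy_ticket_lock {
        _Atomic uint16_t head;  /* ticket currently being served */
        _Atomic uint16_t tail;  /* next ticket to hand out */
    };

    static void toy_lock(struct toy_ticket_lock *l)
    {
        uint16_t me = atomic_fetch_add(&l->tail, 1);    /* the xadd in arch_spin_lock() */

        while (atomic_load(&l->head) != me)
            ;   /* spin; the kernel version uses cpu_relax() and SPIN_THRESHOLD */
    }

    static void toy_unlock(struct toy_ticket_lock *l)
    {
        atomic_fetch_add(&l->head, 1);  /* hand the lock to the next waiter */
    }
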
diff --git a/arch/x86/include/asm/spinlock_types.h b/arch/x86/include/asm/spinlock_types.h
index 8ebd5df7451..73c4c007200 100644
--- a/arch/x86/include/asm/spinlock_types.h
+++ b/arch/x86/include/asm/spinlock_types.h
@@ -1,13 +1,17 @@
#ifndef _ASM_X86_SPINLOCK_TYPES_H
#define _ASM_X86_SPINLOCK_TYPES_H
-#ifndef __LINUX_SPINLOCK_TYPES_H
-# error "please don't include this file directly"
-#endif
-
#include <linux/types.h>
-#if (CONFIG_NR_CPUS < 256)
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+#define __TICKET_LOCK_INC 2
+#define TICKET_SLOWPATH_FLAG ((__ticket_t)1)
+#else
+#define __TICKET_LOCK_INC 1
+#define TICKET_SLOWPATH_FLAG ((__ticket_t)0)
+#endif
+
+#if (CONFIG_NR_CPUS < (256 / __TICKET_LOCK_INC))
typedef u8 __ticket_t;
typedef u16 __ticketpair_t;
#else
@@ -15,8 +19,9 @@ typedef u16 __ticket_t;
typedef u32 __ticketpair_t;
#endif
+#define TICKET_LOCK_INC ((__ticket_t)__TICKET_LOCK_INC)
+
#define TICKET_SHIFT (sizeof(__ticket_t) * 8)
-#define TICKET_MASK ((__ticket_t)((1 << TICKET_SHIFT) - 1))
typedef struct arch_spinlock {
union {
@@ -29,6 +34,10 @@ typedef struct arch_spinlock {
#define __ARCH_SPIN_LOCK_UNLOCKED { { 0 } }
+#ifdef CONFIG_QUEUE_RWLOCK
+#include <asm-generic/qrwlock_types.h>
+#else
#include <asm/rwlock.h>
+#endif
#endif /* _ASM_X86_SPINLOCK_TYPES_H */
diff --git a/arch/x86/include/asm/sta2x11.h b/arch/x86/include/asm/sta2x11.h
new file mode 100644
index 00000000000..e9d32df89cc
--- /dev/null
+++ b/arch/x86/include/asm/sta2x11.h
@@ -0,0 +1,12 @@
+/*
+ * Header file for STMicroelectronics ConneXt (STA2X11) IOHub
+ */
+#ifndef __ASM_STA2X11_H
+#define __ASM_STA2X11_H
+
+#include <linux/pci.h>
+
+/* This needs to be called from the MFD to configure its sub-devices */
+struct sta2x11_instance *sta2x11_get_instance(struct pci_dev *pdev);
+
+#endif /* __ASM_STA2X11_H */
diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
index 15751776356..6a998598f17 100644
--- a/arch/x86/include/asm/stackprotector.h
+++ b/arch/x86/include/asm/stackprotector.h
@@ -38,7 +38,6 @@
#include <asm/tsc.h>
#include <asm/processor.h>
#include <asm/percpu.h>
-#include <asm/system.h>
#include <asm/desc.h>
#include <linux/random.h>
@@ -76,9 +75,9 @@ static __always_inline void boot_init_stack_canary(void)
current->stack_canary = canary;
#ifdef CONFIG_X86_64
- percpu_write(irq_stack_union.stack_canary, canary);
+ this_cpu_write(irq_stack_union.stack_canary, canary);
#else
- percpu_write(stack_canary.canary, canary);
+ this_cpu_write(stack_canary.canary, canary);
#endif
}
diff --git a/arch/x86/include/asm/stat.h b/arch/x86/include/asm/stat.h
deleted file mode 100644
index e0b1d9bbcbc..00000000000
--- a/arch/x86/include/asm/stat.h
+++ /dev/null
@@ -1,114 +0,0 @@
-#ifndef _ASM_X86_STAT_H
-#define _ASM_X86_STAT_H
-
-#define STAT_HAVE_NSEC 1
-
-#ifdef __i386__
-struct stat {
- unsigned long st_dev;
- unsigned long st_ino;
- unsigned short st_mode;
- unsigned short st_nlink;
- unsigned short st_uid;
- unsigned short st_gid;
- unsigned long st_rdev;
- unsigned long st_size;
- unsigned long st_blksize;
- unsigned long st_blocks;
- unsigned long st_atime;
- unsigned long st_atime_nsec;
- unsigned long st_mtime;
- unsigned long st_mtime_nsec;
- unsigned long st_ctime;
- unsigned long st_ctime_nsec;
- unsigned long __unused4;
- unsigned long __unused5;
-};
-
-#define STAT64_HAS_BROKEN_ST_INO 1
-
-/* This matches struct stat64 in glibc2.1, hence the absolutely
- * insane amounts of padding around dev_t's.
- */
-struct stat64 {
- unsigned long long st_dev;
- unsigned char __pad0[4];
-
- unsigned long __st_ino;
-
- unsigned int st_mode;
- unsigned int st_nlink;
-
- unsigned long st_uid;
- unsigned long st_gid;
-
- unsigned long long st_rdev;
- unsigned char __pad3[4];
-
- long long st_size;
- unsigned long st_blksize;
-
- /* Number 512-byte blocks allocated. */
- unsigned long long st_blocks;
-
- unsigned long st_atime;
- unsigned long st_atime_nsec;
-
- unsigned long st_mtime;
- unsigned int st_mtime_nsec;
-
- unsigned long st_ctime;
- unsigned long st_ctime_nsec;
-
- unsigned long long st_ino;
-};
-
-#else /* __i386__ */
-
-struct stat {
- unsigned long st_dev;
- unsigned long st_ino;
- unsigned long st_nlink;
-
- unsigned int st_mode;
- unsigned int st_uid;
- unsigned int st_gid;
- unsigned int __pad0;
- unsigned long st_rdev;
- long st_size;
- long st_blksize;
- long st_blocks; /* Number 512-byte blocks allocated. */
-
- unsigned long st_atime;
- unsigned long st_atime_nsec;
- unsigned long st_mtime;
- unsigned long st_mtime_nsec;
- unsigned long st_ctime;
- unsigned long st_ctime_nsec;
- long __unused[3];
-};
-#endif
-
-/* for 32bit emulation and 32 bit kernels */
-struct __old_kernel_stat {
- unsigned short st_dev;
- unsigned short st_ino;
- unsigned short st_mode;
- unsigned short st_nlink;
- unsigned short st_uid;
- unsigned short st_gid;
- unsigned short st_rdev;
-#ifdef __i386__
- unsigned long st_size;
- unsigned long st_atime;
- unsigned long st_mtime;
- unsigned long st_ctime;
-#else
- unsigned int st_size;
- unsigned int st_atime;
- unsigned int st_mtime;
- unsigned int st_ctime;
-#endif
-};
-
-#endif /* _ASM_X86_STAT_H */
diff --git a/arch/x86/include/asm/statfs.h b/arch/x86/include/asm/statfs.h
deleted file mode 100644
index 2d0adbf99a8..00000000000
--- a/arch/x86/include/asm/statfs.h
+++ /dev/null
@@ -1,12 +0,0 @@
-#ifndef _ASM_X86_STATFS_H
-#define _ASM_X86_STATFS_H
-
-/*
- * We need compat_statfs64 to be packed, because the i386 ABI won't
- * add padding at the end to bring it to a multiple of 8 bytes, but
- * the x86_64 ABI will.
- */
-#define ARCH_PACK_COMPAT_STATFS64 __attribute__((packed,aligned(4)))
-
-#include <asm-generic/statfs.h>
-#endif /* _ASM_X86_STATFS_H */
diff --git a/arch/x86/include/asm/string.h b/arch/x86/include/asm/string.h
index 6dfd6d9373a..09224d7a586 100644
--- a/arch/x86/include/asm/string.h
+++ b/arch/x86/include/asm/string.h
@@ -1,5 +1,5 @@
#ifdef CONFIG_X86_32
-# include "string_32.h"
+# include <asm/string_32.h>
#else
-# include "string_64.h"
+# include <asm/string_64.h>
#endif
diff --git a/arch/x86/include/asm/suspend.h b/arch/x86/include/asm/suspend.h
index 9bd521fe457..2fab6c2c357 100644
--- a/arch/x86/include/asm/suspend.h
+++ b/arch/x86/include/asm/suspend.h
@@ -1,5 +1,5 @@
#ifdef CONFIG_X86_32
-# include "suspend_32.h"
+# include <asm/suspend_32.h>
#else
-# include "suspend_64.h"
+# include <asm/suspend_64.h>
#endif
diff --git a/arch/x86/include/asm/suspend_32.h b/arch/x86/include/asm/suspend_32.h
index 487055c8c1a..552d6c90a6d 100644
--- a/arch/x86/include/asm/suspend_32.h
+++ b/arch/x86/include/asm/suspend_32.h
@@ -15,7 +15,7 @@ struct saved_context {
unsigned long cr0, cr2, cr3, cr4;
u64 misc_enable;
bool misc_enable_saved;
- struct desc_ptr gdt;
+ struct desc_ptr gdt_desc;
struct desc_ptr idt;
u16 ldt;
u16 tss;
diff --git a/arch/x86/include/asm/suspend_64.h b/arch/x86/include/asm/suspend_64.h
index 09b0bf10415..bc6232834ba 100644
--- a/arch/x86/include/asm/suspend_64.h
+++ b/arch/x86/include/asm/suspend_64.h
@@ -25,9 +25,8 @@ struct saved_context {
u64 misc_enable;
bool misc_enable_saved;
unsigned long efer;
- u16 gdt_pad;
- u16 gdt_limit;
- unsigned long gdt_base;
+ u16 gdt_pad; /* Unused */
+ struct desc_ptr gdt_desc;
u16 idt_pad;
u16 idt_limit;
unsigned long idt_base;
diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
index f2b83bc7d78..6136d99f537 100644
--- a/arch/x86/include/asm/svm.h
+++ b/arch/x86/include/asm/svm.h
@@ -1,6 +1,9 @@
#ifndef __SVM_H
#define __SVM_H
+#include <uapi/asm/svm.h>
+
+
enum {
INTERCEPT_INTR,
INTERCEPT_NMI,
@@ -264,81 +267,6 @@ struct __attribute__ ((__packed__)) vmcb {
#define SVM_EXITINFO_REG_MASK 0x0F
-#define SVM_EXIT_READ_CR0 0x000
-#define SVM_EXIT_READ_CR3 0x003
-#define SVM_EXIT_READ_CR4 0x004
-#define SVM_EXIT_READ_CR8 0x008
-#define SVM_EXIT_WRITE_CR0 0x010
-#define SVM_EXIT_WRITE_CR3 0x013
-#define SVM_EXIT_WRITE_CR4 0x014
-#define SVM_EXIT_WRITE_CR8 0x018
-#define SVM_EXIT_READ_DR0 0x020
-#define SVM_EXIT_READ_DR1 0x021
-#define SVM_EXIT_READ_DR2 0x022
-#define SVM_EXIT_READ_DR3 0x023
-#define SVM_EXIT_READ_DR4 0x024
-#define SVM_EXIT_READ_DR5 0x025
-#define SVM_EXIT_READ_DR6 0x026
-#define SVM_EXIT_READ_DR7 0x027
-#define SVM_EXIT_WRITE_DR0 0x030
-#define SVM_EXIT_WRITE_DR1 0x031
-#define SVM_EXIT_WRITE_DR2 0x032
-#define SVM_EXIT_WRITE_DR3 0x033
-#define SVM_EXIT_WRITE_DR4 0x034
-#define SVM_EXIT_WRITE_DR5 0x035
-#define SVM_EXIT_WRITE_DR6 0x036
-#define SVM_EXIT_WRITE_DR7 0x037
-#define SVM_EXIT_EXCP_BASE 0x040
-#define SVM_EXIT_INTR 0x060
-#define SVM_EXIT_NMI 0x061
-#define SVM_EXIT_SMI 0x062
-#define SVM_EXIT_INIT 0x063
-#define SVM_EXIT_VINTR 0x064
-#define SVM_EXIT_CR0_SEL_WRITE 0x065
-#define SVM_EXIT_IDTR_READ 0x066
-#define SVM_EXIT_GDTR_READ 0x067
-#define SVM_EXIT_LDTR_READ 0x068
-#define SVM_EXIT_TR_READ 0x069
-#define SVM_EXIT_IDTR_WRITE 0x06a
-#define SVM_EXIT_GDTR_WRITE 0x06b
-#define SVM_EXIT_LDTR_WRITE 0x06c
-#define SVM_EXIT_TR_WRITE 0x06d
-#define SVM_EXIT_RDTSC 0x06e
-#define SVM_EXIT_RDPMC 0x06f
-#define SVM_EXIT_PUSHF 0x070
-#define SVM_EXIT_POPF 0x071
-#define SVM_EXIT_CPUID 0x072
-#define SVM_EXIT_RSM 0x073
-#define SVM_EXIT_IRET 0x074
-#define SVM_EXIT_SWINT 0x075
-#define SVM_EXIT_INVD 0x076
-#define SVM_EXIT_PAUSE 0x077
-#define SVM_EXIT_HLT 0x078
-#define SVM_EXIT_INVLPG 0x079
-#define SVM_EXIT_INVLPGA 0x07a
-#define SVM_EXIT_IOIO 0x07b
-#define SVM_EXIT_MSR 0x07c
-#define SVM_EXIT_TASK_SWITCH 0x07d
-#define SVM_EXIT_FERR_FREEZE 0x07e
-#define SVM_EXIT_SHUTDOWN 0x07f
-#define SVM_EXIT_VMRUN 0x080
-#define SVM_EXIT_VMMCALL 0x081
-#define SVM_EXIT_VMLOAD 0x082
-#define SVM_EXIT_VMSAVE 0x083
-#define SVM_EXIT_STGI 0x084
-#define SVM_EXIT_CLGI 0x085
-#define SVM_EXIT_SKINIT 0x086
-#define SVM_EXIT_RDTSCP 0x087
-#define SVM_EXIT_ICEBP 0x088
-#define SVM_EXIT_WBINVD 0x089
-#define SVM_EXIT_MONITOR 0x08a
-#define SVM_EXIT_MWAIT 0x08b
-#define SVM_EXIT_MWAIT_COND 0x08c
-#define SVM_EXIT_XSETBV 0x08d
-#define SVM_EXIT_NPF 0x400
-
-#define SVM_EXIT_ERR -1
-
#define SVM_CR0_SELECTIVE_MASK (X86_CR0_TS | X86_CR0_MP)
#define SVM_VMLOAD ".byte 0x0f, 0x01, 0xda"
@@ -349,4 +277,3 @@ struct __attribute__ ((__packed__)) vmcb {
#define SVM_INVLPGA ".byte 0x0f, 0x01, 0xdf"
#endif
-
diff --git a/arch/x86/include/asm/swab.h b/arch/x86/include/asm/swab.h
deleted file mode 100644
index 557cd9f0066..00000000000
--- a/arch/x86/include/asm/swab.h
+++ /dev/null
@@ -1,61 +0,0 @@
-#ifndef _ASM_X86_SWAB_H
-#define _ASM_X86_SWAB_H
-
-#include <linux/types.h>
-#include <linux/compiler.h>
-
-static inline __attribute_const__ __u32 __arch_swab32(__u32 val)
-{
-#ifdef __i386__
-# ifdef CONFIG_X86_BSWAP
- asm("bswap %0" : "=r" (val) : "0" (val));
-# else
- asm("xchgb %b0,%h0\n\t" /* swap lower bytes */
- "rorl $16,%0\n\t" /* swap words */
- "xchgb %b0,%h0" /* swap higher bytes */
- : "=q" (val)
- : "0" (val));
-# endif
-
-#else /* __i386__ */
- asm("bswapl %0"
- : "=r" (val)
- : "0" (val));
-#endif
- return val;
-}
-#define __arch_swab32 __arch_swab32
-
-static inline __attribute_const__ __u64 __arch_swab64(__u64 val)
-{
-#ifdef __i386__
- union {
- struct {
- __u32 a;
- __u32 b;
- } s;
- __u64 u;
- } v;
- v.u = val;
-# ifdef CONFIG_X86_BSWAP
- asm("bswapl %0 ; bswapl %1 ; xchgl %0,%1"
- : "=r" (v.s.a), "=r" (v.s.b)
- : "0" (v.s.a), "1" (v.s.b));
-# else
- v.s.a = __arch_swab32(v.s.a);
- v.s.b = __arch_swab32(v.s.b);
- asm("xchgl %0,%1"
- : "=r" (v.s.a), "=r" (v.s.b)
- : "0" (v.s.a), "1" (v.s.b));
-# endif
- return v.u;
-#else /* __i386__ */
- asm("bswapq %0"
- : "=r" (val)
- : "0" (val));
- return val;
-#endif
-}
-#define __arch_swab64 __arch_swab64
-
-#endif /* _ASM_X86_SWAB_H */
diff --git a/arch/x86/include/asm/swiotlb.h b/arch/x86/include/asm/swiotlb.h
index 977f1761a25..ab05d73e2bb 100644
--- a/arch/x86/include/asm/swiotlb.h
+++ b/arch/x86/include/asm/swiotlb.h
@@ -29,4 +29,11 @@ static inline void pci_swiotlb_late_init(void)
static inline void dma_mark_clean(void *addr, size_t size) {}
+extern void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flags,
+ struct dma_attrs *attrs);
+extern void x86_swiotlb_free_coherent(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_addr,
+ struct dma_attrs *attrs);
+
#endif /* _ASM_X86_SWIOTLB_H */
diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
new file mode 100644
index 00000000000..d7f3b3b78ac
--- /dev/null
+++ b/arch/x86/include/asm/switch_to.h
@@ -0,0 +1,129 @@
+#ifndef _ASM_X86_SWITCH_TO_H
+#define _ASM_X86_SWITCH_TO_H
+
+struct task_struct; /* one of the stranger aspects of C forward declarations */
+__visible struct task_struct *__switch_to(struct task_struct *prev,
+ struct task_struct *next);
+struct tss_struct;
+void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
+ struct tss_struct *tss);
+
+#ifdef CONFIG_X86_32
+
+#ifdef CONFIG_CC_STACKPROTECTOR
+#define __switch_canary \
+ "movl %P[task_canary](%[next]), %%ebx\n\t" \
+ "movl %%ebx, "__percpu_arg([stack_canary])"\n\t"
+#define __switch_canary_oparam \
+ , [stack_canary] "=m" (stack_canary.canary)
+#define __switch_canary_iparam \
+ , [task_canary] "i" (offsetof(struct task_struct, stack_canary))
+#else /* CC_STACKPROTECTOR */
+#define __switch_canary
+#define __switch_canary_oparam
+#define __switch_canary_iparam
+#endif /* CC_STACKPROTECTOR */
+
+/*
+ * Saving eflags is important: it not only switches IOPL between tasks,
+ * it also protects other tasks from NT leaking through sysenter etc.
+ */
+#define switch_to(prev, next, last) \
+do { \
+ /* \
+ * Context-switching clobbers all registers, so we clobber \
+ * them explicitly, via unused output variables. \
+ * (EAX and EBP is not listed because EBP is saved/restored \
+ * explicitly for wchan access and EAX is the return value of \
+ * __switch_to()) \
+ */ \
+ unsigned long ebx, ecx, edx, esi, edi; \
+ \
+ asm volatile("pushfl\n\t" /* save flags */ \
+ "pushl %%ebp\n\t" /* save EBP */ \
+ "movl %%esp,%[prev_sp]\n\t" /* save ESP */ \
+ "movl %[next_sp],%%esp\n\t" /* restore ESP */ \
+ "movl $1f,%[prev_ip]\n\t" /* save EIP */ \
+ "pushl %[next_ip]\n\t" /* restore EIP */ \
+ __switch_canary \
+ "jmp __switch_to\n" /* regparm call */ \
+ "1:\t" \
+ "popl %%ebp\n\t" /* restore EBP */ \
+ "popfl\n" /* restore flags */ \
+ \
+ /* output parameters */ \
+ : [prev_sp] "=m" (prev->thread.sp), \
+ [prev_ip] "=m" (prev->thread.ip), \
+ "=a" (last), \
+ \
+ /* clobbered output registers: */ \
+ "=b" (ebx), "=c" (ecx), "=d" (edx), \
+ "=S" (esi), "=D" (edi) \
+ \
+ __switch_canary_oparam \
+ \
+ /* input parameters: */ \
+ : [next_sp] "m" (next->thread.sp), \
+ [next_ip] "m" (next->thread.ip), \
+ \
+ /* regparm parameters for __switch_to(): */ \
+ [prev] "a" (prev), \
+ [next] "d" (next) \
+ \
+ __switch_canary_iparam \
+ \
+ : /* reloaded segment registers */ \
+ "memory"); \
+} while (0)
+
+#else /* CONFIG_X86_32 */
+
+/* frame pointer must be last for get_wchan */
+#define SAVE_CONTEXT "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t"
+#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popf\t"
+
+#define __EXTRA_CLOBBER \
+ , "rcx", "rbx", "rdx", "r8", "r9", "r10", "r11", \
+ "r12", "r13", "r14", "r15"
+
+#ifdef CONFIG_CC_STACKPROTECTOR
+#define __switch_canary \
+ "movq %P[task_canary](%%rsi),%%r8\n\t" \
+ "movq %%r8,"__percpu_arg([gs_canary])"\n\t"
+#define __switch_canary_oparam \
+ , [gs_canary] "=m" (irq_stack_union.stack_canary)
+#define __switch_canary_iparam \
+ , [task_canary] "i" (offsetof(struct task_struct, stack_canary))
+#else /* CC_STACKPROTECTOR */
+#define __switch_canary
+#define __switch_canary_oparam
+#define __switch_canary_iparam
+#endif /* CC_STACKPROTECTOR */
+
+/* Save and restore flags to clear/handle a leaking NT flag across the task switch */
+#define switch_to(prev, next, last) \
+ asm volatile(SAVE_CONTEXT \
+ "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */ \
+ "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */ \
+ "call __switch_to\n\t" \
+ "movq "__percpu_arg([current_task])",%%rsi\n\t" \
+ __switch_canary \
+ "movq %P[thread_info](%%rsi),%%r8\n\t" \
+ "movq %%rax,%%rdi\n\t" \
+ "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
+ "jnz ret_from_fork\n\t" \
+ RESTORE_CONTEXT \
+ : "=a" (last) \
+ __switch_canary_oparam \
+ : [next] "S" (next), [prev] "D" (prev), \
+ [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
+ [ti_flags] "i" (offsetof(struct thread_info, flags)), \
+ [_tif_fork] "i" (_TIF_FORK), \
+ [thread_info] "i" (offsetof(struct task_struct, stack)), \
+ [current_task] "m" (current_task) \
+ __switch_canary_iparam \
+ : "memory", "cc" __EXTRA_CLOBBER)
+
+#endif /* CONFIG_X86_32 */
+
+#endif /* _ASM_X86_SWITCH_TO_H */
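
Illustration (not part of the patch): switch_to() is invoked from the arch-independent context-switch path roughly as below; "last" ends up holding the task we actually switched away from once we are running on next's kernel stack. The wrapper name is hypothetical.

    #include <linux/sched.h>
    #include <asm/switch_to.h>

    static struct task_struct *do_switch(struct task_struct *prev,
                                         struct task_struct *next)
    {
        struct task_struct *last;

        switch_to(prev, next, last);    /* resumes here on next's kernel stack */
        return last;
    }
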
diff --git a/arch/x86/include/asm/sync_bitops.h b/arch/x86/include/asm/sync_bitops.h
index 9d09b4073b6..f28a24b51dc 100644
--- a/arch/x86/include/asm/sync_bitops.h
+++ b/arch/x86/include/asm/sync_bitops.h
@@ -26,9 +26,9 @@
* Note that @nr may be almost arbitrarily large; this function is not
* restricted to acting on a single-word quantity.
*/
-static inline void sync_set_bit(int nr, volatile unsigned long *addr)
+static inline void sync_set_bit(long nr, volatile unsigned long *addr)
{
- asm volatile("lock; btsl %1,%0"
+ asm volatile("lock; bts %1,%0"
: "+m" (ADDR)
: "Ir" (nr)
: "memory");
@@ -41,12 +41,12 @@ static inline void sync_set_bit(int nr, volatile unsigned long *addr)
*
* sync_clear_bit() is atomic and may not be reordered. However, it does
* not contain a memory barrier, so if it is used for locking purposes,
- * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
+ * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
* in order to ensure changes are visible on other processors.
*/
-static inline void sync_clear_bit(int nr, volatile unsigned long *addr)
+static inline void sync_clear_bit(long nr, volatile unsigned long *addr)
{
- asm volatile("lock; btrl %1,%0"
+ asm volatile("lock; btr %1,%0"
: "+m" (ADDR)
: "Ir" (nr)
: "memory");
@@ -61,9 +61,9 @@ static inline void sync_clear_bit(int nr, volatile unsigned long *addr)
* Note that @nr may be almost arbitrarily large; this function is not
* restricted to acting on a single-word quantity.
*/
-static inline void sync_change_bit(int nr, volatile unsigned long *addr)
+static inline void sync_change_bit(long nr, volatile unsigned long *addr)
{
- asm volatile("lock; btcl %1,%0"
+ asm volatile("lock; btc %1,%0"
: "+m" (ADDR)
: "Ir" (nr)
: "memory");
@@ -77,11 +77,11 @@ static inline void sync_change_bit(int nr, volatile unsigned long *addr)
* This operation is atomic and cannot be reordered.
* It also implies a memory barrier.
*/
-static inline int sync_test_and_set_bit(int nr, volatile unsigned long *addr)
+static inline int sync_test_and_set_bit(long nr, volatile unsigned long *addr)
{
int oldbit;
- asm volatile("lock; btsl %2,%1\n\tsbbl %0,%0"
+ asm volatile("lock; bts %2,%1\n\tsbbl %0,%0"
: "=r" (oldbit), "+m" (ADDR)
: "Ir" (nr) : "memory");
return oldbit;
@@ -95,11 +95,11 @@ static inline int sync_test_and_set_bit(int nr, volatile unsigned long *addr)
* This operation is atomic and cannot be reordered.
* It also implies a memory barrier.
*/
-static inline int sync_test_and_clear_bit(int nr, volatile unsigned long *addr)
+static inline int sync_test_and_clear_bit(long nr, volatile unsigned long *addr)
{
int oldbit;
- asm volatile("lock; btrl %2,%1\n\tsbbl %0,%0"
+ asm volatile("lock; btr %2,%1\n\tsbbl %0,%0"
: "=r" (oldbit), "+m" (ADDR)
: "Ir" (nr) : "memory");
return oldbit;
@@ -113,11 +113,11 @@ static inline int sync_test_and_clear_bit(int nr, volatile unsigned long *addr)
* This operation is atomic and cannot be reordered.
* It also implies a memory barrier.
*/
-static inline int sync_test_and_change_bit(int nr, volatile unsigned long *addr)
+static inline int sync_test_and_change_bit(long nr, volatile unsigned long *addr)
{
int oldbit;
- asm volatile("lock; btcl %2,%1\n\tsbbl %0,%0"
+ asm volatile("lock; btc %2,%1\n\tsbbl %0,%0"
: "=r" (oldbit), "+m" (ADDR)
: "Ir" (nr) : "memory");
return oldbit;
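
Illustration (not part of the patch): the sync_* bitops always emit a lock prefix, even on !CONFIG_SMP kernels, because the bitmap may be shared with a hypervisor or another domain rather than with another local CPU. A hypothetical caller:

    #include <asm/sync_bitops.h>

    /* Returns nonzero if the slot had already been claimed by the other side. */
    static int claim_shared_slot(unsigned long *shared_bitmap, long slot)
    {
        return sync_test_and_set_bit(slot, shared_bitmap);
    }
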
diff --git a/arch/x86/include/asm/sys_ia32.h b/arch/x86/include/asm/sys_ia32.h
index cb238526a9f..82c34ee25a6 100644
--- a/arch/x86/include/asm/sys_ia32.h
+++ b/arch/x86/include/asm/sys_ia32.h
@@ -10,6 +10,8 @@
#ifndef _ASM_X86_SYS_IA32_H
#define _ASM_X86_SYS_IA32_H
+#ifdef CONFIG_COMPAT
+
#include <linux/compiler.h>
#include <linux/linkage.h>
#include <linux/types.h>
@@ -28,41 +30,14 @@ asmlinkage long sys32_fstatat(unsigned int, const char __user *,
struct stat64 __user *, int);
struct mmap_arg_struct32;
asmlinkage long sys32_mmap(struct mmap_arg_struct32 __user *);
-asmlinkage long sys32_mprotect(unsigned long, size_t, unsigned long);
-
-struct sigaction32;
-struct old_sigaction32;
-asmlinkage long sys32_rt_sigaction(int, struct sigaction32 __user *,
- struct sigaction32 __user *, unsigned int);
-asmlinkage long sys32_sigaction(int, struct old_sigaction32 __user *,
- struct old_sigaction32 __user *);
-asmlinkage long sys32_rt_sigprocmask(int, compat_sigset_t __user *,
- compat_sigset_t __user *, unsigned int);
-asmlinkage long sys32_alarm(unsigned int);
-
-asmlinkage long sys32_waitpid(compat_pid_t, unsigned int *, int);
-asmlinkage long sys32_sysfs(int, u32, u32);
-asmlinkage long sys32_sched_rr_get_interval(compat_pid_t,
- struct compat_timespec __user *);
-asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *, compat_size_t);
-asmlinkage long sys32_rt_sigqueueinfo(int, int, compat_siginfo_t __user *);
+asmlinkage long sys32_waitpid(compat_pid_t, unsigned int __user *, int);
asmlinkage long sys32_pread(unsigned int, char __user *, u32, u32, u32);
asmlinkage long sys32_pwrite(unsigned int, const char __user *, u32, u32, u32);
-asmlinkage long sys32_personality(unsigned long);
-asmlinkage long sys32_sendfile(int, int, compat_off_t __user *, s32);
-
-asmlinkage long sys32_execve(const char __user *, compat_uptr_t __user *,
- compat_uptr_t __user *, struct pt_regs *);
-asmlinkage long sys32_clone(unsigned int, unsigned int, struct pt_regs *);
-
-long sys32_lseek(unsigned int, int, unsigned int);
-long sys32_kill(int, int);
long sys32_fadvise64_64(int, __u32, __u32, __u32, __u32, int);
long sys32_vm86_warning(void);
-long sys32_lookup_dcookie(u32, u32, char __user *, size_t);
asmlinkage ssize_t sys32_readahead(int, unsigned, unsigned, size_t);
asmlinkage long sys32_sync_file_range(int, unsigned, unsigned,
@@ -72,15 +47,9 @@ asmlinkage long sys32_fallocate(int, int, unsigned,
unsigned, unsigned, unsigned);
/* ia32/ia32_signal.c */
-asmlinkage long sys32_sigsuspend(int, int, old_sigset_t);
-asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *,
- stack_ia32_t __user *, struct pt_regs *);
-asmlinkage long sys32_sigreturn(struct pt_regs *);
-asmlinkage long sys32_rt_sigreturn(struct pt_regs *);
+asmlinkage long sys32_sigreturn(void);
+asmlinkage long sys32_rt_sigreturn(void);
-/* ia32/ipc32.c */
-asmlinkage long sys32_ipc(u32, int, int, int, compat_uptr_t, u32);
+#endif /* CONFIG_COMPAT */
-asmlinkage long sys32_fanotify_mark(int, unsigned int, u32, u32, int,
- const char __user *);
#endif /* _ASM_X86_SYS_IA32_H */
diff --git a/arch/x86/include/asm/syscall.h b/arch/x86/include/asm/syscall.h
index d962e5652a7..d6a756ae04c 100644
--- a/arch/x86/include/asm/syscall.h
+++ b/arch/x86/include/asm/syscall.h
@@ -13,11 +13,15 @@
#ifndef _ASM_X86_SYSCALL_H
#define _ASM_X86_SYSCALL_H
+#include <uapi/linux/audit.h>
#include <linux/sched.h>
#include <linux/err.h>
#include <asm/asm-offsets.h> /* For NR_syscalls */
+#include <asm/thread_info.h> /* for TS_COMPAT */
+#include <asm/unistd.h>
-extern const unsigned long sys_call_table[];
+typedef void (*sys_call_ptr_t)(void);
+extern const sys_call_ptr_t sys_call_table[];
/*
* Only the low 32 bits of orig_ax are meaningful, so we return int.
@@ -87,6 +91,11 @@ static inline void syscall_set_arguments(struct task_struct *task,
memcpy(&regs->bx + i, args, n * sizeof(args[0]));
}
+static inline int syscall_get_arch(void)
+{
+ return AUDIT_ARCH_I386;
+}
+
#else /* CONFIG_X86_64 */
static inline void syscall_get_arguments(struct task_struct *task,
@@ -211,6 +220,24 @@ static inline void syscall_set_arguments(struct task_struct *task,
}
}
+static inline int syscall_get_arch(void)
+{
+#ifdef CONFIG_IA32_EMULATION
+ /*
+ * TS_COMPAT is set for 32-bit syscall entry and then
+ * remains set until we return to user mode.
+ *
+ * TIF_IA32 tasks should always have TS_COMPAT set at
+ * system call time.
+ *
+ * x32 tasks should be considered AUDIT_ARCH_X86_64.
+ */
+ if (task_thread_info(current)->status & TS_COMPAT)
+ return AUDIT_ARCH_I386;
+#endif
+ /* Both x32 and x86_64 are considered "64-bit". */
+ return AUDIT_ARCH_X86_64;
+}
#endif /* CONFIG_X86_32 */
#endif /* _ASM_X86_SYSCALL_H */
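
Illustration (not part of the patch): syscall_get_arch() exists so audit/seccomp-style consumers can interpret the syscall number against the right ABI. A hypothetical helper:

    #include <uapi/linux/audit.h>
    #include <asm/syscall.h>

    static const char *current_syscall_abi(void)
    {
        switch (syscall_get_arch()) {
        case AUDIT_ARCH_I386:
            return "i386";      /* 32-bit entry, TS_COMPAT set */
        case AUDIT_ARCH_X86_64:
            return "x86_64";    /* native 64-bit and x32 */
        default:
            return "unknown";
        }
    }
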
diff --git a/arch/x86/include/asm/syscalls.h b/arch/x86/include/asm/syscalls.h
index f1d8b441fc7..592a6a672e0 100644
--- a/arch/x86/include/asm/syscalls.h
+++ b/arch/x86/include/asm/syscalls.h
@@ -18,48 +18,33 @@
/* Common in X86_32 and X86_64 */
/* kernel/ioport.c */
asmlinkage long sys_ioperm(unsigned long, unsigned long, int);
-long sys_iopl(unsigned int, struct pt_regs *);
-
-/* kernel/process.c */
-int sys_fork(struct pt_regs *);
-int sys_vfork(struct pt_regs *);
-long sys_execve(const char __user *,
- const char __user *const __user *,
- const char __user *const __user *, struct pt_regs *);
-long sys_clone(unsigned long, unsigned long, void __user *,
- void __user *, struct pt_regs *);
+asmlinkage long sys_iopl(unsigned int);
/* kernel/ldt.c */
asmlinkage int sys_modify_ldt(int, void __user *, unsigned long);
/* kernel/signal.c */
-long sys_rt_sigreturn(struct pt_regs *);
-long sys_sigaltstack(const stack_t __user *, stack_t __user *,
- struct pt_regs *);
-
+asmlinkage long sys_rt_sigreturn(void);
/* kernel/tls.c */
-asmlinkage int sys_set_thread_area(struct user_desc __user *);
-asmlinkage int sys_get_thread_area(struct user_desc __user *);
+asmlinkage long sys_set_thread_area(struct user_desc __user *);
+asmlinkage long sys_get_thread_area(struct user_desc __user *);
/* X86_32 only */
#ifdef CONFIG_X86_32
/* kernel/signal.c */
-asmlinkage int sys_sigsuspend(int, int, old_sigset_t);
-asmlinkage int sys_sigaction(int, const struct old_sigaction __user *,
- struct old_sigaction __user *);
-unsigned long sys_sigreturn(struct pt_regs *);
+asmlinkage unsigned long sys_sigreturn(void);
/* kernel/vm86_32.c */
-int sys_vm86old(struct vm86_struct __user *, struct pt_regs *);
-int sys_vm86(unsigned long, unsigned long, struct pt_regs *);
+asmlinkage long sys_vm86old(struct vm86_struct __user *);
+asmlinkage long sys_vm86(unsigned long, unsigned long);
#else /* CONFIG_X86_32 */
/* X86_64 only */
/* kernel/process_64.c */
-long sys_arch_prctl(int, unsigned long);
+asmlinkage long sys_arch_prctl(int, unsigned long);
/* kernel/sys_x86_64.c */
asmlinkage long sys_mmap(unsigned long, unsigned long, unsigned long,
diff --git a/arch/x86/include/asm/sysfb.h b/arch/x86/include/asm/sysfb.h
new file mode 100644
index 00000000000..2aeb3e25579
--- /dev/null
+++ b/arch/x86/include/asm/sysfb.h
@@ -0,0 +1,98 @@
+#ifndef _ARCH_X86_KERNEL_SYSFB_H
+#define _ARCH_X86_KERNEL_SYSFB_H
+
+/*
+ * Generic System Framebuffers on x86
+ * Copyright (c) 2012-2013 David Herrmann <dh.herrmann@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/platform_data/simplefb.h>
+#include <linux/screen_info.h>
+
+enum {
+ M_I17, /* 17-Inch iMac */
+ M_I20, /* 20-Inch iMac */
+ M_I20_SR, /* 20-Inch iMac (Santa Rosa) */
+ M_I24, /* 24-Inch iMac */
+ M_I24_8_1, /* 24-Inch iMac, 8,1th gen */
+ M_I24_10_1, /* 24-Inch iMac, 10,1th gen */
+ M_I27_11_1, /* 27-Inch iMac, 11,1th gen */
+ M_MINI, /* Mac Mini */
+ M_MINI_3_1, /* Mac Mini, 3,1th gen */
+ M_MINI_4_1, /* Mac Mini, 4,1th gen */
+ M_MB, /* MacBook */
+ M_MB_2, /* MacBook, 2nd rev. */
+ M_MB_3, /* MacBook, 3rd rev. */
+ M_MB_5_1, /* MacBook, 5th rev. */
+ M_MB_6_1, /* MacBook, 6th rev. */
+ M_MB_7_1, /* MacBook, 7th rev. */
+ M_MB_SR, /* MacBook, 2nd gen, (Santa Rosa) */
+ M_MBA, /* MacBook Air */
+ M_MBA_3, /* Macbook Air, 3rd rev */
+ M_MBP, /* MacBook Pro */
+ M_MBP_2, /* MacBook Pro 2nd gen */
+ M_MBP_2_2, /* MacBook Pro 2,2nd gen */
+ M_MBP_SR, /* MacBook Pro (Santa Rosa) */
+ M_MBP_4, /* MacBook Pro, 4th gen */
+ M_MBP_5_1, /* MacBook Pro, 5,1th gen */
+ M_MBP_5_2, /* MacBook Pro, 5,2th gen */
+ M_MBP_5_3, /* MacBook Pro, 5,3rd gen */
+ M_MBP_6_1, /* MacBook Pro, 6,1th gen */
+ M_MBP_6_2, /* MacBook Pro, 6,2th gen */
+ M_MBP_7_1, /* MacBook Pro, 7,1th gen */
+ M_MBP_8_2, /* MacBook Pro, 8,2nd gen */
+ M_UNKNOWN /* placeholder */
+};
+
+struct efifb_dmi_info {
+ char *optname;
+ unsigned long base;
+ int stride;
+ int width;
+ int height;
+ int flags;
+};
+
+#ifdef CONFIG_EFI
+
+extern struct efifb_dmi_info efifb_dmi_list[];
+void sysfb_apply_efi_quirks(void);
+
+#else /* CONFIG_EFI */
+
+static inline void sysfb_apply_efi_quirks(void)
+{
+}
+
+#endif /* CONFIG_EFI */
+
+#ifdef CONFIG_X86_SYSFB
+
+bool parse_mode(const struct screen_info *si,
+ struct simplefb_platform_data *mode);
+int create_simplefb(const struct screen_info *si,
+ const struct simplefb_platform_data *mode);
+
+#else /* CONFIG_X86_SYSFB */
+
+static inline bool parse_mode(const struct screen_info *si,
+ struct simplefb_platform_data *mode)
+{
+ return false;
+}
+
+static inline int create_simplefb(const struct screen_info *si,
+ const struct simplefb_platform_data *mode)
+{
+ return -EINVAL;
+}
+
+#endif /* CONFIG_X86_SYSFB */
+
+#endif /* _ARCH_X86_KERNEL_SYSFB_H */
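
Illustration (not part of the patch): the intended call sequence for these helpers, roughly what the platform setup code would do to turn the firmware screen_info into a "simple-framebuffer" device. The wrapper name is hypothetical.

    #include <asm/sysfb.h>

    static void register_boot_framebuffer(const struct screen_info *si)
    {
        struct simplefb_platform_data mode;

        sysfb_apply_efi_quirks();           /* fix up known-bad EFI framebuffer data first */
        if (parse_mode(si, &mode))          /* firmware mode we can describe? */
            create_simplefb(si, &mode);     /* register the "simple-framebuffer" device */
    }
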
diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
deleted file mode 100644
index 2d2f01ce6dc..00000000000
--- a/arch/x86/include/asm/system.h
+++ /dev/null
@@ -1,523 +0,0 @@
-#ifndef _ASM_X86_SYSTEM_H
-#define _ASM_X86_SYSTEM_H
-
-#include <asm/asm.h>
-#include <asm/segment.h>
-#include <asm/cpufeature.h>
-#include <asm/cmpxchg.h>
-#include <asm/nops.h>
-
-#include <linux/kernel.h>
-#include <linux/irqflags.h>
-
-/* entries in ARCH_DLINFO: */
-#if defined(CONFIG_IA32_EMULATION) || !defined(CONFIG_X86_64)
-# define AT_VECTOR_SIZE_ARCH 2
-#else /* else it's non-compat x86-64 */
-# define AT_VECTOR_SIZE_ARCH 1
-#endif
-
-struct task_struct; /* one of the stranger aspects of C forward declarations */
-struct task_struct *__switch_to(struct task_struct *prev,
- struct task_struct *next);
-struct tss_struct;
-void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
- struct tss_struct *tss);
-extern void show_regs_common(void);
-
-#ifdef CONFIG_X86_32
-
-#ifdef CONFIG_CC_STACKPROTECTOR
-#define __switch_canary \
- "movl %P[task_canary](%[next]), %%ebx\n\t" \
- "movl %%ebx, "__percpu_arg([stack_canary])"\n\t"
-#define __switch_canary_oparam \
- , [stack_canary] "=m" (stack_canary.canary)
-#define __switch_canary_iparam \
- , [task_canary] "i" (offsetof(struct task_struct, stack_canary))
-#else /* CC_STACKPROTECTOR */
-#define __switch_canary
-#define __switch_canary_oparam
-#define __switch_canary_iparam
-#endif /* CC_STACKPROTECTOR */
-
-/*
- * Saving eflags is important. It switches not only IOPL between tasks,
- * it also protects other tasks from NT leaking through sysenter etc.
- */
-#define switch_to(prev, next, last) \
-do { \
- /* \
- * Context-switching clobbers all registers, so we clobber \
- * them explicitly, via unused output variables. \
- * (EAX and EBP is not listed because EBP is saved/restored \
- * explicitly for wchan access and EAX is the return value of \
- * __switch_to()) \
- */ \
- unsigned long ebx, ecx, edx, esi, edi; \
- \
- asm volatile("pushfl\n\t" /* save flags */ \
- "pushl %%ebp\n\t" /* save EBP */ \
- "movl %%esp,%[prev_sp]\n\t" /* save ESP */ \
- "movl %[next_sp],%%esp\n\t" /* restore ESP */ \
- "movl $1f,%[prev_ip]\n\t" /* save EIP */ \
- "pushl %[next_ip]\n\t" /* restore EIP */ \
- __switch_canary \
- "jmp __switch_to\n" /* regparm call */ \
- "1:\t" \
- "popl %%ebp\n\t" /* restore EBP */ \
- "popfl\n" /* restore flags */ \
- \
- /* output parameters */ \
- : [prev_sp] "=m" (prev->thread.sp), \
- [prev_ip] "=m" (prev->thread.ip), \
- "=a" (last), \
- \
- /* clobbered output registers: */ \
- "=b" (ebx), "=c" (ecx), "=d" (edx), \
- "=S" (esi), "=D" (edi) \
- \
- __switch_canary_oparam \
- \
- /* input parameters: */ \
- : [next_sp] "m" (next->thread.sp), \
- [next_ip] "m" (next->thread.ip), \
- \
- /* regparm parameters for __switch_to(): */ \
- [prev] "a" (prev), \
- [next] "d" (next) \
- \
- __switch_canary_iparam \
- \
- : /* reloaded segment registers */ \
- "memory"); \
-} while (0)
-
-/*
- * disable hlt during certain critical i/o operations
- */
-#define HAVE_DISABLE_HLT
-#else
-
-/* frame pointer must be last for get_wchan */
-#define SAVE_CONTEXT "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t"
-#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popf\t"
-
-#define __EXTRA_CLOBBER \
- , "rcx", "rbx", "rdx", "r8", "r9", "r10", "r11", \
- "r12", "r13", "r14", "r15"
-
-#ifdef CONFIG_CC_STACKPROTECTOR
-#define __switch_canary \
- "movq %P[task_canary](%%rsi),%%r8\n\t" \
- "movq %%r8,"__percpu_arg([gs_canary])"\n\t"
-#define __switch_canary_oparam \
- , [gs_canary] "=m" (irq_stack_union.stack_canary)
-#define __switch_canary_iparam \
- , [task_canary] "i" (offsetof(struct task_struct, stack_canary))
-#else /* CC_STACKPROTECTOR */
-#define __switch_canary
-#define __switch_canary_oparam
-#define __switch_canary_iparam
-#endif /* CC_STACKPROTECTOR */
-
-/* Save restore flags to clear handle leaking NT */
-#define switch_to(prev, next, last) \
- asm volatile(SAVE_CONTEXT \
- "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */ \
- "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */ \
- "call __switch_to\n\t" \
- "movq "__percpu_arg([current_task])",%%rsi\n\t" \
- __switch_canary \
- "movq %P[thread_info](%%rsi),%%r8\n\t" \
- "movq %%rax,%%rdi\n\t" \
- "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
- "jnz ret_from_fork\n\t" \
- RESTORE_CONTEXT \
- : "=a" (last) \
- __switch_canary_oparam \
- : [next] "S" (next), [prev] "D" (prev), \
- [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
- [ti_flags] "i" (offsetof(struct thread_info, flags)), \
- [_tif_fork] "i" (_TIF_FORK), \
- [thread_info] "i" (offsetof(struct task_struct, stack)), \
- [current_task] "m" (current_task) \
- __switch_canary_iparam \
- : "memory", "cc" __EXTRA_CLOBBER)
-#endif
-
-#ifdef __KERNEL__
-
-extern void native_load_gs_index(unsigned);
-
-/*
- * Load a segment. Fall back on loading the zero
- * segment if something goes wrong..
- */
-#define loadsegment(seg, value) \
-do { \
- unsigned short __val = (value); \
- \
- asm volatile(" \n" \
- "1: movl %k0,%%" #seg " \n" \
- \
- ".section .fixup,\"ax\" \n" \
- "2: xorl %k0,%k0 \n" \
- " jmp 1b \n" \
- ".previous \n" \
- \
- _ASM_EXTABLE(1b, 2b) \
- \
- : "+r" (__val) : : "memory"); \
-} while (0)
-
-/*
- * Save a segment register away
- */
-#define savesegment(seg, value) \
- asm("mov %%" #seg ",%0":"=r" (value) : : "memory")
-
-/*
- * x86_32 user gs accessors.
- */
-#ifdef CONFIG_X86_32
-#ifdef CONFIG_X86_32_LAZY_GS
-#define get_user_gs(regs) (u16)({unsigned long v; savesegment(gs, v); v;})
-#define set_user_gs(regs, v) loadsegment(gs, (unsigned long)(v))
-#define task_user_gs(tsk) ((tsk)->thread.gs)
-#define lazy_save_gs(v) savesegment(gs, (v))
-#define lazy_load_gs(v) loadsegment(gs, (v))
-#else /* X86_32_LAZY_GS */
-#define get_user_gs(regs) (u16)((regs)->gs)
-#define set_user_gs(regs, v) do { (regs)->gs = (v); } while (0)
-#define task_user_gs(tsk) (task_pt_regs(tsk)->gs)
-#define lazy_save_gs(v) do { } while (0)
-#define lazy_load_gs(v) do { } while (0)
-#endif /* X86_32_LAZY_GS */
-#endif /* X86_32 */
-
-static inline unsigned long get_limit(unsigned long segment)
-{
- unsigned long __limit;
- asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
- return __limit + 1;
-}
-
-static inline void native_clts(void)
-{
- asm volatile("clts");
-}
-
-/*
- * Volatile isn't enough to prevent the compiler from reordering the
- * read/write functions for the control registers and messing everything up.
- * A memory clobber would solve the problem, but would prevent reordering of
- * all loads stores around it, which can hurt performance. Solution is to
- * use a variable and mimic reads and writes to it to enforce serialization
- */
-static unsigned long __force_order;
-
-static inline unsigned long native_read_cr0(void)
-{
- unsigned long val;
- asm volatile("mov %%cr0,%0\n\t" : "=r" (val), "=m" (__force_order));
- return val;
-}
-
-static inline void native_write_cr0(unsigned long val)
-{
- asm volatile("mov %0,%%cr0": : "r" (val), "m" (__force_order));
-}
-
-static inline unsigned long native_read_cr2(void)
-{
- unsigned long val;
- asm volatile("mov %%cr2,%0\n\t" : "=r" (val), "=m" (__force_order));
- return val;
-}
-
-static inline void native_write_cr2(unsigned long val)
-{
- asm volatile("mov %0,%%cr2": : "r" (val), "m" (__force_order));
-}
-
-static inline unsigned long native_read_cr3(void)
-{
- unsigned long val;
- asm volatile("mov %%cr3,%0\n\t" : "=r" (val), "=m" (__force_order));
- return val;
-}
-
-static inline void native_write_cr3(unsigned long val)
-{
- asm volatile("mov %0,%%cr3": : "r" (val), "m" (__force_order));
-}
-
-static inline unsigned long native_read_cr4(void)
-{
- unsigned long val;
- asm volatile("mov %%cr4,%0\n\t" : "=r" (val), "=m" (__force_order));
- return val;
-}
-
-static inline unsigned long native_read_cr4_safe(void)
-{
- unsigned long val;
- /* This could fault if %cr4 does not exist. In x86_64, a cr4 always
- * exists, so it will never fail. */
-#ifdef CONFIG_X86_32
- asm volatile("1: mov %%cr4, %0\n"
- "2:\n"
- _ASM_EXTABLE(1b, 2b)
- : "=r" (val), "=m" (__force_order) : "0" (0));
-#else
- val = native_read_cr4();
-#endif
- return val;
-}
-
-static inline void native_write_cr4(unsigned long val)
-{
- asm volatile("mov %0,%%cr4": : "r" (val), "m" (__force_order));
-}
-
-#ifdef CONFIG_X86_64
-static inline unsigned long native_read_cr8(void)
-{
- unsigned long cr8;
- asm volatile("movq %%cr8,%0" : "=r" (cr8));
- return cr8;
-}
-
-static inline void native_write_cr8(unsigned long val)
-{
- asm volatile("movq %0,%%cr8" :: "r" (val) : "memory");
-}
-#endif
-
-static inline void native_wbinvd(void)
-{
- asm volatile("wbinvd": : :"memory");
-}
-
-#ifdef CONFIG_PARAVIRT
-#include <asm/paravirt.h>
-#else
-
-static inline unsigned long read_cr0(void)
-{
- return native_read_cr0();
-}
-
-static inline void write_cr0(unsigned long x)
-{
- native_write_cr0(x);
-}
-
-static inline unsigned long read_cr2(void)
-{
- return native_read_cr2();
-}
-
-static inline void write_cr2(unsigned long x)
-{
- native_write_cr2(x);
-}
-
-static inline unsigned long read_cr3(void)
-{
- return native_read_cr3();
-}
-
-static inline void write_cr3(unsigned long x)
-{
- native_write_cr3(x);
-}
-
-static inline unsigned long read_cr4(void)
-{
- return native_read_cr4();
-}
-
-static inline unsigned long read_cr4_safe(void)
-{
- return native_read_cr4_safe();
-}
-
-static inline void write_cr4(unsigned long x)
-{
- native_write_cr4(x);
-}
-
-static inline void wbinvd(void)
-{
- native_wbinvd();
-}
-
-#ifdef CONFIG_X86_64
-
-static inline unsigned long read_cr8(void)
-{
- return native_read_cr8();
-}
-
-static inline void write_cr8(unsigned long x)
-{
- native_write_cr8(x);
-}
-
-static inline void load_gs_index(unsigned selector)
-{
- native_load_gs_index(selector);
-}
-
-#endif
-
-/* Clear the 'TS' bit */
-static inline void clts(void)
-{
- native_clts();
-}
-
-#endif/* CONFIG_PARAVIRT */
-
-#define stts() write_cr0(read_cr0() | X86_CR0_TS)
-
-#endif /* __KERNEL__ */
-
-static inline void clflush(volatile void *__p)
-{
- asm volatile("clflush %0" : "+m" (*(volatile char __force *)__p));
-}
-
-#define nop() asm volatile ("nop")
-
-void disable_hlt(void);
-void enable_hlt(void);
-
-void cpu_idle_wait(void);
-
-extern unsigned long arch_align_stack(unsigned long sp);
-extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
-
-void default_idle(void);
-bool set_pm_idle_to_default(void);
-
-void stop_this_cpu(void *dummy);
-
-/*
- * Force strict CPU ordering.
- * And yes, this is required on UP too when we're talking
- * to devices.
- */
-#ifdef CONFIG_X86_32
-/*
- * Some non-Intel clones support out of order store. wmb() ceases to be a
- * nop for these.
- */
-#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
-#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
-#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
-#else
-#define mb() asm volatile("mfence":::"memory")
-#define rmb() asm volatile("lfence":::"memory")
-#define wmb() asm volatile("sfence" ::: "memory")
-#endif
-
-/**
- * read_barrier_depends - Flush all pending reads that subsequents reads
- * depend on.
- *
- * No data-dependent reads from memory-like regions are ever reordered
- * over this barrier. All reads preceding this primitive are guaranteed
- * to access memory (but not necessarily other CPUs' caches) before any
- * reads following this primitive that depend on the data return by
- * any of the preceding reads. This primitive is much lighter weight than
- * rmb() on most CPUs, and is never heavier weight than is
- * rmb().
- *
- * These ordering constraints are respected by both the local CPU
- * and the compiler.
- *
- * Ordering is not guaranteed by anything other than these primitives,
- * not even by data dependencies. See the documentation for
- * memory_barrier() for examples and URLs to more information.
- *
- * For example, the following code would force ordering (the initial
- * value of "a" is zero, "b" is one, and "p" is "&a"):
- *
- * <programlisting>
- * CPU 0 CPU 1
- *
- * b = 2;
- * memory_barrier();
- * p = &b; q = p;
- * read_barrier_depends();
- * d = *q;
- * </programlisting>
- *
- * because the read of "*q" depends on the read of "p" and these
- * two reads are separated by a read_barrier_depends(). However,
- * the following code, with the same initial values for "a" and "b":
- *
- * <programlisting>
- * CPU 0 CPU 1
- *
- * a = 2;
- * memory_barrier();
- * b = 3; y = b;
- * read_barrier_depends();
- * x = a;
- * </programlisting>
- *
- * does not enforce ordering, since there is no data dependency between
- * the read of "a" and the read of "b". Therefore, on some CPUs, such
- * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
- * in cases like this where there are no data dependencies.
- **/
-
-#define read_barrier_depends() do { } while (0)
-
-#ifdef CONFIG_SMP
-#define smp_mb() mb()
-#ifdef CONFIG_X86_PPRO_FENCE
-# define smp_rmb() rmb()
-#else
-# define smp_rmb() barrier()
-#endif
-#ifdef CONFIG_X86_OOSTORE
-# define smp_wmb() wmb()
-#else
-# define smp_wmb() barrier()
-#endif
-#define smp_read_barrier_depends() read_barrier_depends()
-#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
-#else
-#define smp_mb() barrier()
-#define smp_rmb() barrier()
-#define smp_wmb() barrier()
-#define smp_read_barrier_depends() do { } while (0)
-#define set_mb(var, value) do { var = value; barrier(); } while (0)
-#endif
-
-/*
- * Stop RDTSC speculation. This is needed when you need to use RDTSC
- * (or get_cycles or vread that possibly accesses the TSC) in a defined
- * code region.
- *
- * (Could use an alternative three way for this if there was one.)
- */
-static __always_inline void rdtsc_barrier(void)
-{
- alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC);
- alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC);
-}
-
-/*
- * We handle most unaligned accesses in hardware. On the other hand
- * unaligned DMA can be quite expensive on some Nehalem processors.
- *
- * Based on this we disable the IP header alignment in network drivers.
- */
-#define NET_IP_ALIGN 0
-#endif /* _ASM_X86_SYSTEM_H */
diff --git a/arch/x86/include/asm/termbits.h b/arch/x86/include/asm/termbits.h
deleted file mode 100644
index 3935b106de7..00000000000
--- a/arch/x86/include/asm/termbits.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/termbits.h>
diff --git a/arch/x86/include/asm/termios.h b/arch/x86/include/asm/termios.h
deleted file mode 100644
index 280d78a9d96..00000000000
--- a/arch/x86/include/asm/termios.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/termios.h>
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index cfd8144d552..854053889d4 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -9,6 +9,7 @@
#include <linux/compiler.h>
#include <asm/page.h>
+#include <asm/percpu.h>
#include <asm/types.h>
/*
@@ -20,7 +21,6 @@
struct task_struct;
struct exec_domain;
#include <asm/processor.h>
-#include <asm/ftrace.h>
#include <linux/atomic.h>
struct thread_info {
@@ -29,17 +29,10 @@ struct thread_info {
__u32 flags; /* low level flags */
__u32 status; /* thread synchronous flags */
__u32 cpu; /* current CPU */
- int preempt_count; /* 0 => preemptable,
- <0 => BUG */
+ int saved_preempt_count;
mm_segment_t addr_limit;
struct restart_block restart_block;
void __user *sysenter_return;
-#ifdef CONFIG_X86_32
- unsigned long previous_esp; /* ESP of the previous stack in
- case of nested (IRQ) stacks
- */
- __u8 supervisor_stack[0];
-#endif
unsigned int sig_on_uaccess_error:1;
unsigned int uaccess_err:1; /* uaccess failed */
};
@@ -50,7 +43,7 @@ struct thread_info {
.exec_domain = &default_exec_domain, \
.flags = 0, \
.cpu = 0, \
- .preempt_count = INIT_PREEMPT_COUNT, \
+ .saved_preempt_count = INIT_PREEMPT_COUNT, \
.addr_limit = KERNEL_DS, \
.restart_block = { \
.fn = do_no_restart_syscall, \
@@ -79,53 +72,60 @@ struct thread_info {
#define TIF_SIGPENDING 2 /* signal pending */
#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
#define TIF_SINGLESTEP 4 /* reenable singlestep on user return*/
-#define TIF_IRET 5 /* force IRET */
#define TIF_SYSCALL_EMU 6 /* syscall emulation active */
#define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
#define TIF_SECCOMP 8 /* secure computing */
#define TIF_MCE_NOTIFY 10 /* notify userspace of an MCE */
#define TIF_USER_RETURN_NOTIFY 11 /* notify kernel of userspace return */
+#define TIF_UPROBE 12 /* breakpointed or singlestepping */
#define TIF_NOTSC 16 /* TSC is not accessible in userland */
-#define TIF_IA32 17 /* 32bit process */
+#define TIF_IA32 17 /* IA32 compatibility process */
#define TIF_FORK 18 /* ret_from_fork */
+#define TIF_NOHZ 19 /* in adaptive nohz mode */
#define TIF_MEMDIE 20 /* is terminating due to OOM killer */
-#define TIF_DEBUG 21 /* uses debug registers */
+#define TIF_POLLING_NRFLAG 21 /* idle is polling for TIF_NEED_RESCHED */
#define TIF_IO_BITMAP 22 /* uses I/O bitmap */
#define TIF_FORCED_TF 24 /* true if TF in eflags artificially */
#define TIF_BLOCKSTEP 25 /* set when we want DEBUGCTLMSR_BTF */
#define TIF_LAZY_MMU_UPDATES 27 /* task is updating the mmu lazily */
#define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */
+#define TIF_ADDR32 29 /* 32-bit address space on 64 bits */
+#define TIF_X32 30 /* 32-bit native x86-64 binary */
#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
-#define _TIF_IRET (1 << TIF_IRET)
#define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU)
#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
#define _TIF_SECCOMP (1 << TIF_SECCOMP)
#define _TIF_MCE_NOTIFY (1 << TIF_MCE_NOTIFY)
#define _TIF_USER_RETURN_NOTIFY (1 << TIF_USER_RETURN_NOTIFY)
+#define _TIF_UPROBE (1 << TIF_UPROBE)
#define _TIF_NOTSC (1 << TIF_NOTSC)
#define _TIF_IA32 (1 << TIF_IA32)
#define _TIF_FORK (1 << TIF_FORK)
-#define _TIF_DEBUG (1 << TIF_DEBUG)
+#define _TIF_NOHZ (1 << TIF_NOHZ)
+#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
#define _TIF_IO_BITMAP (1 << TIF_IO_BITMAP)
#define _TIF_FORCED_TF (1 << TIF_FORCED_TF)
#define _TIF_BLOCKSTEP (1 << TIF_BLOCKSTEP)
#define _TIF_LAZY_MMU_UPDATES (1 << TIF_LAZY_MMU_UPDATES)
#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
+#define _TIF_ADDR32 (1 << TIF_ADDR32)
+#define _TIF_X32 (1 << TIF_X32)
/* work to do in syscall_trace_enter() */
#define _TIF_WORK_SYSCALL_ENTRY \
(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_AUDIT | \
- _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT)
+ _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT | \
+ _TIF_NOHZ)
/* work to do in syscall_trace_leave() */
#define _TIF_WORK_SYSCALL_EXIT \
(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP | \
- _TIF_SYSCALL_TRACEPOINT)
+ _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ)
/* work to do on interrupt/exception return */
#define _TIF_WORK_MASK \
@@ -135,7 +135,8 @@ struct thread_info {
/* work to do on any return to user space */
#define _TIF_ALLWORK_MASK \
- ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT)
+ ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT | \
+ _TIF_NOHZ)
/* Only used for 64 bit */
#define _TIF_DO_NOTIFY_MASK \
@@ -147,31 +148,11 @@ struct thread_info {
(_TIF_IO_BITMAP|_TIF_NOTSC|_TIF_BLOCKSTEP)
#define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY)
-#define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW|_TIF_DEBUG)
-
-#define PREEMPT_ACTIVE 0x10000000
-
-/* thread information allocation */
-#ifdef CONFIG_DEBUG_STACK_USAGE
-#define THREAD_FLAGS (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)
-#else
-#define THREAD_FLAGS (GFP_KERNEL | __GFP_NOTRACK)
-#endif
+#define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
-#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
+#define STACK_WARN (THREAD_SIZE/8)
+#define KERNEL_STACK_OFFSET (5*(BITS_PER_LONG/8))
-#define alloc_thread_info_node(tsk, node) \
-({ \
- struct page *page = alloc_pages_node(node, THREAD_FLAGS, \
- THREAD_ORDER); \
- struct thread_info *ret = page ? page_address(page) : NULL; \
- \
- ret; \
-})
-
-#ifdef CONFIG_X86_32
-
-#define STACK_WARN (THREAD_SIZE/8)
/*
* macros/functions for gaining access to the thread information structure
*
@@ -179,46 +160,12 @@ struct thread_info {
*/
#ifndef __ASSEMBLY__
-
-/* how to get the current stack pointer from C */
-register unsigned long current_stack_pointer asm("esp") __used;
-
-/* how to get the thread information struct from C */
-static inline struct thread_info *current_thread_info(void)
-{
- return (struct thread_info *)
- (current_stack_pointer & ~(THREAD_SIZE - 1));
-}
-
-#else /* !__ASSEMBLY__ */
-
-/* how to get the thread information struct from ASM */
-#define GET_THREAD_INFO(reg) \
- movl $-THREAD_SIZE, reg; \
- andl %esp, reg
-
-/* use this one if reg already contains %esp */
-#define GET_THREAD_INFO_WITH_ESP(reg) \
- andl $-THREAD_SIZE, reg
-
-#endif
-
-#else /* X86_32 */
-
-#include <asm/percpu.h>
-#define KERNEL_STACK_OFFSET (5*8)
-
-/*
- * macros/functions for gaining access to the thread information structure
- * preempt_count needs to be 1 initially, until the scheduler is functional.
- */
-#ifndef __ASSEMBLY__
DECLARE_PER_CPU(unsigned long, kernel_stack);
static inline struct thread_info *current_thread_info(void)
{
struct thread_info *ti;
- ti = (void *)(percpu_read_stable(kernel_stack) +
+ ti = (void *)(this_cpu_read_stable(kernel_stack) +
KERNEL_STACK_OFFSET - THREAD_SIZE);
return ti;
}
@@ -227,8 +174,8 @@ static inline struct thread_info *current_thread_info(void)
/* how to get the thread information struct from ASM */
#define GET_THREAD_INFO(reg) \
- movq PER_CPU_VAR(kernel_stack),reg ; \
- subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
+ _ASM_MOV PER_CPU_VAR(kernel_stack),reg ; \
+ _ASM_SUB $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg ;
/*
* Same if PER_CPU_VAR(kernel_stack) is, perhaps with some offset, already in
@@ -238,8 +185,6 @@ static inline struct thread_info *current_thread_info(void)
#endif
-#endif /* !X86_32 */
-
/*
* Thread-synchronous status.
*
@@ -248,26 +193,49 @@ static inline struct thread_info *current_thread_info(void)
* have to worry about atomic accesses.
*/
#define TS_COMPAT 0x0002 /* 32bit syscall active (64BIT)*/
-#define TS_POLLING 0x0004 /* idle task polling need_resched,
- skip sending interrupt */
#define TS_RESTORE_SIGMASK 0x0008 /* restore signal mask in do_signal() */
-#define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING)
-
#ifndef __ASSEMBLY__
#define HAVE_SET_RESTORE_SIGMASK 1
static inline void set_restore_sigmask(void)
{
struct thread_info *ti = current_thread_info();
ti->status |= TS_RESTORE_SIGMASK;
- set_bit(TIF_SIGPENDING, (unsigned long *)&ti->flags);
+ WARN_ON(!test_bit(TIF_SIGPENDING, (unsigned long *)&ti->flags));
+}
+static inline void clear_restore_sigmask(void)
+{
+ current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
+}
+static inline bool test_restore_sigmask(void)
+{
+ return current_thread_info()->status & TS_RESTORE_SIGMASK;
+}
+static inline bool test_and_clear_restore_sigmask(void)
+{
+ struct thread_info *ti = current_thread_info();
+ if (!(ti->status & TS_RESTORE_SIGMASK))
+ return false;
+ ti->status &= ~TS_RESTORE_SIGMASK;
+ return true;
+}
+
+static inline bool is_ia32_task(void)
+{
+#ifdef CONFIG_X86_32
+ return true;
+#endif
+#ifdef CONFIG_IA32_EMULATION
+ if (current_thread_info()->status & TS_COMPAT)
+ return true;
+#endif
+ return false;
}
#endif /* !__ASSEMBLY__ */
#ifndef __ASSEMBLY__
extern void arch_task_cache_init(void);
-extern void free_thread_info(struct thread_info *ti);
extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
-#define arch_task_cache_init arch_task_cache_init
+extern void arch_release_task_struct(struct task_struct *tsk);
#endif
#endif /* _ASM_X86_THREAD_INFO_H */
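The TS_RESTORE_SIGMASK rework above replaces the old "set TIF_SIGPENDING" behaviour with explicit test/clear helpers. As a hedged illustration (not part of this patch), signal-return code would typically consume them the way the generic restore_saved_sigmask() helper does; current->saved_sigmask and __set_current_blocked() are assumed to come from <linux/sched.h> and <linux/signal.h>:

static inline void restore_saved_sigmask_sketch(void)
{
	/* Only restore the saved mask if do_signal() asked for it. */
	if (test_and_clear_restore_sigmask())
		__set_current_blocked(&current->saved_sigmask);
}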
diff --git a/arch/x86/include/asm/timer.h b/arch/x86/include/asm/timer.h
index 431793e5d48..a04eabd43d0 100644
--- a/arch/x86/include/asm/timer.h
+++ b/arch/x86/include/asm/timer.h
@@ -1,9 +1,9 @@
#ifndef _ASM_X86_TIMER_H
#define _ASM_X86_TIMER_H
-#include <linux/init.h>
#include <linux/pm.h>
#include <linux/percpu.h>
#include <linux/interrupt.h>
+#include <linux/math64.h>
#define TICK_SIZE (tick_nsec / 1000)
@@ -12,72 +12,26 @@ extern int recalibrate_cpu_khz(void);
extern int no_timer_check;
-/* Accelerators for sched_clock()
- * convert from cycles(64bits) => nanoseconds (64bits)
- * basic equation:
- * ns = cycles / (freq / ns_per_sec)
- * ns = cycles * (ns_per_sec / freq)
- * ns = cycles * (10^9 / (cpu_khz * 10^3))
- * ns = cycles * (10^6 / cpu_khz)
+/*
+ * We use the full linear equation: f(x) = a + b*x, in order to allow
+ * a continuous function in the face of dynamic freq changes.
*
- * Then we use scaling math (suggested by george@mvista.com) to get:
- * ns = cycles * (10^6 * SC / cpu_khz) / SC
- * ns = cycles * cyc2ns_scale / SC
+ * Continuity means that when our frequency changes our slope (b), we want to

+ * ensure that: f(t) == f'(t), which gives: a + b*t == a' + b'*t.
*
- * And since SC is a constant power of two, we can convert the div
- * into a shift.
+ * Without an offset (a) the above would not be possible.
*
- * We can use khz divisor instead of mhz to keep a better precision, since
- * cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits.
- * (mathieu.desnoyers@polymtl.ca)
- *
- * -johnstul@us.ibm.com "math is hard, lets go shopping!"
- *
- * In:
- *
- * ns = cycles * cyc2ns_scale / SC
- *
- * Although we may still have enough bits to store the value of ns,
- * in some cases, we may not have enough bits to store cycles * cyc2ns_scale,
- * leading to an incorrect result.
- *
- * To avoid this, we can decompose 'cycles' into quotient and remainder
- * of division by SC. Then,
- *
- * ns = (quot * SC + rem) * cyc2ns_scale / SC
- * = quot * cyc2ns_scale + (rem * cyc2ns_scale) / SC
- *
- * - sqazi@google.com
+ * See the comment near cycles_2_ns() for details on how we compute (b).
*/
-
-DECLARE_PER_CPU(unsigned long, cyc2ns);
-DECLARE_PER_CPU(unsigned long long, cyc2ns_offset);
-
-#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */
-
-static inline unsigned long long __cycles_2_ns(unsigned long long cyc)
-{
- unsigned long long quot;
- unsigned long long rem;
- int cpu = smp_processor_id();
- unsigned long long ns = per_cpu(cyc2ns_offset, cpu);
- quot = (cyc >> CYC2NS_SCALE_FACTOR);
- rem = cyc & ((1ULL << CYC2NS_SCALE_FACTOR) - 1);
- ns += quot * per_cpu(cyc2ns, cpu) +
- ((rem * per_cpu(cyc2ns, cpu)) >> CYC2NS_SCALE_FACTOR);
- return ns;
-}
-
-static inline unsigned long long cycles_2_ns(unsigned long long cyc)
-{
- unsigned long long ns;
- unsigned long flags;
-
- local_irq_save(flags);
- ns = __cycles_2_ns(cyc);
- local_irq_restore(flags);
-
- return ns;
-}
+struct cyc2ns_data {
+ u32 cyc2ns_mul;
+ u32 cyc2ns_shift;
+ u64 cyc2ns_offset;
+ u32 __count;
+ /* u32 hole */
+}; /* 24 bytes -- do not grow */
+
+extern struct cyc2ns_data *cyc2ns_read_begin(void);
+extern void cyc2ns_read_end(struct cyc2ns_data *);
#endif /* _ASM_X86_TIMER_H */
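For readers of the new interface, here is a minimal sketch (not part of the patch) of how the f(x) = a + b*x form maps onto the cyc2ns_data fields: cyc2ns_offset supplies the offset "a", and the mul/shift pair implements the slope "b" as a fixed-point multiply, assuming the mul_u64_u32_shr() helper from the <linux/math64.h> include added above:

static inline u64 cycles_to_ns_sketch(u64 cycles)
{
	struct cyc2ns_data *data = cyc2ns_read_begin();
	/* ns = a + b*x: the offset plus (cycles * mul) >> shift. */
	u64 ns = data->cyc2ns_offset +
		 mul_u64_u32_shr(cycles, data->cyc2ns_mul, data->cyc2ns_shift);

	cyc2ns_read_end(data);
	return ns;
}

The read_begin/read_end pair brackets the conversion so a concurrent frequency change cannot tear the (offset, mul, shift) triple.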
diff --git a/arch/x86/include/asm/tlb.h b/arch/x86/include/asm/tlb.h
index 829215fef9e..c7797307fc2 100644
--- a/arch/x86/include/asm/tlb.h
+++ b/arch/x86/include/asm/tlb.h
@@ -4,7 +4,14 @@
#define tlb_start_vma(tlb, vma) do { } while (0)
#define tlb_end_vma(tlb, vma) do { } while (0)
#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
-#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
+
+#define tlb_flush(tlb) \
+{ \
+ if (!tlb->fullmm && !tlb->need_flush_all) \
+ flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end, 0UL); \
+ else \
+ flush_tlb_mm_range(tlb->mm, 0UL, TLB_FLUSH_ALL, 0UL); \
+}
#include <asm-generic/tlb.h>
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 169be8938b9..04905bfc508 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -5,7 +5,7 @@
#include <linux/sched.h>
#include <asm/processor.h>
-#include <asm/system.h>
+#include <asm/special_insns.h>
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
@@ -20,10 +20,20 @@ static inline void __native_flush_tlb(void)
native_write_cr3(native_read_cr3());
}
+static inline void __native_flush_tlb_global_irq_disabled(void)
+{
+ unsigned long cr4;
+
+ cr4 = native_read_cr4();
+ /* clear PGE */
+ native_write_cr4(cr4 & ~X86_CR4_PGE);
+ /* write old PGE again and flush TLBs */
+ native_write_cr4(cr4);
+}
+
static inline void __native_flush_tlb_global(void)
{
unsigned long flags;
- unsigned long cr4;
/*
* Read-modify-write to CR4 - protect it from preemption and
@@ -32,11 +42,7 @@ static inline void __native_flush_tlb_global(void)
*/
raw_local_irq_save(flags);
- cr4 = native_read_cr4();
- /* clear PGE */
- native_write_cr4(cr4 & ~X86_CR4_PGE);
- /* write old PGE again and flush TLBs */
- native_write_cr4(cr4);
+ __native_flush_tlb_global_irq_disabled();
raw_local_irq_restore(flags);
}
@@ -56,17 +62,11 @@ static inline void __flush_tlb_all(void)
static inline void __flush_tlb_one(unsigned long addr)
{
- if (cpu_has_invlpg)
- __flush_tlb_single(addr);
- else
- __flush_tlb();
+ count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
+ __flush_tlb_single(addr);
}
-#ifdef CONFIG_X86_32
-# define TLB_FLUSH_ALL 0xffffffff
-#else
-# define TLB_FLUSH_ALL -1ULL
-#endif
+#define TLB_FLUSH_ALL -1UL
/*
* TLB flushing:
@@ -77,26 +77,46 @@ static inline void __flush_tlb_one(unsigned long addr)
* - flush_tlb_page(vma, vmaddr) flushes one page
* - flush_tlb_range(vma, start, end) flushes a range of pages
* - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
- * - flush_tlb_others(cpumask, mm, va) flushes TLBs on other cpus
+ * - flush_tlb_others(cpumask, mm, start, end) flushes TLBs on other cpus
*
* ..but the i386 has somewhat limited tlb flushing capabilities,
* and page-granular flushes are available only on i486 and up.
- *
- * x86-64 can only flush individual pages or full VMs. For a range flush
- * we always do the full VM. Might be worth trying if for a small
- * range a few INVLPGs in a row are a win.
*/
#ifndef CONFIG_SMP
-#define flush_tlb() __flush_tlb()
-#define flush_tlb_all() __flush_tlb_all()
-#define local_flush_tlb() __flush_tlb()
+/* "_up" is for UniProcessor.
+ *
+ * This is a helper for other header functions. *Not* intended to be called
+ * directly. All global TLB flushes need to either call this, or to bump the
+ * vm statistics themselves.
+ */
+static inline void __flush_tlb_up(void)
+{
+ count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
+ __flush_tlb();
+}
+
+static inline void flush_tlb_all(void)
+{
+ count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
+ __flush_tlb_all();
+}
+
+static inline void flush_tlb(void)
+{
+ __flush_tlb_up();
+}
+
+static inline void local_flush_tlb(void)
+{
+ __flush_tlb_up();
+}
static inline void flush_tlb_mm(struct mm_struct *mm)
{
if (mm == current->active_mm)
- __flush_tlb();
+ __flush_tlb_up();
}
static inline void flush_tlb_page(struct vm_area_struct *vma,
@@ -110,12 +130,20 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
if (vma->vm_mm == current->active_mm)
- __flush_tlb();
+ __flush_tlb_up();
+}
+
+static inline void flush_tlb_mm_range(struct mm_struct *mm,
+ unsigned long start, unsigned long end, unsigned long vmflag)
+{
+ if (mm == current->active_mm)
+ __flush_tlb_up();
}
static inline void native_flush_tlb_others(const struct cpumask *cpumask,
struct mm_struct *mm,
- unsigned long va)
+ unsigned long start,
+ unsigned long end)
{
}
@@ -123,27 +151,35 @@ static inline void reset_lazy_tlbstate(void)
{
}
+static inline void flush_tlb_kernel_range(unsigned long start,
+ unsigned long end)
+{
+ flush_tlb_all();
+}
+
#else /* SMP */
#include <asm/smp.h>
#define local_flush_tlb() __flush_tlb()
+#define flush_tlb_mm(mm) flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL)
+
+#define flush_tlb_range(vma, start, end) \
+ flush_tlb_mm_range(vma->vm_mm, start, end, vma->vm_flags)
+
extern void flush_tlb_all(void);
extern void flush_tlb_current_task(void);
-extern void flush_tlb_mm(struct mm_struct *);
extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
+extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
+ unsigned long end, unsigned long vmflag);
+extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
#define flush_tlb() flush_tlb_current_task()
-static inline void flush_tlb_range(struct vm_area_struct *vma,
- unsigned long start, unsigned long end)
-{
- flush_tlb_mm(vma->vm_mm);
-}
-
void native_flush_tlb_others(const struct cpumask *cpumask,
- struct mm_struct *mm, unsigned long va);
+ struct mm_struct *mm,
+ unsigned long start, unsigned long end);
#define TLBSTATE_OK 1
#define TLBSTATE_LAZY 2
@@ -156,20 +192,15 @@ DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);
static inline void reset_lazy_tlbstate(void)
{
- percpu_write(cpu_tlbstate.state, 0);
- percpu_write(cpu_tlbstate.active_mm, &init_mm);
+ this_cpu_write(cpu_tlbstate.state, 0);
+ this_cpu_write(cpu_tlbstate.active_mm, &init_mm);
}
#endif /* SMP */
#ifndef CONFIG_PARAVIRT
-#define flush_tlb_others(mask, mm, va) native_flush_tlb_others(mask, mm, va)
+#define flush_tlb_others(mask, mm, start, end) \
+ native_flush_tlb_others(mask, mm, start, end)
#endif
-static inline void flush_tlb_kernel_range(unsigned long start,
- unsigned long end)
-{
- flush_tlb_all();
-}
-
#endif /* _ASM_X86_TLBFLUSH_H */
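The interface change above moves callers from whole-mm flushes to explicit (start, end) ranges. A hedged sketch of a single-page flush under the new API follows; the wrapper name is illustrative, PAGE_SIZE is assumed from <asm/page.h>, and passing 0UL for vmflag mirrors the flush_tlb_mm() macro defined above:

static inline void flush_one_user_page_sketch(struct mm_struct *mm,
					      unsigned long addr)
{
	/* Flush exactly one page worth of TLB entries for this mm. */
	flush_tlb_mm_range(mm, addr, addr + PAGE_SIZE, 0UL);
}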
diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h
index b9676ae37ad..0e8f04f2c26 100644
--- a/arch/x86/include/asm/topology.h
+++ b/arch/x86/include/asm/topology.h
@@ -92,44 +92,6 @@ extern void setup_node_to_cpumask_map(void);
#define pcibus_to_node(bus) __pcibus_to_node(bus)
-#ifdef CONFIG_X86_32
-# define SD_CACHE_NICE_TRIES 1
-# define SD_IDLE_IDX 1
-#else
-# define SD_CACHE_NICE_TRIES 2
-# define SD_IDLE_IDX 2
-#endif
-
-/* sched_domains SD_NODE_INIT for NUMA machines */
-#define SD_NODE_INIT (struct sched_domain) { \
- .min_interval = 8, \
- .max_interval = 32, \
- .busy_factor = 32, \
- .imbalance_pct = 125, \
- .cache_nice_tries = SD_CACHE_NICE_TRIES, \
- .busy_idx = 3, \
- .idle_idx = SD_IDLE_IDX, \
- .newidle_idx = 0, \
- .wake_idx = 0, \
- .forkexec_idx = 0, \
- \
- .flags = 1*SD_LOAD_BALANCE \
- | 1*SD_BALANCE_NEWIDLE \
- | 1*SD_BALANCE_EXEC \
- | 1*SD_BALANCE_FORK \
- | 0*SD_BALANCE_WAKE \
- | 1*SD_WAKE_AFFINE \
- | 0*SD_PREFER_LOCAL \
- | 0*SD_SHARE_CPUPOWER \
- | 0*SD_POWERSAVINGS_BALANCE \
- | 0*SD_SHARE_PKG_RESOURCES \
- | 1*SD_SERIALIZE \
- | 0*SD_PREFER_SIBLING \
- , \
- .last_balance = jiffies, \
- .balance_interval = 1, \
-}
-
extern int __node_distance(int, int);
#define node_distance(a, b) __node_distance(a, b)
@@ -157,14 +119,12 @@ static inline void setup_node_to_cpumask_map(void) { }
extern const struct cpumask *cpu_coregroup_mask(int cpu);
-#ifdef ENABLE_TOPO_DEFINES
#define topology_physical_package_id(cpu) (cpu_data(cpu).phys_proc_id)
#define topology_core_id(cpu) (cpu_data(cpu).cpu_core_id)
+
+#ifdef ENABLE_TOPO_DEFINES
#define topology_core_cpumask(cpu) (per_cpu(cpu_core_map, cpu))
#define topology_thread_cpumask(cpu) (per_cpu(cpu_sibling_map, cpu))
-
-/* indicates that pointers to the topology cpumask_t maps are valid */
-#define arch_provides_topology_pointers yes
#endif
static inline void arch_fix_phys_package_id(int num, u32 slot)
@@ -172,25 +132,7 @@ static inline void arch_fix_phys_package_id(int num, u32 slot)
}
struct pci_bus;
+int x86_pci_root_bus_node(int bus);
void x86_pci_root_bus_resources(int bus, struct list_head *resources);
-#ifdef CONFIG_SMP
-#define mc_capable() ((boot_cpu_data.x86_max_cores > 1) && \
- (cpumask_weight(cpu_core_mask(0)) != nr_cpu_ids))
-#define smt_capable() (smp_num_siblings > 1)
-#endif
-
-#ifdef CONFIG_NUMA
-extern int get_mp_bus_to_node(int busnum);
-extern void set_mp_bus_to_node(int busnum, int node);
-#else
-static inline int get_mp_bus_to_node(int busnum)
-{
- return 0;
-}
-static inline void set_mp_bus_to_node(int busnum, int node)
-{
-}
-#endif
-
#endif /* _ASM_X86_TOPOLOGY_H */
diff --git a/arch/x86/include/asm/trace/exceptions.h b/arch/x86/include/asm/trace/exceptions.h
new file mode 100644
index 00000000000..2fbc66c7885
--- /dev/null
+++ b/arch/x86/include/asm/trace/exceptions.h
@@ -0,0 +1,52 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM exceptions
+
+#if !defined(_TRACE_PAGE_FAULT_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_PAGE_FAULT_H
+
+#include <linux/tracepoint.h>
+
+extern void trace_irq_vector_regfunc(void);
+extern void trace_irq_vector_unregfunc(void);
+
+DECLARE_EVENT_CLASS(x86_exceptions,
+
+ TP_PROTO(unsigned long address, struct pt_regs *regs,
+ unsigned long error_code),
+
+ TP_ARGS(address, regs, error_code),
+
+ TP_STRUCT__entry(
+ __field( unsigned long, address )
+ __field( unsigned long, ip )
+ __field( unsigned long, error_code )
+ ),
+
+ TP_fast_assign(
+ __entry->address = address;
+ __entry->ip = regs->ip;
+ __entry->error_code = error_code;
+ ),
+
+ TP_printk("address=%pf ip=%pf error_code=0x%lx",
+ (void *)__entry->address, (void *)__entry->ip,
+ __entry->error_code) );
+
+#define DEFINE_PAGE_FAULT_EVENT(name) \
+DEFINE_EVENT_FN(x86_exceptions, name, \
+ TP_PROTO(unsigned long address, struct pt_regs *regs, \
+ unsigned long error_code), \
+ TP_ARGS(address, regs, error_code), \
+ trace_irq_vector_regfunc, \
+ trace_irq_vector_unregfunc);
+
+DEFINE_PAGE_FAULT_EVENT(page_fault_user);
+DEFINE_PAGE_FAULT_EVENT(page_fault_kernel);
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE exceptions
+#endif /* _TRACE_PAGE_FAULT_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
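As a hedged usage sketch (not part of this header), a single C file instantiates these tracepoints by defining CREATE_TRACE_POINTS before the include, after which the generated trace_page_fault_user()/trace_page_fault_kernel() calls can be placed in the fault paths; the function shown here is illustrative:

#define CREATE_TRACE_POINTS
#include <asm/trace/exceptions.h>

static void example_user_fault_path(struct pt_regs *regs,
				    unsigned long address,
				    unsigned long error_code)
{
	/* Fires the page_fault_user event defined by DEFINE_PAGE_FAULT_EVENT. */
	trace_page_fault_user(address, regs, error_code);
}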
diff --git a/arch/x86/include/asm/trace/irq_vectors.h b/arch/x86/include/asm/trace/irq_vectors.h
new file mode 100644
index 00000000000..4cab890007a
--- /dev/null
+++ b/arch/x86/include/asm/trace/irq_vectors.h
@@ -0,0 +1,115 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM irq_vectors
+
+#if !defined(_TRACE_IRQ_VECTORS_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_IRQ_VECTORS_H
+
+#include <linux/tracepoint.h>
+
+extern void trace_irq_vector_regfunc(void);
+extern void trace_irq_vector_unregfunc(void);
+
+DECLARE_EVENT_CLASS(x86_irq_vector,
+
+ TP_PROTO(int vector),
+
+ TP_ARGS(vector),
+
+ TP_STRUCT__entry(
+ __field( int, vector )
+ ),
+
+ TP_fast_assign(
+ __entry->vector = vector;
+ ),
+
+ TP_printk("vector=%d", __entry->vector) );
+
+#define DEFINE_IRQ_VECTOR_EVENT(name) \
+DEFINE_EVENT_FN(x86_irq_vector, name##_entry, \
+ TP_PROTO(int vector), \
+ TP_ARGS(vector), \
+ trace_irq_vector_regfunc, \
+ trace_irq_vector_unregfunc); \
+DEFINE_EVENT_FN(x86_irq_vector, name##_exit, \
+ TP_PROTO(int vector), \
+ TP_ARGS(vector), \
+ trace_irq_vector_regfunc, \
+ trace_irq_vector_unregfunc);
+
+
+/*
+ * local_timer - called when entering/exiting a local timer interrupt
+ * vector handler
+ */
+DEFINE_IRQ_VECTOR_EVENT(local_timer);
+
+/*
+ * reschedule - called when entering/exiting a reschedule vector handler
+ */
+DEFINE_IRQ_VECTOR_EVENT(reschedule);
+
+/*
+ * spurious_apic - called when entering/exiting a spurious apic vector handler
+ */
+DEFINE_IRQ_VECTOR_EVENT(spurious_apic);
+
+/*
+ * error_apic - called when entering/exiting an error apic vector handler
+ */
+DEFINE_IRQ_VECTOR_EVENT(error_apic);
+
+/*
+ * x86_platform_ipi - called when entering/exiting an x86 platform ipi interrupt
+ * vector handler
+ */
+DEFINE_IRQ_VECTOR_EVENT(x86_platform_ipi);
+
+/*
+ * irq_work - called when entering/exiting an irq work interrupt
+ * vector handler
+ */
+DEFINE_IRQ_VECTOR_EVENT(irq_work);
+
+/*
+ * We must disallow sampling irq_work_exit() because perf event sampling
+ * itself can cause irq_work, which would lead to an infinite loop;
+ *
+ * 1) irq_work_exit happens
+ * 2) generates perf sample
+ * 3) generates irq_work
+ * 4) goto 1
+ */
+TRACE_EVENT_PERF_PERM(irq_work_exit, is_sampling_event(p_event) ? -EPERM : 0);
+
+/*
+ * call_function - called when entering/exiting a call function interrupt
+ * vector handler
+ */
+DEFINE_IRQ_VECTOR_EVENT(call_function);
+
+/*
+ * call_function_single - called when entering/exiting a call function
+ * single interrupt vector handler
+ */
+DEFINE_IRQ_VECTOR_EVENT(call_function_single);
+
+/*
+ * threshold_apic - called when entering/exiting a threshold apic interrupt
+ * vector handler
+ */
+DEFINE_IRQ_VECTOR_EVENT(threshold_apic);
+
+/*
+ * thermal_apic - called when entering/exiting a thermal apic interrupt
+ * vector handler
+ */
+DEFINE_IRQ_VECTOR_EVENT(thermal_apic);
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE irq_vectors
+#endif /* _TRACE_IRQ_VECTORS_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
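Each DEFINE_IRQ_VECTOR_EVENT(name) above expands to a trace_<name>_entry()/trace_<name>_exit() pair. A hedged sketch of how a vector handler would bracket its work with one such pair; the handler body and its argument are illustrative only:

static void example_local_timer_handler(int vector)
{
	trace_local_timer_entry(vector);
	/* ... the actual local APIC timer work would run here ... */
	trace_local_timer_exit(vector);
}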
diff --git a/arch/x86/include/asm/trace_clock.h b/arch/x86/include/asm/trace_clock.h
new file mode 100644
index 00000000000..beab86cc282
--- /dev/null
+++ b/arch/x86/include/asm/trace_clock.h
@@ -0,0 +1,20 @@
+#ifndef _ASM_X86_TRACE_CLOCK_H
+#define _ASM_X86_TRACE_CLOCK_H
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+
+#ifdef CONFIG_X86_TSC
+
+extern u64 notrace trace_clock_x86_tsc(void);
+
+# define ARCH_TRACE_CLOCKS \
+ { trace_clock_x86_tsc, "x86-tsc", .in_ns = 0 },
+
+#else /* !CONFIG_X86_TSC */
+
+#define ARCH_TRACE_CLOCKS
+
+#endif
+
+#endif /* _ASM_X86_TRACE_CLOCK_H */
diff --git a/arch/x86/include/asm/trampoline.h b/arch/x86/include/asm/trampoline.h
deleted file mode 100644
index feca3118a73..00000000000
--- a/arch/x86/include/asm/trampoline.h
+++ /dev/null
@@ -1,39 +0,0 @@
-#ifndef _ASM_X86_TRAMPOLINE_H
-#define _ASM_X86_TRAMPOLINE_H
-
-#ifndef __ASSEMBLY__
-
-#include <linux/types.h>
-#include <asm/io.h>
-
-/*
- * Trampoline 80x86 program as an array. These are in the init rodata
- * segment, but that's okay, because we only care about the relative
- * addresses of the symbols.
- */
-extern const unsigned char x86_trampoline_start [];
-extern const unsigned char x86_trampoline_end [];
-extern unsigned char *x86_trampoline_base;
-
-extern unsigned long init_rsp;
-extern unsigned long initial_code;
-extern unsigned long initial_gs;
-
-extern void __init setup_trampolines(void);
-
-extern const unsigned char trampoline_data[];
-extern const unsigned char trampoline_status[];
-
-#define TRAMPOLINE_SYM(x) \
- ((void *)(x86_trampoline_base + \
- ((const unsigned char *)(x) - x86_trampoline_start)))
-
-/* Address of the SMP trampoline */
-static inline unsigned long trampoline_address(void)
-{
- return virt_to_phys(TRAMPOLINE_SYM(trampoline_data));
-}
-
-#endif /* __ASSEMBLY__ */
-
-#endif /* _ASM_X86_TRAMPOLINE_H */
diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h
index 0012d0902c5..bc8352e7010 100644
--- a/arch/x86/include/asm/traps.h
+++ b/arch/x86/include/asm/traps.h
@@ -6,11 +6,7 @@
#include <asm/debugreg.h>
#include <asm/siginfo.h> /* TRAP_TRACE, ... */
-#ifdef CONFIG_X86_32
-#define dotraplinkage
-#else
-#define dotraplinkage asmlinkage
-#endif
+#define dotraplinkage __visible
asmlinkage void divide_error(void);
asmlinkage void debug(void);
@@ -41,6 +37,23 @@ asmlinkage void machine_check(void);
#endif /* CONFIG_X86_MCE */
asmlinkage void simd_coprocessor_error(void);
+#ifdef CONFIG_TRACING
+asmlinkage void trace_page_fault(void);
+#define trace_divide_error divide_error
+#define trace_bounds bounds
+#define trace_invalid_op invalid_op
+#define trace_device_not_available device_not_available
+#define trace_coprocessor_segment_overrun coprocessor_segment_overrun
+#define trace_invalid_TSS invalid_TSS
+#define trace_segment_not_present segment_not_present
+#define trace_general_protection general_protection
+#define trace_spurious_interrupt_bug spurious_interrupt_bug
+#define trace_coprocessor_error coprocessor_error
+#define trace_alignment_check alignment_check
+#define trace_simd_coprocessor_error simd_coprocessor_error
+#define trace_async_page_fault async_page_fault
+#endif
+
dotraplinkage void do_divide_error(struct pt_regs *, long);
dotraplinkage void do_debug(struct pt_regs *, long);
dotraplinkage void do_nmi(struct pt_regs *, long);
@@ -55,10 +68,18 @@ dotraplinkage void do_segment_not_present(struct pt_regs *, long);
dotraplinkage void do_stack_segment(struct pt_regs *, long);
#ifdef CONFIG_X86_64
dotraplinkage void do_double_fault(struct pt_regs *, long);
-asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *);
+asmlinkage struct pt_regs *sync_regs(struct pt_regs *);
#endif
dotraplinkage void do_general_protection(struct pt_regs *, long);
dotraplinkage void do_page_fault(struct pt_regs *, unsigned long);
+#ifdef CONFIG_TRACING
+dotraplinkage void trace_do_page_fault(struct pt_regs *, unsigned long);
+#else
+static inline void trace_do_page_fault(struct pt_regs *regs, unsigned long error)
+{
+ do_page_fault(regs, error);
+}
+#endif
dotraplinkage void do_spurious_interrupt_bug(struct pt_regs *, long);
dotraplinkage void do_coprocessor_error(struct pt_regs *, long);
dotraplinkage void do_alignment_check(struct pt_regs *, long);
@@ -82,11 +103,35 @@ static inline int get_si_code(unsigned long condition)
extern int panic_on_unrecovered_nmi;
-void math_error(struct pt_regs *, int, int);
void math_emulate(struct math_emu_info *);
#ifndef CONFIG_X86_32
asmlinkage void smp_thermal_interrupt(void);
asmlinkage void mce_threshold_interrupt(void);
#endif
+/* Interrupts/Exceptions */
+enum {
+ X86_TRAP_DE = 0, /* 0, Divide-by-zero */
+ X86_TRAP_DB, /* 1, Debug */
+ X86_TRAP_NMI, /* 2, Non-maskable Interrupt */
+ X86_TRAP_BP, /* 3, Breakpoint */
+ X86_TRAP_OF, /* 4, Overflow */
+ X86_TRAP_BR, /* 5, Bound Range Exceeded */
+ X86_TRAP_UD, /* 6, Invalid Opcode */
+ X86_TRAP_NM, /* 7, Device Not Available */
+ X86_TRAP_DF, /* 8, Double Fault */
+ X86_TRAP_OLD_MF, /* 9, Coprocessor Segment Overrun */
+ X86_TRAP_TS, /* 10, Invalid TSS */
+ X86_TRAP_NP, /* 11, Segment Not Present */
+ X86_TRAP_SS, /* 12, Stack Segment Fault */
+ X86_TRAP_GP, /* 13, General Protection Fault */
+ X86_TRAP_PF, /* 14, Page Fault */
+ X86_TRAP_SPURIOUS, /* 15, Spurious Interrupt */
+ X86_TRAP_MF, /* 16, x87 Floating-Point Exception */
+ X86_TRAP_AC, /* 17, Alignment Check */
+ X86_TRAP_MC, /* 18, Machine Check */
+ X86_TRAP_XF, /* 19, SIMD Floating-Point Exception */
+ X86_TRAP_IRET = 32, /* 32, IRET Exception */
+};
+
#endif /* _ASM_X86_TRAPS_H */
diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h
index 15d99153a96..94605c0e9ce 100644
--- a/arch/x86/include/asm/tsc.h
+++ b/arch/x86/include/asm/tsc.h
@@ -49,6 +49,7 @@ extern void tsc_init(void);
extern void mark_tsc_unstable(char *reason);
extern int unsynchronized_tsc(void);
extern int check_tsc_unstable(void);
+extern int check_tsc_disabled(void);
extern unsigned long native_calibrate_tsc(void);
extern int tsc_clocksource_reliable;
@@ -61,7 +62,10 @@ extern void check_tsc_sync_source(int cpu);
extern void check_tsc_sync_target(void);
extern int notsc_setup(char *);
-extern void save_sched_clock_state(void);
-extern void restore_sched_clock_state(void);
+extern void tsc_save_sched_clock_state(void);
+extern void tsc_restore_sched_clock_state(void);
+
+/* MSR based TSC calibration for Intel Atom SoC platforms */
+unsigned long try_msr_calibrate_tsc(void);
#endif /* _ASM_X86_TSC_H */
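The new try_msr_calibrate_tsc() hook suggests a fast path that reads the TSC frequency from MSRs on Intel Atom SoCs and falls back to the generic calibration otherwise. A hedged sketch of that fallback pattern (the wrapper name is illustrative, not from the patch):

static unsigned long calibrate_tsc_sketch(void)
{
	/* MSR-based calibration returns the TSC frequency in kHz, or 0. */
	unsigned long tsc_khz = try_msr_calibrate_tsc();

	if (!tsc_khz)
		tsc_khz = native_calibrate_tsc();
	return tsc_khz;
}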
diff --git a/arch/x86/include/asm/types.h b/arch/x86/include/asm/types.h
deleted file mode 100644
index 8e8c23fef08..00000000000
--- a/arch/x86/include/asm/types.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _ASM_X86_TYPES_H
-#define _ASM_X86_TYPES_H
-
-#include <asm-generic/types.h>
-
-#endif /* _ASM_X86_TYPES_H */
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 8be5f54d936..0d592e0a5b8 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -9,6 +9,7 @@
#include <linux/string.h>
#include <asm/asm.h>
#include <asm/page.h>
+#include <asm/smap.h>
#define VERIFY_READ 0
#define VERIFY_WRITE 1
@@ -32,29 +33,37 @@
#define segment_eq(a, b) ((a).seg == (b).seg)
-#define __addr_ok(addr) \
- ((unsigned long __force)(addr) < \
- (current_thread_info()->addr_limit.seg))
+#define user_addr_max() (current_thread_info()->addr_limit.seg)
+#define __addr_ok(addr) \
+ ((unsigned long __force)(addr) < user_addr_max())
/*
* Test whether a block of memory is a valid user space address.
* Returns 0 if the range is valid, nonzero otherwise.
- *
- * This is equivalent to the following test:
- * (u33)addr + (u33)size > (u33)current->addr_limit.seg (u65 for x86_64)
- *
- * This needs 33-bit (65-bit for x86_64) arithmetic. We have a carry...
*/
-
-#define __range_not_ok(addr, size) \
+static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, unsigned long limit)
+{
+ /*
+ * If we have used "sizeof()" for the size,
+ * we know it won't overflow the limit (but
+ * it might overflow the 'addr', so it's
+ * important to subtract the size from the
+ * limit, not add it to the address).
+ */
+ if (__builtin_constant_p(size))
+ return addr > limit - size;
+
+ /* Arbitrary sizes? Be careful about overflow */
+ addr += size;
+ if (addr < size)
+ return true;
+ return addr > limit;
+}
+
+#define __range_not_ok(addr, size, limit) \
({ \
- unsigned long flag, roksum; \
__chk_user_ptr(addr); \
- asm("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0" \
- : "=&r" (flag), "=r" (roksum) \
- : "1" (addr), "g" ((long)(size)), \
- "rm" (current_thread_info()->addr_limit.seg)); \
- flag; \
+ __chk_range_not_ok((unsigned long __force)(addr), size, limit); \
})
/**
@@ -76,14 +85,16 @@
* checks that the pointer is in the user space range - after calling
* this function, memory access functions may still return -EFAULT.
*/
-#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
+#define access_ok(type, addr, size) \
+ likely(!__range_not_ok(addr, size, user_addr_max()))
/*
- * The exception table consists of pairs of addresses: the first is the
- * address of an instruction that is allowed to fault, and the second is
- * the address at which the program should continue. No registers are
- * modified, so it is entirely up to the continuation code to figure out
- * what to do.
+ * The exception table consists of pairs of addresses relative to the
+ * exception table entry itself: the first is the address of an
+ * instruction that is allowed to fault, and the second is the address
+ * at which the program should continue. No registers are modified,
+ * so it is entirely up to the continuation code to figure out what to
+ * do.
*
* All the routines below use bits of fixup code that are out of line
* with the main instruction path. This means when everything is well,
@@ -92,10 +103,14 @@
*/
struct exception_table_entry {
- unsigned long insn, fixup;
+ int insn, fixup;
};
+/* This is not the generic standard exception_table_entry format */
+#define ARCH_HAS_SORT_EXTABLE
+#define ARCH_HAS_SEARCH_EXTABLE
extern int fixup_exception(struct pt_regs *regs);
+extern int early_fixup_exception(unsigned long *ip);
/*
* These are the main single-value transfer routines. They automatically
@@ -118,13 +133,12 @@ extern int __get_user_4(void);
extern int __get_user_8(void);
extern int __get_user_bad(void);
-#define __get_user_x(size, ret, x, ptr) \
- asm volatile("call __get_user_" #size \
- : "=a" (ret), "=d" (x) \
- : "0" (ptr)) \
-
-/* Careful: we have to cast the result to the type of the pointer
- * for sign reasons */
+/*
+ * This is a type: either unsigned long, if the argument fits into
+ * that type, or otherwise unsigned long long.
+ */
+#define __inttype(x) \
+__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
/**
* get_user: - Get a simple variable from user space.
@@ -143,38 +157,29 @@ extern int __get_user_bad(void);
* Returns zero on success, or -EFAULT on error.
* On error, the variable @x is set to zero.
*/
-#ifdef CONFIG_X86_32
-#define __get_user_8(__ret_gu, __val_gu, ptr) \
- __get_user_x(X, __ret_gu, __val_gu, ptr)
-#else
-#define __get_user_8(__ret_gu, __val_gu, ptr) \
- __get_user_x(8, __ret_gu, __val_gu, ptr)
-#endif
-
+/*
+ * Careful: we have to cast the result to the type of the pointer
+ * for sign reasons.
+ *
+ * The use of _ASM_DX as the register specifier is a bit of a
+ * simplification, as gcc only cares about it as the starting point
+ * and not size: for a 64-bit value it will use %ecx:%edx on 32 bits
+ * (%ecx being the next register in gcc's x86 register sequence), and
+ * %rdx on 64 bits.
+ *
+ * Clang/LLVM cares about the size of the register, but still wants
+ * the base register for something that ends up being a pair.
+ */
#define get_user(x, ptr) \
({ \
int __ret_gu; \
- unsigned long __val_gu; \
+ register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX); \
__chk_user_ptr(ptr); \
might_fault(); \
- switch (sizeof(*(ptr))) { \
- case 1: \
- __get_user_x(1, __ret_gu, __val_gu, ptr); \
- break; \
- case 2: \
- __get_user_x(2, __ret_gu, __val_gu, ptr); \
- break; \
- case 4: \
- __get_user_x(4, __ret_gu, __val_gu, ptr); \
- break; \
- case 8: \
- __get_user_8(__ret_gu, __val_gu, ptr); \
- break; \
- default: \
- __get_user_x(X, __ret_gu, __val_gu, ptr); \
- break; \
- } \
- (x) = (__typeof__(*(ptr)))__val_gu; \
+ asm volatile("call __get_user_%P3" \
+ : "=a" (__ret_gu), "=r" (__val_gu) \
+ : "0" (ptr), "i" (sizeof(*(ptr)))); \
+ (x) = (__typeof__(*(ptr))) __val_gu; \
__ret_gu; \
})
@@ -186,9 +191,10 @@ extern int __get_user_bad(void);
#ifdef CONFIG_X86_32
#define __put_user_asm_u64(x, addr, err, errret) \
- asm volatile("1: movl %%eax,0(%2)\n" \
+ asm volatile(ASM_STAC "\n" \
+ "1: movl %%eax,0(%2)\n" \
"2: movl %%edx,4(%2)\n" \
- "3:\n" \
+ "3: " ASM_CLAC "\n" \
".section .fixup,\"ax\"\n" \
"4: movl %3,%0\n" \
" jmp 3b\n" \
@@ -199,11 +205,12 @@ extern int __get_user_bad(void);
: "A" (x), "r" (addr), "i" (errret), "0" (err))
#define __put_user_asm_ex_u64(x, addr) \
- asm volatile("1: movl %%eax,0(%1)\n" \
+ asm volatile(ASM_STAC "\n" \
+ "1: movl %%eax,0(%1)\n" \
"2: movl %%edx,4(%1)\n" \
- "3:\n" \
- _ASM_EXTABLE(1b, 2b - 1b) \
- _ASM_EXTABLE(2b, 3b - 2b) \
+ "3: " ASM_CLAC "\n" \
+ _ASM_EXTABLE_EX(1b, 2b) \
+ _ASM_EXTABLE_EX(2b, 3b) \
: : "A" (x), "r" (addr))
#define __put_user_x8(x, ptr, __ret_pu) \
@@ -228,8 +235,6 @@ extern void __put_user_2(void);
extern void __put_user_4(void);
extern void __put_user_8(void);
-#ifdef CONFIG_X86_WP_WORKS_OK
-
/**
* put_user: - Write a simple value into user space.
* @x: Value to copy to user space.
@@ -317,29 +322,6 @@ do { \
} \
} while (0)
-#else
-
-#define __put_user_size(x, ptr, size, retval, errret) \
-do { \
- __typeof__(*(ptr))__pus_tmp = x; \
- retval = 0; \
- \
- if (unlikely(__copy_to_user_ll(ptr, &__pus_tmp, size) != 0)) \
- retval = errret; \
-} while (0)
-
-#define put_user(x, ptr) \
-({ \
- int __ret_pu; \
- __typeof__(*(ptr))__pus_tmp = x; \
- __ret_pu = 0; \
- if (unlikely(__copy_to_user_ll(ptr, &__pus_tmp, \
- sizeof(*(ptr))) != 0)) \
- __ret_pu = -EFAULT; \
- __ret_pu; \
-})
-#endif
-
#ifdef CONFIG_X86_32
#define __get_user_asm_u64(x, ptr, retval, errret) (x) = __get_user_bad()
#define __get_user_asm_ex_u64(x, ptr) (x) = __get_user_bad()
@@ -373,8 +355,9 @@ do { \
} while (0)
#define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
- asm volatile("1: mov"itype" %2,%"rtype"1\n" \
- "2:\n" \
+ asm volatile(ASM_STAC "\n" \
+ "1: mov"itype" %2,%"rtype"1\n" \
+ "2: " ASM_CLAC "\n" \
".section .fixup,\"ax\"\n" \
"3: mov %3,%0\n" \
" xor"itype" %"rtype"1,%"rtype"1\n" \
@@ -408,7 +391,7 @@ do { \
#define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
asm volatile("1: mov"itype" %1,%"rtype"0\n" \
"2:\n" \
- _ASM_EXTABLE(1b, 2b - 1b) \
+ _ASM_EXTABLE_EX(1b, 2b) \
: ltype(x) : "m" (__m(addr)))
#define __put_user_nocheck(x, ptr, size) \
@@ -437,8 +420,9 @@ struct __large_struct { unsigned long buf[100]; };
* aliasing issues.
*/
#define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
- asm volatile("1: mov"itype" %"rtype"1,%2\n" \
- "2:\n" \
+ asm volatile(ASM_STAC "\n" \
+ "1: mov"itype" %"rtype"1,%2\n" \
+ "2: " ASM_CLAC "\n" \
".section .fixup,\"ax\"\n" \
"3: mov %3,%0\n" \
" jmp 2b\n" \
@@ -450,20 +434,20 @@ struct __large_struct { unsigned long buf[100]; };
#define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
asm volatile("1: mov"itype" %"rtype"0,%1\n" \
"2:\n" \
- _ASM_EXTABLE(1b, 2b - 1b) \
+ _ASM_EXTABLE_EX(1b, 2b) \
: : ltype(x), "m" (__m(addr)))
/*
* uaccess_try and catch
*/
#define uaccess_try do { \
- int prev_err = current_thread_info()->uaccess_err; \
current_thread_info()->uaccess_err = 0; \
+ stac(); \
barrier();
#define uaccess_catch(err) \
+ clac(); \
(err) |= (current_thread_info()->uaccess_err ? -EFAULT : 0); \
- current_thread_info()->uaccess_err = prev_err; \
} while (0)
/**
@@ -532,31 +516,114 @@ struct __large_struct { unsigned long buf[100]; };
(x) = (__force __typeof__(*(ptr)))__gue_val; \
} while (0)
-#ifdef CONFIG_X86_WP_WORKS_OK
-
#define put_user_try uaccess_try
#define put_user_catch(err) uaccess_catch(err)
#define put_user_ex(x, ptr) \
__put_user_size_ex((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
-#else /* !CONFIG_X86_WP_WORKS_OK */
+extern unsigned long
+copy_from_user_nmi(void *to, const void __user *from, unsigned long n);
+extern __must_check long
+strncpy_from_user(char *dst, const char __user *src, long count);
-#define put_user_try do { \
- int __uaccess_err = 0;
+extern __must_check long strlen_user(const char __user *str);
+extern __must_check long strnlen_user(const char __user *str, long n);
-#define put_user_catch(err) \
- (err) |= __uaccess_err; \
-} while (0)
+unsigned long __must_check clear_user(void __user *mem, unsigned long len);
+unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
-#define put_user_ex(x, ptr) do { \
- __uaccess_err |= __put_user(x, ptr); \
-} while (0)
+extern void __cmpxchg_wrong_size(void)
+ __compiletime_error("Bad argument size for cmpxchg");
-#endif /* CONFIG_X86_WP_WORKS_OK */
+#define __user_atomic_cmpxchg_inatomic(uval, ptr, old, new, size) \
+({ \
+ int __ret = 0; \
+ __typeof__(ptr) __uval = (uval); \
+ __typeof__(*(ptr)) __old = (old); \
+ __typeof__(*(ptr)) __new = (new); \
+ switch (size) { \
+ case 1: \
+ { \
+ asm volatile("\t" ASM_STAC "\n" \
+ "1:\t" LOCK_PREFIX "cmpxchgb %4, %2\n" \
+ "2:\t" ASM_CLAC "\n" \
+ "\t.section .fixup, \"ax\"\n" \
+ "3:\tmov %3, %0\n" \
+ "\tjmp 2b\n" \
+ "\t.previous\n" \
+ _ASM_EXTABLE(1b, 3b) \
+ : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
+ : "i" (-EFAULT), "q" (__new), "1" (__old) \
+ : "memory" \
+ ); \
+ break; \
+ } \
+ case 2: \
+ { \
+ asm volatile("\t" ASM_STAC "\n" \
+ "1:\t" LOCK_PREFIX "cmpxchgw %4, %2\n" \
+ "2:\t" ASM_CLAC "\n" \
+ "\t.section .fixup, \"ax\"\n" \
+ "3:\tmov %3, %0\n" \
+ "\tjmp 2b\n" \
+ "\t.previous\n" \
+ _ASM_EXTABLE(1b, 3b) \
+ : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
+ : "i" (-EFAULT), "r" (__new), "1" (__old) \
+ : "memory" \
+ ); \
+ break; \
+ } \
+ case 4: \
+ { \
+ asm volatile("\t" ASM_STAC "\n" \
+ "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n" \
+ "2:\t" ASM_CLAC "\n" \
+ "\t.section .fixup, \"ax\"\n" \
+ "3:\tmov %3, %0\n" \
+ "\tjmp 2b\n" \
+ "\t.previous\n" \
+ _ASM_EXTABLE(1b, 3b) \
+ : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
+ : "i" (-EFAULT), "r" (__new), "1" (__old) \
+ : "memory" \
+ ); \
+ break; \
+ } \
+ case 8: \
+ { \
+ if (!IS_ENABLED(CONFIG_X86_64)) \
+ __cmpxchg_wrong_size(); \
+ \
+ asm volatile("\t" ASM_STAC "\n" \
+ "1:\t" LOCK_PREFIX "cmpxchgq %4, %2\n" \
+ "2:\t" ASM_CLAC "\n" \
+ "\t.section .fixup, \"ax\"\n" \
+ "3:\tmov %3, %0\n" \
+ "\tjmp 2b\n" \
+ "\t.previous\n" \
+ _ASM_EXTABLE(1b, 3b) \
+ : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
+ : "i" (-EFAULT), "r" (__new), "1" (__old) \
+ : "memory" \
+ ); \
+ break; \
+ } \
+ default: \
+ __cmpxchg_wrong_size(); \
+ } \
+ *__uval = __old; \
+ __ret; \
+})
-extern unsigned long
-copy_from_user_nmi(void *to, const void __user *from, unsigned long n);
+#define user_atomic_cmpxchg_inatomic(uval, ptr, old, new) \
+({ \
+ access_ok(VERIFY_WRITE, (ptr), sizeof(*(ptr))) ? \
+ __user_atomic_cmpxchg_inatomic((uval), (ptr), \
+ (old), (new), sizeof(*(ptr))) : \
+ -EFAULT; \
+})
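A hedged usage sketch of the new helper: it performs a locked cmpxchg directly on a user-space word, returns 0 or -EFAULT, and stores the value observed at the location through uval. The caller below is purely illustrative and not part of this patch.

/* Illustrative only: compare-and-swap a 32-bit user word in place. */
static int try_claim_user_slot(u32 __user *uaddr, u32 expected, u32 newval)
{
	u32 seen;
	int ret;

	ret = user_atomic_cmpxchg_inatomic(&seen, uaddr, expected, newval);
	if (ret)
		return ret;			/* -EFAULT: page not accessible */

	return seen == expected ? 0 : -EAGAIN;	/* lost the race to another writer */
}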
/*
* movsl can be slow when source and dest are not both 8-byte aligned
@@ -570,10 +637,108 @@ extern struct movsl_mask {
#define ARCH_HAS_NOCACHE_UACCESS 1
#ifdef CONFIG_X86_32
-# include "uaccess_32.h"
+# include <asm/uaccess_32.h>
#else
-# include "uaccess_64.h"
+# include <asm/uaccess_64.h>
#endif
+unsigned long __must_check _copy_from_user(void *to, const void __user *from,
+ unsigned n);
+unsigned long __must_check _copy_to_user(void __user *to, const void *from,
+ unsigned n);
+
+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
+# define copy_user_diag __compiletime_error
+#else
+# define copy_user_diag __compiletime_warning
+#endif
+
+extern void copy_user_diag("copy_from_user() buffer size is too small")
+copy_from_user_overflow(void);
+extern void copy_user_diag("copy_to_user() buffer size is too small")
+copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
+
+#undef copy_user_diag
+
+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
+
+extern void
+__compiletime_warning("copy_from_user() buffer size is not provably correct")
+__copy_from_user_overflow(void) __asm__("copy_from_user_overflow");
+#define __copy_from_user_overflow(size, count) __copy_from_user_overflow()
+
+extern void
+__compiletime_warning("copy_to_user() buffer size is not provably correct")
+__copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
+#define __copy_to_user_overflow(size, count) __copy_to_user_overflow()
+
+#else
+
+static inline void
+__copy_from_user_overflow(int size, unsigned long count)
+{
+ WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
+}
+
+#define __copy_to_user_overflow __copy_from_user_overflow
+
+#endif
+
+static inline unsigned long __must_check
+copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+ int sz = __compiletime_object_size(to);
+
+ might_fault();
+
+ /*
+ * While we would like to have the compiler do the checking for us
+ * even in the non-constant size case, any false positives there are
+ * a problem (especially when DEBUG_STRICT_USER_COPY_CHECKS, but even
+ * without - the [hopefully] dangerous looking nature of the warning
+	 * would make people go look at the respective call sites over and	
+ * over again just to find that there's no problem).
+ *
+	 * And there are cases where it's just not realistic for the compiler
+	 * to prove the count to be in range. For example, when multiple call
+	 * sites of a helper function - perhaps in different source files -
+	 * all do proper range checking, but the helper function itself does
+	 * not repeat it.
+ *
+ * Therefore limit the compile time checking to the constant size
+ * case, and do only runtime checking for non-constant sizes.
+ */
+
+ if (likely(sz < 0 || sz >= n))
+ n = _copy_from_user(to, from, n);
+	else if (__builtin_constant_p(n))
+ copy_from_user_overflow();
+ else
+ __copy_from_user_overflow(sz, n);
+
+ return n;
+}
+
+static inline unsigned long __must_check
+copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+ int sz = __compiletime_object_size(from);
+
+ might_fault();
+
+ /* See the comment in copy_from_user() above. */
+ if (likely(sz < 0 || sz >= n))
+ n = _copy_to_user(to, from, n);
+	else if (__builtin_constant_p(n))
+ copy_to_user_overflow();
+ else
+ __copy_to_user_overflow(sz, n);
+
+ return n;
+}
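The effect of the __compiletime_object_size() checks above, seen from the caller's side (buffer and function names below are hypothetical): a constant count larger than the destination object is diagnosed at build time via copy_from_user_overflow(), while an oversized non-constant count can only be caught by the runtime __copy_from_user_overflow() WARN.

/* Hedged example of what the checks catch. */
struct req { u32 hdr; u32 body[4]; };

static long handle_request(void __user *arg, unsigned long dynlen)
{
	struct req r;

	/* Constant and too large: diagnosed at compile time (warning or
	 * error, depending on CONFIG_DEBUG_STRICT_USER_COPY_CHECKS):
	 *	copy_from_user(&r, arg, sizeof(r) + 8);
	 */

	/* Non-constant: only the runtime WARN path can flag an oversized
	 * dynlen, so clamp it explicitly. */
	if (copy_from_user(&r, arg, min_t(unsigned long, dynlen, sizeof(r))))
		return -EFAULT;
	return 0;
}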
+
+#undef __copy_from_user_overflow
+#undef __copy_to_user_overflow
+
#endif /* _ASM_X86_UACCESS_H */
diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
index 566e803cc60..3c03a5de64d 100644
--- a/arch/x86/include/asm/uaccess_32.h
+++ b/arch/x86/include/asm/uaccess_32.h
@@ -184,58 +184,4 @@ __copy_from_user_inatomic_nocache(void *to, const void __user *from,
return __copy_from_user_ll_nocache_nozero(to, from, n);
}
-unsigned long __must_check copy_to_user(void __user *to,
- const void *from, unsigned long n);
-unsigned long __must_check _copy_from_user(void *to,
- const void __user *from,
- unsigned long n);
-
-
-extern void copy_from_user_overflow(void)
-#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
- __compiletime_error("copy_from_user() buffer size is not provably correct")
-#else
- __compiletime_warning("copy_from_user() buffer size is not provably correct")
-#endif
-;
-
-static inline unsigned long __must_check copy_from_user(void *to,
- const void __user *from,
- unsigned long n)
-{
- int sz = __compiletime_object_size(to);
-
- if (likely(sz == -1 || sz >= n))
- n = _copy_from_user(to, from, n);
- else
- copy_from_user_overflow();
-
- return n;
-}
-
-long __must_check strncpy_from_user(char *dst, const char __user *src,
- long count);
-long __must_check __strncpy_from_user(char *dst,
- const char __user *src, long count);
-
-/**
- * strlen_user: - Get the size of a string in user space.
- * @str: The string to measure.
- *
- * Context: User context only. This function may sleep.
- *
- * Get the size of a NUL-terminated string in user space.
- *
- * Returns the size of the string INCLUDING the terminating NUL.
- * On exception, returns 0.
- *
- * If there is a limit on the length of a valid string, you may wish to
- * consider using strnlen_user() instead.
- */
-#define strlen_user(str) strnlen_user(str, LONG_MAX)
-
-long strnlen_user(const char __user *str, long n);
-unsigned long __must_check clear_user(void __user *mem, unsigned long len);
-unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
-
#endif /* _ASM_X86_UACCESS_32_H */
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index 1c66d30971a..12a26b979bf 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -17,6 +17,8 @@
/* Handles exceptions in both to and from, but doesn't do access_ok */
__must_check unsigned long
+copy_user_enhanced_fast_string(void *to, const void *from, unsigned len);
+__must_check unsigned long
copy_user_generic_string(void *to, const void *from, unsigned len);
__must_check unsigned long
copy_user_generic_unrolled(void *to, const void *from, unsigned len);
@@ -26,9 +28,16 @@ copy_user_generic(void *to, const void *from, unsigned len)
{
unsigned ret;
- alternative_call(copy_user_generic_unrolled,
+ /*
+ * If CPU has ERMS feature, use copy_user_enhanced_fast_string.
+ * Otherwise, if CPU has rep_good feature, use copy_user_generic_string.
+ * Otherwise, use copy_user_generic_unrolled.
+ */
+ alternative_call_2(copy_user_generic_unrolled,
copy_user_generic_string,
X86_FEATURE_REP_GOOD,
+ copy_user_enhanced_fast_string,
+ X86_FEATURE_ERMS,
ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
"=d" (len)),
"1" (to), "2" (from), "3" (len)
@@ -37,42 +46,13 @@ copy_user_generic(void *to, const void *from, unsigned len)
}
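The alternative_call_2() above is patched once at boot; its runtime effect is roughly equivalent to the following selection order. This is a simplified sketch that ignores the register calling convention the real macro pins down.

/* Pseudo-equivalent of the patched call site (sketch only). */
static unsigned long copy_user_generic_sketch(void *to, const void *from,
					      unsigned len)
{
	if (static_cpu_has(X86_FEATURE_ERMS))
		return copy_user_enhanced_fast_string(to, from, len);
	if (static_cpu_has(X86_FEATURE_REP_GOOD))
		return copy_user_generic_string(to, from, len);
	return copy_user_generic_unrolled(to, from, len);
}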
__must_check unsigned long
-_copy_to_user(void __user *to, const void *from, unsigned len);
-__must_check unsigned long
-_copy_from_user(void *to, const void __user *from, unsigned len);
-__must_check unsigned long
copy_in_user(void __user *to, const void __user *from, unsigned len);
-static inline unsigned long __must_check copy_from_user(void *to,
- const void __user *from,
- unsigned long n)
-{
- int sz = __compiletime_object_size(to);
-
- might_fault();
- if (likely(sz == -1 || sz >= n))
- n = _copy_from_user(to, from, n);
-#ifdef CONFIG_DEBUG_VM
- else
- WARN(1, "Buffer overflow detected!\n");
-#endif
- return n;
-}
-
static __always_inline __must_check
-int copy_to_user(void __user *dst, const void *src, unsigned size)
-{
- might_fault();
-
- return _copy_to_user(dst, src, size);
-}
-
-static __always_inline __must_check
-int __copy_from_user(void *dst, const void __user *src, unsigned size)
+int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size)
{
int ret = 0;
- might_fault();
if (!__builtin_constant_p(size))
return copy_user_generic(dst, (__force void *)src, size);
switch (size) {
@@ -112,11 +92,17 @@ int __copy_from_user(void *dst, const void __user *src, unsigned size)
}
static __always_inline __must_check
-int __copy_to_user(void __user *dst, const void *src, unsigned size)
+int __copy_from_user(void *dst, const void __user *src, unsigned size)
+{
+ might_fault();
+ return __copy_from_user_nocheck(dst, src, size);
+}
+
+static __always_inline __must_check
+int __copy_to_user_nocheck(void __user *dst, const void *src, unsigned size)
{
int ret = 0;
- might_fault();
if (!__builtin_constant_p(size))
return copy_user_generic((__force void *)dst, src, size);
switch (size) {
@@ -156,6 +142,13 @@ int __copy_to_user(void __user *dst, const void *src, unsigned size)
}
static __always_inline __must_check
+int __copy_to_user(void __user *dst, const void *src, unsigned size)
+{
+ might_fault();
+ return __copy_to_user_nocheck(dst, src, size);
+}
+
+static __always_inline __must_check
int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
{
int ret = 0;
@@ -208,26 +201,16 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
}
}
-__must_check long
-strncpy_from_user(char *dst, const char __user *src, long count);
-__must_check long
-__strncpy_from_user(char *dst, const char __user *src, long count);
-__must_check long strnlen_user(const char __user *str, long n);
-__must_check long __strnlen_user(const char __user *str, long n);
-__must_check long strlen_user(const char __user *str);
-__must_check unsigned long clear_user(void __user *mem, unsigned long len);
-__must_check unsigned long __clear_user(void __user *mem, unsigned long len);
-
static __must_check __always_inline int
__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
{
- return copy_user_generic(dst, (__force const void *)src, size);
+ return __copy_from_user_nocheck(dst, src, size);
}
static __must_check __always_inline int
__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
{
- return copy_user_generic((__force void *)dst, src, size);
+ return __copy_to_user_nocheck(dst, src, size);
}
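With this split, __copy_{from,to}_user() keep the might_fault() sleep check while the _inatomic variants now share the same size-specialized __copy_*_nocheck() fast paths without it. A hedged sketch of an atomic-context caller (the helper name is illustrative):

/* Sketch: an atomic-context read must use the _inatomic variant. */
static int peek_user_word_atomic(unsigned long __user *uaddr,
				 unsigned long *val)
{
	int ret;

	pagefault_disable();
	ret = __copy_from_user_inatomic(val, uaddr, sizeof(*val));
	pagefault_enable();

	return ret ? -EFAULT : 0;
}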
extern long __copy_user_nocache(void *dst, const void __user *src,
@@ -236,7 +219,7 @@ extern long __copy_user_nocache(void *dst, const void __user *src,
static inline int
__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
{
- might_sleep();
+ might_fault();
return __copy_user_nocache(dst, src, size, 1);
}
diff --git a/arch/x86/include/asm/ucontext.h b/arch/x86/include/asm/ucontext.h
deleted file mode 100644
index b7c29c8017f..00000000000
--- a/arch/x86/include/asm/ucontext.h
+++ /dev/null
@@ -1,12 +0,0 @@
-#ifndef _ASM_X86_UCONTEXT_H
-#define _ASM_X86_UCONTEXT_H
-
-#define UC_FP_XSTATE 0x1 /* indicates the presence of extended state
- * information in the memory layout pointed
- * by the fpstate pointer in the ucontext's
- * sigcontext struct (uc_mcontext).
- */
-
-#include <asm-generic/ucontext.h>
-
-#endif /* _ASM_X86_UCONTEXT_H */
diff --git a/arch/x86/include/asm/unistd.h b/arch/x86/include/asm/unistd.h
index 21f77b89e47..2b19caa4081 100644
--- a/arch/x86/include/asm/unistd.h
+++ b/arch/x86/include/asm/unistd.h
@@ -1,11 +1,18 @@
#ifndef _ASM_X86_UNISTD_H
#define _ASM_X86_UNISTD_H 1
-#ifdef __KERNEL__
+#include <uapi/asm/unistd.h>
+
+
+# ifdef CONFIG_X86_X32_ABI
+# define __SYSCALL_MASK (~(__X32_SYSCALL_BIT))
+# else
+# define __SYSCALL_MASK (~0)
+# endif
+
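__SYSCALL_MASK strips the x32 marker bit from the syscall number before it is used as a table index; without CONFIG_X86_X32_ABI the mask is ~0 and the operation is a no-op. A compressed sketch of how the mask is meant to be applied (the helper below is illustrative, the real use is in the syscall entry code):

/* Hedged sketch of applying the mask. */
static inline unsigned long syscall_table_index(unsigned long nr)
{
	/* On x32, __X32_SYSCALL_BIT tags the ABI; the table index is the
	 * number with that bit cleared. */
	return nr & __SYSCALL_MASK;
}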
# ifdef CONFIG_X86_32
# include <asm/unistd_32.h>
-# define __ARCH_WANT_IPC_PARSE_VERSION
# define __ARCH_WANT_STAT64
# define __ARCH_WANT_SYS_IPC
# define __ARCH_WANT_SYS_OLD_MMAP
@@ -14,7 +21,11 @@
# else
# include <asm/unistd_64.h>
+# include <asm/unistd_64_x32.h>
# define __ARCH_WANT_COMPAT_SYS_TIME
+# define __ARCH_WANT_COMPAT_SYS_GETDENTS64
+# define __ARCH_WANT_COMPAT_SYS_PREADV64
+# define __ARCH_WANT_COMPAT_SYS_PWRITEV64
# endif
@@ -30,9 +41,6 @@
# define __ARCH_WANT_SYS_OLD_GETRLIMIT
# define __ARCH_WANT_SYS_OLD_UNAME
# define __ARCH_WANT_SYS_PAUSE
-# define __ARCH_WANT_SYS_RT_SIGACTION
-# define __ARCH_WANT_SYS_RT_SIGSUSPEND
-# define __ARCH_WANT_SYS_SGETMASK
# define __ARCH_WANT_SYS_SIGNAL
# define __ARCH_WANT_SYS_SIGPENDING
# define __ARCH_WANT_SYS_SIGPROCMASK
@@ -40,21 +48,8 @@
# define __ARCH_WANT_SYS_TIME
# define __ARCH_WANT_SYS_UTIME
# define __ARCH_WANT_SYS_WAITPID
-
-/*
- * "Conditional" syscalls
- *
- * What we want is __attribute__((weak,alias("sys_ni_syscall"))),
- * but it doesn't work on all toolchains, so we just do it by hand
- */
-# define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall")
-
-#else
-# ifdef __i386__
-# include <asm/unistd_32.h>
-# else
-# include <asm/unistd_64.h>
-# endif
-#endif
+# define __ARCH_WANT_SYS_FORK
+# define __ARCH_WANT_SYS_VFORK
+# define __ARCH_WANT_SYS_CLONE
#endif /* _ASM_X86_UNISTD_H */
diff --git a/arch/x86/include/asm/uprobes.h b/arch/x86/include/asm/uprobes.h
new file mode 100644
index 00000000000..74f4c2ff642
--- /dev/null
+++ b/arch/x86/include/asm/uprobes.h
@@ -0,0 +1,67 @@
+#ifndef _ASM_UPROBES_H
+#define _ASM_UPROBES_H
+/*
+ * User-space Probes (UProbes) for x86
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2008-2011
+ * Authors:
+ * Srikar Dronamraju
+ * Jim Keniston
+ */
+
+#include <linux/notifier.h>
+
+typedef u8 uprobe_opcode_t;
+
+#define MAX_UINSN_BYTES 16
+#define UPROBE_XOL_SLOT_BYTES 128 /* to keep it cache aligned */
+
+#define UPROBE_SWBP_INSN 0xcc
+#define UPROBE_SWBP_INSN_SIZE 1
+
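UPROBE_SWBP_INSN is simply the x86 int3 opcode. A one-line predicate in the style the uprobes core uses, shown here as a hedged sketch (the helper name is illustrative):

/* Sketch: does this instruction slot hold the breakpoint opcode? */
static bool insn_is_uprobe_swbp(const uprobe_opcode_t *insn)
{
	return *insn == UPROBE_SWBP_INSN;	/* 0xcc == int3 */
}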
+struct uprobe_xol_ops;
+
+struct arch_uprobe {
+ union {
+ u8 insn[MAX_UINSN_BYTES];
+ u8 ixol[MAX_UINSN_BYTES];
+ };
+
+ const struct uprobe_xol_ops *ops;
+
+ union {
+ struct {
+ s32 offs;
+ u8 ilen;
+ u8 opc1;
+ } branch;
+ struct {
+ u8 fixups;
+ u8 ilen;
+ } defparam;
+ };
+};
+
+struct arch_uprobe_task {
+#ifdef CONFIG_X86_64
+ unsigned long saved_scratch_register;
+#endif
+ unsigned int saved_trap_nr;
+ unsigned int saved_tf;
+};
+
+#endif /* _ASM_UPROBES_H */
diff --git a/arch/x86/include/asm/user.h b/arch/x86/include/asm/user.h
index 24532c7da3d..ccab4af1646 100644
--- a/arch/x86/include/asm/user.h
+++ b/arch/x86/include/asm/user.h
@@ -2,9 +2,9 @@
#define _ASM_X86_USER_H
#ifdef CONFIG_X86_32
-# include "user_32.h"
+# include <asm/user_32.h>
#else
-# include "user_64.h"
+# include <asm/user_64.h>
#endif
#include <asm/types.h>
diff --git a/arch/x86/include/asm/uv/uv.h b/arch/x86/include/asm/uv/uv.h
index 3bb9491b765..062921ef34e 100644
--- a/arch/x86/include/asm/uv/uv.h
+++ b/arch/x86/include/asm/uv/uv.h
@@ -15,7 +15,8 @@ extern void uv_nmi_init(void);
extern void uv_system_init(void);
extern const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
struct mm_struct *mm,
- unsigned long va,
+ unsigned long start,
+ unsigned long end,
unsigned int cpu);
#else /* X86_UV */
@@ -26,7 +27,7 @@ static inline void uv_cpu_init(void) { }
static inline void uv_system_init(void) { }
static inline const struct cpumask *
uv_flush_tlb_others(const struct cpumask *cpumask, struct mm_struct *mm,
- unsigned long va, unsigned int cpu)
+ unsigned long start, unsigned long end, unsigned int cpu)
{ return cpumask; }
#endif /* X86_UV */
diff --git a/arch/x86/include/asm/uv/uv_bau.h b/arch/x86/include/asm/uv/uv_bau.h
index becf47b8173..0b46ef261c7 100644
--- a/arch/x86/include/asm/uv/uv_bau.h
+++ b/arch/x86/include/asm/uv/uv_bau.h
@@ -140,6 +140,9 @@
#define IPI_RESET_LIMIT 1
/* after this # consecutive successes, bump up the throttle if it was lowered */
#define COMPLETE_THRESHOLD 5
+/* after this # of giveups (fall back to kernel IPIs), disable the use of
+   the BAU for a period of time */
+#define GIVEUP_LIMIT 100
#define UV_LB_SUBNODEID 0x10
@@ -149,7 +152,6 @@
/* 4 bits of software ack period */
#define UV2_ACK_MASK 0x7UL
#define UV2_ACK_UNITS_SHFT 3
-#define UV2_LEG_SHFT UV2H_LB_BAU_MISC_CONTROL_USE_LEGACY_DESCRIPTOR_FORMATS_SHFT
#define UV2_EXT_SHFT UV2H_LB_BAU_MISC_CONTROL_ENABLE_EXTENDED_SB_STATUS_SHFT
/*
@@ -167,7 +169,6 @@
#define FLUSH_RETRY_TIMEOUT 2
#define FLUSH_GIVEUP 3
#define FLUSH_COMPLETE 4
-#define FLUSH_RETRY_BUSYBUG 5
/*
* tuning the action when the numalink network is extremely delayed
@@ -176,7 +177,7 @@
microseconds */
#define CONGESTED_REPS 10 /* long delays averaged over
this many broadcasts */
-#define CONGESTED_PERIOD 30 /* time for the bau to be
+#define DISABLED_PERIOD 10 /* time for the bau to be
disabled, in seconds */
/* see msg_type: */
#define MSG_NOOP 0
@@ -521,6 +522,12 @@ struct ptc_stats {
unsigned long s_uv2_wars; /* uv2 workaround, perm. busy */
unsigned long s_uv2_wars_hw; /* uv2 workaround, hiwater */
unsigned long s_uv2_war_waits; /* uv2 workaround, long waits */
+ unsigned long s_overipilimit; /* over the ipi reset limit */
+	unsigned long s_giveuplimit;	/* disables, over giveup limit */
+ unsigned long s_enters; /* entries to the driver */
+ unsigned long s_ipifordisabled; /* fall back to IPI; disabled */
+	unsigned long s_plugged;	/* plugged by h/w bug */
+ unsigned long s_congested; /* giveup on long wait */
/* destination statistics */
unsigned long d_alltlb; /* times all tlb's on this
cpu were flushed */
@@ -587,8 +594,8 @@ struct bau_control {
int timeout_tries;
int ipi_attempts;
int conseccompletes;
- int baudisabled;
- int set_bau_off;
+ short nobau;
+ short baudisabled;
short cpu;
short osnode;
short uvhub_cpu;
@@ -597,14 +604,16 @@ struct bau_control {
short cpus_in_socket;
short cpus_in_uvhub;
short partition_base_pnode;
- short using_desc; /* an index, like uvhub_cpu */
- unsigned int inuse_map;
+ short busy; /* all were busy (war) */
unsigned short message_number;
unsigned short uvhub_quiesce;
short socket_acknowledge_count[DEST_Q_SIZE];
cycles_t send_message;
+ cycles_t period_end;
+ cycles_t period_time;
spinlock_t uvhub_lock;
spinlock_t queue_lock;
+ spinlock_t disable_lock;
/* tunables */
int max_concurr;
int max_concurr_const;
@@ -615,9 +624,9 @@ struct bau_control {
int complete_threshold;
int cong_response_us;
int cong_reps;
- int cong_period;
- unsigned long clocks_per_100_usec;
- cycles_t period_time;
+ cycles_t disabled_period;
+ int period_giveups;
+ int giveup_limit;
long period_requests;
struct hub_and_pnode *thp;
};
@@ -722,6 +731,9 @@ static inline void bau_cpubits_clear(struct bau_local_cpumask *dstp, int nbits)
}
extern void uv_bau_message_intr1(void);
+#ifdef CONFIG_TRACING
+#define trace_uv_bau_message_intr1 uv_bau_message_intr1
+#endif
extern void uv_bau_timeout_intr1(void);
struct atomic_short {
diff --git a/arch/x86/include/asm/uv/uv_hub.h b/arch/x86/include/asm/uv/uv_hub.h
index 21f7385badb..c63e925fd6b 100644
--- a/arch/x86/include/asm/uv/uv_hub.h
+++ b/arch/x86/include/asm/uv/uv_hub.h
@@ -5,7 +5,7 @@
*
* SGI UV architectural definitions
*
- * Copyright (C) 2007-2010 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2007-2014 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_X86_UV_UV_HUB_H
@@ -175,6 +175,7 @@ DECLARE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
*/
#define UV1_HUB_REVISION_BASE 1
#define UV2_HUB_REVISION_BASE 3
+#define UV3_HUB_REVISION_BASE 5
static inline int is_uv1_hub(void)
{
@@ -183,17 +184,24 @@ static inline int is_uv1_hub(void)
static inline int is_uv2_hub(void)
{
- return uv_hub_info->hub_revision >= UV2_HUB_REVISION_BASE;
+ return ((uv_hub_info->hub_revision >= UV2_HUB_REVISION_BASE) &&
+ (uv_hub_info->hub_revision < UV3_HUB_REVISION_BASE));
+}
+
+static inline int is_uv3_hub(void)
+{
+ return uv_hub_info->hub_revision >= UV3_HUB_REVISION_BASE;
}
-static inline int is_uv2_1_hub(void)
+static inline int is_uv_hub(void)
{
- return uv_hub_info->hub_revision == UV2_HUB_REVISION_BASE;
+ return uv_hub_info->hub_revision;
}
-static inline int is_uv2_2_hub(void)
+/* code common to uv2 and uv3 only */
+static inline int is_uvx_hub(void)
{
- return uv_hub_info->hub_revision == UV2_HUB_REVISION_BASE + 1;
+ return uv_hub_info->hub_revision >= UV2_HUB_REVISION_BASE;
}
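The revision bases partition a single running counter: revisions below UV2_HUB_REVISION_BASE are UV1, revisions from UV2_HUB_REVISION_BASE up to UV3_HUB_REVISION_BASE are UV2, and anything at or above UV3_HUB_REVISION_BASE is UV3. A compact sketch of that mapping, assuming the same hub_revision numbering:

/* Hedged sketch of the revision-to-generation mapping. */
static inline const char *uv_hub_generation(int hub_revision)
{
	if (hub_revision >= UV3_HUB_REVISION_BASE)
		return "UV3";
	if (hub_revision >= UV2_HUB_REVISION_BASE)
		return "UV2";
	if (hub_revision >= UV1_HUB_REVISION_BASE)
		return "UV1";
	return "not a UV hub";
}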
union uvh_apicid {
@@ -230,14 +238,23 @@ union uvh_apicid {
#define UV2_LOCAL_MMR_SIZE (32UL * 1024 * 1024)
#define UV2_GLOBAL_MMR32_SIZE (32UL * 1024 * 1024)
-#define UV_LOCAL_MMR_BASE (is_uv1_hub() ? UV1_LOCAL_MMR_BASE \
- : UV2_LOCAL_MMR_BASE)
-#define UV_GLOBAL_MMR32_BASE (is_uv1_hub() ? UV1_GLOBAL_MMR32_BASE \
- : UV2_GLOBAL_MMR32_BASE)
-#define UV_LOCAL_MMR_SIZE (is_uv1_hub() ? UV1_LOCAL_MMR_SIZE : \
- UV2_LOCAL_MMR_SIZE)
+#define UV3_LOCAL_MMR_BASE 0xfa000000UL
+#define UV3_GLOBAL_MMR32_BASE 0xfc000000UL
+#define UV3_LOCAL_MMR_SIZE (32UL * 1024 * 1024)
+#define UV3_GLOBAL_MMR32_SIZE (32UL * 1024 * 1024)
+
+#define UV_LOCAL_MMR_BASE (is_uv1_hub() ? UV1_LOCAL_MMR_BASE : \
+ (is_uv2_hub() ? UV2_LOCAL_MMR_BASE : \
+ UV3_LOCAL_MMR_BASE))
+#define UV_GLOBAL_MMR32_BASE (is_uv1_hub() ? UV1_GLOBAL_MMR32_BASE :\
+ (is_uv2_hub() ? UV2_GLOBAL_MMR32_BASE :\
+ UV3_GLOBAL_MMR32_BASE))
+#define UV_LOCAL_MMR_SIZE (is_uv1_hub() ? UV1_LOCAL_MMR_SIZE : \
+ (is_uv2_hub() ? UV2_LOCAL_MMR_SIZE : \
+ UV3_LOCAL_MMR_SIZE))
#define UV_GLOBAL_MMR32_SIZE (is_uv1_hub() ? UV1_GLOBAL_MMR32_SIZE :\
- UV2_GLOBAL_MMR32_SIZE)
+ (is_uv2_hub() ? UV2_GLOBAL_MMR32_SIZE :\
+ UV3_GLOBAL_MMR32_SIZE))
#define UV_GLOBAL_MMR64_BASE (uv_hub_info->global_mmr_base)
#define UV_GLOBAL_GRU_MMR_BASE 0x4000000
@@ -475,8 +492,8 @@ struct uv_blade_info {
unsigned short nr_online_cpus;
unsigned short pnode;
short memory_nid;
- spinlock_t nmi_lock;
- unsigned long nmi_count;
+ spinlock_t nmi_lock; /* obsolete, see uv_hub_nmi */
+ unsigned long nmi_count; /* obsolete, see uv_hub_nmi */
};
extern struct uv_blade_info *uv_blade_info;
extern short *uv_node_to_blade;
@@ -549,6 +566,59 @@ static inline int uv_num_possible_blades(void)
return uv_possible_blades;
}
+/* Per Hub NMI support */
+extern void uv_nmi_setup(void);
+
+/* The BMC sets a bit in this MMR to non-zero before sending an NMI */
+#define UVH_NMI_MMR UVH_SCRATCH5
+#define UVH_NMI_MMR_CLEAR UVH_SCRATCH5_ALIAS
+#define UVH_NMI_MMR_SHIFT 63
+#define UVH_NMI_MMR_TYPE "SCRATCH5"
+
+/* Newer SMM NMI handler, not present in all systems */
+#define UVH_NMI_MMRX UVH_EVENT_OCCURRED0
+#define UVH_NMI_MMRX_CLEAR UVH_EVENT_OCCURRED0_ALIAS
+#define UVH_NMI_MMRX_SHIFT (is_uv1_hub() ? \
+ UV1H_EVENT_OCCURRED0_EXTIO_INT0_SHFT :\
+ UVXH_EVENT_OCCURRED0_EXTIO_INT0_SHFT)
+#define UVH_NMI_MMRX_TYPE "EXTIO_INT0"
+
+/* Non-zero indicates newer SMM NMI handler present */
+#define UVH_NMI_MMRX_SUPPORTED UVH_EXTIO_INT0_BROADCAST
+
+/* Indicates to BIOS that we want to use the newer SMM NMI handler */
+#define UVH_NMI_MMRX_REQ UVH_SCRATCH5_ALIAS_2
+#define UVH_NMI_MMRX_REQ_SHIFT 62
+
+struct uv_hub_nmi_s {
+ raw_spinlock_t nmi_lock;
+ atomic_t in_nmi; /* flag this node in UV NMI IRQ */
+ atomic_t cpu_owner; /* last locker of this struct */
+ atomic_t read_mmr_count; /* count of MMR reads */
+ atomic_t nmi_count; /* count of true UV NMIs */
+ unsigned long nmi_value; /* last value read from NMI MMR */
+};
+
+struct uv_cpu_nmi_s {
+ struct uv_hub_nmi_s *hub;
+ atomic_t state;
+ atomic_t pinging;
+ int queries;
+ int pings;
+};
+
+DECLARE_PER_CPU(struct uv_cpu_nmi_s, __uv_cpu_nmi);
+#define uv_cpu_nmi (__get_cpu_var(__uv_cpu_nmi))
+#define uv_hub_nmi (uv_cpu_nmi.hub)
+#define uv_cpu_nmi_per(cpu) (per_cpu(__uv_cpu_nmi, cpu))
+#define uv_hub_nmi_per(cpu) (uv_cpu_nmi_per(cpu).hub)
+
+/* uv_cpu_nmi_states */
+#define UV_NMI_STATE_OUT 0
+#define UV_NMI_STATE_IN 1
+#define UV_NMI_STATE_DUMP 2
+#define UV_NMI_STATE_DUMP_DONE 3
+
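The per-hub and per-cpu structures above track who is inside the UV NMI window; the UV_NMI_STATE_* values form a small state machine per CPU. A hedged sketch of how a CPU might mark itself as handling an NMI using this state (illustrative, not the real handler):

/* Sketch only: claim the per-cpu NMI state before doing any work. */
static bool uv_cpu_enter_nmi_sketch(void)
{
	if (atomic_cmpxchg(&uv_cpu_nmi.state,
			   UV_NMI_STATE_OUT, UV_NMI_STATE_IN) != UV_NMI_STATE_OUT)
		return false;		/* already inside an NMI on this CPU */
	uv_cpu_nmi.queries++;
	return true;
}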
/* Update SCIR state */
static inline void uv_set_scir_bits(unsigned char value)
{
@@ -599,6 +669,7 @@ static inline void uv_hub_send_ipi(int pnode, int apicid, int vector)
* 1 - UV1 rev 1.0 initial silicon
* 2 - UV1 rev 2.0 production silicon
* 3 - UV2 rev 1.0 initial silicon
+ * 5 - UV3 rev 1.0 initial silicon
*/
static inline int uv_get_min_hub_revision_id(void)
{
diff --git a/arch/x86/include/asm/uv/uv_mmrs.h b/arch/x86/include/asm/uv/uv_mmrs.h
index cf1d73643f6..ddd8db6b6e7 100644
--- a/arch/x86/include/asm/uv/uv_mmrs.h
+++ b/arch/x86/include/asm/uv/uv_mmrs.h
@@ -5,16 +5,25 @@
*
* SGI UV MMR definitions
*
- * Copyright (C) 2007-2011 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2007-2014 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_X86_UV_UV_MMRS_H
#define _ASM_X86_UV_UV_MMRS_H
/*
- * This file contains MMR definitions for both UV1 & UV2 hubs.
+ * This file contains MMR definitions for all UV hubs types.
*
- * In general, MMR addresses and structures are identical on both hubs.
+ * To minimize coding differences between hub types, the symbols are
+ * grouped by architecture types.
+ *
+ * UVH - definitions common to all UV hub types.
+ * UVXH - definitions common to all UV eXtended hub types (currently 2 & 3).
+ * UV1H - definitions specific to UV type 1 hub.
+ * UV2H - definitions specific to UV type 2 hub.
+ * UV3H - definitions specific to UV type 3 hub.
+ *
+ * So in general, MMR addresses and structures are identical on all hub types.
* These MMRs are identified as:
* #define UVH_xxx <address>
* union uvh_xxx {
@@ -23,24 +32,36 @@
* } s;
* };
*
- * If the MMR exists on both hub type but has different addresses or
- * contents, the MMR definition is similar to:
- * #define UV1H_xxx <uv1 address>
- * #define UV2H_xxx <uv2address>
- * #define UVH_xxx (is_uv1_hub() ? UV1H_xxx : UV2H_xxx)
+ * If the MMR exists on all hub types but has different addresses:
+ * #define UV1Hxxx a
+ * #define UV2Hxxx b
+ * #define UV3Hxxx c
+ * #define UVHxxx (is_uv1_hub() ? UV1Hxxx :
+ * (is_uv2_hub() ? UV2Hxxx :
+ * UV3Hxxx))
+ *
+ * If the MMR exists on all hub types > 1 but has different addresses:
+ * #define UV2Hxxx b
+ * #define UV3Hxxx c
+ * #define UVXHxxx (is_uv2_hub() ? UV2Hxxx :
+ *				UV3Hxxx)
+ *
* union uvh_xxx {
* unsigned long v;
- * struct uv1h_int_cmpd_s { (Common fields only)
+ * struct uvh_xxx_s { # Common fields only
* } s;
- * struct uv1h_int_cmpd_s { (Full UV1 definition)
+ * struct uv1h_xxx_s { # Full UV1 definition (*)
* } s1;
- * struct uv2h_int_cmpd_s { (Full UV2 definition)
+ * struct uv2h_xxx_s { # Full UV2 definition (*)
* } s2;
+ * struct uv3h_xxx_s { # Full UV3 definition (*)
+ * } s3;
* };
+ * (* - if present and different from the common struct)
*
- * Only essential difference are enumerated. For example, if the address is
- * the same for both UV1 & UV2, only a single #define is generated. Likewise,
- * if the contents is the same for both hubs, only the "s" structure is
+ * Only essential differences are enumerated. For example, if the address is
+ * the same for all UVs, only a single #define is generated. Likewise,
+ * if the contents are the same for all hubs, only the "s" structure is
* generated.
*
* If the MMR exists on ONLY 1 type of hub, no generic definition is
@@ -51,6 +72,8 @@
* struct uvh_int_cmpd_s {
* } sn;
* };
+ *
+ * (GEN Flags: mflags_opt= undefs=0 UV23=UVXH)
*/
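Given this grouping, the same MMR value can be viewed through the common struct or a hub-specific one. A short sketch of the intended access pattern, using union uvh_event_occurred0_u defined further down in this file; the register, fields, and the is_uvx_hub() check (from uv_hub.h) are chosen purely for illustration:

/* Hedged sketch of the s/s1/sx/s2/s3 view convention. */
static void show_event_occurred0_sketch(unsigned long mmr_val)
{
	union uvh_event_occurred0_u e;

	e.v = mmr_val;
	if (e.s.lb_hcerr)			/* common field: valid on every hub */
		pr_info("LB HCERR pending\n");
	if (is_uvx_hub() && e.sx.profile_int)	/* UV2/UV3-only view */
		pr_info("profile interrupt pending\n");
}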
#define UV_MMR_ENABLE (1UL << 63)
@@ -58,15 +81,18 @@
#define UV1_HUB_PART_NUMBER 0x88a5
#define UV2_HUB_PART_NUMBER 0x8eb8
#define UV2_HUB_PART_NUMBER_X 0x1111
+#define UV3_HUB_PART_NUMBER 0x9578
+#define UV3_HUB_PART_NUMBER_X 0x4321
-/* Compat: if this #define is present, UV headers support UV2 */
+/* Compat: Indicate which UV Hubs are supported. */
#define UV2_HUB_IS_SUPPORTED 1
+#define UV3_HUB_IS_SUPPORTED 1
/* ========================================================================= */
/* UVH_BAU_DATA_BROADCAST */
/* ========================================================================= */
-#define UVH_BAU_DATA_BROADCAST 0x61688UL
-#define UVH_BAU_DATA_BROADCAST_32 0x440
+#define UVH_BAU_DATA_BROADCAST 0x61688UL
+#define UVH_BAU_DATA_BROADCAST_32 0x440
#define UVH_BAU_DATA_BROADCAST_ENABLE_SHFT 0
#define UVH_BAU_DATA_BROADCAST_ENABLE_MASK 0x0000000000000001UL
@@ -82,8 +108,8 @@ union uvh_bau_data_broadcast_u {
/* ========================================================================= */
/* UVH_BAU_DATA_CONFIG */
/* ========================================================================= */
-#define UVH_BAU_DATA_CONFIG 0x61680UL
-#define UVH_BAU_DATA_CONFIG_32 0x438
+#define UVH_BAU_DATA_CONFIG 0x61680UL
+#define UVH_BAU_DATA_CONFIG_32 0x438
#define UVH_BAU_DATA_CONFIG_VECTOR_SHFT 0
#define UVH_BAU_DATA_CONFIG_DM_SHFT 8
@@ -121,10 +147,14 @@ union uvh_bau_data_config_u {
/* ========================================================================= */
/* UVH_EVENT_OCCURRED0 */
/* ========================================================================= */
-#define UVH_EVENT_OCCURRED0 0x70000UL
-#define UVH_EVENT_OCCURRED0_32 0x5e8
+#define UVH_EVENT_OCCURRED0 0x70000UL
+#define UVH_EVENT_OCCURRED0_32 0x5e8
+
+#define UVH_EVENT_OCCURRED0_LB_HCERR_SHFT 0
+#define UVH_EVENT_OCCURRED0_RH_AOERR0_SHFT 11
+#define UVH_EVENT_OCCURRED0_LB_HCERR_MASK 0x0000000000000001UL
+#define UVH_EVENT_OCCURRED0_RH_AOERR0_MASK 0x0000000000000800UL
-#define UV1H_EVENT_OCCURRED0_LB_HCERR_SHFT 0
#define UV1H_EVENT_OCCURRED0_GR0_HCERR_SHFT 1
#define UV1H_EVENT_OCCURRED0_GR1_HCERR_SHFT 2
#define UV1H_EVENT_OCCURRED0_LH_HCERR_SHFT 3
@@ -135,7 +165,6 @@ union uvh_bau_data_config_u {
#define UV1H_EVENT_OCCURRED0_GR0_AOERR0_SHFT 8
#define UV1H_EVENT_OCCURRED0_GR1_AOERR0_SHFT 9
#define UV1H_EVENT_OCCURRED0_LH_AOERR0_SHFT 10
-#define UV1H_EVENT_OCCURRED0_RH_AOERR0_SHFT 11
#define UV1H_EVENT_OCCURRED0_XN_AOERR0_SHFT 12
#define UV1H_EVENT_OCCURRED0_SI_AOERR0_SHFT 13
#define UV1H_EVENT_OCCURRED0_LB_AOERR1_SHFT 14
@@ -181,7 +210,6 @@ union uvh_bau_data_config_u {
#define UV1H_EVENT_OCCURRED0_RTC3_SHFT 54
#define UV1H_EVENT_OCCURRED0_BAU_DATA_SHFT 55
#define UV1H_EVENT_OCCURRED0_POWER_MANAGEMENT_REQ_SHFT 56
-#define UV1H_EVENT_OCCURRED0_LB_HCERR_MASK 0x0000000000000001UL
#define UV1H_EVENT_OCCURRED0_GR0_HCERR_MASK 0x0000000000000002UL
#define UV1H_EVENT_OCCURRED0_GR1_HCERR_MASK 0x0000000000000004UL
#define UV1H_EVENT_OCCURRED0_LH_HCERR_MASK 0x0000000000000008UL
@@ -192,7 +220,6 @@ union uvh_bau_data_config_u {
#define UV1H_EVENT_OCCURRED0_GR0_AOERR0_MASK 0x0000000000000100UL
#define UV1H_EVENT_OCCURRED0_GR1_AOERR0_MASK 0x0000000000000200UL
#define UV1H_EVENT_OCCURRED0_LH_AOERR0_MASK 0x0000000000000400UL
-#define UV1H_EVENT_OCCURRED0_RH_AOERR0_MASK 0x0000000000000800UL
#define UV1H_EVENT_OCCURRED0_XN_AOERR0_MASK 0x0000000000001000UL
#define UV1H_EVENT_OCCURRED0_SI_AOERR0_MASK 0x0000000000002000UL
#define UV1H_EVENT_OCCURRED0_LB_AOERR1_MASK 0x0000000000004000UL
@@ -239,188 +266,130 @@ union uvh_bau_data_config_u {
#define UV1H_EVENT_OCCURRED0_BAU_DATA_MASK 0x0080000000000000UL
#define UV1H_EVENT_OCCURRED0_POWER_MANAGEMENT_REQ_MASK 0x0100000000000000UL
-#define UV2H_EVENT_OCCURRED0_LB_HCERR_SHFT 0
-#define UV2H_EVENT_OCCURRED0_QP_HCERR_SHFT 1
-#define UV2H_EVENT_OCCURRED0_RH_HCERR_SHFT 2
-#define UV2H_EVENT_OCCURRED0_LH0_HCERR_SHFT 3
-#define UV2H_EVENT_OCCURRED0_LH1_HCERR_SHFT 4
-#define UV2H_EVENT_OCCURRED0_GR0_HCERR_SHFT 5
-#define UV2H_EVENT_OCCURRED0_GR1_HCERR_SHFT 6
-#define UV2H_EVENT_OCCURRED0_NI0_HCERR_SHFT 7
-#define UV2H_EVENT_OCCURRED0_NI1_HCERR_SHFT 8
-#define UV2H_EVENT_OCCURRED0_LB_AOERR0_SHFT 9
-#define UV2H_EVENT_OCCURRED0_QP_AOERR0_SHFT 10
-#define UV2H_EVENT_OCCURRED0_RH_AOERR0_SHFT 11
-#define UV2H_EVENT_OCCURRED0_LH0_AOERR0_SHFT 12
-#define UV2H_EVENT_OCCURRED0_LH1_AOERR0_SHFT 13
-#define UV2H_EVENT_OCCURRED0_GR0_AOERR0_SHFT 14
-#define UV2H_EVENT_OCCURRED0_GR1_AOERR0_SHFT 15
-#define UV2H_EVENT_OCCURRED0_XB_AOERR0_SHFT 16
-#define UV2H_EVENT_OCCURRED0_RT_AOERR0_SHFT 17
-#define UV2H_EVENT_OCCURRED0_NI0_AOERR0_SHFT 18
-#define UV2H_EVENT_OCCURRED0_NI1_AOERR0_SHFT 19
-#define UV2H_EVENT_OCCURRED0_LB_AOERR1_SHFT 20
-#define UV2H_EVENT_OCCURRED0_QP_AOERR1_SHFT 21
-#define UV2H_EVENT_OCCURRED0_RH_AOERR1_SHFT 22
-#define UV2H_EVENT_OCCURRED0_LH0_AOERR1_SHFT 23
-#define UV2H_EVENT_OCCURRED0_LH1_AOERR1_SHFT 24
-#define UV2H_EVENT_OCCURRED0_GR0_AOERR1_SHFT 25
-#define UV2H_EVENT_OCCURRED0_GR1_AOERR1_SHFT 26
-#define UV2H_EVENT_OCCURRED0_XB_AOERR1_SHFT 27
-#define UV2H_EVENT_OCCURRED0_RT_AOERR1_SHFT 28
-#define UV2H_EVENT_OCCURRED0_NI0_AOERR1_SHFT 29
-#define UV2H_EVENT_OCCURRED0_NI1_AOERR1_SHFT 30
-#define UV2H_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_SHFT 31
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_0_SHFT 32
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_1_SHFT 33
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_2_SHFT 34
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_3_SHFT 35
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_4_SHFT 36
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_5_SHFT 37
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_6_SHFT 38
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_7_SHFT 39
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_8_SHFT 40
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_9_SHFT 41
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_10_SHFT 42
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_11_SHFT 43
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_12_SHFT 44
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_13_SHFT 45
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_14_SHFT 46
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_15_SHFT 47
-#define UV2H_EVENT_OCCURRED0_L1_NMI_INT_SHFT 48
-#define UV2H_EVENT_OCCURRED0_STOP_CLOCK_SHFT 49
-#define UV2H_EVENT_OCCURRED0_ASIC_TO_L1_SHFT 50
-#define UV2H_EVENT_OCCURRED0_L1_TO_ASIC_SHFT 51
-#define UV2H_EVENT_OCCURRED0_LA_SEQ_TRIGGER_SHFT 52
-#define UV2H_EVENT_OCCURRED0_IPI_INT_SHFT 53
-#define UV2H_EVENT_OCCURRED0_EXTIO_INT0_SHFT 54
-#define UV2H_EVENT_OCCURRED0_EXTIO_INT1_SHFT 55
-#define UV2H_EVENT_OCCURRED0_EXTIO_INT2_SHFT 56
-#define UV2H_EVENT_OCCURRED0_EXTIO_INT3_SHFT 57
-#define UV2H_EVENT_OCCURRED0_PROFILE_INT_SHFT 58
-#define UV2H_EVENT_OCCURRED0_LB_HCERR_MASK 0x0000000000000001UL
-#define UV2H_EVENT_OCCURRED0_QP_HCERR_MASK 0x0000000000000002UL
-#define UV2H_EVENT_OCCURRED0_RH_HCERR_MASK 0x0000000000000004UL
-#define UV2H_EVENT_OCCURRED0_LH0_HCERR_MASK 0x0000000000000008UL
-#define UV2H_EVENT_OCCURRED0_LH1_HCERR_MASK 0x0000000000000010UL
-#define UV2H_EVENT_OCCURRED0_GR0_HCERR_MASK 0x0000000000000020UL
-#define UV2H_EVENT_OCCURRED0_GR1_HCERR_MASK 0x0000000000000040UL
-#define UV2H_EVENT_OCCURRED0_NI0_HCERR_MASK 0x0000000000000080UL
-#define UV2H_EVENT_OCCURRED0_NI1_HCERR_MASK 0x0000000000000100UL
-#define UV2H_EVENT_OCCURRED0_LB_AOERR0_MASK 0x0000000000000200UL
-#define UV2H_EVENT_OCCURRED0_QP_AOERR0_MASK 0x0000000000000400UL
-#define UV2H_EVENT_OCCURRED0_RH_AOERR0_MASK 0x0000000000000800UL
-#define UV2H_EVENT_OCCURRED0_LH0_AOERR0_MASK 0x0000000000001000UL
-#define UV2H_EVENT_OCCURRED0_LH1_AOERR0_MASK 0x0000000000002000UL
-#define UV2H_EVENT_OCCURRED0_GR0_AOERR0_MASK 0x0000000000004000UL
-#define UV2H_EVENT_OCCURRED0_GR1_AOERR0_MASK 0x0000000000008000UL
-#define UV2H_EVENT_OCCURRED0_XB_AOERR0_MASK 0x0000000000010000UL
-#define UV2H_EVENT_OCCURRED0_RT_AOERR0_MASK 0x0000000000020000UL
-#define UV2H_EVENT_OCCURRED0_NI0_AOERR0_MASK 0x0000000000040000UL
-#define UV2H_EVENT_OCCURRED0_NI1_AOERR0_MASK 0x0000000000080000UL
-#define UV2H_EVENT_OCCURRED0_LB_AOERR1_MASK 0x0000000000100000UL
-#define UV2H_EVENT_OCCURRED0_QP_AOERR1_MASK 0x0000000000200000UL
-#define UV2H_EVENT_OCCURRED0_RH_AOERR1_MASK 0x0000000000400000UL
-#define UV2H_EVENT_OCCURRED0_LH0_AOERR1_MASK 0x0000000000800000UL
-#define UV2H_EVENT_OCCURRED0_LH1_AOERR1_MASK 0x0000000001000000UL
-#define UV2H_EVENT_OCCURRED0_GR0_AOERR1_MASK 0x0000000002000000UL
-#define UV2H_EVENT_OCCURRED0_GR1_AOERR1_MASK 0x0000000004000000UL
-#define UV2H_EVENT_OCCURRED0_XB_AOERR1_MASK 0x0000000008000000UL
-#define UV2H_EVENT_OCCURRED0_RT_AOERR1_MASK 0x0000000010000000UL
-#define UV2H_EVENT_OCCURRED0_NI0_AOERR1_MASK 0x0000000020000000UL
-#define UV2H_EVENT_OCCURRED0_NI1_AOERR1_MASK 0x0000000040000000UL
-#define UV2H_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_MASK 0x0000000080000000UL
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_0_MASK 0x0000000100000000UL
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_1_MASK 0x0000000200000000UL
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_2_MASK 0x0000000400000000UL
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_3_MASK 0x0000000800000000UL
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_4_MASK 0x0000001000000000UL
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_5_MASK 0x0000002000000000UL
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_6_MASK 0x0000004000000000UL
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_7_MASK 0x0000008000000000UL
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_8_MASK 0x0000010000000000UL
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_9_MASK 0x0000020000000000UL
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_10_MASK 0x0000040000000000UL
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_11_MASK 0x0000080000000000UL
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_12_MASK 0x0000100000000000UL
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_13_MASK 0x0000200000000000UL
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_14_MASK 0x0000400000000000UL
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_15_MASK 0x0000800000000000UL
-#define UV2H_EVENT_OCCURRED0_L1_NMI_INT_MASK 0x0001000000000000UL
-#define UV2H_EVENT_OCCURRED0_STOP_CLOCK_MASK 0x0002000000000000UL
-#define UV2H_EVENT_OCCURRED0_ASIC_TO_L1_MASK 0x0004000000000000UL
-#define UV2H_EVENT_OCCURRED0_L1_TO_ASIC_MASK 0x0008000000000000UL
-#define UV2H_EVENT_OCCURRED0_LA_SEQ_TRIGGER_MASK 0x0010000000000000UL
-#define UV2H_EVENT_OCCURRED0_IPI_INT_MASK 0x0020000000000000UL
-#define UV2H_EVENT_OCCURRED0_EXTIO_INT0_MASK 0x0040000000000000UL
-#define UV2H_EVENT_OCCURRED0_EXTIO_INT1_MASK 0x0080000000000000UL
-#define UV2H_EVENT_OCCURRED0_EXTIO_INT2_MASK 0x0100000000000000UL
-#define UV2H_EVENT_OCCURRED0_EXTIO_INT3_MASK 0x0200000000000000UL
-#define UV2H_EVENT_OCCURRED0_PROFILE_INT_MASK 0x0400000000000000UL
+#define UVXH_EVENT_OCCURRED0_QP_HCERR_SHFT 1
+#define UVXH_EVENT_OCCURRED0_RH_HCERR_SHFT 2
+#define UVXH_EVENT_OCCURRED0_LH0_HCERR_SHFT 3
+#define UVXH_EVENT_OCCURRED0_LH1_HCERR_SHFT 4
+#define UVXH_EVENT_OCCURRED0_GR0_HCERR_SHFT 5
+#define UVXH_EVENT_OCCURRED0_GR1_HCERR_SHFT 6
+#define UVXH_EVENT_OCCURRED0_NI0_HCERR_SHFT 7
+#define UVXH_EVENT_OCCURRED0_NI1_HCERR_SHFT 8
+#define UVXH_EVENT_OCCURRED0_LB_AOERR0_SHFT 9
+#define UVXH_EVENT_OCCURRED0_QP_AOERR0_SHFT 10
+#define UVXH_EVENT_OCCURRED0_LH0_AOERR0_SHFT 12
+#define UVXH_EVENT_OCCURRED0_LH1_AOERR0_SHFT 13
+#define UVXH_EVENT_OCCURRED0_GR0_AOERR0_SHFT 14
+#define UVXH_EVENT_OCCURRED0_GR1_AOERR0_SHFT 15
+#define UVXH_EVENT_OCCURRED0_XB_AOERR0_SHFT 16
+#define UVXH_EVENT_OCCURRED0_RT_AOERR0_SHFT 17
+#define UVXH_EVENT_OCCURRED0_NI0_AOERR0_SHFT 18
+#define UVXH_EVENT_OCCURRED0_NI1_AOERR0_SHFT 19
+#define UVXH_EVENT_OCCURRED0_LB_AOERR1_SHFT 20
+#define UVXH_EVENT_OCCURRED0_QP_AOERR1_SHFT 21
+#define UVXH_EVENT_OCCURRED0_RH_AOERR1_SHFT 22
+#define UVXH_EVENT_OCCURRED0_LH0_AOERR1_SHFT 23
+#define UVXH_EVENT_OCCURRED0_LH1_AOERR1_SHFT 24
+#define UVXH_EVENT_OCCURRED0_GR0_AOERR1_SHFT 25
+#define UVXH_EVENT_OCCURRED0_GR1_AOERR1_SHFT 26
+#define UVXH_EVENT_OCCURRED0_XB_AOERR1_SHFT 27
+#define UVXH_EVENT_OCCURRED0_RT_AOERR1_SHFT 28
+#define UVXH_EVENT_OCCURRED0_NI0_AOERR1_SHFT 29
+#define UVXH_EVENT_OCCURRED0_NI1_AOERR1_SHFT 30
+#define UVXH_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_SHFT 31
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_0_SHFT 32
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_1_SHFT 33
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_2_SHFT 34
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_3_SHFT 35
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_4_SHFT 36
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_5_SHFT 37
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_6_SHFT 38
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_7_SHFT 39
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_8_SHFT 40
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_9_SHFT 41
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_10_SHFT 42
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_11_SHFT 43
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_12_SHFT 44
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_13_SHFT 45
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_14_SHFT 46
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_15_SHFT 47
+#define UVXH_EVENT_OCCURRED0_L1_NMI_INT_SHFT 48
+#define UVXH_EVENT_OCCURRED0_STOP_CLOCK_SHFT 49
+#define UVXH_EVENT_OCCURRED0_ASIC_TO_L1_SHFT 50
+#define UVXH_EVENT_OCCURRED0_L1_TO_ASIC_SHFT 51
+#define UVXH_EVENT_OCCURRED0_LA_SEQ_TRIGGER_SHFT 52
+#define UVXH_EVENT_OCCURRED0_IPI_INT_SHFT 53
+#define UVXH_EVENT_OCCURRED0_EXTIO_INT0_SHFT 54
+#define UVXH_EVENT_OCCURRED0_EXTIO_INT1_SHFT 55
+#define UVXH_EVENT_OCCURRED0_EXTIO_INT2_SHFT 56
+#define UVXH_EVENT_OCCURRED0_EXTIO_INT3_SHFT 57
+#define UVXH_EVENT_OCCURRED0_PROFILE_INT_SHFT 58
+#define UVXH_EVENT_OCCURRED0_QP_HCERR_MASK 0x0000000000000002UL
+#define UVXH_EVENT_OCCURRED0_RH_HCERR_MASK 0x0000000000000004UL
+#define UVXH_EVENT_OCCURRED0_LH0_HCERR_MASK 0x0000000000000008UL
+#define UVXH_EVENT_OCCURRED0_LH1_HCERR_MASK 0x0000000000000010UL
+#define UVXH_EVENT_OCCURRED0_GR0_HCERR_MASK 0x0000000000000020UL
+#define UVXH_EVENT_OCCURRED0_GR1_HCERR_MASK 0x0000000000000040UL
+#define UVXH_EVENT_OCCURRED0_NI0_HCERR_MASK 0x0000000000000080UL
+#define UVXH_EVENT_OCCURRED0_NI1_HCERR_MASK 0x0000000000000100UL
+#define UVXH_EVENT_OCCURRED0_LB_AOERR0_MASK 0x0000000000000200UL
+#define UVXH_EVENT_OCCURRED0_QP_AOERR0_MASK 0x0000000000000400UL
+#define UVXH_EVENT_OCCURRED0_LH0_AOERR0_MASK 0x0000000000001000UL
+#define UVXH_EVENT_OCCURRED0_LH1_AOERR0_MASK 0x0000000000002000UL
+#define UVXH_EVENT_OCCURRED0_GR0_AOERR0_MASK 0x0000000000004000UL
+#define UVXH_EVENT_OCCURRED0_GR1_AOERR0_MASK 0x0000000000008000UL
+#define UVXH_EVENT_OCCURRED0_XB_AOERR0_MASK 0x0000000000010000UL
+#define UVXH_EVENT_OCCURRED0_RT_AOERR0_MASK 0x0000000000020000UL
+#define UVXH_EVENT_OCCURRED0_NI0_AOERR0_MASK 0x0000000000040000UL
+#define UVXH_EVENT_OCCURRED0_NI1_AOERR0_MASK 0x0000000000080000UL
+#define UVXH_EVENT_OCCURRED0_LB_AOERR1_MASK 0x0000000000100000UL
+#define UVXH_EVENT_OCCURRED0_QP_AOERR1_MASK 0x0000000000200000UL
+#define UVXH_EVENT_OCCURRED0_RH_AOERR1_MASK 0x0000000000400000UL
+#define UVXH_EVENT_OCCURRED0_LH0_AOERR1_MASK 0x0000000000800000UL
+#define UVXH_EVENT_OCCURRED0_LH1_AOERR1_MASK 0x0000000001000000UL
+#define UVXH_EVENT_OCCURRED0_GR0_AOERR1_MASK 0x0000000002000000UL
+#define UVXH_EVENT_OCCURRED0_GR1_AOERR1_MASK 0x0000000004000000UL
+#define UVXH_EVENT_OCCURRED0_XB_AOERR1_MASK 0x0000000008000000UL
+#define UVXH_EVENT_OCCURRED0_RT_AOERR1_MASK 0x0000000010000000UL
+#define UVXH_EVENT_OCCURRED0_NI0_AOERR1_MASK 0x0000000020000000UL
+#define UVXH_EVENT_OCCURRED0_NI1_AOERR1_MASK 0x0000000040000000UL
+#define UVXH_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_MASK 0x0000000080000000UL
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_0_MASK 0x0000000100000000UL
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_1_MASK 0x0000000200000000UL
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_2_MASK 0x0000000400000000UL
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_3_MASK 0x0000000800000000UL
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_4_MASK 0x0000001000000000UL
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_5_MASK 0x0000002000000000UL
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_6_MASK 0x0000004000000000UL
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_7_MASK 0x0000008000000000UL
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_8_MASK 0x0000010000000000UL
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_9_MASK 0x0000020000000000UL
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_10_MASK 0x0000040000000000UL
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_11_MASK 0x0000080000000000UL
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_12_MASK 0x0000100000000000UL
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_13_MASK 0x0000200000000000UL
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_14_MASK 0x0000400000000000UL
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_15_MASK 0x0000800000000000UL
+#define UVXH_EVENT_OCCURRED0_L1_NMI_INT_MASK 0x0001000000000000UL
+#define UVXH_EVENT_OCCURRED0_STOP_CLOCK_MASK 0x0002000000000000UL
+#define UVXH_EVENT_OCCURRED0_ASIC_TO_L1_MASK 0x0004000000000000UL
+#define UVXH_EVENT_OCCURRED0_L1_TO_ASIC_MASK 0x0008000000000000UL
+#define UVXH_EVENT_OCCURRED0_LA_SEQ_TRIGGER_MASK 0x0010000000000000UL
+#define UVXH_EVENT_OCCURRED0_IPI_INT_MASK 0x0020000000000000UL
+#define UVXH_EVENT_OCCURRED0_EXTIO_INT0_MASK 0x0040000000000000UL
+#define UVXH_EVENT_OCCURRED0_EXTIO_INT1_MASK 0x0080000000000000UL
+#define UVXH_EVENT_OCCURRED0_EXTIO_INT2_MASK 0x0100000000000000UL
+#define UVXH_EVENT_OCCURRED0_EXTIO_INT3_MASK 0x0200000000000000UL
+#define UVXH_EVENT_OCCURRED0_PROFILE_INT_MASK 0x0400000000000000UL
union uvh_event_occurred0_u {
unsigned long v;
- struct uv1h_event_occurred0_s {
+ struct uvh_event_occurred0_s {
unsigned long lb_hcerr:1; /* RW, W1C */
- unsigned long gr0_hcerr:1; /* RW, W1C */
- unsigned long gr1_hcerr:1; /* RW, W1C */
- unsigned long lh_hcerr:1; /* RW, W1C */
- unsigned long rh_hcerr:1; /* RW, W1C */
- unsigned long xn_hcerr:1; /* RW, W1C */
- unsigned long si_hcerr:1; /* RW, W1C */
- unsigned long lb_aoerr0:1; /* RW, W1C */
- unsigned long gr0_aoerr0:1; /* RW, W1C */
- unsigned long gr1_aoerr0:1; /* RW, W1C */
- unsigned long lh_aoerr0:1; /* RW, W1C */
+ unsigned long rsvd_1_10:10;
unsigned long rh_aoerr0:1; /* RW, W1C */
- unsigned long xn_aoerr0:1; /* RW, W1C */
- unsigned long si_aoerr0:1; /* RW, W1C */
- unsigned long lb_aoerr1:1; /* RW, W1C */
- unsigned long gr0_aoerr1:1; /* RW, W1C */
- unsigned long gr1_aoerr1:1; /* RW, W1C */
- unsigned long lh_aoerr1:1; /* RW, W1C */
- unsigned long rh_aoerr1:1; /* RW, W1C */
- unsigned long xn_aoerr1:1; /* RW, W1C */
- unsigned long si_aoerr1:1; /* RW, W1C */
- unsigned long rh_vpi_int:1; /* RW, W1C */
- unsigned long system_shutdown_int:1; /* RW, W1C */
- unsigned long lb_irq_int_0:1; /* RW, W1C */
- unsigned long lb_irq_int_1:1; /* RW, W1C */
- unsigned long lb_irq_int_2:1; /* RW, W1C */
- unsigned long lb_irq_int_3:1; /* RW, W1C */
- unsigned long lb_irq_int_4:1; /* RW, W1C */
- unsigned long lb_irq_int_5:1; /* RW, W1C */
- unsigned long lb_irq_int_6:1; /* RW, W1C */
- unsigned long lb_irq_int_7:1; /* RW, W1C */
- unsigned long lb_irq_int_8:1; /* RW, W1C */
- unsigned long lb_irq_int_9:1; /* RW, W1C */
- unsigned long lb_irq_int_10:1; /* RW, W1C */
- unsigned long lb_irq_int_11:1; /* RW, W1C */
- unsigned long lb_irq_int_12:1; /* RW, W1C */
- unsigned long lb_irq_int_13:1; /* RW, W1C */
- unsigned long lb_irq_int_14:1; /* RW, W1C */
- unsigned long lb_irq_int_15:1; /* RW, W1C */
- unsigned long l1_nmi_int:1; /* RW, W1C */
- unsigned long stop_clock:1; /* RW, W1C */
- unsigned long asic_to_l1:1; /* RW, W1C */
- unsigned long l1_to_asic:1; /* RW, W1C */
- unsigned long ltc_int:1; /* RW, W1C */
- unsigned long la_seq_trigger:1; /* RW, W1C */
- unsigned long ipi_int:1; /* RW, W1C */
- unsigned long extio_int0:1; /* RW, W1C */
- unsigned long extio_int1:1; /* RW, W1C */
- unsigned long extio_int2:1; /* RW, W1C */
- unsigned long extio_int3:1; /* RW, W1C */
- unsigned long profile_int:1; /* RW, W1C */
- unsigned long rtc0:1; /* RW, W1C */
- unsigned long rtc1:1; /* RW, W1C */
- unsigned long rtc2:1; /* RW, W1C */
- unsigned long rtc3:1; /* RW, W1C */
- unsigned long bau_data:1; /* RW, W1C */
- unsigned long power_management_req:1; /* RW, W1C */
- unsigned long rsvd_57_63:7;
- } s1;
- struct uv2h_event_occurred0_s {
+ unsigned long rsvd_12_63:52;
+ } s;
+ struct uvxh_event_occurred0_s {
unsigned long lb_hcerr:1; /* RW */
unsigned long qp_hcerr:1; /* RW */
unsigned long rh_hcerr:1; /* RW */
@@ -481,19 +450,37 @@ union uvh_event_occurred0_u {
unsigned long extio_int3:1; /* RW */
unsigned long profile_int:1; /* RW */
unsigned long rsvd_59_63:5;
- } s2;
+ } sx;
};
/* ========================================================================= */
/* UVH_EVENT_OCCURRED0_ALIAS */
/* ========================================================================= */
-#define UVH_EVENT_OCCURRED0_ALIAS 0x0000000000070008UL
-#define UVH_EVENT_OCCURRED0_ALIAS_32 0x5f0
+#define UVH_EVENT_OCCURRED0_ALIAS 0x70008UL
+#define UVH_EVENT_OCCURRED0_ALIAS_32 0x5f0
+
+
+/* ========================================================================= */
+/* UVH_EXTIO_INT0_BROADCAST */
+/* ========================================================================= */
+#define UVH_EXTIO_INT0_BROADCAST 0x61448UL
+#define UVH_EXTIO_INT0_BROADCAST_32 0x3f0
+
+#define UVH_EXTIO_INT0_BROADCAST_ENABLE_SHFT 0
+#define UVH_EXTIO_INT0_BROADCAST_ENABLE_MASK 0x0000000000000001UL
+
+union uvh_extio_int0_broadcast_u {
+ unsigned long v;
+ struct uvh_extio_int0_broadcast_s {
+ unsigned long enable:1; /* RW */
+ unsigned long rsvd_1_63:63;
+ } s;
+};
/* ========================================================================= */
/* UVH_GR0_TLB_INT0_CONFIG */
/* ========================================================================= */
-#define UVH_GR0_TLB_INT0_CONFIG 0x61b00UL
+#define UVH_GR0_TLB_INT0_CONFIG 0x61b00UL
#define UVH_GR0_TLB_INT0_CONFIG_VECTOR_SHFT 0
#define UVH_GR0_TLB_INT0_CONFIG_DM_SHFT 8
@@ -531,7 +518,7 @@ union uvh_gr0_tlb_int0_config_u {
/* ========================================================================= */
/* UVH_GR0_TLB_INT1_CONFIG */
/* ========================================================================= */
-#define UVH_GR0_TLB_INT1_CONFIG 0x61b40UL
+#define UVH_GR0_TLB_INT1_CONFIG 0x61b40UL
#define UVH_GR0_TLB_INT1_CONFIG_VECTOR_SHFT 0
#define UVH_GR0_TLB_INT1_CONFIG_DM_SHFT 8
@@ -571,9 +558,11 @@ union uvh_gr0_tlb_int1_config_u {
/* ========================================================================= */
#define UV1H_GR0_TLB_MMR_CONTROL 0x401080UL
#define UV2H_GR0_TLB_MMR_CONTROL 0xc01080UL
-#define UVH_GR0_TLB_MMR_CONTROL (is_uv1_hub() ? \
- UV1H_GR0_TLB_MMR_CONTROL : \
- UV2H_GR0_TLB_MMR_CONTROL)
+#define UV3H_GR0_TLB_MMR_CONTROL 0xc01080UL
+#define UVH_GR0_TLB_MMR_CONTROL \
+ (is_uv1_hub() ? UV1H_GR0_TLB_MMR_CONTROL : \
+ (is_uv2_hub() ? UV2H_GR0_TLB_MMR_CONTROL : \
+ UV3H_GR0_TLB_MMR_CONTROL))
#define UVH_GR0_TLB_MMR_CONTROL_INDEX_SHFT 0
#define UVH_GR0_TLB_MMR_CONTROL_MEM_SEL_SHFT 12
@@ -611,6 +600,21 @@ union uvh_gr0_tlb_int1_config_u {
#define UV1H_GR0_TLB_MMR_CONTROL_MMR_INJ_TLBRREG_MASK 0x0100000000000000UL
#define UV1H_GR0_TLB_MMR_CONTROL_MMR_INJ_TLBLRUV_MASK 0x1000000000000000UL
+#define UVXH_GR0_TLB_MMR_CONTROL_INDEX_SHFT 0
+#define UVXH_GR0_TLB_MMR_CONTROL_MEM_SEL_SHFT 12
+#define UVXH_GR0_TLB_MMR_CONTROL_AUTO_VALID_EN_SHFT 16
+#define UVXH_GR0_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_SHFT 20
+#define UVXH_GR0_TLB_MMR_CONTROL_MMR_WRITE_SHFT 30
+#define UVXH_GR0_TLB_MMR_CONTROL_MMR_READ_SHFT 31
+#define UVXH_GR0_TLB_MMR_CONTROL_MMR_OP_DONE_SHFT 32
+#define UVXH_GR0_TLB_MMR_CONTROL_INDEX_MASK 0x0000000000000fffUL
+#define UVXH_GR0_TLB_MMR_CONTROL_MEM_SEL_MASK 0x0000000000003000UL
+#define UVXH_GR0_TLB_MMR_CONTROL_AUTO_VALID_EN_MASK 0x0000000000010000UL
+#define UVXH_GR0_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_MASK 0x0000000000100000UL
+#define UVXH_GR0_TLB_MMR_CONTROL_MMR_WRITE_MASK 0x0000000040000000UL
+#define UVXH_GR0_TLB_MMR_CONTROL_MMR_READ_MASK 0x0000000080000000UL
+#define UVXH_GR0_TLB_MMR_CONTROL_MMR_OP_DONE_MASK 0x0000000100000000UL
+
#define UV2H_GR0_TLB_MMR_CONTROL_INDEX_SHFT 0
#define UV2H_GR0_TLB_MMR_CONTROL_MEM_SEL_SHFT 12
#define UV2H_GR0_TLB_MMR_CONTROL_AUTO_VALID_EN_SHFT 16
@@ -630,6 +634,23 @@ union uvh_gr0_tlb_int1_config_u {
#define UV2H_GR0_TLB_MMR_CONTROL_MMR_INJ_CON_MASK 0x0001000000000000UL
#define UV2H_GR0_TLB_MMR_CONTROL_MMR_INJ_TLBRAM_MASK 0x0010000000000000UL
+#define UV3H_GR0_TLB_MMR_CONTROL_INDEX_SHFT 0
+#define UV3H_GR0_TLB_MMR_CONTROL_MEM_SEL_SHFT 12
+#define UV3H_GR0_TLB_MMR_CONTROL_AUTO_VALID_EN_SHFT 16
+#define UV3H_GR0_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_SHFT 20
+#define UV3H_GR0_TLB_MMR_CONTROL_ECC_SEL_SHFT 21
+#define UV3H_GR0_TLB_MMR_CONTROL_MMR_WRITE_SHFT 30
+#define UV3H_GR0_TLB_MMR_CONTROL_MMR_READ_SHFT 31
+#define UV3H_GR0_TLB_MMR_CONTROL_MMR_OP_DONE_SHFT 32
+#define UV3H_GR0_TLB_MMR_CONTROL_INDEX_MASK 0x0000000000000fffUL
+#define UV3H_GR0_TLB_MMR_CONTROL_MEM_SEL_MASK 0x0000000000003000UL
+#define UV3H_GR0_TLB_MMR_CONTROL_AUTO_VALID_EN_MASK 0x0000000000010000UL
+#define UV3H_GR0_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_MASK 0x0000000000100000UL
+#define UV3H_GR0_TLB_MMR_CONTROL_ECC_SEL_MASK 0x0000000000200000UL
+#define UV3H_GR0_TLB_MMR_CONTROL_MMR_WRITE_MASK 0x0000000040000000UL
+#define UV3H_GR0_TLB_MMR_CONTROL_MMR_READ_MASK 0x0000000080000000UL
+#define UV3H_GR0_TLB_MMR_CONTROL_MMR_OP_DONE_MASK 0x0000000100000000UL
+
union uvh_gr0_tlb_mmr_control_u {
unsigned long v;
struct uvh_gr0_tlb_mmr_control_s {
@@ -642,7 +663,9 @@ union uvh_gr0_tlb_mmr_control_u {
unsigned long rsvd_21_29:9;
unsigned long mmr_write:1; /* WP */
unsigned long mmr_read:1; /* WP */
- unsigned long rsvd_32_63:32;
+ unsigned long rsvd_32_48:17;
+ unsigned long rsvd_49_51:3;
+ unsigned long rsvd_52_63:12;
} s;
struct uv1h_gr0_tlb_mmr_control_s {
unsigned long index:12; /* RW */
@@ -666,6 +689,23 @@ union uvh_gr0_tlb_mmr_control_u {
unsigned long mmr_inj_tlblruv:1; /* RW */
unsigned long rsvd_61_63:3;
} s1;
+ struct uvxh_gr0_tlb_mmr_control_s {
+ unsigned long index:12; /* RW */
+ unsigned long mem_sel:2; /* RW */
+ unsigned long rsvd_14_15:2;
+ unsigned long auto_valid_en:1; /* RW */
+ unsigned long rsvd_17_19:3;
+ unsigned long mmr_hash_index_en:1; /* RW */
+ unsigned long rsvd_21_29:9;
+ unsigned long mmr_write:1; /* WP */
+ unsigned long mmr_read:1; /* WP */
+ unsigned long mmr_op_done:1; /* RW */
+ unsigned long rsvd_33_47:15;
+ unsigned long rsvd_48:1;
+ unsigned long rsvd_49_51:3;
+ unsigned long rsvd_52:1;
+ unsigned long rsvd_53_63:11;
+ } sx;
struct uv2h_gr0_tlb_mmr_control_s {
unsigned long index:12; /* RW */
unsigned long mem_sel:2; /* RW */
@@ -683,6 +723,24 @@ union uvh_gr0_tlb_mmr_control_u {
unsigned long mmr_inj_tlbram:1; /* RW */
unsigned long rsvd_53_63:11;
} s2;
+ struct uv3h_gr0_tlb_mmr_control_s {
+ unsigned long index:12; /* RW */
+ unsigned long mem_sel:2; /* RW */
+ unsigned long rsvd_14_15:2;
+ unsigned long auto_valid_en:1; /* RW */
+ unsigned long rsvd_17_19:3;
+ unsigned long mmr_hash_index_en:1; /* RW */
+ unsigned long ecc_sel:1; /* RW */
+ unsigned long rsvd_22_29:8;
+ unsigned long mmr_write:1; /* WP */
+ unsigned long mmr_read:1; /* WP */
+ unsigned long mmr_op_done:1; /* RW */
+ unsigned long rsvd_33_47:15;
+ unsigned long undef_48:1; /* Undefined */
+ unsigned long rsvd_49_51:3;
+ unsigned long undef_52:1; /* Undefined */
+ unsigned long rsvd_53_63:11;
+ } s3;
};
/* ========================================================================= */
@@ -690,9 +748,11 @@ union uvh_gr0_tlb_mmr_control_u {
/* ========================================================================= */
#define UV1H_GR0_TLB_MMR_READ_DATA_HI 0x4010a0UL
#define UV2H_GR0_TLB_MMR_READ_DATA_HI 0xc010a0UL
-#define UVH_GR0_TLB_MMR_READ_DATA_HI (is_uv1_hub() ? \
- UV1H_GR0_TLB_MMR_READ_DATA_HI : \
- UV2H_GR0_TLB_MMR_READ_DATA_HI)
+#define UV3H_GR0_TLB_MMR_READ_DATA_HI 0xc010a0UL
+#define UVH_GR0_TLB_MMR_READ_DATA_HI \
+ (is_uv1_hub() ? UV1H_GR0_TLB_MMR_READ_DATA_HI : \
+ (is_uv2_hub() ? UV2H_GR0_TLB_MMR_READ_DATA_HI : \
+ UV3H_GR0_TLB_MMR_READ_DATA_HI))
#define UVH_GR0_TLB_MMR_READ_DATA_HI_PFN_SHFT 0
#define UVH_GR0_TLB_MMR_READ_DATA_HI_GAA_SHFT 41
@@ -703,6 +763,46 @@ union uvh_gr0_tlb_mmr_control_u {
#define UVH_GR0_TLB_MMR_READ_DATA_HI_DIRTY_MASK 0x0000080000000000UL
#define UVH_GR0_TLB_MMR_READ_DATA_HI_LARGER_MASK 0x0000100000000000UL
+#define UV1H_GR0_TLB_MMR_READ_DATA_HI_PFN_SHFT 0
+#define UV1H_GR0_TLB_MMR_READ_DATA_HI_GAA_SHFT 41
+#define UV1H_GR0_TLB_MMR_READ_DATA_HI_DIRTY_SHFT 43
+#define UV1H_GR0_TLB_MMR_READ_DATA_HI_LARGER_SHFT 44
+#define UV1H_GR0_TLB_MMR_READ_DATA_HI_PFN_MASK 0x000001ffffffffffUL
+#define UV1H_GR0_TLB_MMR_READ_DATA_HI_GAA_MASK 0x0000060000000000UL
+#define UV1H_GR0_TLB_MMR_READ_DATA_HI_DIRTY_MASK 0x0000080000000000UL
+#define UV1H_GR0_TLB_MMR_READ_DATA_HI_LARGER_MASK 0x0000100000000000UL
+
+#define UVXH_GR0_TLB_MMR_READ_DATA_HI_PFN_SHFT 0
+#define UVXH_GR0_TLB_MMR_READ_DATA_HI_GAA_SHFT 41
+#define UVXH_GR0_TLB_MMR_READ_DATA_HI_DIRTY_SHFT 43
+#define UVXH_GR0_TLB_MMR_READ_DATA_HI_LARGER_SHFT 44
+#define UVXH_GR0_TLB_MMR_READ_DATA_HI_PFN_MASK 0x000001ffffffffffUL
+#define UVXH_GR0_TLB_MMR_READ_DATA_HI_GAA_MASK 0x0000060000000000UL
+#define UVXH_GR0_TLB_MMR_READ_DATA_HI_DIRTY_MASK 0x0000080000000000UL
+#define UVXH_GR0_TLB_MMR_READ_DATA_HI_LARGER_MASK 0x0000100000000000UL
+
+#define UV2H_GR0_TLB_MMR_READ_DATA_HI_PFN_SHFT 0
+#define UV2H_GR0_TLB_MMR_READ_DATA_HI_GAA_SHFT 41
+#define UV2H_GR0_TLB_MMR_READ_DATA_HI_DIRTY_SHFT 43
+#define UV2H_GR0_TLB_MMR_READ_DATA_HI_LARGER_SHFT 44
+#define UV2H_GR0_TLB_MMR_READ_DATA_HI_PFN_MASK 0x000001ffffffffffUL
+#define UV2H_GR0_TLB_MMR_READ_DATA_HI_GAA_MASK 0x0000060000000000UL
+#define UV2H_GR0_TLB_MMR_READ_DATA_HI_DIRTY_MASK 0x0000080000000000UL
+#define UV2H_GR0_TLB_MMR_READ_DATA_HI_LARGER_MASK 0x0000100000000000UL
+
+#define UV3H_GR0_TLB_MMR_READ_DATA_HI_PFN_SHFT 0
+#define UV3H_GR0_TLB_MMR_READ_DATA_HI_GAA_SHFT 41
+#define UV3H_GR0_TLB_MMR_READ_DATA_HI_DIRTY_SHFT 43
+#define UV3H_GR0_TLB_MMR_READ_DATA_HI_LARGER_SHFT 44
+#define UV3H_GR0_TLB_MMR_READ_DATA_HI_AA_EXT_SHFT 45
+#define UV3H_GR0_TLB_MMR_READ_DATA_HI_WAY_ECC_SHFT 55
+#define UV3H_GR0_TLB_MMR_READ_DATA_HI_PFN_MASK 0x000001ffffffffffUL
+#define UV3H_GR0_TLB_MMR_READ_DATA_HI_GAA_MASK 0x0000060000000000UL
+#define UV3H_GR0_TLB_MMR_READ_DATA_HI_DIRTY_MASK 0x0000080000000000UL
+#define UV3H_GR0_TLB_MMR_READ_DATA_HI_LARGER_MASK 0x0000100000000000UL
+#define UV3H_GR0_TLB_MMR_READ_DATA_HI_AA_EXT_MASK 0x0000200000000000UL
+#define UV3H_GR0_TLB_MMR_READ_DATA_HI_WAY_ECC_MASK 0xff80000000000000UL
+
union uvh_gr0_tlb_mmr_read_data_hi_u {
unsigned long v;
struct uvh_gr0_tlb_mmr_read_data_hi_s {
@@ -712,6 +812,36 @@ union uvh_gr0_tlb_mmr_read_data_hi_u {
unsigned long larger:1; /* RO */
unsigned long rsvd_45_63:19;
} s;
+ struct uv1h_gr0_tlb_mmr_read_data_hi_s {
+ unsigned long pfn:41; /* RO */
+ unsigned long gaa:2; /* RO */
+ unsigned long dirty:1; /* RO */
+ unsigned long larger:1; /* RO */
+ unsigned long rsvd_45_63:19;
+ } s1;
+ struct uvxh_gr0_tlb_mmr_read_data_hi_s {
+ unsigned long pfn:41; /* RO */
+ unsigned long gaa:2; /* RO */
+ unsigned long dirty:1; /* RO */
+ unsigned long larger:1; /* RO */
+ unsigned long rsvd_45_63:19;
+ } sx;
+ struct uv2h_gr0_tlb_mmr_read_data_hi_s {
+ unsigned long pfn:41; /* RO */
+ unsigned long gaa:2; /* RO */
+ unsigned long dirty:1; /* RO */
+ unsigned long larger:1; /* RO */
+ unsigned long rsvd_45_63:19;
+ } s2;
+ struct uv3h_gr0_tlb_mmr_read_data_hi_s {
+ unsigned long pfn:41; /* RO */
+ unsigned long gaa:2; /* RO */
+ unsigned long dirty:1; /* RO */
+ unsigned long larger:1; /* RO */
+ unsigned long aa_ext:1; /* RO */
+ unsigned long undef_46_54:9; /* Undefined */
+ unsigned long way_ecc:9; /* RO */
+ } s3;
};
/* ========================================================================= */
@@ -719,9 +849,11 @@ union uvh_gr0_tlb_mmr_read_data_hi_u {
/* ========================================================================= */
#define UV1H_GR0_TLB_MMR_READ_DATA_LO 0x4010a8UL
#define UV2H_GR0_TLB_MMR_READ_DATA_LO 0xc010a8UL
-#define UVH_GR0_TLB_MMR_READ_DATA_LO (is_uv1_hub() ? \
- UV1H_GR0_TLB_MMR_READ_DATA_LO : \
- UV2H_GR0_TLB_MMR_READ_DATA_LO)
+#define UV3H_GR0_TLB_MMR_READ_DATA_LO 0xc010a8UL
+#define UVH_GR0_TLB_MMR_READ_DATA_LO \
+ (is_uv1_hub() ? UV1H_GR0_TLB_MMR_READ_DATA_LO : \
+ (is_uv2_hub() ? UV2H_GR0_TLB_MMR_READ_DATA_LO : \
+ UV3H_GR0_TLB_MMR_READ_DATA_LO))
#define UVH_GR0_TLB_MMR_READ_DATA_LO_VPN_SHFT 0
#define UVH_GR0_TLB_MMR_READ_DATA_LO_ASID_SHFT 39
@@ -730,6 +862,34 @@ union uvh_gr0_tlb_mmr_read_data_hi_u {
#define UVH_GR0_TLB_MMR_READ_DATA_LO_ASID_MASK 0x7fffff8000000000UL
#define UVH_GR0_TLB_MMR_READ_DATA_LO_VALID_MASK 0x8000000000000000UL
+#define UV1H_GR0_TLB_MMR_READ_DATA_LO_VPN_SHFT 0
+#define UV1H_GR0_TLB_MMR_READ_DATA_LO_ASID_SHFT 39
+#define UV1H_GR0_TLB_MMR_READ_DATA_LO_VALID_SHFT 63
+#define UV1H_GR0_TLB_MMR_READ_DATA_LO_VPN_MASK 0x0000007fffffffffUL
+#define UV1H_GR0_TLB_MMR_READ_DATA_LO_ASID_MASK 0x7fffff8000000000UL
+#define UV1H_GR0_TLB_MMR_READ_DATA_LO_VALID_MASK 0x8000000000000000UL
+
+#define UVXH_GR0_TLB_MMR_READ_DATA_LO_VPN_SHFT 0
+#define UVXH_GR0_TLB_MMR_READ_DATA_LO_ASID_SHFT 39
+#define UVXH_GR0_TLB_MMR_READ_DATA_LO_VALID_SHFT 63
+#define UVXH_GR0_TLB_MMR_READ_DATA_LO_VPN_MASK 0x0000007fffffffffUL
+#define UVXH_GR0_TLB_MMR_READ_DATA_LO_ASID_MASK 0x7fffff8000000000UL
+#define UVXH_GR0_TLB_MMR_READ_DATA_LO_VALID_MASK 0x8000000000000000UL
+
+#define UV2H_GR0_TLB_MMR_READ_DATA_LO_VPN_SHFT 0
+#define UV2H_GR0_TLB_MMR_READ_DATA_LO_ASID_SHFT 39
+#define UV2H_GR0_TLB_MMR_READ_DATA_LO_VALID_SHFT 63
+#define UV2H_GR0_TLB_MMR_READ_DATA_LO_VPN_MASK 0x0000007fffffffffUL
+#define UV2H_GR0_TLB_MMR_READ_DATA_LO_ASID_MASK 0x7fffff8000000000UL
+#define UV2H_GR0_TLB_MMR_READ_DATA_LO_VALID_MASK 0x8000000000000000UL
+
+#define UV3H_GR0_TLB_MMR_READ_DATA_LO_VPN_SHFT 0
+#define UV3H_GR0_TLB_MMR_READ_DATA_LO_ASID_SHFT 39
+#define UV3H_GR0_TLB_MMR_READ_DATA_LO_VALID_SHFT 63
+#define UV3H_GR0_TLB_MMR_READ_DATA_LO_VPN_MASK 0x0000007fffffffffUL
+#define UV3H_GR0_TLB_MMR_READ_DATA_LO_ASID_MASK 0x7fffff8000000000UL
+#define UV3H_GR0_TLB_MMR_READ_DATA_LO_VALID_MASK 0x8000000000000000UL
+
union uvh_gr0_tlb_mmr_read_data_lo_u {
unsigned long v;
struct uvh_gr0_tlb_mmr_read_data_lo_s {
@@ -737,12 +897,32 @@ union uvh_gr0_tlb_mmr_read_data_lo_u {
unsigned long asid:24; /* RO */
unsigned long valid:1; /* RO */
} s;
+ struct uv1h_gr0_tlb_mmr_read_data_lo_s {
+ unsigned long vpn:39; /* RO */
+ unsigned long asid:24; /* RO */
+ unsigned long valid:1; /* RO */
+ } s1;
+ struct uvxh_gr0_tlb_mmr_read_data_lo_s {
+ unsigned long vpn:39; /* RO */
+ unsigned long asid:24; /* RO */
+ unsigned long valid:1; /* RO */
+ } sx;
+ struct uv2h_gr0_tlb_mmr_read_data_lo_s {
+ unsigned long vpn:39; /* RO */
+ unsigned long asid:24; /* RO */
+ unsigned long valid:1; /* RO */
+ } s2;
+ struct uv3h_gr0_tlb_mmr_read_data_lo_s {
+ unsigned long vpn:39; /* RO */
+ unsigned long asid:24; /* RO */
+ unsigned long valid:1; /* RO */
+ } s3;
};
/* ========================================================================= */
/* UVH_GR1_TLB_INT0_CONFIG */
/* ========================================================================= */
-#define UVH_GR1_TLB_INT0_CONFIG 0x61f00UL
+#define UVH_GR1_TLB_INT0_CONFIG 0x61f00UL
#define UVH_GR1_TLB_INT0_CONFIG_VECTOR_SHFT 0
#define UVH_GR1_TLB_INT0_CONFIG_DM_SHFT 8
@@ -780,7 +960,7 @@ union uvh_gr1_tlb_int0_config_u {
/* ========================================================================= */
/* UVH_GR1_TLB_INT1_CONFIG */
/* ========================================================================= */
-#define UVH_GR1_TLB_INT1_CONFIG 0x61f40UL
+#define UVH_GR1_TLB_INT1_CONFIG 0x61f40UL
#define UVH_GR1_TLB_INT1_CONFIG_VECTOR_SHFT 0
#define UVH_GR1_TLB_INT1_CONFIG_DM_SHFT 8
@@ -820,9 +1000,11 @@ union uvh_gr1_tlb_int1_config_u {
/* ========================================================================= */
#define UV1H_GR1_TLB_MMR_CONTROL 0x801080UL
#define UV2H_GR1_TLB_MMR_CONTROL 0x1001080UL
-#define UVH_GR1_TLB_MMR_CONTROL (is_uv1_hub() ? \
- UV1H_GR1_TLB_MMR_CONTROL : \
- UV2H_GR1_TLB_MMR_CONTROL)
+#define UV3H_GR1_TLB_MMR_CONTROL 0x1001080UL
+#define UVH_GR1_TLB_MMR_CONTROL \
+ (is_uv1_hub() ? UV1H_GR1_TLB_MMR_CONTROL : \
+ (is_uv2_hub() ? UV2H_GR1_TLB_MMR_CONTROL : \
+ UV3H_GR1_TLB_MMR_CONTROL))
#define UVH_GR1_TLB_MMR_CONTROL_INDEX_SHFT 0
#define UVH_GR1_TLB_MMR_CONTROL_MEM_SEL_SHFT 12
@@ -860,6 +1042,21 @@ union uvh_gr1_tlb_int1_config_u {
#define UV1H_GR1_TLB_MMR_CONTROL_MMR_INJ_TLBRREG_MASK 0x0100000000000000UL
#define UV1H_GR1_TLB_MMR_CONTROL_MMR_INJ_TLBLRUV_MASK 0x1000000000000000UL
+#define UVXH_GR1_TLB_MMR_CONTROL_INDEX_SHFT 0
+#define UVXH_GR1_TLB_MMR_CONTROL_MEM_SEL_SHFT 12
+#define UVXH_GR1_TLB_MMR_CONTROL_AUTO_VALID_EN_SHFT 16
+#define UVXH_GR1_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_SHFT 20
+#define UVXH_GR1_TLB_MMR_CONTROL_MMR_WRITE_SHFT 30
+#define UVXH_GR1_TLB_MMR_CONTROL_MMR_READ_SHFT 31
+#define UVXH_GR1_TLB_MMR_CONTROL_MMR_OP_DONE_SHFT 32
+#define UVXH_GR1_TLB_MMR_CONTROL_INDEX_MASK 0x0000000000000fffUL
+#define UVXH_GR1_TLB_MMR_CONTROL_MEM_SEL_MASK 0x0000000000003000UL
+#define UVXH_GR1_TLB_MMR_CONTROL_AUTO_VALID_EN_MASK 0x0000000000010000UL
+#define UVXH_GR1_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_MASK 0x0000000000100000UL
+#define UVXH_GR1_TLB_MMR_CONTROL_MMR_WRITE_MASK 0x0000000040000000UL
+#define UVXH_GR1_TLB_MMR_CONTROL_MMR_READ_MASK 0x0000000080000000UL
+#define UVXH_GR1_TLB_MMR_CONTROL_MMR_OP_DONE_MASK 0x0000000100000000UL
+
#define UV2H_GR1_TLB_MMR_CONTROL_INDEX_SHFT 0
#define UV2H_GR1_TLB_MMR_CONTROL_MEM_SEL_SHFT 12
#define UV2H_GR1_TLB_MMR_CONTROL_AUTO_VALID_EN_SHFT 16
@@ -879,6 +1076,23 @@ union uvh_gr1_tlb_int1_config_u {
#define UV2H_GR1_TLB_MMR_CONTROL_MMR_INJ_CON_MASK 0x0001000000000000UL
#define UV2H_GR1_TLB_MMR_CONTROL_MMR_INJ_TLBRAM_MASK 0x0010000000000000UL
+#define UV3H_GR1_TLB_MMR_CONTROL_INDEX_SHFT 0
+#define UV3H_GR1_TLB_MMR_CONTROL_MEM_SEL_SHFT 12
+#define UV3H_GR1_TLB_MMR_CONTROL_AUTO_VALID_EN_SHFT 16
+#define UV3H_GR1_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_SHFT 20
+#define UV3H_GR1_TLB_MMR_CONTROL_ECC_SEL_SHFT 21
+#define UV3H_GR1_TLB_MMR_CONTROL_MMR_WRITE_SHFT 30
+#define UV3H_GR1_TLB_MMR_CONTROL_MMR_READ_SHFT 31
+#define UV3H_GR1_TLB_MMR_CONTROL_MMR_OP_DONE_SHFT 32
+#define UV3H_GR1_TLB_MMR_CONTROL_INDEX_MASK 0x0000000000000fffUL
+#define UV3H_GR1_TLB_MMR_CONTROL_MEM_SEL_MASK 0x0000000000003000UL
+#define UV3H_GR1_TLB_MMR_CONTROL_AUTO_VALID_EN_MASK 0x0000000000010000UL
+#define UV3H_GR1_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_MASK 0x0000000000100000UL
+#define UV3H_GR1_TLB_MMR_CONTROL_ECC_SEL_MASK 0x0000000000200000UL
+#define UV3H_GR1_TLB_MMR_CONTROL_MMR_WRITE_MASK 0x0000000040000000UL
+#define UV3H_GR1_TLB_MMR_CONTROL_MMR_READ_MASK 0x0000000080000000UL
+#define UV3H_GR1_TLB_MMR_CONTROL_MMR_OP_DONE_MASK 0x0000000100000000UL
+
union uvh_gr1_tlb_mmr_control_u {
unsigned long v;
struct uvh_gr1_tlb_mmr_control_s {
@@ -891,7 +1105,9 @@ union uvh_gr1_tlb_mmr_control_u {
unsigned long rsvd_21_29:9;
unsigned long mmr_write:1; /* WP */
unsigned long mmr_read:1; /* WP */
- unsigned long rsvd_32_63:32;
+ unsigned long rsvd_32_48:17;
+ unsigned long rsvd_49_51:3;
+ unsigned long rsvd_52_63:12;
} s;
struct uv1h_gr1_tlb_mmr_control_s {
unsigned long index:12; /* RW */
@@ -915,6 +1131,23 @@ union uvh_gr1_tlb_mmr_control_u {
unsigned long mmr_inj_tlblruv:1; /* RW */
unsigned long rsvd_61_63:3;
} s1;
+ struct uvxh_gr1_tlb_mmr_control_s {
+ unsigned long index:12; /* RW */
+ unsigned long mem_sel:2; /* RW */
+ unsigned long rsvd_14_15:2;
+ unsigned long auto_valid_en:1; /* RW */
+ unsigned long rsvd_17_19:3;
+ unsigned long mmr_hash_index_en:1; /* RW */
+ unsigned long rsvd_21_29:9;
+ unsigned long mmr_write:1; /* WP */
+ unsigned long mmr_read:1; /* WP */
+ unsigned long mmr_op_done:1; /* RW */
+ unsigned long rsvd_33_47:15;
+ unsigned long rsvd_48:1;
+ unsigned long rsvd_49_51:3;
+ unsigned long rsvd_52:1;
+ unsigned long rsvd_53_63:11;
+ } sx;
struct uv2h_gr1_tlb_mmr_control_s {
unsigned long index:12; /* RW */
unsigned long mem_sel:2; /* RW */
@@ -932,6 +1165,24 @@ union uvh_gr1_tlb_mmr_control_u {
unsigned long mmr_inj_tlbram:1; /* RW */
unsigned long rsvd_53_63:11;
} s2;
+ struct uv3h_gr1_tlb_mmr_control_s {
+ unsigned long index:12; /* RW */
+ unsigned long mem_sel:2; /* RW */
+ unsigned long rsvd_14_15:2;
+ unsigned long auto_valid_en:1; /* RW */
+ unsigned long rsvd_17_19:3;
+ unsigned long mmr_hash_index_en:1; /* RW */
+ unsigned long ecc_sel:1; /* RW */
+ unsigned long rsvd_22_29:8;
+ unsigned long mmr_write:1; /* WP */
+ unsigned long mmr_read:1; /* WP */
+ unsigned long mmr_op_done:1; /* RW */
+ unsigned long rsvd_33_47:15;
+ unsigned long undef_48:1; /* Undefined */
+ unsigned long rsvd_49_51:3;
+ unsigned long undef_52:1; /* Undefined */
+ unsigned long rsvd_53_63:11;
+ } s3;
};
/* ========================================================================= */
@@ -939,9 +1190,11 @@ union uvh_gr1_tlb_mmr_control_u {
/* ========================================================================= */
#define UV1H_GR1_TLB_MMR_READ_DATA_HI 0x8010a0UL
#define UV2H_GR1_TLB_MMR_READ_DATA_HI 0x10010a0UL
-#define UVH_GR1_TLB_MMR_READ_DATA_HI (is_uv1_hub() ? \
- UV1H_GR1_TLB_MMR_READ_DATA_HI : \
- UV2H_GR1_TLB_MMR_READ_DATA_HI)
+#define UV3H_GR1_TLB_MMR_READ_DATA_HI 0x10010a0UL
+#define UVH_GR1_TLB_MMR_READ_DATA_HI \
+ (is_uv1_hub() ? UV1H_GR1_TLB_MMR_READ_DATA_HI : \
+ (is_uv2_hub() ? UV2H_GR1_TLB_MMR_READ_DATA_HI : \
+ UV3H_GR1_TLB_MMR_READ_DATA_HI))
#define UVH_GR1_TLB_MMR_READ_DATA_HI_PFN_SHFT 0
#define UVH_GR1_TLB_MMR_READ_DATA_HI_GAA_SHFT 41
@@ -952,6 +1205,46 @@ union uvh_gr1_tlb_mmr_control_u {
#define UVH_GR1_TLB_MMR_READ_DATA_HI_DIRTY_MASK 0x0000080000000000UL
#define UVH_GR1_TLB_MMR_READ_DATA_HI_LARGER_MASK 0x0000100000000000UL
+#define UV1H_GR1_TLB_MMR_READ_DATA_HI_PFN_SHFT 0
+#define UV1H_GR1_TLB_MMR_READ_DATA_HI_GAA_SHFT 41
+#define UV1H_GR1_TLB_MMR_READ_DATA_HI_DIRTY_SHFT 43
+#define UV1H_GR1_TLB_MMR_READ_DATA_HI_LARGER_SHFT 44
+#define UV1H_GR1_TLB_MMR_READ_DATA_HI_PFN_MASK 0x000001ffffffffffUL
+#define UV1H_GR1_TLB_MMR_READ_DATA_HI_GAA_MASK 0x0000060000000000UL
+#define UV1H_GR1_TLB_MMR_READ_DATA_HI_DIRTY_MASK 0x0000080000000000UL
+#define UV1H_GR1_TLB_MMR_READ_DATA_HI_LARGER_MASK 0x0000100000000000UL
+
+#define UVXH_GR1_TLB_MMR_READ_DATA_HI_PFN_SHFT 0
+#define UVXH_GR1_TLB_MMR_READ_DATA_HI_GAA_SHFT 41
+#define UVXH_GR1_TLB_MMR_READ_DATA_HI_DIRTY_SHFT 43
+#define UVXH_GR1_TLB_MMR_READ_DATA_HI_LARGER_SHFT 44
+#define UVXH_GR1_TLB_MMR_READ_DATA_HI_PFN_MASK 0x000001ffffffffffUL
+#define UVXH_GR1_TLB_MMR_READ_DATA_HI_GAA_MASK 0x0000060000000000UL
+#define UVXH_GR1_TLB_MMR_READ_DATA_HI_DIRTY_MASK 0x0000080000000000UL
+#define UVXH_GR1_TLB_MMR_READ_DATA_HI_LARGER_MASK 0x0000100000000000UL
+
+#define UV2H_GR1_TLB_MMR_READ_DATA_HI_PFN_SHFT 0
+#define UV2H_GR1_TLB_MMR_READ_DATA_HI_GAA_SHFT 41
+#define UV2H_GR1_TLB_MMR_READ_DATA_HI_DIRTY_SHFT 43
+#define UV2H_GR1_TLB_MMR_READ_DATA_HI_LARGER_SHFT 44
+#define UV2H_GR1_TLB_MMR_READ_DATA_HI_PFN_MASK 0x000001ffffffffffUL
+#define UV2H_GR1_TLB_MMR_READ_DATA_HI_GAA_MASK 0x0000060000000000UL
+#define UV2H_GR1_TLB_MMR_READ_DATA_HI_DIRTY_MASK 0x0000080000000000UL
+#define UV2H_GR1_TLB_MMR_READ_DATA_HI_LARGER_MASK 0x0000100000000000UL
+
+#define UV3H_GR1_TLB_MMR_READ_DATA_HI_PFN_SHFT 0
+#define UV3H_GR1_TLB_MMR_READ_DATA_HI_GAA_SHFT 41
+#define UV3H_GR1_TLB_MMR_READ_DATA_HI_DIRTY_SHFT 43
+#define UV3H_GR1_TLB_MMR_READ_DATA_HI_LARGER_SHFT 44
+#define UV3H_GR1_TLB_MMR_READ_DATA_HI_AA_EXT_SHFT 45
+#define UV3H_GR1_TLB_MMR_READ_DATA_HI_WAY_ECC_SHFT 55
+#define UV3H_GR1_TLB_MMR_READ_DATA_HI_PFN_MASK 0x000001ffffffffffUL
+#define UV3H_GR1_TLB_MMR_READ_DATA_HI_GAA_MASK 0x0000060000000000UL
+#define UV3H_GR1_TLB_MMR_READ_DATA_HI_DIRTY_MASK 0x0000080000000000UL
+#define UV3H_GR1_TLB_MMR_READ_DATA_HI_LARGER_MASK 0x0000100000000000UL
+#define UV3H_GR1_TLB_MMR_READ_DATA_HI_AA_EXT_MASK 0x0000200000000000UL
+#define UV3H_GR1_TLB_MMR_READ_DATA_HI_WAY_ECC_MASK 0xff80000000000000UL
+
union uvh_gr1_tlb_mmr_read_data_hi_u {
unsigned long v;
struct uvh_gr1_tlb_mmr_read_data_hi_s {
@@ -961,6 +1254,36 @@ union uvh_gr1_tlb_mmr_read_data_hi_u {
unsigned long larger:1; /* RO */
unsigned long rsvd_45_63:19;
} s;
+ struct uv1h_gr1_tlb_mmr_read_data_hi_s {
+ unsigned long pfn:41; /* RO */
+ unsigned long gaa:2; /* RO */
+ unsigned long dirty:1; /* RO */
+ unsigned long larger:1; /* RO */
+ unsigned long rsvd_45_63:19;
+ } s1;
+ struct uvxh_gr1_tlb_mmr_read_data_hi_s {
+ unsigned long pfn:41; /* RO */
+ unsigned long gaa:2; /* RO */
+ unsigned long dirty:1; /* RO */
+ unsigned long larger:1; /* RO */
+ unsigned long rsvd_45_63:19;
+ } sx;
+ struct uv2h_gr1_tlb_mmr_read_data_hi_s {
+ unsigned long pfn:41; /* RO */
+ unsigned long gaa:2; /* RO */
+ unsigned long dirty:1; /* RO */
+ unsigned long larger:1; /* RO */
+ unsigned long rsvd_45_63:19;
+ } s2;
+ struct uv3h_gr1_tlb_mmr_read_data_hi_s {
+ unsigned long pfn:41; /* RO */
+ unsigned long gaa:2; /* RO */
+ unsigned long dirty:1; /* RO */
+ unsigned long larger:1; /* RO */
+ unsigned long aa_ext:1; /* RO */
+ unsigned long undef_46_54:9; /* Undefined */
+ unsigned long way_ecc:9; /* RO */
+ } s3;
};
/* ========================================================================= */
@@ -968,9 +1291,11 @@ union uvh_gr1_tlb_mmr_read_data_hi_u {
/* ========================================================================= */
#define UV1H_GR1_TLB_MMR_READ_DATA_LO 0x8010a8UL
#define UV2H_GR1_TLB_MMR_READ_DATA_LO 0x10010a8UL
-#define UVH_GR1_TLB_MMR_READ_DATA_LO (is_uv1_hub() ? \
- UV1H_GR1_TLB_MMR_READ_DATA_LO : \
- UV2H_GR1_TLB_MMR_READ_DATA_LO)
+#define UV3H_GR1_TLB_MMR_READ_DATA_LO 0x10010a8UL
+#define UVH_GR1_TLB_MMR_READ_DATA_LO \
+ (is_uv1_hub() ? UV1H_GR1_TLB_MMR_READ_DATA_LO : \
+ (is_uv2_hub() ? UV2H_GR1_TLB_MMR_READ_DATA_LO : \
+ UV3H_GR1_TLB_MMR_READ_DATA_LO))
#define UVH_GR1_TLB_MMR_READ_DATA_LO_VPN_SHFT 0
#define UVH_GR1_TLB_MMR_READ_DATA_LO_ASID_SHFT 39
@@ -979,6 +1304,34 @@ union uvh_gr1_tlb_mmr_read_data_hi_u {
#define UVH_GR1_TLB_MMR_READ_DATA_LO_ASID_MASK 0x7fffff8000000000UL
#define UVH_GR1_TLB_MMR_READ_DATA_LO_VALID_MASK 0x8000000000000000UL
+#define UV1H_GR1_TLB_MMR_READ_DATA_LO_VPN_SHFT 0
+#define UV1H_GR1_TLB_MMR_READ_DATA_LO_ASID_SHFT 39
+#define UV1H_GR1_TLB_MMR_READ_DATA_LO_VALID_SHFT 63
+#define UV1H_GR1_TLB_MMR_READ_DATA_LO_VPN_MASK 0x0000007fffffffffUL
+#define UV1H_GR1_TLB_MMR_READ_DATA_LO_ASID_MASK 0x7fffff8000000000UL
+#define UV1H_GR1_TLB_MMR_READ_DATA_LO_VALID_MASK 0x8000000000000000UL
+
+#define UVXH_GR1_TLB_MMR_READ_DATA_LO_VPN_SHFT 0
+#define UVXH_GR1_TLB_MMR_READ_DATA_LO_ASID_SHFT 39
+#define UVXH_GR1_TLB_MMR_READ_DATA_LO_VALID_SHFT 63
+#define UVXH_GR1_TLB_MMR_READ_DATA_LO_VPN_MASK 0x0000007fffffffffUL
+#define UVXH_GR1_TLB_MMR_READ_DATA_LO_ASID_MASK 0x7fffff8000000000UL
+#define UVXH_GR1_TLB_MMR_READ_DATA_LO_VALID_MASK 0x8000000000000000UL
+
+#define UV2H_GR1_TLB_MMR_READ_DATA_LO_VPN_SHFT 0
+#define UV2H_GR1_TLB_MMR_READ_DATA_LO_ASID_SHFT 39
+#define UV2H_GR1_TLB_MMR_READ_DATA_LO_VALID_SHFT 63
+#define UV2H_GR1_TLB_MMR_READ_DATA_LO_VPN_MASK 0x0000007fffffffffUL
+#define UV2H_GR1_TLB_MMR_READ_DATA_LO_ASID_MASK 0x7fffff8000000000UL
+#define UV2H_GR1_TLB_MMR_READ_DATA_LO_VALID_MASK 0x8000000000000000UL
+
+#define UV3H_GR1_TLB_MMR_READ_DATA_LO_VPN_SHFT 0
+#define UV3H_GR1_TLB_MMR_READ_DATA_LO_ASID_SHFT 39
+#define UV3H_GR1_TLB_MMR_READ_DATA_LO_VALID_SHFT 63
+#define UV3H_GR1_TLB_MMR_READ_DATA_LO_VPN_MASK 0x0000007fffffffffUL
+#define UV3H_GR1_TLB_MMR_READ_DATA_LO_ASID_MASK 0x7fffff8000000000UL
+#define UV3H_GR1_TLB_MMR_READ_DATA_LO_VALID_MASK 0x8000000000000000UL
+
union uvh_gr1_tlb_mmr_read_data_lo_u {
unsigned long v;
struct uvh_gr1_tlb_mmr_read_data_lo_s {
@@ -986,12 +1339,32 @@ union uvh_gr1_tlb_mmr_read_data_lo_u {
unsigned long asid:24; /* RO */
unsigned long valid:1; /* RO */
} s;
+ struct uv1h_gr1_tlb_mmr_read_data_lo_s {
+ unsigned long vpn:39; /* RO */
+ unsigned long asid:24; /* RO */
+ unsigned long valid:1; /* RO */
+ } s1;
+ struct uvxh_gr1_tlb_mmr_read_data_lo_s {
+ unsigned long vpn:39; /* RO */
+ unsigned long asid:24; /* RO */
+ unsigned long valid:1; /* RO */
+ } sx;
+ struct uv2h_gr1_tlb_mmr_read_data_lo_s {
+ unsigned long vpn:39; /* RO */
+ unsigned long asid:24; /* RO */
+ unsigned long valid:1; /* RO */
+ } s2;
+ struct uv3h_gr1_tlb_mmr_read_data_lo_s {
+ unsigned long vpn:39; /* RO */
+ unsigned long asid:24; /* RO */
+ unsigned long valid:1; /* RO */
+ } s3;
};
/* ========================================================================= */
/* UVH_INT_CMPB */
/* ========================================================================= */
-#define UVH_INT_CMPB 0x22080UL
+#define UVH_INT_CMPB 0x22080UL
#define UVH_INT_CMPB_REAL_TIME_CMPB_SHFT 0
#define UVH_INT_CMPB_REAL_TIME_CMPB_MASK 0x00ffffffffffffffUL
@@ -1007,10 +1380,13 @@ union uvh_int_cmpb_u {
/* ========================================================================= */
/* UVH_INT_CMPC */
/* ========================================================================= */
-#define UVH_INT_CMPC 0x22100UL
+#define UVH_INT_CMPC 0x22100UL
-#define UVH_INT_CMPC_REAL_TIME_CMPC_SHFT 0
-#define UVH_INT_CMPC_REAL_TIME_CMPC_MASK 0xffffffffffffffUL
+#define UV1H_INT_CMPC_REAL_TIME_CMPC_SHFT 0
+#define UV1H_INT_CMPC_REAL_TIME_CMPC_MASK 0x00ffffffffffffffUL
+
+#define UVXH_INT_CMPC_REAL_TIME_CMP_2_SHFT 0
+#define UVXH_INT_CMPC_REAL_TIME_CMP_2_MASK 0x00ffffffffffffffUL
union uvh_int_cmpc_u {
unsigned long v;
@@ -1023,10 +1399,13 @@ union uvh_int_cmpc_u {
/* ========================================================================= */
/* UVH_INT_CMPD */
/* ========================================================================= */
-#define UVH_INT_CMPD 0x22180UL
+#define UVH_INT_CMPD 0x22180UL
+
+#define UV1H_INT_CMPD_REAL_TIME_CMPD_SHFT 0
+#define UV1H_INT_CMPD_REAL_TIME_CMPD_MASK 0x00ffffffffffffffUL
-#define UVH_INT_CMPD_REAL_TIME_CMPD_SHFT 0
-#define UVH_INT_CMPD_REAL_TIME_CMPD_MASK 0xffffffffffffffUL
+#define UVXH_INT_CMPD_REAL_TIME_CMP_3_SHFT 0
+#define UVXH_INT_CMPD_REAL_TIME_CMP_3_MASK 0x00ffffffffffffffUL
union uvh_int_cmpd_u {
unsigned long v;
@@ -1039,8 +1418,8 @@ union uvh_int_cmpd_u {
/* ========================================================================= */
/* UVH_IPI_INT */
/* ========================================================================= */
-#define UVH_IPI_INT 0x60500UL
-#define UVH_IPI_INT_32 0x348
+#define UVH_IPI_INT 0x60500UL
+#define UVH_IPI_INT_32 0x348
#define UVH_IPI_INT_VECTOR_SHFT 0
#define UVH_IPI_INT_DELIVERY_MODE_SHFT 8
@@ -1069,8 +1448,8 @@ union uvh_ipi_int_u {
/* ========================================================================= */
/* UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST */
/* ========================================================================= */
-#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST 0x320050UL
-#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_32 0x9c0
+#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST 0x320050UL
+#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_32 0x9c0
#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_ADDRESS_SHFT 4
#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_NODE_ID_SHFT 49
@@ -1091,8 +1470,8 @@ union uvh_lb_bau_intd_payload_queue_first_u {
/* ========================================================================= */
/* UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST */
/* ========================================================================= */
-#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST 0x320060UL
-#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_32 0x9c8
+#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST 0x320060UL
+#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_32 0x9c8
#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_ADDRESS_SHFT 4
#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_ADDRESS_MASK 0x000007fffffffff0UL
@@ -1109,8 +1488,8 @@ union uvh_lb_bau_intd_payload_queue_last_u {
/* ========================================================================= */
/* UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL */
/* ========================================================================= */
-#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL 0x320070UL
-#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_32 0x9d0
+#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL 0x320070UL
+#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_32 0x9d0
#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_ADDRESS_SHFT 4
#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_ADDRESS_MASK 0x000007fffffffff0UL
@@ -1127,8 +1506,8 @@ union uvh_lb_bau_intd_payload_queue_tail_u {
/* ========================================================================= */
/* UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE */
/* ========================================================================= */
-#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE 0x320080UL
-#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_32 0xa68
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE 0x320080UL
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_32 0xa68
#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_0_SHFT 0
#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_1_SHFT 1
@@ -1189,14 +1568,21 @@ union uvh_lb_bau_intd_software_acknowledge_u {
/* ========================================================================= */
/* UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS */
/* ========================================================================= */
-#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS 0x0000000000320088UL
-#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS_32 0xa70
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS 0x320088UL
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS_32 0xa70
+
/* ========================================================================= */
/* UVH_LB_BAU_MISC_CONTROL */
/* ========================================================================= */
-#define UVH_LB_BAU_MISC_CONTROL 0x320170UL
-#define UVH_LB_BAU_MISC_CONTROL_32 0xa10
+#define UVH_LB_BAU_MISC_CONTROL 0x320170UL
+#define UV1H_LB_BAU_MISC_CONTROL 0x320170UL
+#define UV2H_LB_BAU_MISC_CONTROL 0x320170UL
+#define UV3H_LB_BAU_MISC_CONTROL 0x320170UL
+#define UVH_LB_BAU_MISC_CONTROL_32 0xa10
+#define UV1H_LB_BAU_MISC_CONTROL_32 0x320170UL
+#define UV2H_LB_BAU_MISC_CONTROL_32 0x320170UL
+#define UV3H_LB_BAU_MISC_CONTROL_32 0x320170UL
#define UVH_LB_BAU_MISC_CONTROL_REJECTION_DELAY_SHFT 0
#define UVH_LB_BAU_MISC_CONTROL_APIC_MODE_SHFT 8
@@ -1213,6 +1599,7 @@ union uvh_lb_bau_intd_software_acknowledge_u {
#define UVH_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_SHFT 24
#define UVH_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_SHFT 27
#define UVH_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_SHFT 28
+#define UVH_LB_BAU_MISC_CONTROL_FUN_SHFT 48
#define UVH_LB_BAU_MISC_CONTROL_REJECTION_DELAY_MASK 0x00000000000000ffUL
#define UVH_LB_BAU_MISC_CONTROL_APIC_MODE_MASK 0x0000000000000100UL
#define UVH_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_MASK 0x0000000000000200UL
@@ -1228,6 +1615,7 @@ union uvh_lb_bau_intd_software_acknowledge_u {
#define UVH_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000007000000UL
#define UVH_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_MASK 0x0000000008000000UL
#define UVH_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000010000000UL
+#define UVH_LB_BAU_MISC_CONTROL_FUN_MASK 0xffff000000000000UL
#define UV1H_LB_BAU_MISC_CONTROL_REJECTION_DELAY_SHFT 0
#define UV1H_LB_BAU_MISC_CONTROL_APIC_MODE_SHFT 8
@@ -1262,6 +1650,53 @@ union uvh_lb_bau_intd_software_acknowledge_u {
#define UV1H_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000010000000UL
#define UV1H_LB_BAU_MISC_CONTROL_FUN_MASK 0xffff000000000000UL
+#define UVXH_LB_BAU_MISC_CONTROL_REJECTION_DELAY_SHFT 0
+#define UVXH_LB_BAU_MISC_CONTROL_APIC_MODE_SHFT 8
+#define UVXH_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_SHFT 9
+#define UVXH_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_SHFT 10
+#define UVXH_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_SHFT 11
+#define UVXH_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_SHFT 14
+#define UVXH_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT 15
+#define UVXH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT 16
+#define UVXH_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_SHFT 20
+#define UVXH_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_SHFT 21
+#define UVXH_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_SHFT 22
+#define UVXH_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_SHFT 23
+#define UVXH_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_SHFT 24
+#define UVXH_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_SHFT 27
+#define UVXH_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_SHFT 28
+#define UVXH_LB_BAU_MISC_CONTROL_ENABLE_AUTOMATIC_APIC_MODE_SELECTION_SHFT 29
+#define UVXH_LB_BAU_MISC_CONTROL_APIC_MODE_STATUS_SHFT 30
+#define UVXH_LB_BAU_MISC_CONTROL_SUPPRESS_INTERRUPTS_TO_SELF_SHFT 31
+#define UVXH_LB_BAU_MISC_CONTROL_ENABLE_LOCK_BASED_SYSTEM_FLUSH_SHFT 32
+#define UVXH_LB_BAU_MISC_CONTROL_ENABLE_EXTENDED_SB_STATUS_SHFT 33
+#define UVXH_LB_BAU_MISC_CONTROL_SUPPRESS_INT_PRIO_UDT_TO_SELF_SHFT 34
+#define UVXH_LB_BAU_MISC_CONTROL_USE_LEGACY_DESCRIPTOR_FORMATS_SHFT 35
+#define UVXH_LB_BAU_MISC_CONTROL_FUN_SHFT 48
+#define UVXH_LB_BAU_MISC_CONTROL_REJECTION_DELAY_MASK 0x00000000000000ffUL
+#define UVXH_LB_BAU_MISC_CONTROL_APIC_MODE_MASK 0x0000000000000100UL
+#define UVXH_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_MASK 0x0000000000000200UL
+#define UVXH_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_MASK 0x0000000000000400UL
+#define UVXH_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_MASK 0x0000000000003800UL
+#define UVXH_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_MASK 0x0000000000004000UL
+#define UVXH_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_MASK 0x0000000000008000UL
+#define UVXH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_MASK 0x00000000000f0000UL
+#define UVXH_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_MASK 0x0000000000100000UL
+#define UVXH_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_MASK 0x0000000000200000UL
+#define UVXH_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_MASK 0x0000000000400000UL
+#define UVXH_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_MASK 0x0000000000800000UL
+#define UVXH_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000007000000UL
+#define UVXH_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_MASK 0x0000000008000000UL
+#define UVXH_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000010000000UL
+#define UVXH_LB_BAU_MISC_CONTROL_ENABLE_AUTOMATIC_APIC_MODE_SELECTION_MASK 0x0000000020000000UL
+#define UVXH_LB_BAU_MISC_CONTROL_APIC_MODE_STATUS_MASK 0x0000000040000000UL
+#define UVXH_LB_BAU_MISC_CONTROL_SUPPRESS_INTERRUPTS_TO_SELF_MASK 0x0000000080000000UL
+#define UVXH_LB_BAU_MISC_CONTROL_ENABLE_LOCK_BASED_SYSTEM_FLUSH_MASK 0x0000000100000000UL
+#define UVXH_LB_BAU_MISC_CONTROL_ENABLE_EXTENDED_SB_STATUS_MASK 0x0000000200000000UL
+#define UVXH_LB_BAU_MISC_CONTROL_SUPPRESS_INT_PRIO_UDT_TO_SELF_MASK 0x0000000400000000UL
+#define UVXH_LB_BAU_MISC_CONTROL_USE_LEGACY_DESCRIPTOR_FORMATS_MASK 0x0000000800000000UL
+#define UVXH_LB_BAU_MISC_CONTROL_FUN_MASK 0xffff000000000000UL
+
#define UV2H_LB_BAU_MISC_CONTROL_REJECTION_DELAY_SHFT 0
#define UV2H_LB_BAU_MISC_CONTROL_APIC_MODE_SHFT 8
#define UV2H_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_SHFT 9
@@ -1309,6 +1744,59 @@ union uvh_lb_bau_intd_software_acknowledge_u {
#define UV2H_LB_BAU_MISC_CONTROL_USE_LEGACY_DESCRIPTOR_FORMATS_MASK 0x0000000800000000UL
#define UV2H_LB_BAU_MISC_CONTROL_FUN_MASK 0xffff000000000000UL
+#define UV3H_LB_BAU_MISC_CONTROL_REJECTION_DELAY_SHFT 0
+#define UV3H_LB_BAU_MISC_CONTROL_APIC_MODE_SHFT 8
+#define UV3H_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_SHFT 9
+#define UV3H_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_SHFT 10
+#define UV3H_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_SHFT 11
+#define UV3H_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_SHFT 14
+#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT 15
+#define UV3H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT 16
+#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_SHFT 20
+#define UV3H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_SHFT 21
+#define UV3H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_SHFT 22
+#define UV3H_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_SHFT 23
+#define UV3H_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_SHFT 24
+#define UV3H_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_SHFT 27
+#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_SHFT 28
+#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_AUTOMATIC_APIC_MODE_SELECTION_SHFT 29
+#define UV3H_LB_BAU_MISC_CONTROL_APIC_MODE_STATUS_SHFT 30
+#define UV3H_LB_BAU_MISC_CONTROL_SUPPRESS_INTERRUPTS_TO_SELF_SHFT 31
+#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_LOCK_BASED_SYSTEM_FLUSH_SHFT 32
+#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_EXTENDED_SB_STATUS_SHFT 33
+#define UV3H_LB_BAU_MISC_CONTROL_SUPPRESS_INT_PRIO_UDT_TO_SELF_SHFT 34
+#define UV3H_LB_BAU_MISC_CONTROL_USE_LEGACY_DESCRIPTOR_FORMATS_SHFT 35
+#define UV3H_LB_BAU_MISC_CONTROL_SUPPRESS_QUIESCE_MSGS_TO_QPI_SHFT 36
+#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_INTD_PREFETCH_HINT_SHFT 37
+#define UV3H_LB_BAU_MISC_CONTROL_THREAD_KILL_TIMEBASE_SHFT 38
+#define UV3H_LB_BAU_MISC_CONTROL_FUN_SHFT 48
+#define UV3H_LB_BAU_MISC_CONTROL_REJECTION_DELAY_MASK 0x00000000000000ffUL
+#define UV3H_LB_BAU_MISC_CONTROL_APIC_MODE_MASK 0x0000000000000100UL
+#define UV3H_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_MASK 0x0000000000000200UL
+#define UV3H_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_MASK 0x0000000000000400UL
+#define UV3H_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_MASK 0x0000000000003800UL
+#define UV3H_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_MASK 0x0000000000004000UL
+#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_MASK 0x0000000000008000UL
+#define UV3H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_MASK 0x00000000000f0000UL
+#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_MASK 0x0000000000100000UL
+#define UV3H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_MASK 0x0000000000200000UL
+#define UV3H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_MASK 0x0000000000400000UL
+#define UV3H_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_MASK 0x0000000000800000UL
+#define UV3H_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000007000000UL
+#define UV3H_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_MASK 0x0000000008000000UL
+#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000010000000UL
+#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_AUTOMATIC_APIC_MODE_SELECTION_MASK 0x0000000020000000UL
+#define UV3H_LB_BAU_MISC_CONTROL_APIC_MODE_STATUS_MASK 0x0000000040000000UL
+#define UV3H_LB_BAU_MISC_CONTROL_SUPPRESS_INTERRUPTS_TO_SELF_MASK 0x0000000080000000UL
+#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_LOCK_BASED_SYSTEM_FLUSH_MASK 0x0000000100000000UL
+#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_EXTENDED_SB_STATUS_MASK 0x0000000200000000UL
+#define UV3H_LB_BAU_MISC_CONTROL_SUPPRESS_INT_PRIO_UDT_TO_SELF_MASK 0x0000000400000000UL
+#define UV3H_LB_BAU_MISC_CONTROL_USE_LEGACY_DESCRIPTOR_FORMATS_MASK 0x0000000800000000UL
+#define UV3H_LB_BAU_MISC_CONTROL_SUPPRESS_QUIESCE_MSGS_TO_QPI_MASK 0x0000001000000000UL
+#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_INTD_PREFETCH_HINT_MASK 0x0000002000000000UL
+#define UV3H_LB_BAU_MISC_CONTROL_THREAD_KILL_TIMEBASE_MASK 0x00003fc000000000UL
+#define UV3H_LB_BAU_MISC_CONTROL_FUN_MASK 0xffff000000000000UL
+
union uvh_lb_bau_misc_control_u {
unsigned long v;
struct uvh_lb_bau_misc_control_s {
@@ -1327,7 +1815,8 @@ union uvh_lb_bau_misc_control_u {
unsigned long programmed_initial_priority:3; /* RW */
unsigned long use_incoming_priority:1; /* RW */
unsigned long enable_programmed_initial_priority:1;/* RW */
- unsigned long rsvd_29_63:35;
+ unsigned long rsvd_29_47:19;
+ unsigned long fun:16; /* RW */
} s;
struct uv1h_lb_bau_misc_control_s {
unsigned long rejection_delay:8; /* RW */
@@ -1348,6 +1837,32 @@ union uvh_lb_bau_misc_control_u {
unsigned long rsvd_29_47:19;
unsigned long fun:16; /* RW */
} s1;
+ struct uvxh_lb_bau_misc_control_s {
+ unsigned long rejection_delay:8; /* RW */
+ unsigned long apic_mode:1; /* RW */
+ unsigned long force_broadcast:1; /* RW */
+ unsigned long force_lock_nop:1; /* RW */
+ unsigned long qpi_agent_presence_vector:3; /* RW */
+ unsigned long descriptor_fetch_mode:1; /* RW */
+ unsigned long enable_intd_soft_ack_mode:1; /* RW */
+ unsigned long intd_soft_ack_timeout_period:4; /* RW */
+ unsigned long enable_dual_mapping_mode:1; /* RW */
+ unsigned long vga_io_port_decode_enable:1; /* RW */
+ unsigned long vga_io_port_16_bit_decode:1; /* RW */
+ unsigned long suppress_dest_registration:1; /* RW */
+ unsigned long programmed_initial_priority:3; /* RW */
+ unsigned long use_incoming_priority:1; /* RW */
+ unsigned long enable_programmed_initial_priority:1;/* RW */
+ unsigned long enable_automatic_apic_mode_selection:1;/* RW */
+ unsigned long apic_mode_status:1; /* RO */
+ unsigned long suppress_interrupts_to_self:1; /* RW */
+ unsigned long enable_lock_based_system_flush:1;/* RW */
+ unsigned long enable_extended_sb_status:1; /* RW */
+ unsigned long suppress_int_prio_udt_to_self:1;/* RW */
+ unsigned long use_legacy_descriptor_formats:1;/* RW */
+ unsigned long rsvd_36_47:12;
+ unsigned long fun:16; /* RW */
+ } sx;
struct uv2h_lb_bau_misc_control_s {
unsigned long rejection_delay:8; /* RW */
unsigned long apic_mode:1; /* RW */
@@ -1374,13 +1889,42 @@ union uvh_lb_bau_misc_control_u {
unsigned long rsvd_36_47:12;
unsigned long fun:16; /* RW */
} s2;
+ struct uv3h_lb_bau_misc_control_s {
+ unsigned long rejection_delay:8; /* RW */
+ unsigned long apic_mode:1; /* RW */
+ unsigned long force_broadcast:1; /* RW */
+ unsigned long force_lock_nop:1; /* RW */
+ unsigned long qpi_agent_presence_vector:3; /* RW */
+ unsigned long descriptor_fetch_mode:1; /* RW */
+ unsigned long enable_intd_soft_ack_mode:1; /* RW */
+ unsigned long intd_soft_ack_timeout_period:4; /* RW */
+ unsigned long enable_dual_mapping_mode:1; /* RW */
+ unsigned long vga_io_port_decode_enable:1; /* RW */
+ unsigned long vga_io_port_16_bit_decode:1; /* RW */
+ unsigned long suppress_dest_registration:1; /* RW */
+ unsigned long programmed_initial_priority:3; /* RW */
+ unsigned long use_incoming_priority:1; /* RW */
+ unsigned long enable_programmed_initial_priority:1;/* RW */
+ unsigned long enable_automatic_apic_mode_selection:1;/* RW */
+ unsigned long apic_mode_status:1; /* RO */
+ unsigned long suppress_interrupts_to_self:1; /* RW */
+ unsigned long enable_lock_based_system_flush:1;/* RW */
+ unsigned long enable_extended_sb_status:1; /* RW */
+ unsigned long suppress_int_prio_udt_to_self:1;/* RW */
+ unsigned long use_legacy_descriptor_formats:1;/* RW */
+ unsigned long suppress_quiesce_msgs_to_qpi:1; /* RW */
+ unsigned long enable_intd_prefetch_hint:1; /* RW */
+ unsigned long thread_kill_timebase:8; /* RW */
+ unsigned long rsvd_46_47:2;
+ unsigned long fun:16; /* RW */
+ } s3;
};
/* ========================================================================= */
/* UVH_LB_BAU_SB_ACTIVATION_CONTROL */
/* ========================================================================= */
-#define UVH_LB_BAU_SB_ACTIVATION_CONTROL 0x320020UL
-#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_32 0x9a8
+#define UVH_LB_BAU_SB_ACTIVATION_CONTROL 0x320020UL
+#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_32 0x9a8
#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_INDEX_SHFT 0
#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_PUSH_SHFT 62
@@ -1402,8 +1946,8 @@ union uvh_lb_bau_sb_activation_control_u {
/* ========================================================================= */
/* UVH_LB_BAU_SB_ACTIVATION_STATUS_0 */
/* ========================================================================= */
-#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0 0x320030UL
-#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0_32 0x9b0
+#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0 0x320030UL
+#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0_32 0x9b0
#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0_STATUS_SHFT 0
#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0_STATUS_MASK 0xffffffffffffffffUL
@@ -1418,8 +1962,8 @@ union uvh_lb_bau_sb_activation_status_0_u {
/* ========================================================================= */
/* UVH_LB_BAU_SB_ACTIVATION_STATUS_1 */
/* ========================================================================= */
-#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1 0x320040UL
-#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1_32 0x9b8
+#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1 0x320040UL
+#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1_32 0x9b8
#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1_STATUS_SHFT 0
#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1_STATUS_MASK 0xffffffffffffffffUL
@@ -1434,8 +1978,8 @@ union uvh_lb_bau_sb_activation_status_1_u {
/* ========================================================================= */
/* UVH_LB_BAU_SB_DESCRIPTOR_BASE */
/* ========================================================================= */
-#define UVH_LB_BAU_SB_DESCRIPTOR_BASE 0x320010UL
-#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_32 0x9a0
+#define UVH_LB_BAU_SB_DESCRIPTOR_BASE 0x320010UL
+#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_32 0x9a0
#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_PAGE_ADDRESS_SHFT 12
#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_SHFT 49
@@ -1456,7 +2000,10 @@ union uvh_lb_bau_sb_descriptor_base_u {
/* ========================================================================= */
/* UVH_NODE_ID */
/* ========================================================================= */
-#define UVH_NODE_ID 0x0UL
+#define UVH_NODE_ID 0x0UL
+#define UV1H_NODE_ID 0x0UL
+#define UV2H_NODE_ID 0x0UL
+#define UV3H_NODE_ID 0x0UL
#define UVH_NODE_ID_FORCE1_SHFT 0
#define UVH_NODE_ID_MANUFACTURER_SHFT 1
@@ -1484,6 +2031,21 @@ union uvh_lb_bau_sb_descriptor_base_u {
#define UV1H_NODE_ID_NODES_PER_BIT_MASK 0x007f000000000000UL
#define UV1H_NODE_ID_NI_PORT_MASK 0x0f00000000000000UL
+#define UVXH_NODE_ID_FORCE1_SHFT 0
+#define UVXH_NODE_ID_MANUFACTURER_SHFT 1
+#define UVXH_NODE_ID_PART_NUMBER_SHFT 12
+#define UVXH_NODE_ID_REVISION_SHFT 28
+#define UVXH_NODE_ID_NODE_ID_SHFT 32
+#define UVXH_NODE_ID_NODES_PER_BIT_SHFT 50
+#define UVXH_NODE_ID_NI_PORT_SHFT 57
+#define UVXH_NODE_ID_FORCE1_MASK 0x0000000000000001UL
+#define UVXH_NODE_ID_MANUFACTURER_MASK 0x0000000000000ffeUL
+#define UVXH_NODE_ID_PART_NUMBER_MASK 0x000000000ffff000UL
+#define UVXH_NODE_ID_REVISION_MASK 0x00000000f0000000UL
+#define UVXH_NODE_ID_NODE_ID_MASK 0x00007fff00000000UL
+#define UVXH_NODE_ID_NODES_PER_BIT_MASK 0x01fc000000000000UL
+#define UVXH_NODE_ID_NI_PORT_MASK 0x3e00000000000000UL
+
#define UV2H_NODE_ID_FORCE1_SHFT 0
#define UV2H_NODE_ID_MANUFACTURER_SHFT 1
#define UV2H_NODE_ID_PART_NUMBER_SHFT 12
@@ -1499,6 +2061,25 @@ union uvh_lb_bau_sb_descriptor_base_u {
#define UV2H_NODE_ID_NODES_PER_BIT_MASK 0x01fc000000000000UL
#define UV2H_NODE_ID_NI_PORT_MASK 0x3e00000000000000UL
+#define UV3H_NODE_ID_FORCE1_SHFT 0
+#define UV3H_NODE_ID_MANUFACTURER_SHFT 1
+#define UV3H_NODE_ID_PART_NUMBER_SHFT 12
+#define UV3H_NODE_ID_REVISION_SHFT 28
+#define UV3H_NODE_ID_NODE_ID_SHFT 32
+#define UV3H_NODE_ID_ROUTER_SELECT_SHFT 48
+#define UV3H_NODE_ID_RESERVED_2_SHFT 49
+#define UV3H_NODE_ID_NODES_PER_BIT_SHFT 50
+#define UV3H_NODE_ID_NI_PORT_SHFT 57
+#define UV3H_NODE_ID_FORCE1_MASK 0x0000000000000001UL
+#define UV3H_NODE_ID_MANUFACTURER_MASK 0x0000000000000ffeUL
+#define UV3H_NODE_ID_PART_NUMBER_MASK 0x000000000ffff000UL
+#define UV3H_NODE_ID_REVISION_MASK 0x00000000f0000000UL
+#define UV3H_NODE_ID_NODE_ID_MASK 0x00007fff00000000UL
+#define UV3H_NODE_ID_ROUTER_SELECT_MASK 0x0001000000000000UL
+#define UV3H_NODE_ID_RESERVED_2_MASK 0x0002000000000000UL
+#define UV3H_NODE_ID_NODES_PER_BIT_MASK 0x01fc000000000000UL
+#define UV3H_NODE_ID_NI_PORT_MASK 0x3e00000000000000UL
+
union uvh_node_id_u {
unsigned long v;
struct uvh_node_id_s {
@@ -1521,6 +2102,17 @@ union uvh_node_id_u {
unsigned long ni_port:4; /* RO */
unsigned long rsvd_60_63:4;
} s1;
+ struct uvxh_node_id_s {
+ unsigned long force1:1; /* RO */
+ unsigned long manufacturer:11; /* RO */
+ unsigned long part_number:16; /* RO */
+ unsigned long revision:4; /* RO */
+ unsigned long node_id:15; /* RW */
+ unsigned long rsvd_47_49:3;
+ unsigned long nodes_per_bit:7; /* RO */
+ unsigned long ni_port:5; /* RO */
+ unsigned long rsvd_62_63:2;
+ } sx;
struct uv2h_node_id_s {
unsigned long force1:1; /* RO */
unsigned long manufacturer:11; /* RO */
@@ -1532,13 +2124,26 @@ union uvh_node_id_u {
unsigned long ni_port:5; /* RO */
unsigned long rsvd_62_63:2;
} s2;
+ struct uv3h_node_id_s {
+ unsigned long force1:1; /* RO */
+ unsigned long manufacturer:11; /* RO */
+ unsigned long part_number:16; /* RO */
+ unsigned long revision:4; /* RO */
+ unsigned long node_id:15; /* RW */
+ unsigned long rsvd_47:1;
+ unsigned long router_select:1; /* RO */
+ unsigned long rsvd_49:1;
+ unsigned long nodes_per_bit:7; /* RO */
+ unsigned long ni_port:5; /* RO */
+ unsigned long rsvd_62_63:2;
+ } s3;
};
/* ========================================================================= */
/* UVH_NODE_PRESENT_TABLE */
/* ========================================================================= */
-#define UVH_NODE_PRESENT_TABLE 0x1400UL
-#define UVH_NODE_PRESENT_TABLE_DEPTH 16
+#define UVH_NODE_PRESENT_TABLE 0x1400UL
+#define UVH_NODE_PRESENT_TABLE_DEPTH 16
#define UVH_NODE_PRESENT_TABLE_NODES_SHFT 0
#define UVH_NODE_PRESENT_TABLE_NODES_MASK 0xffffffffffffffffUL
@@ -1553,7 +2158,7 @@ union uvh_node_present_table_u {
/* ========================================================================= */
/* UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR */
/* ========================================================================= */
-#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR 0x16000c8UL
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR 0x16000c8UL
#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_BASE_SHFT 24
#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_M_ALIAS_SHFT 48
@@ -1577,7 +2182,7 @@ union uvh_rh_gam_alias210_overlay_config_0_mmr_u {
/* ========================================================================= */
/* UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR */
/* ========================================================================= */
-#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR 0x16000d8UL
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR 0x16000d8UL
#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_BASE_SHFT 24
#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_M_ALIAS_SHFT 48
@@ -1601,7 +2206,7 @@ union uvh_rh_gam_alias210_overlay_config_1_mmr_u {
/* ========================================================================= */
/* UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR */
/* ========================================================================= */
-#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR 0x16000e8UL
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR 0x16000e8UL
#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_BASE_SHFT 24
#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_M_ALIAS_SHFT 48
@@ -1625,7 +2230,7 @@ union uvh_rh_gam_alias210_overlay_config_2_mmr_u {
/* ========================================================================= */
/* UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR */
/* ========================================================================= */
-#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR 0x16000d0UL
+#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR 0x16000d0UL
#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT 24
#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_MASK 0x00003fffff000000UL
@@ -1642,7 +2247,7 @@ union uvh_rh_gam_alias210_redirect_config_0_mmr_u {
/* ========================================================================= */
/* UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR */
/* ========================================================================= */
-#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR 0x16000e0UL
+#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR 0x16000e0UL
#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR_DEST_BASE_SHFT 24
#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR_DEST_BASE_MASK 0x00003fffff000000UL
@@ -1659,7 +2264,7 @@ union uvh_rh_gam_alias210_redirect_config_1_mmr_u {
/* ========================================================================= */
/* UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR */
/* ========================================================================= */
-#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR 0x16000f0UL
+#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR 0x16000f0UL
#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR_DEST_BASE_SHFT 24
#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR_DEST_BASE_MASK 0x00003fffff000000UL
@@ -1676,7 +2281,10 @@ union uvh_rh_gam_alias210_redirect_config_2_mmr_u {
/* ========================================================================= */
/* UVH_RH_GAM_CONFIG_MMR */
/* ========================================================================= */
-#define UVH_RH_GAM_CONFIG_MMR 0x1600000UL
+#define UVH_RH_GAM_CONFIG_MMR 0x1600000UL
+#define UV1H_RH_GAM_CONFIG_MMR 0x1600000UL
+#define UV2H_RH_GAM_CONFIG_MMR 0x1600000UL
+#define UV3H_RH_GAM_CONFIG_MMR 0x1600000UL
#define UVH_RH_GAM_CONFIG_MMR_M_SKT_SHFT 0
#define UVH_RH_GAM_CONFIG_MMR_N_SKT_SHFT 6
@@ -1690,11 +2298,21 @@ union uvh_rh_gam_alias210_redirect_config_2_mmr_u {
#define UV1H_RH_GAM_CONFIG_MMR_N_SKT_MASK 0x00000000000003c0UL
#define UV1H_RH_GAM_CONFIG_MMR_MMIOL_CFG_MASK 0x0000000000001000UL
+#define UVXH_RH_GAM_CONFIG_MMR_M_SKT_SHFT 0
+#define UVXH_RH_GAM_CONFIG_MMR_N_SKT_SHFT 6
+#define UVXH_RH_GAM_CONFIG_MMR_M_SKT_MASK 0x000000000000003fUL
+#define UVXH_RH_GAM_CONFIG_MMR_N_SKT_MASK 0x00000000000003c0UL
+
#define UV2H_RH_GAM_CONFIG_MMR_M_SKT_SHFT 0
#define UV2H_RH_GAM_CONFIG_MMR_N_SKT_SHFT 6
#define UV2H_RH_GAM_CONFIG_MMR_M_SKT_MASK 0x000000000000003fUL
#define UV2H_RH_GAM_CONFIG_MMR_N_SKT_MASK 0x00000000000003c0UL
+#define UV3H_RH_GAM_CONFIG_MMR_M_SKT_SHFT 0
+#define UV3H_RH_GAM_CONFIG_MMR_N_SKT_SHFT 6
+#define UV3H_RH_GAM_CONFIG_MMR_M_SKT_MASK 0x000000000000003fUL
+#define UV3H_RH_GAM_CONFIG_MMR_N_SKT_MASK 0x00000000000003c0UL
+
union uvh_rh_gam_config_mmr_u {
unsigned long v;
struct uvh_rh_gam_config_mmr_s {
@@ -1709,20 +2327,37 @@ union uvh_rh_gam_config_mmr_u {
unsigned long mmiol_cfg:1; /* RW */
unsigned long rsvd_13_63:51;
} s1;
+ struct uvxh_rh_gam_config_mmr_s {
+ unsigned long m_skt:6; /* RW */
+ unsigned long n_skt:4; /* RW */
+ unsigned long rsvd_10_63:54;
+ } sx;
struct uv2h_rh_gam_config_mmr_s {
unsigned long m_skt:6; /* RW */
unsigned long n_skt:4; /* RW */
unsigned long rsvd_10_63:54;
} s2;
+ struct uv3h_rh_gam_config_mmr_s {
+ unsigned long m_skt:6; /* RW */
+ unsigned long n_skt:4; /* RW */
+ unsigned long rsvd_10_63:54;
+ } s3;
};
/* ========================================================================= */
/* UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR */
/* ========================================================================= */
-#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR 0x1600010UL
+#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR 0x1600010UL
+#define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR 0x1600010UL
+#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR 0x1600010UL
+#define UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR 0x1600010UL
#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT 28
+#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_SHFT 52
+#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffff0000000UL
+#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_MASK 0x00f0000000000000UL
+#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
#define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT 28
#define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_GR4_SHFT 48
@@ -1733,6 +2368,13 @@ union uvh_rh_gam_config_mmr_u {
#define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_MASK 0x00f0000000000000UL
#define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
+#define UVXH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT 28
+#define UVXH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_SHFT 52
+#define UVXH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
+#define UVXH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffff0000000UL
+#define UVXH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_MASK 0x00f0000000000000UL
+#define UVXH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
+
#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT 28
#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_SHFT 52
#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
@@ -1740,12 +2382,23 @@ union uvh_rh_gam_config_mmr_u {
#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_MASK 0x00f0000000000000UL
#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
+#define UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT 28
+#define UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_SHFT 52
+#define UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_MODE_SHFT 62
+#define UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
+#define UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffff0000000UL
+#define UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_MASK 0x00f0000000000000UL
+#define UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_MODE_MASK 0x4000000000000000UL
+#define UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
+
union uvh_rh_gam_gru_overlay_config_mmr_u {
unsigned long v;
struct uvh_rh_gam_gru_overlay_config_mmr_s {
unsigned long rsvd_0_27:28;
unsigned long base:18; /* RW */
- unsigned long rsvd_46_62:17;
+ unsigned long rsvd_46_51:6;
+ unsigned long n_gru:4; /* RW */
+ unsigned long rsvd_56_62:7;
unsigned long enable:1; /* RW */
} s;
struct uv1h_rh_gam_gru_overlay_config_mmr_s {
@@ -1758,6 +2411,14 @@ union uvh_rh_gam_gru_overlay_config_mmr_u {
unsigned long rsvd_56_62:7;
unsigned long enable:1; /* RW */
} s1;
+ struct uvxh_rh_gam_gru_overlay_config_mmr_s {
+ unsigned long rsvd_0_27:28;
+ unsigned long base:18; /* RW */
+ unsigned long rsvd_46_51:6;
+ unsigned long n_gru:4; /* RW */
+ unsigned long rsvd_56_62:7;
+ unsigned long enable:1; /* RW */
+ } sx;
struct uv2h_rh_gam_gru_overlay_config_mmr_s {
unsigned long rsvd_0_27:28;
unsigned long base:18; /* RW */
@@ -1766,12 +2427,22 @@ union uvh_rh_gam_gru_overlay_config_mmr_u {
unsigned long rsvd_56_62:7;
unsigned long enable:1; /* RW */
} s2;
+ struct uv3h_rh_gam_gru_overlay_config_mmr_s {
+ unsigned long rsvd_0_27:28;
+ unsigned long base:18; /* RW */
+ unsigned long rsvd_46_51:6;
+ unsigned long n_gru:4; /* RW */
+ unsigned long rsvd_56_61:6;
+ unsigned long mode:1; /* RW */
+ unsigned long enable:1; /* RW */
+ } s3;
};
/* ========================================================================= */
/* UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR */
/* ========================================================================= */
-#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR 0x1600030UL
+#define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR 0x1600030UL
+#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR 0x1600030UL
#define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT 30
#define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_M_IO_SHFT 46
@@ -1814,10 +2485,15 @@ union uvh_rh_gam_mmioh_overlay_config_mmr_u {
/* ========================================================================= */
/* UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR */
/* ========================================================================= */
-#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR 0x1600028UL
+#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR 0x1600028UL
+#define UV1H_RH_GAM_MMR_OVERLAY_CONFIG_MMR 0x1600028UL
+#define UV2H_RH_GAM_MMR_OVERLAY_CONFIG_MMR 0x1600028UL
+#define UV3H_RH_GAM_MMR_OVERLAY_CONFIG_MMR 0x1600028UL
#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT 26
+#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffffc000000UL
+#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
#define UV1H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT 26
#define UV1H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_DUAL_HUB_SHFT 46
@@ -1826,11 +2502,21 @@ union uvh_rh_gam_mmioh_overlay_config_mmr_u {
#define UV1H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_DUAL_HUB_MASK 0x0000400000000000UL
#define UV1H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
+#define UVXH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT 26
+#define UVXH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
+#define UVXH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffffc000000UL
+#define UVXH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
+
#define UV2H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT 26
#define UV2H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
#define UV2H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffffc000000UL
#define UV2H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
+#define UV3H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT 26
+#define UV3H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
+#define UV3H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffffc000000UL
+#define UV3H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
+
union uvh_rh_gam_mmr_overlay_config_mmr_u {
unsigned long v;
struct uvh_rh_gam_mmr_overlay_config_mmr_s {
@@ -1846,18 +2532,30 @@ union uvh_rh_gam_mmr_overlay_config_mmr_u {
unsigned long rsvd_47_62:16;
unsigned long enable:1; /* RW */
} s1;
+ struct uvxh_rh_gam_mmr_overlay_config_mmr_s {
+ unsigned long rsvd_0_25:26;
+ unsigned long base:20; /* RW */
+ unsigned long rsvd_46_62:17;
+ unsigned long enable:1; /* RW */
+ } sx;
struct uv2h_rh_gam_mmr_overlay_config_mmr_s {
unsigned long rsvd_0_25:26;
unsigned long base:20; /* RW */
unsigned long rsvd_46_62:17;
unsigned long enable:1; /* RW */
} s2;
+ struct uv3h_rh_gam_mmr_overlay_config_mmr_s {
+ unsigned long rsvd_0_25:26;
+ unsigned long base:20; /* RW */
+ unsigned long rsvd_46_62:17;
+ unsigned long enable:1; /* RW */
+ } s3;
};
/* ========================================================================= */
/* UVH_RTC */
/* ========================================================================= */
-#define UVH_RTC 0x340000UL
+#define UVH_RTC 0x340000UL
#define UVH_RTC_REAL_TIME_CLOCK_SHFT 0
#define UVH_RTC_REAL_TIME_CLOCK_MASK 0x00ffffffffffffffUL
@@ -1873,7 +2571,7 @@ union uvh_rtc_u {
/* ========================================================================= */
/* UVH_RTC1_INT_CONFIG */
/* ========================================================================= */
-#define UVH_RTC1_INT_CONFIG 0x615c0UL
+#define UVH_RTC1_INT_CONFIG 0x615c0UL
#define UVH_RTC1_INT_CONFIG_VECTOR_SHFT 0
#define UVH_RTC1_INT_CONFIG_DM_SHFT 8
@@ -1911,8 +2609,8 @@ union uvh_rtc1_int_config_u {
/* ========================================================================= */
/* UVH_SCRATCH5 */
/* ========================================================================= */
-#define UVH_SCRATCH5 0x2d0200UL
-#define UVH_SCRATCH5_32 0x778
+#define UVH_SCRATCH5 0x2d0200UL
+#define UVH_SCRATCH5_32 0x778
#define UVH_SCRATCH5_SCRATCH5_SHFT 0
#define UVH_SCRATCH5_SCRATCH5_MASK 0xffffffffffffffffUL
@@ -1925,79 +2623,93 @@ union uvh_scratch5_u {
};
/* ========================================================================= */
-/* UV2H_EVENT_OCCURRED2 */
-/* ========================================================================= */
-#define UV2H_EVENT_OCCURRED2 0x70100UL
-#define UV2H_EVENT_OCCURRED2_32 0xb68
-
-#define UV2H_EVENT_OCCURRED2_RTC_0_SHFT 0
-#define UV2H_EVENT_OCCURRED2_RTC_1_SHFT 1
-#define UV2H_EVENT_OCCURRED2_RTC_2_SHFT 2
-#define UV2H_EVENT_OCCURRED2_RTC_3_SHFT 3
-#define UV2H_EVENT_OCCURRED2_RTC_4_SHFT 4
-#define UV2H_EVENT_OCCURRED2_RTC_5_SHFT 5
-#define UV2H_EVENT_OCCURRED2_RTC_6_SHFT 6
-#define UV2H_EVENT_OCCURRED2_RTC_7_SHFT 7
-#define UV2H_EVENT_OCCURRED2_RTC_8_SHFT 8
-#define UV2H_EVENT_OCCURRED2_RTC_9_SHFT 9
-#define UV2H_EVENT_OCCURRED2_RTC_10_SHFT 10
-#define UV2H_EVENT_OCCURRED2_RTC_11_SHFT 11
-#define UV2H_EVENT_OCCURRED2_RTC_12_SHFT 12
-#define UV2H_EVENT_OCCURRED2_RTC_13_SHFT 13
-#define UV2H_EVENT_OCCURRED2_RTC_14_SHFT 14
-#define UV2H_EVENT_OCCURRED2_RTC_15_SHFT 15
-#define UV2H_EVENT_OCCURRED2_RTC_16_SHFT 16
-#define UV2H_EVENT_OCCURRED2_RTC_17_SHFT 17
-#define UV2H_EVENT_OCCURRED2_RTC_18_SHFT 18
-#define UV2H_EVENT_OCCURRED2_RTC_19_SHFT 19
-#define UV2H_EVENT_OCCURRED2_RTC_20_SHFT 20
-#define UV2H_EVENT_OCCURRED2_RTC_21_SHFT 21
-#define UV2H_EVENT_OCCURRED2_RTC_22_SHFT 22
-#define UV2H_EVENT_OCCURRED2_RTC_23_SHFT 23
-#define UV2H_EVENT_OCCURRED2_RTC_24_SHFT 24
-#define UV2H_EVENT_OCCURRED2_RTC_25_SHFT 25
-#define UV2H_EVENT_OCCURRED2_RTC_26_SHFT 26
-#define UV2H_EVENT_OCCURRED2_RTC_27_SHFT 27
-#define UV2H_EVENT_OCCURRED2_RTC_28_SHFT 28
-#define UV2H_EVENT_OCCURRED2_RTC_29_SHFT 29
-#define UV2H_EVENT_OCCURRED2_RTC_30_SHFT 30
-#define UV2H_EVENT_OCCURRED2_RTC_31_SHFT 31
-#define UV2H_EVENT_OCCURRED2_RTC_0_MASK 0x0000000000000001UL
-#define UV2H_EVENT_OCCURRED2_RTC_1_MASK 0x0000000000000002UL
-#define UV2H_EVENT_OCCURRED2_RTC_2_MASK 0x0000000000000004UL
-#define UV2H_EVENT_OCCURRED2_RTC_3_MASK 0x0000000000000008UL
-#define UV2H_EVENT_OCCURRED2_RTC_4_MASK 0x0000000000000010UL
-#define UV2H_EVENT_OCCURRED2_RTC_5_MASK 0x0000000000000020UL
-#define UV2H_EVENT_OCCURRED2_RTC_6_MASK 0x0000000000000040UL
-#define UV2H_EVENT_OCCURRED2_RTC_7_MASK 0x0000000000000080UL
-#define UV2H_EVENT_OCCURRED2_RTC_8_MASK 0x0000000000000100UL
-#define UV2H_EVENT_OCCURRED2_RTC_9_MASK 0x0000000000000200UL
-#define UV2H_EVENT_OCCURRED2_RTC_10_MASK 0x0000000000000400UL
-#define UV2H_EVENT_OCCURRED2_RTC_11_MASK 0x0000000000000800UL
-#define UV2H_EVENT_OCCURRED2_RTC_12_MASK 0x0000000000001000UL
-#define UV2H_EVENT_OCCURRED2_RTC_13_MASK 0x0000000000002000UL
-#define UV2H_EVENT_OCCURRED2_RTC_14_MASK 0x0000000000004000UL
-#define UV2H_EVENT_OCCURRED2_RTC_15_MASK 0x0000000000008000UL
-#define UV2H_EVENT_OCCURRED2_RTC_16_MASK 0x0000000000010000UL
-#define UV2H_EVENT_OCCURRED2_RTC_17_MASK 0x0000000000020000UL
-#define UV2H_EVENT_OCCURRED2_RTC_18_MASK 0x0000000000040000UL
-#define UV2H_EVENT_OCCURRED2_RTC_19_MASK 0x0000000000080000UL
-#define UV2H_EVENT_OCCURRED2_RTC_20_MASK 0x0000000000100000UL
-#define UV2H_EVENT_OCCURRED2_RTC_21_MASK 0x0000000000200000UL
-#define UV2H_EVENT_OCCURRED2_RTC_22_MASK 0x0000000000400000UL
-#define UV2H_EVENT_OCCURRED2_RTC_23_MASK 0x0000000000800000UL
-#define UV2H_EVENT_OCCURRED2_RTC_24_MASK 0x0000000001000000UL
-#define UV2H_EVENT_OCCURRED2_RTC_25_MASK 0x0000000002000000UL
-#define UV2H_EVENT_OCCURRED2_RTC_26_MASK 0x0000000004000000UL
-#define UV2H_EVENT_OCCURRED2_RTC_27_MASK 0x0000000008000000UL
-#define UV2H_EVENT_OCCURRED2_RTC_28_MASK 0x0000000010000000UL
-#define UV2H_EVENT_OCCURRED2_RTC_29_MASK 0x0000000020000000UL
-#define UV2H_EVENT_OCCURRED2_RTC_30_MASK 0x0000000040000000UL
-#define UV2H_EVENT_OCCURRED2_RTC_31_MASK 0x0000000080000000UL
-
-union uv2h_event_occurred2_u {
+/* UVH_SCRATCH5_ALIAS */
+/* ========================================================================= */
+#define UVH_SCRATCH5_ALIAS 0x2d0208UL
+#define UVH_SCRATCH5_ALIAS_32 0x780
+
+
+/* ========================================================================= */
+/* UVH_SCRATCH5_ALIAS_2 */
+/* ========================================================================= */
+#define UVH_SCRATCH5_ALIAS_2 0x2d0210UL
+#define UVH_SCRATCH5_ALIAS_2_32 0x788
+
+
+/* ========================================================================= */
+/* UVXH_EVENT_OCCURRED2 */
+/* ========================================================================= */
+#define UVXH_EVENT_OCCURRED2 0x70100UL
+#define UVXH_EVENT_OCCURRED2_32 0xb68
+
+#define UVXH_EVENT_OCCURRED2_RTC_0_SHFT 0
+#define UVXH_EVENT_OCCURRED2_RTC_1_SHFT 1
+#define UVXH_EVENT_OCCURRED2_RTC_2_SHFT 2
+#define UVXH_EVENT_OCCURRED2_RTC_3_SHFT 3
+#define UVXH_EVENT_OCCURRED2_RTC_4_SHFT 4
+#define UVXH_EVENT_OCCURRED2_RTC_5_SHFT 5
+#define UVXH_EVENT_OCCURRED2_RTC_6_SHFT 6
+#define UVXH_EVENT_OCCURRED2_RTC_7_SHFT 7
+#define UVXH_EVENT_OCCURRED2_RTC_8_SHFT 8
+#define UVXH_EVENT_OCCURRED2_RTC_9_SHFT 9
+#define UVXH_EVENT_OCCURRED2_RTC_10_SHFT 10
+#define UVXH_EVENT_OCCURRED2_RTC_11_SHFT 11
+#define UVXH_EVENT_OCCURRED2_RTC_12_SHFT 12
+#define UVXH_EVENT_OCCURRED2_RTC_13_SHFT 13
+#define UVXH_EVENT_OCCURRED2_RTC_14_SHFT 14
+#define UVXH_EVENT_OCCURRED2_RTC_15_SHFT 15
+#define UVXH_EVENT_OCCURRED2_RTC_16_SHFT 16
+#define UVXH_EVENT_OCCURRED2_RTC_17_SHFT 17
+#define UVXH_EVENT_OCCURRED2_RTC_18_SHFT 18
+#define UVXH_EVENT_OCCURRED2_RTC_19_SHFT 19
+#define UVXH_EVENT_OCCURRED2_RTC_20_SHFT 20
+#define UVXH_EVENT_OCCURRED2_RTC_21_SHFT 21
+#define UVXH_EVENT_OCCURRED2_RTC_22_SHFT 22
+#define UVXH_EVENT_OCCURRED2_RTC_23_SHFT 23
+#define UVXH_EVENT_OCCURRED2_RTC_24_SHFT 24
+#define UVXH_EVENT_OCCURRED2_RTC_25_SHFT 25
+#define UVXH_EVENT_OCCURRED2_RTC_26_SHFT 26
+#define UVXH_EVENT_OCCURRED2_RTC_27_SHFT 27
+#define UVXH_EVENT_OCCURRED2_RTC_28_SHFT 28
+#define UVXH_EVENT_OCCURRED2_RTC_29_SHFT 29
+#define UVXH_EVENT_OCCURRED2_RTC_30_SHFT 30
+#define UVXH_EVENT_OCCURRED2_RTC_31_SHFT 31
+#define UVXH_EVENT_OCCURRED2_RTC_0_MASK 0x0000000000000001UL
+#define UVXH_EVENT_OCCURRED2_RTC_1_MASK 0x0000000000000002UL
+#define UVXH_EVENT_OCCURRED2_RTC_2_MASK 0x0000000000000004UL
+#define UVXH_EVENT_OCCURRED2_RTC_3_MASK 0x0000000000000008UL
+#define UVXH_EVENT_OCCURRED2_RTC_4_MASK 0x0000000000000010UL
+#define UVXH_EVENT_OCCURRED2_RTC_5_MASK 0x0000000000000020UL
+#define UVXH_EVENT_OCCURRED2_RTC_6_MASK 0x0000000000000040UL
+#define UVXH_EVENT_OCCURRED2_RTC_7_MASK 0x0000000000000080UL
+#define UVXH_EVENT_OCCURRED2_RTC_8_MASK 0x0000000000000100UL
+#define UVXH_EVENT_OCCURRED2_RTC_9_MASK 0x0000000000000200UL
+#define UVXH_EVENT_OCCURRED2_RTC_10_MASK 0x0000000000000400UL
+#define UVXH_EVENT_OCCURRED2_RTC_11_MASK 0x0000000000000800UL
+#define UVXH_EVENT_OCCURRED2_RTC_12_MASK 0x0000000000001000UL
+#define UVXH_EVENT_OCCURRED2_RTC_13_MASK 0x0000000000002000UL
+#define UVXH_EVENT_OCCURRED2_RTC_14_MASK 0x0000000000004000UL
+#define UVXH_EVENT_OCCURRED2_RTC_15_MASK 0x0000000000008000UL
+#define UVXH_EVENT_OCCURRED2_RTC_16_MASK 0x0000000000010000UL
+#define UVXH_EVENT_OCCURRED2_RTC_17_MASK 0x0000000000020000UL
+#define UVXH_EVENT_OCCURRED2_RTC_18_MASK 0x0000000000040000UL
+#define UVXH_EVENT_OCCURRED2_RTC_19_MASK 0x0000000000080000UL
+#define UVXH_EVENT_OCCURRED2_RTC_20_MASK 0x0000000000100000UL
+#define UVXH_EVENT_OCCURRED2_RTC_21_MASK 0x0000000000200000UL
+#define UVXH_EVENT_OCCURRED2_RTC_22_MASK 0x0000000000400000UL
+#define UVXH_EVENT_OCCURRED2_RTC_23_MASK 0x0000000000800000UL
+#define UVXH_EVENT_OCCURRED2_RTC_24_MASK 0x0000000001000000UL
+#define UVXH_EVENT_OCCURRED2_RTC_25_MASK 0x0000000002000000UL
+#define UVXH_EVENT_OCCURRED2_RTC_26_MASK 0x0000000004000000UL
+#define UVXH_EVENT_OCCURRED2_RTC_27_MASK 0x0000000008000000UL
+#define UVXH_EVENT_OCCURRED2_RTC_28_MASK 0x0000000010000000UL
+#define UVXH_EVENT_OCCURRED2_RTC_29_MASK 0x0000000020000000UL
+#define UVXH_EVENT_OCCURRED2_RTC_30_MASK 0x0000000040000000UL
+#define UVXH_EVENT_OCCURRED2_RTC_31_MASK 0x0000000080000000UL
+
+union uvxh_event_occurred2_u {
unsigned long v;
- struct uv2h_event_occurred2_s {
+ struct uvxh_event_occurred2_s {
unsigned long rtc_0:1; /* RW */
unsigned long rtc_1:1; /* RW */
unsigned long rtc_2:1; /* RW */
@@ -2031,29 +2743,46 @@ union uv2h_event_occurred2_u {
unsigned long rtc_30:1; /* RW */
unsigned long rtc_31:1; /* RW */
unsigned long rsvd_32_63:32;
- } s1;
+ } sx;
};
/* ========================================================================= */
-/* UV2H_EVENT_OCCURRED2_ALIAS */
+/* UVXH_EVENT_OCCURRED2_ALIAS */
/* ========================================================================= */
-#define UV2H_EVENT_OCCURRED2_ALIAS 0x70108UL
-#define UV2H_EVENT_OCCURRED2_ALIAS_32 0xb70
+#define UVXH_EVENT_OCCURRED2_ALIAS 0x70108UL
+#define UVXH_EVENT_OCCURRED2_ALIAS_32 0xb70
+
/* ========================================================================= */
-/* UV2H_LB_BAU_SB_ACTIVATION_STATUS_2 */
+/* UVXH_LB_BAU_SB_ACTIVATION_STATUS_2 */
/* ========================================================================= */
-#define UV2H_LB_BAU_SB_ACTIVATION_STATUS_2 0x320130UL
-#define UV2H_LB_BAU_SB_ACTIVATION_STATUS_2_32 0x9f0
+#define UVXH_LB_BAU_SB_ACTIVATION_STATUS_2 0x320130UL
+#define UV2H_LB_BAU_SB_ACTIVATION_STATUS_2 0x320130UL
+#define UV3H_LB_BAU_SB_ACTIVATION_STATUS_2 0x320130UL
+#define UVXH_LB_BAU_SB_ACTIVATION_STATUS_2_32 0x9f0
+#define UV2H_LB_BAU_SB_ACTIVATION_STATUS_2_32 0x320130UL
+#define UV3H_LB_BAU_SB_ACTIVATION_STATUS_2_32 0x320130UL
+
+#define UVXH_LB_BAU_SB_ACTIVATION_STATUS_2_AUX_ERROR_SHFT 0
+#define UVXH_LB_BAU_SB_ACTIVATION_STATUS_2_AUX_ERROR_MASK 0xffffffffffffffffUL
#define UV2H_LB_BAU_SB_ACTIVATION_STATUS_2_AUX_ERROR_SHFT 0
#define UV2H_LB_BAU_SB_ACTIVATION_STATUS_2_AUX_ERROR_MASK 0xffffffffffffffffUL
-union uv2h_lb_bau_sb_activation_status_2_u {
+#define UV3H_LB_BAU_SB_ACTIVATION_STATUS_2_AUX_ERROR_SHFT 0
+#define UV3H_LB_BAU_SB_ACTIVATION_STATUS_2_AUX_ERROR_MASK 0xffffffffffffffffUL
+
+union uvxh_lb_bau_sb_activation_status_2_u {
unsigned long v;
+ struct uvxh_lb_bau_sb_activation_status_2_s {
+ unsigned long aux_error:64; /* RW */
+ } sx;
struct uv2h_lb_bau_sb_activation_status_2_s {
unsigned long aux_error:64; /* RW */
- } s1;
+ } s2;
+ struct uv3h_lb_bau_sb_activation_status_2_s {
+ unsigned long aux_error:64; /* RW */
+ } s3;
};
/* ========================================================================= */
@@ -2073,5 +2802,127 @@ union uv1h_lb_target_physical_apic_id_mask_u {
} s1;
};
+/* ========================================================================= */
+/* UV3H_GR0_GAM_GR_CONFIG */
+/* ========================================================================= */
+#define UV3H_GR0_GAM_GR_CONFIG 0xc00028UL
+
+#define UV3H_GR0_GAM_GR_CONFIG_M_SKT_SHFT 0
+#define UV3H_GR0_GAM_GR_CONFIG_SUBSPACE_SHFT 10
+#define UV3H_GR0_GAM_GR_CONFIG_M_SKT_MASK 0x000000000000003fUL
+#define UV3H_GR0_GAM_GR_CONFIG_SUBSPACE_MASK 0x0000000000000400UL
+
+union uv3h_gr0_gam_gr_config_u {
+ unsigned long v;
+ struct uv3h_gr0_gam_gr_config_s {
+ unsigned long m_skt:6; /* RW */
+ unsigned long undef_6_9:4; /* Undefined */
+ unsigned long subspace:1; /* RW */
+ unsigned long reserved:53;
+ } s3;
+};
+
+/* ========================================================================= */
+/* UV3H_GR1_GAM_GR_CONFIG */
+/* ========================================================================= */
+#define UV3H_GR1_GAM_GR_CONFIG 0x1000028UL
+
+#define UV3H_GR1_GAM_GR_CONFIG_M_SKT_SHFT 0
+#define UV3H_GR1_GAM_GR_CONFIG_SUBSPACE_SHFT 10
+#define UV3H_GR1_GAM_GR_CONFIG_M_SKT_MASK 0x000000000000003fUL
+#define UV3H_GR1_GAM_GR_CONFIG_SUBSPACE_MASK 0x0000000000000400UL
+
+union uv3h_gr1_gam_gr_config_u {
+ unsigned long v;
+ struct uv3h_gr1_gam_gr_config_s {
+ unsigned long m_skt:6; /* RW */
+ unsigned long undef_6_9:4; /* Undefined */
+ unsigned long subspace:1; /* RW */
+ unsigned long reserved:53;
+ } s3;
+};
+
+/* ========================================================================= */
+/* UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR */
+/* ========================================================================= */
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR 0x1603000UL
+
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_BASE_SHFT 26
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_SHFT 46
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_ENABLE_SHFT 63
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_BASE_MASK 0x00003ffffc000000UL
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_MASK 0x000fc00000000000UL
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_ENABLE_MASK 0x8000000000000000UL
+
+union uv3h_rh_gam_mmioh_overlay_config0_mmr_u {
+ unsigned long v;
+ struct uv3h_rh_gam_mmioh_overlay_config0_mmr_s {
+ unsigned long rsvd_0_25:26;
+ unsigned long base:20; /* RW */
+ unsigned long m_io:6; /* RW */
+ unsigned long n_io:4;
+ unsigned long rsvd_56_62:7;
+ unsigned long enable:1; /* RW */
+ } s3;
+};
+
+/* ========================================================================= */
+/* UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR */
+/* ========================================================================= */
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR 0x1604000UL
+
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_BASE_SHFT 26
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_SHFT 46
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_ENABLE_SHFT 63
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_BASE_MASK 0x00003ffffc000000UL
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_MASK 0x000fc00000000000UL
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_ENABLE_MASK 0x8000000000000000UL
+
+union uv3h_rh_gam_mmioh_overlay_config1_mmr_u {
+ unsigned long v;
+ struct uv3h_rh_gam_mmioh_overlay_config1_mmr_s {
+ unsigned long rsvd_0_25:26;
+ unsigned long base:20; /* RW */
+ unsigned long m_io:6; /* RW */
+ unsigned long n_io:4;
+ unsigned long rsvd_56_62:7;
+ unsigned long enable:1; /* RW */
+ } s3;
+};
+
+/* ========================================================================= */
+/* UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR */
+/* ========================================================================= */
+#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR 0x1603800UL
+#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_DEPTH 128
+
+#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_NASID_SHFT 0
+#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_NASID_MASK 0x0000000000007fffUL
+
+union uv3h_rh_gam_mmioh_redirect_config0_mmr_u {
+ unsigned long v;
+ struct uv3h_rh_gam_mmioh_redirect_config0_mmr_s {
+ unsigned long nasid:15; /* RW */
+ unsigned long rsvd_15_63:49;
+ } s3;
+};
+
+/* ========================================================================= */
+/* UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR */
+/* ========================================================================= */
+#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR 0x1604800UL
+#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_DEPTH 128
+
+#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_NASID_SHFT 0
+#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_NASID_MASK 0x0000000000007fffUL
+
+union uv3h_rh_gam_mmioh_redirect_config1_mmr_u {
+ unsigned long v;
+ struct uv3h_rh_gam_mmioh_redirect_config1_mmr_s {
+ unsigned long nasid:15; /* RW */
+ unsigned long rsvd_15_63:49;
+ } s3;
+};
+
#endif /* _ASM_X86_UV_UV_MMRS_H */
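A minimal usage sketch (not part of this patch), assuming the uv_read_local_mmr() accessor from <asm/uv/uv_hub.h>: an MMR is read as a raw 64-bit value through the union's 'v' member and decoded via the bitfield view that matches the hub generation (s1/s2/s3, or sx for the fields common to UV2/UV3).

static inline unsigned long uv3_gr0_m_skt(void)
{
	union uv3h_gr0_gam_gr_config_u cfg;

	cfg.v = uv_read_local_mmr(UV3H_GR0_GAM_GR_CONFIG);	/* raw 64-bit read */
	return cfg.s3.m_skt;					/* decode the UV3 view */
}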
diff --git a/arch/x86/include/asm/vdso.h b/arch/x86/include/asm/vdso.h
index bb0522850b7..30be253dd28 100644
--- a/arch/x86/include/asm/vdso.h
+++ b/arch/x86/include/asm/vdso.h
@@ -1,33 +1,54 @@
#ifndef _ASM_X86_VDSO_H
#define _ASM_X86_VDSO_H
+#include <asm/page_types.h>
+#include <linux/linkage.h>
+#include <linux/init.h>
+
+#ifndef __ASSEMBLER__
+
+#include <linux/mm_types.h>
+
+struct vdso_image {
+ void *data;
+ unsigned long size; /* Always a multiple of PAGE_SIZE */
+
+ /* text_mapping.pages is big enough for data/size page pointers */
+ struct vm_special_mapping text_mapping;
+
+ unsigned long alt, alt_len;
+
+ unsigned long sym_end_mapping; /* Total size of the mapping */
+
+ unsigned long sym_vvar_page;
+ unsigned long sym_hpet_page;
+ unsigned long sym_VDSO32_NOTE_MASK;
+ unsigned long sym___kernel_sigreturn;
+ unsigned long sym___kernel_rt_sigreturn;
+ unsigned long sym___kernel_vsyscall;
+ unsigned long sym_VDSO32_SYSENTER_RETURN;
+};
+
+#ifdef CONFIG_X86_64
+extern const struct vdso_image vdso_image_64;
+#endif
+
+#ifdef CONFIG_X86_X32
+extern const struct vdso_image vdso_image_x32;
+#endif
+
#if defined CONFIG_X86_32 || defined CONFIG_COMPAT
-extern const char VDSO32_PRELINK[];
-
-/*
- * Given a pointer to the vDSO image, find the pointer to VDSO32_name
- * as that symbol is defined in the vDSO sources or linker script.
- */
-#define VDSO32_SYMBOL(base, name) \
-({ \
- extern const char VDSO32_##name[]; \
- (void *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
-})
+extern const struct vdso_image vdso_image_32_int80;
+#ifdef CONFIG_COMPAT
+extern const struct vdso_image vdso_image_32_syscall;
#endif
+extern const struct vdso_image vdso_image_32_sysenter;
+
+extern const struct vdso_image *selected_vdso32;
+#endif
+
+extern void __init init_vdso_image(const struct vdso_image *image);
-/*
- * These symbols are defined with the addresses in the vsyscall page.
- * See vsyscall-sigreturn.S.
- */
-extern void __user __kernel_sigreturn;
-extern void __user __kernel_rt_sigreturn;
-
-/*
- * These symbols are defined by vdso32.S to mark the bounds
- * of the ELF DSO images included therein.
- */
-extern const char vdso32_int80_start, vdso32_int80_end;
-extern const char vdso32_syscall_start, vdso32_syscall_end;
-extern const char vdso32_sysenter_start, vdso32_sysenter_end;
+#endif /* __ASSEMBLER__ */
#endif /* _ASM_X86_VDSO_H */
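A hypothetical sketch (not from this patch), assuming the sym_* members hold byte offsets of the named symbols within the mapped image: once an image has been mapped at some user address 'base', a symbol's user address is base plus its offset.

static void __user *vdso32_sigreturn_addr(void __user *base)
{
	/* e.g. the 32-bit sigreturn trampoline inside the selected vDSO */
	return base + selected_vdso32->sym___kernel_sigreturn;
}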
diff --git a/arch/x86/include/asm/vga.h b/arch/x86/include/asm/vga.h
index c4b9dc2f67c..44282fbf7bf 100644
--- a/arch/x86/include/asm/vga.h
+++ b/arch/x86/include/asm/vga.h
@@ -17,4 +17,10 @@
#define vga_readb(x) (*(x))
#define vga_writeb(x, y) (*(y) = (x))
+#ifdef CONFIG_FB_EFI
+#define __ARCH_HAS_VGA_DEFAULT_DEVICE
+extern struct pci_dev *vga_default_device(void);
+extern void vga_set_default_device(struct pci_dev *pdev);
+#endif
+
#endif /* _ASM_X86_VGA_H */
diff --git a/arch/x86/include/asm/vgtod.h b/arch/x86/include/asm/vgtod.h
index 815285bcace..3c3366c2e37 100644
--- a/arch/x86/include/asm/vgtod.h
+++ b/arch/x86/include/asm/vgtod.h
@@ -1,27 +1,73 @@
#ifndef _ASM_X86_VGTOD_H
#define _ASM_X86_VGTOD_H
-#include <asm/vsyscall.h>
+#include <linux/compiler.h>
#include <linux/clocksource.h>
+#ifdef BUILD_VDSO32_64
+typedef u64 gtod_long_t;
+#else
+typedef unsigned long gtod_long_t;
+#endif
+/*
+ * vsyscall_gtod_data will be accessed by 32-bit and 64-bit code at the same
+ * time, so be careful when modifying this structure.
+ */
struct vsyscall_gtod_data {
- seqlock_t lock;
+ unsigned seq;
+
+ int vclock_mode;
+ cycle_t cycle_last;
+ cycle_t mask;
+ u32 mult;
+ u32 shift;
/* open coded 'struct timespec' */
- time_t wall_time_sec;
- u32 wall_time_nsec;
-
- struct timezone sys_tz;
- struct { /* extract of a clocksource struct */
- int vclock_mode;
- cycle_t cycle_last;
- cycle_t mask;
- u32 mult;
- u32 shift;
- } clock;
- struct timespec wall_to_monotonic;
- struct timespec wall_time_coarse;
+ u64 wall_time_snsec;
+ gtod_long_t wall_time_sec;
+ gtod_long_t monotonic_time_sec;
+ u64 monotonic_time_snsec;
+ gtod_long_t wall_time_coarse_sec;
+ gtod_long_t wall_time_coarse_nsec;
+ gtod_long_t monotonic_time_coarse_sec;
+ gtod_long_t monotonic_time_coarse_nsec;
+
+ int tz_minuteswest;
+ int tz_dsttime;
};
extern struct vsyscall_gtod_data vsyscall_gtod_data;
+static inline unsigned gtod_read_begin(const struct vsyscall_gtod_data *s)
+{
+ unsigned ret;
+
+repeat:
+ ret = ACCESS_ONCE(s->seq);
+ if (unlikely(ret & 1)) {
+ cpu_relax();
+ goto repeat;
+ }
+ smp_rmb();
+ return ret;
+}
+
+static inline int gtod_read_retry(const struct vsyscall_gtod_data *s,
+ unsigned start)
+{
+ smp_rmb();
+ return unlikely(s->seq != start);
+}
+
+static inline void gtod_write_begin(struct vsyscall_gtod_data *s)
+{
+ ++s->seq;
+ smp_wmb();
+}
+
+static inline void gtod_write_end(struct vsyscall_gtod_data *s)
+{
+ smp_wmb();
+ ++s->seq;
+}
+
#endif /* _ASM_X86_VGTOD_H */
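For reference, a minimal reader-side sketch (not part of this patch) of how these helpers pair up: sample the sequence counter with gtod_read_begin(), copy the fields of interest, and retry if gtod_read_retry() reports that a writer raced with the read.

static inline gtod_long_t sample_wall_time_sec(const struct vsyscall_gtod_data *gtod)
{
	unsigned seq;
	gtod_long_t sec;

	do {
		seq = gtod_read_begin(gtod);	/* waits out an in-progress write */
		sec = gtod->wall_time_sec;
	} while (unlikely(gtod_read_retry(gtod, seq)));

	return sec;
}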
diff --git a/arch/x86/include/asm/virtext.h b/arch/x86/include/asm/virtext.h
index e0f9aa16358..5da71c27cc5 100644
--- a/arch/x86/include/asm/virtext.h
+++ b/arch/x86/include/asm/virtext.h
@@ -16,7 +16,6 @@
#define _ASM_X86_VIRTEX_H
#include <asm/processor.h>
-#include <asm/system.h>
#include <asm/vmx.h>
#include <asm/svm.h>
diff --git a/arch/x86/include/asm/visws/cobalt.h b/arch/x86/include/asm/visws/cobalt.h
deleted file mode 100644
index 2edb37637ea..00000000000
--- a/arch/x86/include/asm/visws/cobalt.h
+++ /dev/null
@@ -1,127 +0,0 @@
-#ifndef _ASM_X86_VISWS_COBALT_H
-#define _ASM_X86_VISWS_COBALT_H
-
-#include <asm/fixmap.h>
-
-/*
- * Cobalt SGI Visual Workstation system ASIC
- */
-
-#define CO_CPU_NUM_PHYS 0x1e00
-#define CO_CPU_TAB_PHYS (CO_CPU_NUM_PHYS + 2)
-
-#define CO_CPU_MAX 4
-
-#define CO_CPU_PHYS 0xc2000000
-#define CO_APIC_PHYS 0xc4000000
-
-/* see set_fixmap() and asm/fixmap.h */
-#define CO_CPU_VADDR (fix_to_virt(FIX_CO_CPU))
-#define CO_APIC_VADDR (fix_to_virt(FIX_CO_APIC))
-
-/* Cobalt CPU registers -- relative to CO_CPU_VADDR, use co_cpu_*() */
-#define CO_CPU_REV 0x08
-#define CO_CPU_CTRL 0x10
-#define CO_CPU_STAT 0x20
-#define CO_CPU_TIMEVAL 0x30
-
-/* CO_CPU_CTRL bits */
-#define CO_CTRL_TIMERUN 0x04 /* 0 == disabled */
-#define CO_CTRL_TIMEMASK 0x08 /* 0 == unmasked */
-
-/* CO_CPU_STATUS bits */
-#define CO_STAT_TIMEINTR 0x02 /* (r) 1 == int pend, (w) 0 == clear */
-
-/* CO_CPU_TIMEVAL value */
-#define CO_TIME_HZ 100000000 /* Cobalt core rate */
-
-/* Cobalt APIC registers -- relative to CO_APIC_VADDR, use co_apic_*() */
-#define CO_APIC_HI(n) (((n) * 0x10) + 4)
-#define CO_APIC_LO(n) ((n) * 0x10)
-#define CO_APIC_ID 0x0ffc
-
-/* CO_APIC_ID bits */
-#define CO_APIC_ENABLE 0x00000100
-
-/* CO_APIC_LO bits */
-#define CO_APIC_MASK 0x00010000 /* 0 = enabled */
-#define CO_APIC_LEVEL 0x00008000 /* 0 = edge */
-
-/*
- * Where things are physically wired to Cobalt
- * #defines with no board _<type>_<rev>_ are common to all (thus far)
- */
-#define CO_APIC_IDE0 4
-#define CO_APIC_IDE1 2 /* Only on 320 */
-
-#define CO_APIC_8259 12 /* serial, floppy, par-l-l */
-
-/* Lithium PCI Bridge A -- "the one with 82557 Ethernet" */
-#define CO_APIC_PCIA_BASE0 0 /* and 1 */ /* slot 0, line 0 */
-#define CO_APIC_PCIA_BASE123 5 /* and 6 */ /* slot 0, line 1 */
-
-#define CO_APIC_PIIX4_USB 7 /* this one is weird */
-
-/* Lithium PCI Bridge B -- "the one with PIIX4" */
-#define CO_APIC_PCIB_BASE0 8 /* and 9-12 *//* slot 0, line 0 */
-#define CO_APIC_PCIB_BASE123 13 /* 14.15 */ /* slot 0, line 1 */
-
-#define CO_APIC_VIDOUT0 16
-#define CO_APIC_VIDOUT1 17
-#define CO_APIC_VIDIN0 18
-#define CO_APIC_VIDIN1 19
-
-#define CO_APIC_LI_AUDIO 22
-
-#define CO_APIC_AS 24
-#define CO_APIC_RE 25
-
-#define CO_APIC_CPU 28 /* Timer and Cache interrupt */
-#define CO_APIC_NMI 29
-#define CO_APIC_LAST CO_APIC_NMI
-
-/*
- * This is how irqs are assigned on the Visual Workstation.
- * Legacy devices get irq's 1-15 (system clock is 0 and is CO_APIC_CPU).
- * All other devices (including PCI) go to Cobalt and are irq's 16 on up.
- */
-#define CO_IRQ_APIC0 16 /* irq of apic entry 0 */
-#define IS_CO_APIC(irq) ((irq) >= CO_IRQ_APIC0)
-#define CO_IRQ(apic) (CO_IRQ_APIC0 + (apic)) /* apic ent to irq */
-#define CO_APIC(irq) ((irq) - CO_IRQ_APIC0) /* irq to apic ent */
-#define CO_IRQ_IDE0 14 /* knowledge of... */
-#define CO_IRQ_IDE1 15 /* ... ide driver defaults! */
-#define CO_IRQ_8259 CO_IRQ(CO_APIC_8259)
-
-#ifdef CONFIG_X86_VISWS_APIC
-static inline void co_cpu_write(unsigned long reg, unsigned long v)
-{
- *((volatile unsigned long *)(CO_CPU_VADDR+reg))=v;
-}
-
-static inline unsigned long co_cpu_read(unsigned long reg)
-{
- return *((volatile unsigned long *)(CO_CPU_VADDR+reg));
-}
-
-static inline void co_apic_write(unsigned long reg, unsigned long v)
-{
- *((volatile unsigned long *)(CO_APIC_VADDR+reg))=v;
-}
-
-static inline unsigned long co_apic_read(unsigned long reg)
-{
- return *((volatile unsigned long *)(CO_APIC_VADDR+reg));
-}
-#endif
-
-extern char visws_board_type;
-
-#define VISWS_320 0
-#define VISWS_540 1
-
-extern char visws_board_rev;
-
-extern int pci_visws_init(void);
-
-#endif /* _ASM_X86_VISWS_COBALT_H */
diff --git a/arch/x86/include/asm/visws/lithium.h b/arch/x86/include/asm/visws/lithium.h
deleted file mode 100644
index a10d89bc127..00000000000
--- a/arch/x86/include/asm/visws/lithium.h
+++ /dev/null
@@ -1,53 +0,0 @@
-#ifndef _ASM_X86_VISWS_LITHIUM_H
-#define _ASM_X86_VISWS_LITHIUM_H
-
-#include <asm/fixmap.h>
-
-/*
- * Lithium is the SGI Visual Workstation I/O ASIC
- */
-
-#define LI_PCI_A_PHYS 0xfc000000 /* Enet is dev 3 */
-#define LI_PCI_B_PHYS 0xfd000000 /* PIIX4 is here */
-
-/* see set_fixmap() and asm/fixmap.h */
-#define LI_PCIA_VADDR (fix_to_virt(FIX_LI_PCIA))
-#define LI_PCIB_VADDR (fix_to_virt(FIX_LI_PCIB))
-
-/* Not a standard PCI? (not in linux/pci.h) */
-#define LI_PCI_BUSNUM 0x44 /* lo8: primary, hi8: sub */
-#define LI_PCI_INTEN 0x46
-
-/* LI_PCI_INTENT bits */
-#define LI_INTA_0 0x0001
-#define LI_INTA_1 0x0002
-#define LI_INTA_2 0x0004
-#define LI_INTA_3 0x0008
-#define LI_INTA_4 0x0010
-#define LI_INTB 0x0020
-#define LI_INTC 0x0040
-#define LI_INTD 0x0080
-
-/* More special purpose macros... */
-static inline void li_pcia_write16(unsigned long reg, unsigned short v)
-{
- *((volatile unsigned short *)(LI_PCIA_VADDR+reg))=v;
-}
-
-static inline unsigned short li_pcia_read16(unsigned long reg)
-{
- return *((volatile unsigned short *)(LI_PCIA_VADDR+reg));
-}
-
-static inline void li_pcib_write16(unsigned long reg, unsigned short v)
-{
- *((volatile unsigned short *)(LI_PCIB_VADDR+reg))=v;
-}
-
-static inline unsigned short li_pcib_read16(unsigned long reg)
-{
- return *((volatile unsigned short *)(LI_PCIB_VADDR+reg));
-}
-
-#endif /* _ASM_X86_VISWS_LITHIUM_H */
-
diff --git a/arch/x86/include/asm/visws/piix4.h b/arch/x86/include/asm/visws/piix4.h
deleted file mode 100644
index d0af4d338e7..00000000000
--- a/arch/x86/include/asm/visws/piix4.h
+++ /dev/null
@@ -1,107 +0,0 @@
-#ifndef _ASM_X86_VISWS_PIIX4_H
-#define _ASM_X86_VISWS_PIIX4_H
-
-/*
- * PIIX4 as used on SGI Visual Workstations
- */
-
-#define PIIX_PM_START 0x0F80
-
-#define SIO_GPIO_START 0x0FC0
-
-#define SIO_PM_START 0x0FC8
-
-#define PMBASE PIIX_PM_START
-#define GPIREG0 (PMBASE+0x30)
-#define GPIREG(x) (GPIREG0+((x)/8))
-#define GPIBIT(x) (1 << ((x)%8))
-
-#define PIIX_GPI_BD_ID1 18
-#define PIIX_GPI_BD_ID2 19
-#define PIIX_GPI_BD_ID3 20
-#define PIIX_GPI_BD_ID4 21
-#define PIIX_GPI_BD_REG GPIREG(PIIX_GPI_BD_ID1)
-#define PIIX_GPI_BD_MASK (GPIBIT(PIIX_GPI_BD_ID1) | \
- GPIBIT(PIIX_GPI_BD_ID2) | \
- GPIBIT(PIIX_GPI_BD_ID3) | \
- GPIBIT(PIIX_GPI_BD_ID4) )
-
-#define PIIX_GPI_BD_SHIFT (PIIX_GPI_BD_ID1 % 8)
-
-#define SIO_INDEX 0x2e
-#define SIO_DATA 0x2f
-
-#define SIO_DEV_SEL 0x7
-#define SIO_DEV_ENB 0x30
-#define SIO_DEV_MSB 0x60
-#define SIO_DEV_LSB 0x61
-
-#define SIO_GP_DEV 0x7
-
-#define SIO_GP_BASE SIO_GPIO_START
-#define SIO_GP_MSB (SIO_GP_BASE>>8)
-#define SIO_GP_LSB (SIO_GP_BASE&0xff)
-
-#define SIO_GP_DATA1 (SIO_GP_BASE+0)
-
-#define SIO_PM_DEV 0x8
-
-#define SIO_PM_BASE SIO_PM_START
-#define SIO_PM_MSB (SIO_PM_BASE>>8)
-#define SIO_PM_LSB (SIO_PM_BASE&0xff)
-#define SIO_PM_INDEX (SIO_PM_BASE+0)
-#define SIO_PM_DATA (SIO_PM_BASE+1)
-
-#define SIO_PM_FER2 0x1
-
-#define SIO_PM_GP_EN 0x80
-
-
-
-/*
- * This is the dev/reg where generating a config cycle will
- * result in a PCI special cycle.
- */
-#define SPECIAL_DEV 0xff
-#define SPECIAL_REG 0x00
-
-/*
- * PIIX4 needs to see a special cycle with the following data
- * to be convinced the processor has gone into the stop grant
- * state. PIIX4 insists on seeing this before it will power
- * down a system.
- */
-#define PIIX_SPECIAL_STOP 0x00120002
-
-#define PIIX4_RESET_PORT 0xcf9
-#define PIIX4_RESET_VAL 0x6
-
-#define PMSTS_PORT 0xf80 // 2 bytes PM Status
-#define PMEN_PORT 0xf82 // 2 bytes PM Enable
-#define PMCNTRL_PORT 0xf84 // 2 bytes PM Control
-
-#define PM_SUSPEND_ENABLE 0x2000 // start sequence to suspend state
-
-/*
- * PMSTS and PMEN I/O bit definitions.
- * (Bits are the same in both registers)
- */
-#define PM_STS_RSM (1<<15) // Resume Status
-#define PM_STS_PWRBTNOR (1<<11) // Power Button Override
-#define PM_STS_RTC (1<<10) // RTC status
-#define PM_STS_PWRBTN (1<<8) // Power Button Pressed?
-#define PM_STS_GBL (1<<5) // Global Status
-#define PM_STS_BM (1<<4) // Bus Master Status
-#define PM_STS_TMROF (1<<0) // Timer Overflow Status.
-
-/*
- * Stop clock GPI register
- */
-#define PIIX_GPIREG0 (0xf80 + 0x30)
-
-/*
- * Stop clock GPI bit in GPIREG0
- */
-#define PIIX_GPI_STPCLK 0x4 // STPCLK signal routed back in
-
-#endif /* _ASM_X86_VISWS_PIIX4_H */
diff --git a/arch/x86/include/asm/visws/sgivw.h b/arch/x86/include/asm/visws/sgivw.h
deleted file mode 100644
index 5fbf63e1003..00000000000
--- a/arch/x86/include/asm/visws/sgivw.h
+++ /dev/null
@@ -1,5 +0,0 @@
-/*
- * Frame buffer position and size:
- */
-extern unsigned long sgivwfb_mem_phys;
-extern unsigned long sgivwfb_mem_size;
diff --git a/arch/x86/include/asm/vm86.h b/arch/x86/include/asm/vm86.h
index f9303602fbc..1d8de3f3fec 100644
--- a/arch/x86/include/asm/vm86.h
+++ b/arch/x86/include/asm/vm86.h
@@ -1,133 +1,9 @@
#ifndef _ASM_X86_VM86_H
#define _ASM_X86_VM86_H
-/*
- * I'm guessing at the VIF/VIP flag usage, but hope that this is how
- * the Pentium uses them. Linux will return from vm86 mode when both
- * VIF and VIP is set.
- *
- * On a Pentium, we could probably optimize the virtual flags directly
- * in the eflags register instead of doing it "by hand" in vflags...
- *
- * Linus
- */
-
-#include <asm/processor-flags.h>
-
-#define BIOSSEG 0x0f000
-
-#define CPU_086 0
-#define CPU_186 1
-#define CPU_286 2
-#define CPU_386 3
-#define CPU_486 4
-#define CPU_586 5
-
-/*
- * Return values for the 'vm86()' system call
- */
-#define VM86_TYPE(retval) ((retval) & 0xff)
-#define VM86_ARG(retval) ((retval) >> 8)
-
-#define VM86_SIGNAL 0 /* return due to signal */
-#define VM86_UNKNOWN 1 /* unhandled GP fault
- - IO-instruction or similar */
-#define VM86_INTx 2 /* int3/int x instruction (ARG = x) */
-#define VM86_STI 3 /* sti/popf/iret instruction enabled
- virtual interrupts */
-
-/*
- * Additional return values when invoking new vm86()
- */
-#define VM86_PICRETURN 4 /* return due to pending PIC request */
-#define VM86_TRAP 6 /* return due to DOS-debugger request */
-
-/*
- * function codes when invoking new vm86()
- */
-#define VM86_PLUS_INSTALL_CHECK 0
-#define VM86_ENTER 1
-#define VM86_ENTER_NO_BYPASS 2
-#define VM86_REQUEST_IRQ 3
-#define VM86_FREE_IRQ 4
-#define VM86_GET_IRQ_BITS 5
-#define VM86_GET_AND_RESET_IRQ 6
-
-/*
- * This is the stack-layout seen by the user space program when we have
- * done a translation of "SAVE_ALL" from vm86 mode. The real kernel layout
- * is 'kernel_vm86_regs' (see below).
- */
-
-struct vm86_regs {
-/*
- * normal regs, with special meaning for the segment descriptors..
- */
- long ebx;
- long ecx;
- long edx;
- long esi;
- long edi;
- long ebp;
- long eax;
- long __null_ds;
- long __null_es;
- long __null_fs;
- long __null_gs;
- long orig_eax;
- long eip;
- unsigned short cs, __csh;
- long eflags;
- long esp;
- unsigned short ss, __ssh;
-/*
- * these are specific to v86 mode:
- */
- unsigned short es, __esh;
- unsigned short ds, __dsh;
- unsigned short fs, __fsh;
- unsigned short gs, __gsh;
-};
-
-struct revectored_struct {
- unsigned long __map[8]; /* 256 bits */
-};
-
-struct vm86_struct {
- struct vm86_regs regs;
- unsigned long flags;
- unsigned long screen_bitmap;
- unsigned long cpu_type;
- struct revectored_struct int_revectored;
- struct revectored_struct int21_revectored;
-};
-
-/*
- * flags masks
- */
-#define VM86_SCREEN_BITMAP 0x0001
-
-struct vm86plus_info_struct {
- unsigned long force_return_for_pic:1;
- unsigned long vm86dbg_active:1; /* for debugger */
- unsigned long vm86dbg_TFpendig:1; /* for debugger */
- unsigned long unused:28;
- unsigned long is_vm86pus:1; /* for vm86 internal use */
- unsigned char vm86dbg_intxxtab[32]; /* for debugger */
-};
-struct vm86plus_struct {
- struct vm86_regs regs;
- unsigned long flags;
- unsigned long screen_bitmap;
- unsigned long cpu_type;
- struct revectored_struct int_revectored;
- struct revectored_struct int21_revectored;
- struct vm86plus_info_struct vm86plus;
-};
-
-#ifdef __KERNEL__
#include <asm/ptrace.h>
+#include <uapi/asm/vm86.h>
/*
* This is the (kernel) stack-layout when we have done a "SAVE_ALL" from vm86
@@ -203,6 +79,4 @@ static inline int handle_vm86_trap(struct kernel_vm86_regs *a, long b, int c)
#endif /* CONFIG_VM86 */
-#endif /* __KERNEL__ */
-
#endif /* _ASM_X86_VM86_H */
diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index 31f180c21ce..7004d21e621 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -1,6 +1,3 @@
-#ifndef VMX_H
-#define VMX_H
-
/*
* vmx.h: VMX Architecture related definitions
* Copyright (c) 2004, Intel Corporation.
@@ -24,8 +21,12 @@
* Yaniv Kamay <yaniv@qumranet.com>
*
*/
+#ifndef VMX_H
+#define VMX_H
+
#include <linux/types.h>
+#include <uapi/asm/vmx.h>
/*
* Definitions of Primary Processor-Based VM-Execution Controls.
@@ -56,15 +57,24 @@
#define SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES 0x00000001
#define SECONDARY_EXEC_ENABLE_EPT 0x00000002
#define SECONDARY_EXEC_RDTSCP 0x00000008
+#define SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE 0x00000010
#define SECONDARY_EXEC_ENABLE_VPID 0x00000020
#define SECONDARY_EXEC_WBINVD_EXITING 0x00000040
#define SECONDARY_EXEC_UNRESTRICTED_GUEST 0x00000080
+#define SECONDARY_EXEC_APIC_REGISTER_VIRT 0x00000100
+#define SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY 0x00000200
#define SECONDARY_EXEC_PAUSE_LOOP_EXITING 0x00000400
+#define SECONDARY_EXEC_ENABLE_INVPCID 0x00001000
+#define SECONDARY_EXEC_SHADOW_VMCS 0x00004000
#define PIN_BASED_EXT_INTR_MASK 0x00000001
#define PIN_BASED_NMI_EXITING 0x00000008
#define PIN_BASED_VIRTUAL_NMIS 0x00000020
+#define PIN_BASED_VMX_PREEMPTION_TIMER 0x00000040
+#define PIN_BASED_POSTED_INTR 0x00000080
+
+#define PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR 0x00000016
#define VM_EXIT_SAVE_DEBUG_CONTROLS 0x00000002
#define VM_EXIT_HOST_ADDR_SPACE_SIZE 0x00000200
@@ -75,6 +85,9 @@
#define VM_EXIT_SAVE_IA32_EFER 0x00100000
#define VM_EXIT_LOAD_IA32_EFER 0x00200000
#define VM_EXIT_SAVE_VMX_PREEMPTION_TIMER 0x00400000
+#define VM_EXIT_CLEAR_BNDCFGS 0x00800000
+
+#define VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR 0x00036dff
#define VM_ENTRY_LOAD_DEBUG_CONTROLS 0x00000002
#define VM_ENTRY_IA32E_MODE 0x00000200
@@ -83,10 +96,18 @@
#define VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL 0x00002000
#define VM_ENTRY_LOAD_IA32_PAT 0x00004000
#define VM_ENTRY_LOAD_IA32_EFER 0x00008000
+#define VM_ENTRY_LOAD_BNDCFGS 0x00010000
+
+#define VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR 0x000011ff
+
+#define VMX_MISC_PREEMPTION_TIMER_RATE_MASK 0x0000001f
+#define VMX_MISC_SAVE_EFER_LMA 0x00000020
+#define VMX_MISC_ACTIVITY_HLT 0x00000040
/* VMCS Encodings */
enum vmcs_field {
VIRTUAL_PROCESSOR_ID = 0x00000000,
+ POSTED_INTR_NV = 0x00000002,
GUEST_ES_SELECTOR = 0x00000800,
GUEST_CS_SELECTOR = 0x00000802,
GUEST_SS_SELECTOR = 0x00000804,
@@ -95,6 +116,7 @@ enum vmcs_field {
GUEST_GS_SELECTOR = 0x0000080a,
GUEST_LDTR_SELECTOR = 0x0000080c,
GUEST_TR_SELECTOR = 0x0000080e,
+ GUEST_INTR_STATUS = 0x00000810,
HOST_ES_SELECTOR = 0x00000c00,
HOST_CS_SELECTOR = 0x00000c02,
HOST_SS_SELECTOR = 0x00000c04,
@@ -120,8 +142,20 @@ enum vmcs_field {
VIRTUAL_APIC_PAGE_ADDR_HIGH = 0x00002013,
APIC_ACCESS_ADDR = 0x00002014,
APIC_ACCESS_ADDR_HIGH = 0x00002015,
+ POSTED_INTR_DESC_ADDR = 0x00002016,
+ POSTED_INTR_DESC_ADDR_HIGH = 0x00002017,
EPT_POINTER = 0x0000201a,
EPT_POINTER_HIGH = 0x0000201b,
+ EOI_EXIT_BITMAP0 = 0x0000201c,
+ EOI_EXIT_BITMAP0_HIGH = 0x0000201d,
+ EOI_EXIT_BITMAP1 = 0x0000201e,
+ EOI_EXIT_BITMAP1_HIGH = 0x0000201f,
+ EOI_EXIT_BITMAP2 = 0x00002020,
+ EOI_EXIT_BITMAP2_HIGH = 0x00002021,
+ EOI_EXIT_BITMAP3 = 0x00002022,
+ EOI_EXIT_BITMAP3_HIGH = 0x00002023,
+ VMREAD_BITMAP = 0x00002026,
+ VMWRITE_BITMAP = 0x00002028,
GUEST_PHYSICAL_ADDRESS = 0x00002400,
GUEST_PHYSICAL_ADDRESS_HIGH = 0x00002401,
VMCS_LINK_POINTER = 0x00002800,
@@ -142,6 +176,8 @@ enum vmcs_field {
GUEST_PDPTR2_HIGH = 0x0000280f,
GUEST_PDPTR3 = 0x00002810,
GUEST_PDPTR3_HIGH = 0x00002811,
+ GUEST_BNDCFGS = 0x00002812,
+ GUEST_BNDCFGS_HIGH = 0x00002813,
HOST_IA32_PAT = 0x00002c00,
HOST_IA32_PAT_HIGH = 0x00002c01,
HOST_IA32_EFER = 0x00002c02,
@@ -195,6 +231,7 @@ enum vmcs_field {
GUEST_INTERRUPTIBILITY_INFO = 0x00004824,
GUEST_ACTIVITY_STATE = 0X00004826,
GUEST_SYSENTER_CS = 0x0000482A,
+ VMX_PREEMPTION_TIMER_VALUE = 0x0000482E,
HOST_IA32_SYSENTER_CS = 0x00004c00,
CR0_GUEST_HOST_MASK = 0x00006000,
CR4_GUEST_HOST_MASK = 0x00006002,
@@ -240,48 +277,6 @@ enum vmcs_field {
HOST_RIP = 0x00006c16,
};
-#define VMX_EXIT_REASONS_FAILED_VMENTRY 0x80000000
-
-#define EXIT_REASON_EXCEPTION_NMI 0
-#define EXIT_REASON_EXTERNAL_INTERRUPT 1
-#define EXIT_REASON_TRIPLE_FAULT 2
-
-#define EXIT_REASON_PENDING_INTERRUPT 7
-#define EXIT_REASON_NMI_WINDOW 8
-#define EXIT_REASON_TASK_SWITCH 9
-#define EXIT_REASON_CPUID 10
-#define EXIT_REASON_HLT 12
-#define EXIT_REASON_INVD 13
-#define EXIT_REASON_INVLPG 14
-#define EXIT_REASON_RDPMC 15
-#define EXIT_REASON_RDTSC 16
-#define EXIT_REASON_VMCALL 18
-#define EXIT_REASON_VMCLEAR 19
-#define EXIT_REASON_VMLAUNCH 20
-#define EXIT_REASON_VMPTRLD 21
-#define EXIT_REASON_VMPTRST 22
-#define EXIT_REASON_VMREAD 23
-#define EXIT_REASON_VMRESUME 24
-#define EXIT_REASON_VMWRITE 25
-#define EXIT_REASON_VMOFF 26
-#define EXIT_REASON_VMON 27
-#define EXIT_REASON_CR_ACCESS 28
-#define EXIT_REASON_DR_ACCESS 29
-#define EXIT_REASON_IO_INSTRUCTION 30
-#define EXIT_REASON_MSR_READ 31
-#define EXIT_REASON_MSR_WRITE 32
-#define EXIT_REASON_INVALID_STATE 33
-#define EXIT_REASON_MWAIT_INSTRUCTION 36
-#define EXIT_REASON_MONITOR_INSTRUCTION 39
-#define EXIT_REASON_PAUSE_INSTRUCTION 40
-#define EXIT_REASON_MCE_DURING_VMENTRY 41
-#define EXIT_REASON_TPR_BELOW_THRESHOLD 43
-#define EXIT_REASON_APIC_ACCESS 44
-#define EXIT_REASON_EPT_VIOLATION 48
-#define EXIT_REASON_EPT_MISCONFIG 49
-#define EXIT_REASON_WBINVD 54
-#define EXIT_REASON_XSETBV 55
-
/*
* Interruption-information format
*/
@@ -386,9 +381,9 @@ enum vmcs_field {
#define AR_RESERVD_MASK 0xfffe0f00
-#define TSS_PRIVATE_MEMSLOT (KVM_MEMORY_SLOTS + 0)
-#define APIC_ACCESS_PAGE_PRIVATE_MEMSLOT (KVM_MEMORY_SLOTS + 1)
-#define IDENTITY_PAGETABLE_PRIVATE_MEMSLOT (KVM_MEMORY_SLOTS + 2)
+#define TSS_PRIVATE_MEMSLOT (KVM_USER_MEM_SLOTS + 0)
+#define APIC_ACCESS_PAGE_PRIVATE_MEMSLOT (KVM_USER_MEM_SLOTS + 1)
+#define IDENTITY_PAGETABLE_PRIVATE_MEMSLOT (KVM_USER_MEM_SLOTS + 2)
#define VMX_NR_VPIDS (1 << 16)
#define VMX_VPID_EXTENT_SINGLE_CONTEXT 1
@@ -397,6 +392,7 @@ enum vmcs_field {
#define VMX_EPT_EXTENT_INDIVIDUAL_ADDR 0
#define VMX_EPT_EXTENT_CONTEXT 1
#define VMX_EPT_EXTENT_GLOBAL 2
+#define VMX_EPT_EXTENT_SHIFT 24
#define VMX_EPT_EXECUTE_ONLY_BIT (1ull)
#define VMX_EPT_PAGE_WALK_4_BIT (1ull << 6)
@@ -404,7 +400,8 @@ enum vmcs_field {
#define VMX_EPTP_WB_BIT (1ull << 14)
#define VMX_EPT_2MB_PAGE_BIT (1ull << 16)
#define VMX_EPT_1GB_PAGE_BIT (1ull << 17)
-#define VMX_EPT_EXTENT_INDIVIDUAL_BIT (1ull << 24)
+#define VMX_EPT_INVEPT_BIT (1ull << 20)
+#define VMX_EPT_AD_BIT (1ull << 21)
#define VMX_EPT_EXTENT_CONTEXT_BIT (1ull << 25)
#define VMX_EPT_EXTENT_GLOBAL_BIT (1ull << 26)
@@ -415,11 +412,14 @@ enum vmcs_field {
#define VMX_EPT_MAX_GAW 0x4
#define VMX_EPT_MT_EPTE_SHIFT 3
#define VMX_EPT_GAW_EPTP_SHIFT 3
+#define VMX_EPT_AD_ENABLE_BIT (1ull << 6)
#define VMX_EPT_DEFAULT_MT 0x6ull
#define VMX_EPT_READABLE_MASK 0x1ull
#define VMX_EPT_WRITABLE_MASK 0x2ull
#define VMX_EPT_EXECUTABLE_MASK 0x4ull
#define VMX_EPT_IPAT_BIT (1ull << 6)
+#define VMX_EPT_ACCESS_BIT (1ull << 8)
+#define VMX_EPT_DIRTY_BIT (1ull << 9)
#define VMX_EPT_IDENTITY_PAGETABLE_ADDR 0xfffbc000ul
diff --git a/arch/x86/include/asm/vsyscall.h b/arch/x86/include/asm/vsyscall.h
index eaea1d31f75..2a46ca720af 100644
--- a/arch/x86/include/asm/vsyscall.h
+++ b/arch/x86/include/asm/vsyscall.h
@@ -1,20 +1,8 @@
#ifndef _ASM_X86_VSYSCALL_H
#define _ASM_X86_VSYSCALL_H
-enum vsyscall_num {
- __NR_vgettimeofday,
- __NR_vtime,
- __NR_vgetcpu,
-};
-
-#define VSYSCALL_START (-10UL << 20)
-#define VSYSCALL_SIZE 1024
-#define VSYSCALL_END (-2UL << 20)
-#define VSYSCALL_MAPPED_PAGES 1
-#define VSYSCALL_ADDR(vsyscall_nr) (VSYSCALL_START+VSYSCALL_SIZE*(vsyscall_nr))
-
-#ifdef __KERNEL__
#include <linux/seqlock.h>
+#include <uapi/asm/vsyscall.h>
#define VGETCPU_RDTSCP 1
#define VGETCPU_LSL 2
@@ -33,6 +21,24 @@ extern void map_vsyscall(void);
*/
extern bool emulate_vsyscall(struct pt_regs *regs, unsigned long address);
-#endif /* __KERNEL__ */
+#ifdef CONFIG_X86_64
+
+#define VGETCPU_CPU_MASK 0xfff
+
+static inline unsigned int __getcpu(void)
+{
+ unsigned int p;
+
+ if (VVAR(vgetcpu_mode) == VGETCPU_RDTSCP) {
+ /* Load per CPU data from RDTSCP */
+ native_read_tscp(&p);
+ } else {
+ /* Load per CPU data from GDT */
+ asm("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
+ }
+
+ return p;
+}
+#endif /* CONFIG_X86_64 */
#endif /* _ASM_X86_VSYSCALL_H */
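A small decoding sketch (an assumption based on VGETCPU_CPU_MASK above: the CPU number lives in the low 12 bits of the returned value and the NUMA node in the bits above them):

static inline void getcpu_decode(unsigned *cpu, unsigned *node)
{
	unsigned p = __getcpu();

	if (cpu)
		*cpu = p & VGETCPU_CPU_MASK;
	if (node)
		*node = p >> 12;
}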
diff --git a/arch/x86/include/asm/vvar.h b/arch/x86/include/asm/vvar.h
index de656ac2af4..5d2b9ad2c6d 100644
--- a/arch/x86/include/asm/vvar.h
+++ b/arch/x86/include/asm/vvar.h
@@ -16,8 +16,8 @@
* you mess up, the linker will catch it.)
*/
-/* Base address of vvars. This is not ABI. */
-#define VVAR_ADDRESS (-10*1024*1024 - 4096)
+#ifndef _ASM_X86_VVAR_H
+#define _ASM_X86_VVAR_H
#if defined(__VVAR_KERNEL_LDS)
@@ -29,15 +29,16 @@
#else
+extern char __vvar_page;
+
#define DECLARE_VVAR(offset, type, name) \
- static type const * const vvaraddr_ ## name = \
- (void *)(VVAR_ADDRESS + (offset));
+ extern type vvar_ ## name __attribute__((visibility("hidden")));
+
+#define VVAR(name) (vvar_ ## name)
#define DEFINE_VVAR(type, name) \
type name \
- __attribute__((section(".vvar_" #name), aligned(16)))
-
-#define VVAR(name) (*vvaraddr_ ## name)
+ __attribute__((section(".vvar_" #name), aligned(16))) __visible
#endif
@@ -48,3 +49,5 @@ DECLARE_VVAR(16, int, vgetcpu_mode)
DECLARE_VVAR(128, struct vsyscall_gtod_data, vsyscall_gtod_data)
#undef DECLARE_VVAR
+
+#endif
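Usage sketch (mirroring the __getcpu() helper in vsyscall.h above, and assuming the vDSO linker script provides the vvar_* symbols): vDSO-side code reads a variable through VVAR(), while the kernel defines the backing storage with DEFINE_VVAR().

/* vDSO side */
static inline int vdso_vgetcpu_mode(void)
{
	return VVAR(vgetcpu_mode);
}

/* kernel side (e.g. in the vsyscall setup code) */
DEFINE_VVAR(int, vgetcpu_mode);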
diff --git a/arch/x86/include/asm/word-at-a-time.h b/arch/x86/include/asm/word-at-a-time.h
new file mode 100644
index 00000000000..5b238981542
--- /dev/null
+++ b/arch/x86/include/asm/word-at-a-time.h
@@ -0,0 +1,105 @@
+#ifndef _ASM_WORD_AT_A_TIME_H
+#define _ASM_WORD_AT_A_TIME_H
+
+#include <linux/kernel.h>
+
+/*
+ * This is largely generic for little-endian machines, but the
+ * optimal byte mask counting is probably going to be something
+ * that is architecture-specific. If you have a reliably fast
+ * bit count instruction, that might be better than the multiply
+ * and shift, for example.
+ */
+struct word_at_a_time {
+ const unsigned long one_bits, high_bits;
+};
+
+#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
+
+#ifdef CONFIG_64BIT
+
+/*
+ * Jan Achrenius on G+: microoptimized version of
+ * the simpler "(mask & ONEBYTES) * ONEBYTES >> 56"
+ * that works for the bytemasks without having to
+ * mask them first.
+ */
+static inline long count_masked_bytes(unsigned long mask)
+{
+ return mask*0x0001020304050608ul >> 56;
+}
+
+#else /* 32-bit case */
+
+/* Carl Chatfield / Jan Achrenius G+ version for 32-bit */
+static inline long count_masked_bytes(long mask)
+{
+ /* (000000 0000ff 00ffff ffffff) -> ( 1 1 2 3 ) */
+ long a = (0x0ff0001+mask) >> 23;
+ /* Fix the 1 for 00 case */
+ return a & mask;
+}
+
+#endif
+
+/* Return nonzero if it has a zero */
+static inline unsigned long has_zero(unsigned long a, unsigned long *bits, const struct word_at_a_time *c)
+{
+ unsigned long mask = ((a - c->one_bits) & ~a) & c->high_bits;
+ *bits = mask;
+ return mask;
+}
+
+static inline unsigned long prep_zero_mask(unsigned long a, unsigned long bits, const struct word_at_a_time *c)
+{
+ return bits;
+}
+
+static inline unsigned long create_zero_mask(unsigned long bits)
+{
+ bits = (bits - 1) & ~bits;
+ return bits >> 7;
+}
+
+/* The mask we created is directly usable as a bytemask */
+#define zero_bytemask(mask) (mask)
+
+static inline unsigned long find_zero(unsigned long mask)
+{
+ return count_masked_bytes(mask);
+}
+
+/*
+ * Load an unaligned word from kernel space.
+ *
+ * In the (very unlikely) case of the word being a page-crosser
+ * and the next page not being mapped, take the exception and
+ * return zeroes in the non-existent part.
+ */
+static inline unsigned long load_unaligned_zeropad(const void *addr)
+{
+ unsigned long ret, dummy;
+
+ asm(
+ "1:\tmov %2,%0\n"
+ "2:\n"
+ ".section .fixup,\"ax\"\n"
+ "3:\t"
+ "lea %2,%1\n\t"
+ "and %3,%1\n\t"
+ "mov (%1),%0\n\t"
+ "leal %2,%%ecx\n\t"
+ "andl %4,%%ecx\n\t"
+ "shll $3,%%ecx\n\t"
+ "shr %%cl,%0\n\t"
+ "jmp 2b\n"
+ ".previous\n"
+ _ASM_EXTABLE(1b, 3b)
+ :"=&r" (ret),"=&c" (dummy)
+ :"m" (*(unsigned long *)addr),
+ "i" (-sizeof(unsigned long)),
+ "i" (sizeof(unsigned long)-1));
+ return ret;
+}
+
+#endif /* _ASM_WORD_AT_A_TIME_H */
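A minimal sketch of the intended use (not part of this patch): scan a string one word at a time, using load_unaligned_zeropad() so a possibly page-crossing load is safe, and the helpers above to locate the terminating NUL byte.

static inline size_t wordwise_strlen(const char *s)
{
	const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
	size_t len = 0;

	for (;;) {
		unsigned long v, bits;

		v = load_unaligned_zeropad(s + len);
		if (has_zero(v, &bits, &constants)) {
			bits = prep_zero_mask(v, bits, &constants);
			bits = create_zero_mask(bits);
			return len + find_zero(bits);
		}
		len += sizeof(unsigned long);
	}
}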
diff --git a/arch/x86/include/asm/x2apic.h b/arch/x86/include/asm/x2apic.h
index 6bf5b8e478c..f90f0a587c6 100644
--- a/arch/x86/include/asm/x2apic.h
+++ b/arch/x86/include/asm/x2apic.h
@@ -9,13 +9,9 @@
#include <asm/ipi.h>
#include <linux/cpumask.h>
-/*
- * Need to use more than cpu 0, because we need more vectors
- * when MSI-X are used.
- */
-static const struct cpumask *x2apic_target_cpus(void)
+static int x2apic_apic_id_valid(int apicid)
{
- return cpu_online_mask;
+ return 1;
}
static int x2apic_apic_id_registered(void)
@@ -23,15 +19,6 @@ static int x2apic_apic_id_registered(void)
return 1;
}
-/*
- * For now each logical cpu is in its own vector allocation domain.
- */
-static void x2apic_vector_allocation_domain(int cpu, struct cpumask *retmask)
-{
- cpumask_clear(retmask);
- cpumask_set_cpu(cpu, retmask);
-}
-
static void
__x2apic_send_IPI_dest(unsigned int apicid, int vector, unsigned int dest)
{
diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
index 517d4767ffd..e45e4da96bf 100644
--- a/arch/x86/include/asm/x86_init.h
+++ b/arch/x86/include/asm/x86_init.h
@@ -69,24 +69,14 @@ struct x86_init_oem {
};
/**
- * struct x86_init_mapping - platform specific initial kernel pagetable setup
- * @pagetable_reserve: reserve a range of addresses for kernel pagetable usage
- *
- * For more details on the purpose of this hook, look in
- * init_memory_mapping and the commit that added it.
- */
-struct x86_init_mapping {
- void (*pagetable_reserve)(u64 start, u64 end);
-};
-
-/**
* struct x86_init_paging - platform specific paging functions
- * @pagetable_setup_start: platform specific pre paging_init() call
- * @pagetable_setup_done: platform specific post paging_init() call
+ * @pagetable_init: platform specific paging initialization call to set up
+ * the kernel pagetables and prepare accessor functions.
+ * Callback must call paging_init(). Called once after the
+ * direct mapping for phys memory is available.
*/
struct x86_init_paging {
- void (*pagetable_setup_start)(pgd_t *base);
- void (*pagetable_setup_done)(pgd_t *base);
+ void (*pagetable_init)(void);
};
/**
@@ -135,7 +125,6 @@ struct x86_init_ops {
struct x86_init_mpparse mpparse;
struct x86_init_irqs irqs;
struct x86_init_oem oem;
- struct x86_init_mapping mapping;
struct x86_init_paging paging;
struct x86_init_timers timers;
struct x86_init_iommu iommu;
@@ -145,50 +134,86 @@ struct x86_init_ops {
/**
* struct x86_cpuinit_ops - platform specific cpu hotplug setups
* @setup_percpu_clockev: set up the per cpu clock event device
+ * @early_percpu_clock_init: early init of the per cpu clock event device
*/
struct x86_cpuinit_ops {
void (*setup_percpu_clockev)(void);
+ void (*early_percpu_clock_init)(void);
void (*fixup_cpu_id)(struct cpuinfo_x86 *c, int node);
};
+struct timespec;
+
/**
* struct x86_platform_ops - platform specific runtime functions
* @calibrate_tsc: calibrate TSC
- * @wallclock_init: init the wallclock device
* @get_wallclock: get time from HW clock like RTC etc.
* @set_wallclock: set time back to HW clock
* @is_untracked_pat_range exclude from PAT logic
* @nmi_init enable NMI on cpus
* @i8042_detect pre-detect if i8042 controller exists
+ * @save_sched_clock_state: save state for sched_clock() on suspend
+ * @restore_sched_clock_state: restore state for sched_clock() on resume
+ * @apic_post_init: adjust apic if needed
*/
struct x86_platform_ops {
unsigned long (*calibrate_tsc)(void);
- void (*wallclock_init)(void);
- unsigned long (*get_wallclock)(void);
- int (*set_wallclock)(unsigned long nowtime);
+ void (*get_wallclock)(struct timespec *ts);
+ int (*set_wallclock)(const struct timespec *ts);
void (*iommu_shutdown)(void);
bool (*is_untracked_pat_range)(u64 start, u64 end);
void (*nmi_init)(void);
unsigned char (*get_nmi_reason)(void);
int (*i8042_detect)(void);
+ void (*save_sched_clock_state)(void);
+ void (*restore_sched_clock_state)(void);
+ void (*apic_post_init)(void);
};
struct pci_dev;
+struct msi_msg;
+struct msi_desc;
struct x86_msi_ops {
int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type);
+ void (*compose_msi_msg)(struct pci_dev *dev, unsigned int irq,
+ unsigned int dest, struct msi_msg *msg,
+ u8 hpet_id);
void (*teardown_msi_irq)(unsigned int irq);
void (*teardown_msi_irqs)(struct pci_dev *dev);
- void (*restore_msi_irqs)(struct pci_dev *dev, int irq);
+ void (*restore_msi_irqs)(struct pci_dev *dev);
+ int (*setup_hpet_msi)(unsigned int irq, unsigned int id);
+ u32 (*msi_mask_irq)(struct msi_desc *desc, u32 mask, u32 flag);
+ u32 (*msix_mask_irq)(struct msi_desc *desc, u32 flag);
+};
+
+struct IO_APIC_route_entry;
+struct io_apic_irq_attr;
+struct irq_data;
+struct cpumask;
+
+struct x86_io_apic_ops {
+ void (*init) (void);
+ unsigned int (*read) (unsigned int apic, unsigned int reg);
+ void (*write) (unsigned int apic, unsigned int reg, unsigned int value);
+ void (*modify) (unsigned int apic, unsigned int reg, unsigned int value);
+ void (*disable)(void);
+ void (*print_entries)(unsigned int apic, unsigned int nr_entries);
+ int (*set_affinity)(struct irq_data *data,
+ const struct cpumask *mask,
+ bool force);
+ int (*setup_entry)(int irq, struct IO_APIC_route_entry *entry,
+ unsigned int destination, int vector,
+ struct io_apic_irq_attr *attr);
+ void (*eoi_ioapic_pin)(int apic, int pin, int vector);
};
extern struct x86_init_ops x86_init;
extern struct x86_cpuinit_ops x86_cpuinit;
extern struct x86_platform_ops x86_platform;
extern struct x86_msi_ops x86_msi;
-
+extern struct x86_io_apic_ops x86_io_apic_ops;
extern void x86_init_noop(void);
extern void x86_init_uint_noop(unsigned int unused);
-extern void x86_default_fixup_cpu_id(struct cpuinfo_x86 *c, int node);
#endif
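Hypothetical platform code (the myplat_* names are made up for illustration): a platform overrides only the hooks it cares about during early setup and leaves the remaining x86_init/x86_platform defaults in place.

static void __init myplat_pagetable_init(void)
{
	/* platform-specific pagetable work would go here */
	paging_init();		/* the hook is required to call paging_init() */
}

static void __init myplat_early_setup(void)
{
	x86_init.paging.pagetable_init = myplat_pagetable_init;
}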
diff --git a/arch/x86/include/asm/xen/events.h b/arch/x86/include/asm/xen/events.h
index 1df35417c41..608a79d5a46 100644
--- a/arch/x86/include/asm/xen/events.h
+++ b/arch/x86/include/asm/xen/events.h
@@ -6,6 +6,8 @@ enum ipi_vector {
XEN_CALL_FUNCTION_VECTOR,
XEN_CALL_FUNCTION_SINGLE_VECTOR,
XEN_SPIN_UNLOCK_VECTOR,
+ XEN_IRQ_WORK_VECTOR,
+ XEN_NMI_VECTOR,
XEN_NR_IPIS,
};
@@ -15,4 +17,7 @@ static inline int xen_irqs_disabled(struct pt_regs *regs)
return raw_irqs_disabled_flags(regs->flags);
}
+/* No need for a barrier -- XCHG is a barrier on x86. */
+#define xchg_xen_ulong(ptr, val) xchg((ptr), (val))
+
#endif /* _ASM_X86_XEN_EVENTS_H */
diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h
index 5728852fb90..ca08a27b90b 100644
--- a/arch/x86/include/asm/xen/hypercall.h
+++ b/arch/x86/include/asm/xen/hypercall.h
@@ -48,6 +48,7 @@
#include <xen/interface/sched.h>
#include <xen/interface/physdev.h>
#include <xen/interface/platform.h>
+#include <xen/interface/xen-mca.h>
/*
* The hypercall asms have to meet several constraints:
@@ -302,6 +303,13 @@ HYPERVISOR_set_timer_op(u64 timeout)
}
static inline int
+HYPERVISOR_mca(struct xen_mc *mc_op)
+{
+ mc_op->interface_version = XEN_MCA_INTERFACE_VERSION;
+ return _hypercall1(int, mca, mc_op);
+}
+
+static inline int
HYPERVISOR_dom0_op(struct xen_platform_op *platform_op)
{
platform_op->interface_version = XENPF_INTERFACE_VERSION;
@@ -335,7 +343,7 @@ HYPERVISOR_memory_op(unsigned int cmd, void *arg)
}
static inline int
-HYPERVISOR_multicall(void *call_list, int nr_calls)
+HYPERVISOR_multicall(void *call_list, uint32_t nr_calls)
{
return _hypercall2(int, multicall, call_list, nr_calls);
}
@@ -351,18 +359,14 @@ HYPERVISOR_update_va_mapping(unsigned long va, pte_t new_val,
return _hypercall4(int, update_va_mapping, va,
new_val.pte, new_val.pte >> 32, flags);
}
+extern int __must_check xen_event_channel_op_compat(int, void *);
static inline int
HYPERVISOR_event_channel_op(int cmd, void *arg)
{
int rc = _hypercall2(int, event_channel_op, cmd, arg);
- if (unlikely(rc == -ENOSYS)) {
- struct evtchn_op op;
- op.cmd = cmd;
- memcpy(&op.u, arg, sizeof(op.u));
- rc = _hypercall1(int, event_channel_op_compat, &op);
- memcpy(arg, &op.u, sizeof(op.u));
- }
+ if (unlikely(rc == -ENOSYS))
+ rc = xen_event_channel_op_compat(cmd, arg);
return rc;
}
@@ -378,17 +382,14 @@ HYPERVISOR_console_io(int cmd, int count, char *str)
return _hypercall3(int, console_io, cmd, count, str);
}
+extern int __must_check xen_physdev_op_compat(int, void *);
+
static inline int
HYPERVISOR_physdev_op(int cmd, void *arg)
{
int rc = _hypercall2(int, physdev_op, cmd, arg);
- if (unlikely(rc == -ENOSYS)) {
- struct physdev_op op;
- op.cmd = cmd;
- memcpy(&op.u, arg, sizeof(op.u));
- rc = _hypercall1(int, physdev_op_compat, &op);
- memcpy(arg, &op.u, sizeof(op.u));
- }
+ if (unlikely(rc == -ENOSYS))
+ rc = xen_physdev_op_compat(cmd, arg);
return rc;
}
diff --git a/arch/x86/include/asm/xen/hypervisor.h b/arch/x86/include/asm/xen/hypervisor.h
index 66d0fff1ee8..d866959e568 100644
--- a/arch/x86/include/asm/xen/hypervisor.h
+++ b/arch/x86/include/asm/xen/hypervisor.h
@@ -33,7 +33,6 @@
#ifndef _ASM_X86_XEN_HYPERVISOR_H
#define _ASM_X86_XEN_HYPERVISOR_H
-/* arch/i386/kernel/setup.c */
extern struct shared_info *HYPERVISOR_shared_info;
extern struct start_info *xen_start_info;
@@ -41,21 +40,7 @@ extern struct start_info *xen_start_info;
static inline uint32_t xen_cpuid_base(void)
{
- uint32_t base, eax, ebx, ecx, edx;
- char signature[13];
-
- for (base = 0x40000000; base < 0x40010000; base += 0x100) {
- cpuid(base, &eax, &ebx, &ecx, &edx);
- *(uint32_t *)(signature + 0) = ebx;
- *(uint32_t *)(signature + 4) = ecx;
- *(uint32_t *)(signature + 8) = edx;
- signature[12] = 0;
-
- if (!strcmp("XenVMMXenVMM", signature) && ((eax - base) >= 2))
- return base;
- }
-
- return 0;
+ return hypervisor_cpuid_base("XenVMMXenVMM", 2);
}
#ifdef CONFIG_XEN
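A rough sketch of what the new helper does (the real hypervisor_cpuid_base() lives in <asm/processor.h>; this simply mirrors the open-coded loop removed above): scan the hypervisor CPUID leaf range for the given signature and require at least 'leaves' leaves beyond the base.

static inline uint32_t hypervisor_cpuid_base_sketch(const char *sig, uint32_t leaves)
{
	uint32_t base, eax, signature[3];

	for (base = 0x40000000; base < 0x40010000; base += 0x100) {
		cpuid(base, &eax, &signature[0], &signature[1], &signature[2]);

		if (!memcmp(sig, signature, 12) && (eax - base) >= leaves)
			return base;
	}

	return 0;
}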
diff --git a/arch/x86/include/asm/xen/interface.h b/arch/x86/include/asm/xen/interface.h
index a1f2db5f117..3400dbaec3c 100644
--- a/arch/x86/include/asm/xen/interface.h
+++ b/arch/x86/include/asm/xen/interface.h
@@ -47,15 +47,26 @@
#endif
#ifndef __ASSEMBLY__
+/* Explicitly size integers that represent pfns in the public interface
+ * with Xen so that on ARM we can have one ABI that works for 32 and 64
+ * bit guests. */
+typedef unsigned long xen_pfn_t;
+#define PRI_xen_pfn "lx"
+typedef unsigned long xen_ulong_t;
+#define PRI_xen_ulong "lx"
+typedef long xen_long_t;
+#define PRI_xen_long "lx"
+
/* Guest handles for primitive C types. */
__DEFINE_GUEST_HANDLE(uchar, unsigned char);
__DEFINE_GUEST_HANDLE(uint, unsigned int);
-__DEFINE_GUEST_HANDLE(ulong, unsigned long);
DEFINE_GUEST_HANDLE(char);
DEFINE_GUEST_HANDLE(int);
-DEFINE_GUEST_HANDLE(long);
DEFINE_GUEST_HANDLE(void);
DEFINE_GUEST_HANDLE(uint64_t);
+DEFINE_GUEST_HANDLE(uint32_t);
+DEFINE_GUEST_HANDLE(xen_pfn_t);
+DEFINE_GUEST_HANDLE(xen_ulong_t);
#endif
#ifndef HYPERVISOR_VIRT_START
@@ -115,11 +126,13 @@ struct arch_shared_info {
#endif /* !__ASSEMBLY__ */
#ifdef CONFIG_X86_32
-#include "interface_32.h"
+#include <asm/xen/interface_32.h>
#else
-#include "interface_64.h"
+#include <asm/xen/interface_64.h>
#endif
+#include <asm/pvclock-abi.h>
+
#ifndef __ASSEMBLY__
/*
* The following is all CPU context. Note that the fpu_ctxt block is filled
diff --git a/arch/x86/include/asm/xen/page-coherent.h b/arch/x86/include/asm/xen/page-coherent.h
new file mode 100644
index 00000000000..7f02fe4e2c7
--- /dev/null
+++ b/arch/x86/include/asm/xen/page-coherent.h
@@ -0,0 +1,38 @@
+#ifndef _ASM_X86_XEN_PAGE_COHERENT_H
+#define _ASM_X86_XEN_PAGE_COHERENT_H
+
+#include <asm/page.h>
+#include <linux/dma-attrs.h>
+#include <linux/dma-mapping.h>
+
+static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flags,
+ struct dma_attrs *attrs)
+{
+ void *vstart = (void*)__get_free_pages(flags, get_order(size));
+ *dma_handle = virt_to_phys(vstart);
+ return vstart;
+}
+
+static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
+ void *cpu_addr, dma_addr_t dma_handle,
+ struct dma_attrs *attrs)
+{
+ free_pages((unsigned long) cpu_addr, get_order(size));
+}
+
+static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
+ unsigned long offset, size_t size, enum dma_data_direction dir,
+ struct dma_attrs *attrs) { }
+
+static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
+ size_t size, enum dma_data_direction dir,
+ struct dma_attrs *attrs) { }
+
+static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
+ dma_addr_t handle, size_t size, enum dma_data_direction dir) { }
+
+static inline void xen_dma_sync_single_for_device(struct device *hwdev,
+ dma_addr_t handle, size_t size, enum dma_data_direction dir) { }
+
+#endif /* _ASM_X86_XEN_PAGE_COHERENT_H */
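/*
 * Minimal usage sketch, not part of the diff: on x86 the helpers above are
 * plain free-page allocations with the physical address reported as the DMA
 * handle, so a caller pairs them like this (names here are illustrative).
 */
static inline void *page_coherent_alloc_example(struct device *dev, size_t size,
						dma_addr_t *handle)
{
	/* *handle receives virt_to_phys() of the returned buffer */
	return xen_alloc_coherent_pages(dev, size, handle, GFP_KERNEL, NULL);
}

static inline void page_coherent_free_example(struct device *dev, size_t size,
					      void *buf, dma_addr_t handle)
{
	xen_free_coherent_pages(dev, size, buf, handle, NULL);
}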
diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
index c34f96c2f7a..c949923a566 100644
--- a/arch/x86/include/asm/xen/page.h
+++ b/arch/x86/include/asm/xen/page.h
@@ -44,13 +44,22 @@ extern unsigned long machine_to_phys_nr;
extern unsigned long get_phys_to_machine(unsigned long pfn);
extern bool set_phys_to_machine(unsigned long pfn, unsigned long mfn);
+extern bool __init early_set_phys_to_machine(unsigned long pfn, unsigned long mfn);
extern bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
extern unsigned long set_phys_range_identity(unsigned long pfn_s,
unsigned long pfn_e);
+extern int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
+ struct gnttab_map_grant_ref *kmap_ops,
+ struct page **pages, unsigned int count);
extern int m2p_add_override(unsigned long mfn, struct page *page,
struct gnttab_map_grant_ref *kmap_op);
-extern int m2p_remove_override(struct page *page, bool clear_pte);
+extern int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
+ struct gnttab_map_grant_ref *kmap_ops,
+ struct page **pages, unsigned int count);
+extern int m2p_remove_override(struct page *page,
+ struct gnttab_map_grant_ref *kmap_op,
+ unsigned long mfn);
extern struct page *m2p_find_override(unsigned long mfn);
extern unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn);
@@ -77,30 +86,38 @@ static inline int phys_to_machine_mapping_valid(unsigned long pfn)
return get_phys_to_machine(pfn) != INVALID_P2M_ENTRY;
}
-static inline unsigned long mfn_to_pfn(unsigned long mfn)
+static inline unsigned long mfn_to_pfn_no_overrides(unsigned long mfn)
{
unsigned long pfn;
- int ret = 0;
+ int ret;
if (xen_feature(XENFEAT_auto_translated_physmap))
return mfn;
- if (unlikely(mfn >= machine_to_phys_nr)) {
- pfn = ~0;
- goto try_override;
- }
- pfn = 0;
+ if (unlikely(mfn >= machine_to_phys_nr))
+ return ~0;
+
/*
* The array access can fail (e.g., device space beyond end of RAM).
* In such cases it doesn't matter what we return (we return garbage),
* but we must handle the fault without crashing!
*/
ret = __get_user(pfn, &machine_to_phys_mapping[mfn]);
-try_override:
- /* ret might be < 0 if there are no entries in the m2p for mfn */
if (ret < 0)
- pfn = ~0;
- else if (get_phys_to_machine(pfn) != mfn)
+ return ~0;
+
+ return pfn;
+}
+
+static inline unsigned long mfn_to_pfn(unsigned long mfn)
+{
+ unsigned long pfn;
+
+ if (xen_feature(XENFEAT_auto_translated_physmap))
+ return mfn;
+
+ pfn = mfn_to_pfn_no_overrides(mfn);
+ if (get_phys_to_machine(pfn) != mfn) {
/*
* If this appears to be a foreign mfn (because the pfn
* doesn't map back to the mfn), then check the local override
@@ -109,8 +126,9 @@ try_override:
* m2p_find_override_pfn returns ~0 if it doesn't find anything.
*/
pfn = m2p_find_override_pfn(mfn, ~0);
+ }
- /*
+ /*
* pfn is ~0 if there are no entries in the m2p for mfn or if the
* entry doesn't map back to the mfn and m2p_override doesn't have a
* valid entry for it.
@@ -156,7 +174,12 @@ static inline xpaddr_t machine_to_phys(xmaddr_t machine)
*/
static inline unsigned long mfn_to_local_pfn(unsigned long mfn)
{
- unsigned long pfn = mfn_to_pfn(mfn);
+ unsigned long pfn;
+
+ if (xen_feature(XENFEAT_auto_translated_physmap))
+ return mfn;
+
+ pfn = mfn_to_pfn(mfn);
if (get_phys_to_machine(pfn) != mfn)
return -1; /* force !pfn_valid() */
return pfn;
@@ -210,4 +233,7 @@ unsigned long arbitrary_virt_to_mfn(void *vaddr);
void make_lowmem_page_readonly(void *vaddr);
void make_lowmem_page_readwrite(void *vaddr);
+#define xen_remap(cookie, size) ioremap((cookie), (size))
+#define xen_unmap(cookie) iounmap((cookie))
+
#endif /* _ASM_X86_XEN_PAGE_H */
diff --git a/arch/x86/include/asm/xen/swiotlb-xen.h b/arch/x86/include/asm/xen/swiotlb-xen.h
index 1be1ab7d6a4..ee52fcac6f7 100644
--- a/arch/x86/include/asm/xen/swiotlb-xen.h
+++ b/arch/x86/include/asm/xen/swiotlb-xen.h
@@ -5,10 +5,12 @@
extern int xen_swiotlb;
extern int __init pci_xen_swiotlb_detect(void);
extern void __init pci_xen_swiotlb_init(void);
+extern int pci_xen_swiotlb_init_late(void);
#else
#define xen_swiotlb (0)
static inline int __init pci_xen_swiotlb_detect(void) { return 0; }
static inline void __init pci_xen_swiotlb_init(void) { }
+static inline int pci_xen_swiotlb_init_late(void) { return -ENXIO; }
#endif
#endif /* _ASM_X86_SWIOTLB_XEN_H */
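/*
 * Usage sketch, assumption only (not part of the diff): the new
 * pci_xen_swiotlb_init_late() hook lets code that only discovers after boot
 * that it needs the Xen software IOTLB bring it up on demand; the stub above
 * keeps such callers working (returning -ENXIO) when Xen swiotlb support is
 * compiled out.
 */
static inline int example_late_swiotlb_setup(void)
{
	int rc = pci_xen_swiotlb_init_late();	/* 0 on success */

	if (rc)
		pr_warn("Xen swiotlb unavailable (%d)\n", rc);	/* e.g. -ENXIO */
	return rc;
}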
diff --git a/arch/x86/include/asm/xor.h b/arch/x86/include/asm/xor.h
index 7fcf6f3dbcc..d8829751b3f 100644
--- a/arch/x86/include/asm/xor.h
+++ b/arch/x86/include/asm/xor.h
@@ -1,10 +1,499 @@
#ifdef CONFIG_KMEMCHECK
/* kmemcheck doesn't handle MMX/SSE/SSE2 instructions */
# include <asm-generic/xor.h>
-#else
+#elif !defined(_ASM_X86_XOR_H)
+#define _ASM_X86_XOR_H
+
+/*
+ * Optimized RAID-5 checksumming functions for SSE.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * You should have received a copy of the GNU General Public License
+ * (for example /usr/src/linux/COPYING); if not, write to the Free
+ * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/*
+ * Cache avoiding checksumming functions utilizing KNI instructions
+ * Copyright (C) 1999 Zach Brown (with obvious credit due Ingo)
+ */
+
+/*
+ * Based on
+ * High-speed RAID5 checksumming functions utilizing SSE instructions.
+ * Copyright (C) 1998 Ingo Molnar.
+ */
+
+/*
+ * x86-64 changes / gcc fixes from Andi Kleen.
+ * Copyright 2002 Andi Kleen, SuSE Labs.
+ *
+ * This hasn't been optimized for the hammer yet, but there are likely
+ * no advantages to be gotten from x86-64 here anyways.
+ */
+
+#include <asm/i387.h>
+
#ifdef CONFIG_X86_32
-# include "xor_32.h"
+/* reduce register pressure */
+# define XOR_CONSTANT_CONSTRAINT "i"
#else
-# include "xor_64.h"
+# define XOR_CONSTANT_CONSTRAINT "re"
#endif
+
+#define OFFS(x) "16*("#x")"
+#define PF_OFFS(x) "256+16*("#x")"
+#define PF0(x) " prefetchnta "PF_OFFS(x)"(%[p1]) ;\n"
+#define LD(x, y) " movaps "OFFS(x)"(%[p1]), %%xmm"#y" ;\n"
+#define ST(x, y) " movaps %%xmm"#y", "OFFS(x)"(%[p1]) ;\n"
+#define PF1(x) " prefetchnta "PF_OFFS(x)"(%[p2]) ;\n"
+#define PF2(x) " prefetchnta "PF_OFFS(x)"(%[p3]) ;\n"
+#define PF3(x) " prefetchnta "PF_OFFS(x)"(%[p4]) ;\n"
+#define PF4(x) " prefetchnta "PF_OFFS(x)"(%[p5]) ;\n"
+#define XO1(x, y) " xorps "OFFS(x)"(%[p2]), %%xmm"#y" ;\n"
+#define XO2(x, y) " xorps "OFFS(x)"(%[p3]), %%xmm"#y" ;\n"
+#define XO3(x, y) " xorps "OFFS(x)"(%[p4]), %%xmm"#y" ;\n"
+#define XO4(x, y) " xorps "OFFS(x)"(%[p5]), %%xmm"#y" ;\n"
+#define NOP(x)
+
+#define BLK64(pf, op, i) \
+ pf(i) \
+ op(i, 0) \
+ op(i + 1, 1) \
+ op(i + 2, 2) \
+ op(i + 3, 3)
+
+static void
+xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
+{
+ unsigned long lines = bytes >> 8;
+
+ kernel_fpu_begin();
+
+ asm volatile(
+#undef BLOCK
+#define BLOCK(i) \
+ LD(i, 0) \
+ LD(i + 1, 1) \
+ PF1(i) \
+ PF1(i + 2) \
+ LD(i + 2, 2) \
+ LD(i + 3, 3) \
+ PF0(i + 4) \
+ PF0(i + 6) \
+ XO1(i, 0) \
+ XO1(i + 1, 1) \
+ XO1(i + 2, 2) \
+ XO1(i + 3, 3) \
+ ST(i, 0) \
+ ST(i + 1, 1) \
+ ST(i + 2, 2) \
+ ST(i + 3, 3) \
+
+
+ PF0(0)
+ PF0(2)
+
+ " .align 32 ;\n"
+ " 1: ;\n"
+
+ BLOCK(0)
+ BLOCK(4)
+ BLOCK(8)
+ BLOCK(12)
+
+ " add %[inc], %[p1] ;\n"
+ " add %[inc], %[p2] ;\n"
+ " dec %[cnt] ;\n"
+ " jnz 1b ;\n"
+ : [cnt] "+r" (lines),
+ [p1] "+r" (p1), [p2] "+r" (p2)
+ : [inc] XOR_CONSTANT_CONSTRAINT (256UL)
+ : "memory");
+
+ kernel_fpu_end();
+}
+
+static void
+xor_sse_2_pf64(unsigned long bytes, unsigned long *p1, unsigned long *p2)
+{
+ unsigned long lines = bytes >> 8;
+
+ kernel_fpu_begin();
+
+ asm volatile(
+#undef BLOCK
+#define BLOCK(i) \
+ BLK64(PF0, LD, i) \
+ BLK64(PF1, XO1, i) \
+ BLK64(NOP, ST, i) \
+
+ " .align 32 ;\n"
+ " 1: ;\n"
+
+ BLOCK(0)
+ BLOCK(4)
+ BLOCK(8)
+ BLOCK(12)
+
+ " add %[inc], %[p1] ;\n"
+ " add %[inc], %[p2] ;\n"
+ " dec %[cnt] ;\n"
+ " jnz 1b ;\n"
+ : [cnt] "+r" (lines),
+ [p1] "+r" (p1), [p2] "+r" (p2)
+ : [inc] XOR_CONSTANT_CONSTRAINT (256UL)
+ : "memory");
+
+ kernel_fpu_end();
+}
+
+static void
+xor_sse_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+ unsigned long *p3)
+{
+ unsigned long lines = bytes >> 8;
+
+ kernel_fpu_begin();
+
+ asm volatile(
+#undef BLOCK
+#define BLOCK(i) \
+ PF1(i) \
+ PF1(i + 2) \
+ LD(i, 0) \
+ LD(i + 1, 1) \
+ LD(i + 2, 2) \
+ LD(i + 3, 3) \
+ PF2(i) \
+ PF2(i + 2) \
+ PF0(i + 4) \
+ PF0(i + 6) \
+ XO1(i, 0) \
+ XO1(i + 1, 1) \
+ XO1(i + 2, 2) \
+ XO1(i + 3, 3) \
+ XO2(i, 0) \
+ XO2(i + 1, 1) \
+ XO2(i + 2, 2) \
+ XO2(i + 3, 3) \
+ ST(i, 0) \
+ ST(i + 1, 1) \
+ ST(i + 2, 2) \
+ ST(i + 3, 3) \
+
+
+ PF0(0)
+ PF0(2)
+
+ " .align 32 ;\n"
+ " 1: ;\n"
+
+ BLOCK(0)
+ BLOCK(4)
+ BLOCK(8)
+ BLOCK(12)
+
+ " add %[inc], %[p1] ;\n"
+ " add %[inc], %[p2] ;\n"
+ " add %[inc], %[p3] ;\n"
+ " dec %[cnt] ;\n"
+ " jnz 1b ;\n"
+ : [cnt] "+r" (lines),
+ [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3)
+ : [inc] XOR_CONSTANT_CONSTRAINT (256UL)
+ : "memory");
+
+ kernel_fpu_end();
+}
+
+static void
+xor_sse_3_pf64(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+ unsigned long *p3)
+{
+ unsigned long lines = bytes >> 8;
+
+ kernel_fpu_begin();
+
+ asm volatile(
+#undef BLOCK
+#define BLOCK(i) \
+ BLK64(PF0, LD, i) \
+ BLK64(PF1, XO1, i) \
+ BLK64(PF2, XO2, i) \
+ BLK64(NOP, ST, i) \
+
+ " .align 32 ;\n"
+ " 1: ;\n"
+
+ BLOCK(0)
+ BLOCK(4)
+ BLOCK(8)
+ BLOCK(12)
+
+ " add %[inc], %[p1] ;\n"
+ " add %[inc], %[p2] ;\n"
+ " add %[inc], %[p3] ;\n"
+ " dec %[cnt] ;\n"
+ " jnz 1b ;\n"
+ : [cnt] "+r" (lines),
+ [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3)
+ : [inc] XOR_CONSTANT_CONSTRAINT (256UL)
+ : "memory");
+
+ kernel_fpu_end();
+}
+
+static void
+xor_sse_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+ unsigned long *p3, unsigned long *p4)
+{
+ unsigned long lines = bytes >> 8;
+
+ kernel_fpu_begin();
+
+ asm volatile(
+#undef BLOCK
+#define BLOCK(i) \
+ PF1(i) \
+ PF1(i + 2) \
+ LD(i, 0) \
+ LD(i + 1, 1) \
+ LD(i + 2, 2) \
+ LD(i + 3, 3) \
+ PF2(i) \
+ PF2(i + 2) \
+ XO1(i, 0) \
+ XO1(i + 1, 1) \
+ XO1(i + 2, 2) \
+ XO1(i + 3, 3) \
+ PF3(i) \
+ PF3(i + 2) \
+ PF0(i + 4) \
+ PF0(i + 6) \
+ XO2(i, 0) \
+ XO2(i + 1, 1) \
+ XO2(i + 2, 2) \
+ XO2(i + 3, 3) \
+ XO3(i, 0) \
+ XO3(i + 1, 1) \
+ XO3(i + 2, 2) \
+ XO3(i + 3, 3) \
+ ST(i, 0) \
+ ST(i + 1, 1) \
+ ST(i + 2, 2) \
+ ST(i + 3, 3) \
+
+
+ PF0(0)
+ PF0(2)
+
+ " .align 32 ;\n"
+ " 1: ;\n"
+
+ BLOCK(0)
+ BLOCK(4)
+ BLOCK(8)
+ BLOCK(12)
+
+ " add %[inc], %[p1] ;\n"
+ " add %[inc], %[p2] ;\n"
+ " add %[inc], %[p3] ;\n"
+ " add %[inc], %[p4] ;\n"
+ " dec %[cnt] ;\n"
+ " jnz 1b ;\n"
+ : [cnt] "+r" (lines), [p1] "+r" (p1),
+ [p2] "+r" (p2), [p3] "+r" (p3), [p4] "+r" (p4)
+ : [inc] XOR_CONSTANT_CONSTRAINT (256UL)
+ : "memory");
+
+ kernel_fpu_end();
+}
+
+static void
+xor_sse_4_pf64(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+ unsigned long *p3, unsigned long *p4)
+{
+ unsigned long lines = bytes >> 8;
+
+ kernel_fpu_begin();
+
+ asm volatile(
+#undef BLOCK
+#define BLOCK(i) \
+ BLK64(PF0, LD, i) \
+ BLK64(PF1, XO1, i) \
+ BLK64(PF2, XO2, i) \
+ BLK64(PF3, XO3, i) \
+ BLK64(NOP, ST, i) \
+
+ " .align 32 ;\n"
+ " 1: ;\n"
+
+ BLOCK(0)
+ BLOCK(4)
+ BLOCK(8)
+ BLOCK(12)
+
+ " add %[inc], %[p1] ;\n"
+ " add %[inc], %[p2] ;\n"
+ " add %[inc], %[p3] ;\n"
+ " add %[inc], %[p4] ;\n"
+ " dec %[cnt] ;\n"
+ " jnz 1b ;\n"
+ : [cnt] "+r" (lines), [p1] "+r" (p1),
+ [p2] "+r" (p2), [p3] "+r" (p3), [p4] "+r" (p4)
+ : [inc] XOR_CONSTANT_CONSTRAINT (256UL)
+ : "memory");
+
+ kernel_fpu_end();
+}
+
+static void
+xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+ unsigned long *p3, unsigned long *p4, unsigned long *p5)
+{
+ unsigned long lines = bytes >> 8;
+
+ kernel_fpu_begin();
+
+ asm volatile(
+#undef BLOCK
+#define BLOCK(i) \
+ PF1(i) \
+ PF1(i + 2) \
+ LD(i, 0) \
+ LD(i + 1, 1) \
+ LD(i + 2, 2) \
+ LD(i + 3, 3) \
+ PF2(i) \
+ PF2(i + 2) \
+ XO1(i, 0) \
+ XO1(i + 1, 1) \
+ XO1(i + 2, 2) \
+ XO1(i + 3, 3) \
+ PF3(i) \
+ PF3(i + 2) \
+ XO2(i, 0) \
+ XO2(i + 1, 1) \
+ XO2(i + 2, 2) \
+ XO2(i + 3, 3) \
+ PF4(i) \
+ PF4(i + 2) \
+ PF0(i + 4) \
+ PF0(i + 6) \
+ XO3(i, 0) \
+ XO3(i + 1, 1) \
+ XO3(i + 2, 2) \
+ XO3(i + 3, 3) \
+ XO4(i, 0) \
+ XO4(i + 1, 1) \
+ XO4(i + 2, 2) \
+ XO4(i + 3, 3) \
+ ST(i, 0) \
+ ST(i + 1, 1) \
+ ST(i + 2, 2) \
+ ST(i + 3, 3) \
+
+
+ PF0(0)
+ PF0(2)
+
+ " .align 32 ;\n"
+ " 1: ;\n"
+
+ BLOCK(0)
+ BLOCK(4)
+ BLOCK(8)
+ BLOCK(12)
+
+ " add %[inc], %[p1] ;\n"
+ " add %[inc], %[p2] ;\n"
+ " add %[inc], %[p3] ;\n"
+ " add %[inc], %[p4] ;\n"
+ " add %[inc], %[p5] ;\n"
+ " dec %[cnt] ;\n"
+ " jnz 1b ;\n"
+ : [cnt] "+r" (lines), [p1] "+r" (p1), [p2] "+r" (p2),
+ [p3] "+r" (p3), [p4] "+r" (p4), [p5] "+r" (p5)
+ : [inc] XOR_CONSTANT_CONSTRAINT (256UL)
+ : "memory");
+
+ kernel_fpu_end();
+}
+
+static void
+xor_sse_5_pf64(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+ unsigned long *p3, unsigned long *p4, unsigned long *p5)
+{
+ unsigned long lines = bytes >> 8;
+
+ kernel_fpu_begin();
+
+ asm volatile(
+#undef BLOCK
+#define BLOCK(i) \
+ BLK64(PF0, LD, i) \
+ BLK64(PF1, XO1, i) \
+ BLK64(PF2, XO2, i) \
+ BLK64(PF3, XO3, i) \
+ BLK64(PF4, XO4, i) \
+ BLK64(NOP, ST, i) \
+
+ " .align 32 ;\n"
+ " 1: ;\n"
+
+ BLOCK(0)
+ BLOCK(4)
+ BLOCK(8)
+ BLOCK(12)
+
+ " add %[inc], %[p1] ;\n"
+ " add %[inc], %[p2] ;\n"
+ " add %[inc], %[p3] ;\n"
+ " add %[inc], %[p4] ;\n"
+ " add %[inc], %[p5] ;\n"
+ " dec %[cnt] ;\n"
+ " jnz 1b ;\n"
+ : [cnt] "+r" (lines), [p1] "+r" (p1), [p2] "+r" (p2),
+ [p3] "+r" (p3), [p4] "+r" (p4), [p5] "+r" (p5)
+ : [inc] XOR_CONSTANT_CONSTRAINT (256UL)
+ : "memory");
+
+ kernel_fpu_end();
+}
+
+static struct xor_block_template xor_block_sse_pf64 = {
+ .name = "prefetch64-sse",
+ .do_2 = xor_sse_2_pf64,
+ .do_3 = xor_sse_3_pf64,
+ .do_4 = xor_sse_4_pf64,
+ .do_5 = xor_sse_5_pf64,
+};
+
+#undef LD
+#undef XO1
+#undef XO2
+#undef XO3
+#undef XO4
+#undef ST
+#undef NOP
+#undef BLK64
+#undef BLOCK
+
+#undef XOR_CONSTANT_CONSTRAINT
+
+#ifdef CONFIG_X86_32
+# include <asm/xor_32.h>
+#else
+# include <asm/xor_64.h>
#endif
+
+#define XOR_SELECT_TEMPLATE(FASTEST) \
+ AVX_SELECT(FASTEST)
+
+#endif /* _ASM_X86_XOR_H */
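/*
 * Reader's note, not part of the diff: the prefetch64-sse variants above are
 * built from BLK64(), which covers one 64-byte span per invocation: a
 * prefetchnta 256 bytes ahead (or NOP for the store pass) plus four 16-byte
 * SSE operations.  For example, BLK64(PF0, LD, 0) expands to
 *
 *	PF0(0) LD(0, 0) LD(1, 1) LD(2, 2) LD(3, 3)
 *
 * i.e. prefetch p1+256 and load p1+0 .. p1+63 into %xmm0..%xmm3.  Each
 * BLOCK() chains one BLK64() per source operand, and the four BLOCK()
 * invocations in the loop body together cover the 256-byte stride
 * (lines = bytes >> 8) added to every pointer before looping.
 */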
diff --git a/arch/x86/include/asm/xor_32.h b/arch/x86/include/asm/xor_32.h
index 133b40a0f49..ce05722e3c6 100644
--- a/arch/x86/include/asm/xor_32.h
+++ b/arch/x86/include/asm/xor_32.h
@@ -2,7 +2,7 @@
#define _ASM_X86_XOR_32_H
/*
- * Optimized RAID-5 checksumming functions for MMX and SSE.
+ * Optimized RAID-5 checksumming functions for MMX.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -529,330 +529,6 @@ static struct xor_block_template xor_block_p5_mmx = {
.do_5 = xor_p5_mmx_5,
};
-/*
- * Cache avoiding checksumming functions utilizing KNI instructions
- * Copyright (C) 1999 Zach Brown (with obvious credit due Ingo)
- */
-
-#define XMMS_SAVE \
-do { \
- preempt_disable(); \
- cr0 = read_cr0(); \
- clts(); \
- asm volatile( \
- "movups %%xmm0,(%0) ;\n\t" \
- "movups %%xmm1,0x10(%0) ;\n\t" \
- "movups %%xmm2,0x20(%0) ;\n\t" \
- "movups %%xmm3,0x30(%0) ;\n\t" \
- : \
- : "r" (xmm_save) \
- : "memory"); \
-} while (0)
-
-#define XMMS_RESTORE \
-do { \
- asm volatile( \
- "sfence ;\n\t" \
- "movups (%0),%%xmm0 ;\n\t" \
- "movups 0x10(%0),%%xmm1 ;\n\t" \
- "movups 0x20(%0),%%xmm2 ;\n\t" \
- "movups 0x30(%0),%%xmm3 ;\n\t" \
- : \
- : "r" (xmm_save) \
- : "memory"); \
- write_cr0(cr0); \
- preempt_enable(); \
-} while (0)
-
-#define ALIGN16 __attribute__((aligned(16)))
-
-#define OFFS(x) "16*("#x")"
-#define PF_OFFS(x) "256+16*("#x")"
-#define PF0(x) " prefetchnta "PF_OFFS(x)"(%1) ;\n"
-#define LD(x, y) " movaps "OFFS(x)"(%1), %%xmm"#y" ;\n"
-#define ST(x, y) " movaps %%xmm"#y", "OFFS(x)"(%1) ;\n"
-#define PF1(x) " prefetchnta "PF_OFFS(x)"(%2) ;\n"
-#define PF2(x) " prefetchnta "PF_OFFS(x)"(%3) ;\n"
-#define PF3(x) " prefetchnta "PF_OFFS(x)"(%4) ;\n"
-#define PF4(x) " prefetchnta "PF_OFFS(x)"(%5) ;\n"
-#define PF5(x) " prefetchnta "PF_OFFS(x)"(%6) ;\n"
-#define XO1(x, y) " xorps "OFFS(x)"(%2), %%xmm"#y" ;\n"
-#define XO2(x, y) " xorps "OFFS(x)"(%3), %%xmm"#y" ;\n"
-#define XO3(x, y) " xorps "OFFS(x)"(%4), %%xmm"#y" ;\n"
-#define XO4(x, y) " xorps "OFFS(x)"(%5), %%xmm"#y" ;\n"
-#define XO5(x, y) " xorps "OFFS(x)"(%6), %%xmm"#y" ;\n"
-
-
-static void
-xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
-{
- unsigned long lines = bytes >> 8;
- char xmm_save[16*4] ALIGN16;
- int cr0;
-
- XMMS_SAVE;
-
- asm volatile(
-#undef BLOCK
-#define BLOCK(i) \
- LD(i, 0) \
- LD(i + 1, 1) \
- PF1(i) \
- PF1(i + 2) \
- LD(i + 2, 2) \
- LD(i + 3, 3) \
- PF0(i + 4) \
- PF0(i + 6) \
- XO1(i, 0) \
- XO1(i + 1, 1) \
- XO1(i + 2, 2) \
- XO1(i + 3, 3) \
- ST(i, 0) \
- ST(i + 1, 1) \
- ST(i + 2, 2) \
- ST(i + 3, 3) \
-
-
- PF0(0)
- PF0(2)
-
- " .align 32 ;\n"
- " 1: ;\n"
-
- BLOCK(0)
- BLOCK(4)
- BLOCK(8)
- BLOCK(12)
-
- " addl $256, %1 ;\n"
- " addl $256, %2 ;\n"
- " decl %0 ;\n"
- " jnz 1b ;\n"
- : "+r" (lines),
- "+r" (p1), "+r" (p2)
- :
- : "memory");
-
- XMMS_RESTORE;
-}
-
-static void
-xor_sse_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3)
-{
- unsigned long lines = bytes >> 8;
- char xmm_save[16*4] ALIGN16;
- int cr0;
-
- XMMS_SAVE;
-
- asm volatile(
-#undef BLOCK
-#define BLOCK(i) \
- PF1(i) \
- PF1(i + 2) \
- LD(i,0) \
- LD(i + 1, 1) \
- LD(i + 2, 2) \
- LD(i + 3, 3) \
- PF2(i) \
- PF2(i + 2) \
- PF0(i + 4) \
- PF0(i + 6) \
- XO1(i,0) \
- XO1(i + 1, 1) \
- XO1(i + 2, 2) \
- XO1(i + 3, 3) \
- XO2(i,0) \
- XO2(i + 1, 1) \
- XO2(i + 2, 2) \
- XO2(i + 3, 3) \
- ST(i,0) \
- ST(i + 1, 1) \
- ST(i + 2, 2) \
- ST(i + 3, 3) \
-
-
- PF0(0)
- PF0(2)
-
- " .align 32 ;\n"
- " 1: ;\n"
-
- BLOCK(0)
- BLOCK(4)
- BLOCK(8)
- BLOCK(12)
-
- " addl $256, %1 ;\n"
- " addl $256, %2 ;\n"
- " addl $256, %3 ;\n"
- " decl %0 ;\n"
- " jnz 1b ;\n"
- : "+r" (lines),
- "+r" (p1), "+r"(p2), "+r"(p3)
- :
- : "memory" );
-
- XMMS_RESTORE;
-}
-
-static void
-xor_sse_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3, unsigned long *p4)
-{
- unsigned long lines = bytes >> 8;
- char xmm_save[16*4] ALIGN16;
- int cr0;
-
- XMMS_SAVE;
-
- asm volatile(
-#undef BLOCK
-#define BLOCK(i) \
- PF1(i) \
- PF1(i + 2) \
- LD(i,0) \
- LD(i + 1, 1) \
- LD(i + 2, 2) \
- LD(i + 3, 3) \
- PF2(i) \
- PF2(i + 2) \
- XO1(i,0) \
- XO1(i + 1, 1) \
- XO1(i + 2, 2) \
- XO1(i + 3, 3) \
- PF3(i) \
- PF3(i + 2) \
- PF0(i + 4) \
- PF0(i + 6) \
- XO2(i,0) \
- XO2(i + 1, 1) \
- XO2(i + 2, 2) \
- XO2(i + 3, 3) \
- XO3(i,0) \
- XO3(i + 1, 1) \
- XO3(i + 2, 2) \
- XO3(i + 3, 3) \
- ST(i,0) \
- ST(i + 1, 1) \
- ST(i + 2, 2) \
- ST(i + 3, 3) \
-
-
- PF0(0)
- PF0(2)
-
- " .align 32 ;\n"
- " 1: ;\n"
-
- BLOCK(0)
- BLOCK(4)
- BLOCK(8)
- BLOCK(12)
-
- " addl $256, %1 ;\n"
- " addl $256, %2 ;\n"
- " addl $256, %3 ;\n"
- " addl $256, %4 ;\n"
- " decl %0 ;\n"
- " jnz 1b ;\n"
- : "+r" (lines),
- "+r" (p1), "+r" (p2), "+r" (p3), "+r" (p4)
- :
- : "memory" );
-
- XMMS_RESTORE;
-}
-
-static void
-xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3, unsigned long *p4, unsigned long *p5)
-{
- unsigned long lines = bytes >> 8;
- char xmm_save[16*4] ALIGN16;
- int cr0;
-
- XMMS_SAVE;
-
- /* Make sure GCC forgets anything it knows about p4 or p5,
- such that it won't pass to the asm volatile below a
- register that is shared with any other variable. That's
- because we modify p4 and p5 there, but we can't mark them
- as read/write, otherwise we'd overflow the 10-asm-operands
- limit of GCC < 3.1. */
- asm("" : "+r" (p4), "+r" (p5));
-
- asm volatile(
-#undef BLOCK
-#define BLOCK(i) \
- PF1(i) \
- PF1(i + 2) \
- LD(i,0) \
- LD(i + 1, 1) \
- LD(i + 2, 2) \
- LD(i + 3, 3) \
- PF2(i) \
- PF2(i + 2) \
- XO1(i,0) \
- XO1(i + 1, 1) \
- XO1(i + 2, 2) \
- XO1(i + 3, 3) \
- PF3(i) \
- PF3(i + 2) \
- XO2(i,0) \
- XO2(i + 1, 1) \
- XO2(i + 2, 2) \
- XO2(i + 3, 3) \
- PF4(i) \
- PF4(i + 2) \
- PF0(i + 4) \
- PF0(i + 6) \
- XO3(i,0) \
- XO3(i + 1, 1) \
- XO3(i + 2, 2) \
- XO3(i + 3, 3) \
- XO4(i,0) \
- XO4(i + 1, 1) \
- XO4(i + 2, 2) \
- XO4(i + 3, 3) \
- ST(i,0) \
- ST(i + 1, 1) \
- ST(i + 2, 2) \
- ST(i + 3, 3) \
-
-
- PF0(0)
- PF0(2)
-
- " .align 32 ;\n"
- " 1: ;\n"
-
- BLOCK(0)
- BLOCK(4)
- BLOCK(8)
- BLOCK(12)
-
- " addl $256, %1 ;\n"
- " addl $256, %2 ;\n"
- " addl $256, %3 ;\n"
- " addl $256, %4 ;\n"
- " addl $256, %5 ;\n"
- " decl %0 ;\n"
- " jnz 1b ;\n"
- : "+r" (lines),
- "+r" (p1), "+r" (p2), "+r" (p3)
- : "r" (p4), "r" (p5)
- : "memory");
-
- /* p4 and p5 were modified, and now the variables are dead.
- Clobber them just to be sure nobody does something stupid
- like assuming they have some legal value. */
- asm("" : "=r" (p4), "=r" (p5));
-
- XMMS_RESTORE;
-}
-
static struct xor_block_template xor_block_pIII_sse = {
.name = "pIII_sse",
.do_2 = xor_sse_2,
@@ -861,28 +537,31 @@ static struct xor_block_template xor_block_pIII_sse = {
.do_5 = xor_sse_5,
};
+/* Also try the AVX routines */
+#include <asm/xor_avx.h>
+
/* Also try the generic routines. */
#include <asm-generic/xor.h>
+/* We force the use of the SSE xor block because it can write around L2.
+   We may also be able to load only into the L1 cache, depending on how
+   the CPU deals with a load to a line that is being prefetched. */
#undef XOR_TRY_TEMPLATES
#define XOR_TRY_TEMPLATES \
do { \
- xor_speed(&xor_block_8regs); \
- xor_speed(&xor_block_8regs_p); \
- xor_speed(&xor_block_32regs); \
- xor_speed(&xor_block_32regs_p); \
- if (cpu_has_xmm) \
+ AVX_XOR_SPEED; \
+ if (cpu_has_xmm) { \
xor_speed(&xor_block_pIII_sse); \
- if (cpu_has_mmx) { \
+ xor_speed(&xor_block_sse_pf64); \
+ } else if (cpu_has_mmx) { \
xor_speed(&xor_block_pII_mmx); \
xor_speed(&xor_block_p5_mmx); \
+ } else { \
+ xor_speed(&xor_block_8regs); \
+ xor_speed(&xor_block_8regs_p); \
+ xor_speed(&xor_block_32regs); \
+ xor_speed(&xor_block_32regs_p); \
} \
} while (0)
-/* We force the use of the SSE xor block because it can write around L2.
- We may also be able to load into the L1 only depending on how the cpu
- deals with a load to a line that is being prefetched. */
-#define XOR_SELECT_TEMPLATE(FASTEST) \
- (cpu_has_xmm ? &xor_block_pIII_sse : FASTEST)
-
#endif /* _ASM_X86_XOR_32_H */
diff --git a/arch/x86/include/asm/xor_64.h b/arch/x86/include/asm/xor_64.h
index 1549b5e261f..546f1e3b87c 100644
--- a/arch/x86/include/asm/xor_64.h
+++ b/arch/x86/include/asm/xor_64.h
@@ -1,344 +1,6 @@
#ifndef _ASM_X86_XOR_64_H
#define _ASM_X86_XOR_64_H
-/*
- * Optimized RAID-5 checksumming functions for MMX and SSE.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * You should have received a copy of the GNU General Public License
- * (for example /usr/src/linux/COPYING); if not, write to the Free
- * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-
-/*
- * Cache avoiding checksumming functions utilizing KNI instructions
- * Copyright (C) 1999 Zach Brown (with obvious credit due Ingo)
- */
-
-/*
- * Based on
- * High-speed RAID5 checksumming functions utilizing SSE instructions.
- * Copyright (C) 1998 Ingo Molnar.
- */
-
-/*
- * x86-64 changes / gcc fixes from Andi Kleen.
- * Copyright 2002 Andi Kleen, SuSE Labs.
- *
- * This hasn't been optimized for the hammer yet, but there are likely
- * no advantages to be gotten from x86-64 here anyways.
- */
-
-typedef struct {
- unsigned long a, b;
-} __attribute__((aligned(16))) xmm_store_t;
-
-/* Doesn't use gcc to save the XMM registers, because there is no easy way to
- tell it to do a clts before the register saving. */
-#define XMMS_SAVE \
-do { \
- preempt_disable(); \
- asm volatile( \
- "movq %%cr0,%0 ;\n\t" \
- "clts ;\n\t" \
- "movups %%xmm0,(%1) ;\n\t" \
- "movups %%xmm1,0x10(%1) ;\n\t" \
- "movups %%xmm2,0x20(%1) ;\n\t" \
- "movups %%xmm3,0x30(%1) ;\n\t" \
- : "=&r" (cr0) \
- : "r" (xmm_save) \
- : "memory"); \
-} while (0)
-
-#define XMMS_RESTORE \
-do { \
- asm volatile( \
- "sfence ;\n\t" \
- "movups (%1),%%xmm0 ;\n\t" \
- "movups 0x10(%1),%%xmm1 ;\n\t" \
- "movups 0x20(%1),%%xmm2 ;\n\t" \
- "movups 0x30(%1),%%xmm3 ;\n\t" \
- "movq %0,%%cr0 ;\n\t" \
- : \
- : "r" (cr0), "r" (xmm_save) \
- : "memory"); \
- preempt_enable(); \
-} while (0)
-
-#define OFFS(x) "16*("#x")"
-#define PF_OFFS(x) "256+16*("#x")"
-#define PF0(x) " prefetchnta "PF_OFFS(x)"(%[p1]) ;\n"
-#define LD(x, y) " movaps "OFFS(x)"(%[p1]), %%xmm"#y" ;\n"
-#define ST(x, y) " movaps %%xmm"#y", "OFFS(x)"(%[p1]) ;\n"
-#define PF1(x) " prefetchnta "PF_OFFS(x)"(%[p2]) ;\n"
-#define PF2(x) " prefetchnta "PF_OFFS(x)"(%[p3]) ;\n"
-#define PF3(x) " prefetchnta "PF_OFFS(x)"(%[p4]) ;\n"
-#define PF4(x) " prefetchnta "PF_OFFS(x)"(%[p5]) ;\n"
-#define PF5(x) " prefetchnta "PF_OFFS(x)"(%[p6]) ;\n"
-#define XO1(x, y) " xorps "OFFS(x)"(%[p2]), %%xmm"#y" ;\n"
-#define XO2(x, y) " xorps "OFFS(x)"(%[p3]), %%xmm"#y" ;\n"
-#define XO3(x, y) " xorps "OFFS(x)"(%[p4]), %%xmm"#y" ;\n"
-#define XO4(x, y) " xorps "OFFS(x)"(%[p5]), %%xmm"#y" ;\n"
-#define XO5(x, y) " xorps "OFFS(x)"(%[p6]), %%xmm"#y" ;\n"
-
-
-static void
-xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
-{
- unsigned int lines = bytes >> 8;
- unsigned long cr0;
- xmm_store_t xmm_save[4];
-
- XMMS_SAVE;
-
- asm volatile(
-#undef BLOCK
-#define BLOCK(i) \
- LD(i, 0) \
- LD(i + 1, 1) \
- PF1(i) \
- PF1(i + 2) \
- LD(i + 2, 2) \
- LD(i + 3, 3) \
- PF0(i + 4) \
- PF0(i + 6) \
- XO1(i, 0) \
- XO1(i + 1, 1) \
- XO1(i + 2, 2) \
- XO1(i + 3, 3) \
- ST(i, 0) \
- ST(i + 1, 1) \
- ST(i + 2, 2) \
- ST(i + 3, 3) \
-
-
- PF0(0)
- PF0(2)
-
- " .align 32 ;\n"
- " 1: ;\n"
-
- BLOCK(0)
- BLOCK(4)
- BLOCK(8)
- BLOCK(12)
-
- " addq %[inc], %[p1] ;\n"
- " addq %[inc], %[p2] ;\n"
- " decl %[cnt] ; jnz 1b"
- : [p1] "+r" (p1), [p2] "+r" (p2), [cnt] "+r" (lines)
- : [inc] "r" (256UL)
- : "memory");
-
- XMMS_RESTORE;
-}
-
-static void
-xor_sse_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3)
-{
- unsigned int lines = bytes >> 8;
- xmm_store_t xmm_save[4];
- unsigned long cr0;
-
- XMMS_SAVE;
-
- asm volatile(
-#undef BLOCK
-#define BLOCK(i) \
- PF1(i) \
- PF1(i + 2) \
- LD(i, 0) \
- LD(i + 1, 1) \
- LD(i + 2, 2) \
- LD(i + 3, 3) \
- PF2(i) \
- PF2(i + 2) \
- PF0(i + 4) \
- PF0(i + 6) \
- XO1(i, 0) \
- XO1(i + 1, 1) \
- XO1(i + 2, 2) \
- XO1(i + 3, 3) \
- XO2(i, 0) \
- XO2(i + 1, 1) \
- XO2(i + 2, 2) \
- XO2(i + 3, 3) \
- ST(i, 0) \
- ST(i + 1, 1) \
- ST(i + 2, 2) \
- ST(i + 3, 3) \
-
-
- PF0(0)
- PF0(2)
-
- " .align 32 ;\n"
- " 1: ;\n"
-
- BLOCK(0)
- BLOCK(4)
- BLOCK(8)
- BLOCK(12)
-
- " addq %[inc], %[p1] ;\n"
- " addq %[inc], %[p2] ;\n"
- " addq %[inc], %[p3] ;\n"
- " decl %[cnt] ; jnz 1b"
- : [cnt] "+r" (lines),
- [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3)
- : [inc] "r" (256UL)
- : "memory");
- XMMS_RESTORE;
-}
-
-static void
-xor_sse_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3, unsigned long *p4)
-{
- unsigned int lines = bytes >> 8;
- xmm_store_t xmm_save[4];
- unsigned long cr0;
-
- XMMS_SAVE;
-
- asm volatile(
-#undef BLOCK
-#define BLOCK(i) \
- PF1(i) \
- PF1(i + 2) \
- LD(i, 0) \
- LD(i + 1, 1) \
- LD(i + 2, 2) \
- LD(i + 3, 3) \
- PF2(i) \
- PF2(i + 2) \
- XO1(i, 0) \
- XO1(i + 1, 1) \
- XO1(i + 2, 2) \
- XO1(i + 3, 3) \
- PF3(i) \
- PF3(i + 2) \
- PF0(i + 4) \
- PF0(i + 6) \
- XO2(i, 0) \
- XO2(i + 1, 1) \
- XO2(i + 2, 2) \
- XO2(i + 3, 3) \
- XO3(i, 0) \
- XO3(i + 1, 1) \
- XO3(i + 2, 2) \
- XO3(i + 3, 3) \
- ST(i, 0) \
- ST(i + 1, 1) \
- ST(i + 2, 2) \
- ST(i + 3, 3) \
-
-
- PF0(0)
- PF0(2)
-
- " .align 32 ;\n"
- " 1: ;\n"
-
- BLOCK(0)
- BLOCK(4)
- BLOCK(8)
- BLOCK(12)
-
- " addq %[inc], %[p1] ;\n"
- " addq %[inc], %[p2] ;\n"
- " addq %[inc], %[p3] ;\n"
- " addq %[inc], %[p4] ;\n"
- " decl %[cnt] ; jnz 1b"
- : [cnt] "+c" (lines),
- [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3), [p4] "+r" (p4)
- : [inc] "r" (256UL)
- : "memory" );
-
- XMMS_RESTORE;
-}
-
-static void
-xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3, unsigned long *p4, unsigned long *p5)
-{
- unsigned int lines = bytes >> 8;
- xmm_store_t xmm_save[4];
- unsigned long cr0;
-
- XMMS_SAVE;
-
- asm volatile(
-#undef BLOCK
-#define BLOCK(i) \
- PF1(i) \
- PF1(i + 2) \
- LD(i, 0) \
- LD(i + 1, 1) \
- LD(i + 2, 2) \
- LD(i + 3, 3) \
- PF2(i) \
- PF2(i + 2) \
- XO1(i, 0) \
- XO1(i + 1, 1) \
- XO1(i + 2, 2) \
- XO1(i + 3, 3) \
- PF3(i) \
- PF3(i + 2) \
- XO2(i, 0) \
- XO2(i + 1, 1) \
- XO2(i + 2, 2) \
- XO2(i + 3, 3) \
- PF4(i) \
- PF4(i + 2) \
- PF0(i + 4) \
- PF0(i + 6) \
- XO3(i, 0) \
- XO3(i + 1, 1) \
- XO3(i + 2, 2) \
- XO3(i + 3, 3) \
- XO4(i, 0) \
- XO4(i + 1, 1) \
- XO4(i + 2, 2) \
- XO4(i + 3, 3) \
- ST(i, 0) \
- ST(i + 1, 1) \
- ST(i + 2, 2) \
- ST(i + 3, 3) \
-
-
- PF0(0)
- PF0(2)
-
- " .align 32 ;\n"
- " 1: ;\n"
-
- BLOCK(0)
- BLOCK(4)
- BLOCK(8)
- BLOCK(12)
-
- " addq %[inc], %[p1] ;\n"
- " addq %[inc], %[p2] ;\n"
- " addq %[inc], %[p3] ;\n"
- " addq %[inc], %[p4] ;\n"
- " addq %[inc], %[p5] ;\n"
- " decl %[cnt] ; jnz 1b"
- : [cnt] "+c" (lines),
- [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3), [p4] "+r" (p4),
- [p5] "+r" (p5)
- : [inc] "r" (256UL)
- : "memory");
-
- XMMS_RESTORE;
-}
-
static struct xor_block_template xor_block_sse = {
.name = "generic_sse",
.do_2 = xor_sse_2,
@@ -347,15 +9,19 @@ static struct xor_block_template xor_block_sse = {
.do_5 = xor_sse_5,
};
+
+/* Also try the AVX routines */
+#include <asm/xor_avx.h>
+
+/* We force the use of the SSE xor block because it can write around L2.
+   We may also be able to load only into the L1 cache, depending on how
+   the CPU deals with a load to a line that is being prefetched. */
#undef XOR_TRY_TEMPLATES
#define XOR_TRY_TEMPLATES \
do { \
+ AVX_XOR_SPEED; \
+ xor_speed(&xor_block_sse_pf64); \
xor_speed(&xor_block_sse); \
} while (0)
-/* We force the use of the SSE xor block because it can write around L2.
- We may also be able to load into the L1 only depending on how the cpu
- deals with a load to a line that is being prefetched. */
-#define XOR_SELECT_TEMPLATE(FASTEST) (&xor_block_sse)
-
#endif /* _ASM_X86_XOR_64_H */
diff --git a/arch/x86/include/asm/xor_avx.h b/arch/x86/include/asm/xor_avx.h
new file mode 100644
index 00000000000..492b29802f5
--- /dev/null
+++ b/arch/x86/include/asm/xor_avx.h
@@ -0,0 +1,184 @@
+#ifndef _ASM_X86_XOR_AVX_H
+#define _ASM_X86_XOR_AVX_H
+
+/*
+ * Optimized RAID-5 checksumming functions for AVX
+ *
+ * Copyright (C) 2012 Intel Corporation
+ * Author: Jim Kukunas <james.t.kukunas@linux.intel.com>
+ *
+ * Based on Ingo Molnar and Zach Brown's respective MMX and SSE routines
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#ifdef CONFIG_AS_AVX
+
+#include <linux/compiler.h>
+#include <asm/i387.h>
+
+#define BLOCK4(i) \
+ BLOCK(32 * i, 0) \
+ BLOCK(32 * (i + 1), 1) \
+ BLOCK(32 * (i + 2), 2) \
+ BLOCK(32 * (i + 3), 3)
+
+#define BLOCK16() \
+ BLOCK4(0) \
+ BLOCK4(4) \
+ BLOCK4(8) \
+ BLOCK4(12)
+
+static void xor_avx_2(unsigned long bytes, unsigned long *p0, unsigned long *p1)
+{
+ unsigned long lines = bytes >> 9;
+
+ kernel_fpu_begin();
+
+ while (lines--) {
+#undef BLOCK
+#define BLOCK(i, reg) \
+do { \
+ asm volatile("vmovdqa %0, %%ymm" #reg : : "m" (p1[i / sizeof(*p1)])); \
+ asm volatile("vxorps %0, %%ymm" #reg ", %%ymm" #reg : : \
+ "m" (p0[i / sizeof(*p0)])); \
+ asm volatile("vmovdqa %%ymm" #reg ", %0" : \
+ "=m" (p0[i / sizeof(*p0)])); \
+} while (0);
+
+ BLOCK16()
+
+ p0 = (unsigned long *)((uintptr_t)p0 + 512);
+ p1 = (unsigned long *)((uintptr_t)p1 + 512);
+ }
+
+ kernel_fpu_end();
+}
+
+static void xor_avx_3(unsigned long bytes, unsigned long *p0, unsigned long *p1,
+ unsigned long *p2)
+{
+ unsigned long lines = bytes >> 9;
+
+ kernel_fpu_begin();
+
+ while (lines--) {
+#undef BLOCK
+#define BLOCK(i, reg) \
+do { \
+ asm volatile("vmovdqa %0, %%ymm" #reg : : "m" (p2[i / sizeof(*p2)])); \
+ asm volatile("vxorps %0, %%ymm" #reg ", %%ymm" #reg : : \
+ "m" (p1[i / sizeof(*p1)])); \
+ asm volatile("vxorps %0, %%ymm" #reg ", %%ymm" #reg : : \
+ "m" (p0[i / sizeof(*p0)])); \
+ asm volatile("vmovdqa %%ymm" #reg ", %0" : \
+ "=m" (p0[i / sizeof(*p0)])); \
+} while (0);
+
+ BLOCK16()
+
+ p0 = (unsigned long *)((uintptr_t)p0 + 512);
+ p1 = (unsigned long *)((uintptr_t)p1 + 512);
+ p2 = (unsigned long *)((uintptr_t)p2 + 512);
+ }
+
+ kernel_fpu_end();
+}
+
+static void xor_avx_4(unsigned long bytes, unsigned long *p0, unsigned long *p1,
+ unsigned long *p2, unsigned long *p3)
+{
+ unsigned long lines = bytes >> 9;
+
+ kernel_fpu_begin();
+
+ while (lines--) {
+#undef BLOCK
+#define BLOCK(i, reg) \
+do { \
+ asm volatile("vmovdqa %0, %%ymm" #reg : : "m" (p3[i / sizeof(*p3)])); \
+ asm volatile("vxorps %0, %%ymm" #reg ", %%ymm" #reg : : \
+ "m" (p2[i / sizeof(*p2)])); \
+ asm volatile("vxorps %0, %%ymm" #reg ", %%ymm" #reg : : \
+ "m" (p1[i / sizeof(*p1)])); \
+ asm volatile("vxorps %0, %%ymm" #reg ", %%ymm" #reg : : \
+ "m" (p0[i / sizeof(*p0)])); \
+ asm volatile("vmovdqa %%ymm" #reg ", %0" : \
+ "=m" (p0[i / sizeof(*p0)])); \
+} while (0);
+
+ BLOCK16();
+
+ p0 = (unsigned long *)((uintptr_t)p0 + 512);
+ p1 = (unsigned long *)((uintptr_t)p1 + 512);
+ p2 = (unsigned long *)((uintptr_t)p2 + 512);
+ p3 = (unsigned long *)((uintptr_t)p3 + 512);
+ }
+
+ kernel_fpu_end();
+}
+
+static void xor_avx_5(unsigned long bytes, unsigned long *p0, unsigned long *p1,
+ unsigned long *p2, unsigned long *p3, unsigned long *p4)
+{
+ unsigned long lines = bytes >> 9;
+
+ kernel_fpu_begin();
+
+ while (lines--) {
+#undef BLOCK
+#define BLOCK(i, reg) \
+do { \
+ asm volatile("vmovdqa %0, %%ymm" #reg : : "m" (p4[i / sizeof(*p4)])); \
+ asm volatile("vxorps %0, %%ymm" #reg ", %%ymm" #reg : : \
+ "m" (p3[i / sizeof(*p3)])); \
+ asm volatile("vxorps %0, %%ymm" #reg ", %%ymm" #reg : : \
+ "m" (p2[i / sizeof(*p2)])); \
+ asm volatile("vxorps %0, %%ymm" #reg ", %%ymm" #reg : : \
+ "m" (p1[i / sizeof(*p1)])); \
+ asm volatile("vxorps %0, %%ymm" #reg ", %%ymm" #reg : : \
+ "m" (p0[i / sizeof(*p0)])); \
+ asm volatile("vmovdqa %%ymm" #reg ", %0" : \
+ "=m" (p0[i / sizeof(*p0)])); \
+} while (0);
+
+ BLOCK16()
+
+ p0 = (unsigned long *)((uintptr_t)p0 + 512);
+ p1 = (unsigned long *)((uintptr_t)p1 + 512);
+ p2 = (unsigned long *)((uintptr_t)p2 + 512);
+ p3 = (unsigned long *)((uintptr_t)p3 + 512);
+ p4 = (unsigned long *)((uintptr_t)p4 + 512);
+ }
+
+ kernel_fpu_end();
+}
+
+static struct xor_block_template xor_block_avx = {
+ .name = "avx",
+ .do_2 = xor_avx_2,
+ .do_3 = xor_avx_3,
+ .do_4 = xor_avx_4,
+ .do_5 = xor_avx_5,
+};
+
+#define AVX_XOR_SPEED \
+do { \
+ if (cpu_has_avx && cpu_has_osxsave) \
+ xor_speed(&xor_block_avx); \
+} while (0)
+
+#define AVX_SELECT(FASTEST) \
+ (cpu_has_avx && cpu_has_osxsave ? &xor_block_avx : FASTEST)
+
+#else
+
+#define AVX_XOR_SPEED {}
+
+#define AVX_SELECT(FASTEST) (FASTEST)
+
+#endif
+#endif
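/*
 * Reader's note, not part of the diff: the AVX routines work on 512-byte
 * lines (bytes >> 9).  BLOCK16() expands to sixteen BLOCK() steps of one
 * 32-byte ymm register each, and 16 * 32 = 512 bytes, which is why every
 * source pointer is advanced by 512 at the end of each iteration.
 * AVX_SELECT() then prefers this template over whatever the boot-time
 * xor_speed() benchmark picked, provided the CPU reports AVX and OSXSAVE.
 */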
diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
index c6ce2452f10..d949ef28c48 100644
--- a/arch/x86/include/asm/xsave.h
+++ b/arch/x86/include/asm/xsave.h
@@ -6,11 +6,18 @@
#define XSTATE_CPUID 0x0000000d
-#define XSTATE_FP 0x1
-#define XSTATE_SSE 0x2
-#define XSTATE_YMM 0x4
+#define XSTATE_FP 0x1
+#define XSTATE_SSE 0x2
+#define XSTATE_YMM 0x4
+#define XSTATE_BNDREGS 0x8
+#define XSTATE_BNDCSR 0x10
+#define XSTATE_OPMASK 0x20
+#define XSTATE_ZMM_Hi256 0x40
+#define XSTATE_Hi16_ZMM 0x80
#define XSTATE_FPSSE (XSTATE_FP | XSTATE_SSE)
+/* Bit 63 of XCR0 is reserved for future expansion */
+#define XSTATE_EXTEND_MASK (~(XSTATE_FPSSE | (1ULL << 63)))
#define FXSAVE_SIZE 512
@@ -20,10 +27,15 @@
#define XSAVE_YMM_SIZE 256
#define XSAVE_YMM_OFFSET (XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET)
-/*
- * These are the features that the OS can handle currently.
- */
-#define XCNTXT_MASK (XSTATE_FP | XSTATE_SSE | XSTATE_YMM)
+/* Supported features which support lazy state saving */
+#define XSTATE_LAZY (XSTATE_FP | XSTATE_SSE | XSTATE_YMM \
+ | XSTATE_OPMASK | XSTATE_ZMM_Hi256 | XSTATE_Hi16_ZMM)
+
+/* Supported features which require eager state saving */
+#define XSTATE_EAGER (XSTATE_BNDREGS | XSTATE_BNDCSR)
+
+/* All currently supported features */
+#define XCNTXT_MASK (XSTATE_LAZY | XSTATE_EAGER)
#ifdef CONFIG_X86_64
#define REX_PREFIX "0x48, "
@@ -34,17 +46,14 @@
extern unsigned int xstate_size;
extern u64 pcntxt_mask;
extern u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];
+extern struct xsave_struct *init_xstate_buf;
extern void xsave_init(void);
extern void update_regset_xstate_info(unsigned int size, u64 xstate_mask);
extern int init_fpu(struct task_struct *child);
-extern int check_for_xstate(struct i387_fxsave_struct __user *buf,
- void __user *fpstate,
- struct _fpx_sw_bytes *sw);
-static inline int fpu_xrstor_checking(struct fpu *fpu)
+static inline int fpu_xrstor_checking(struct xsave_struct *fx)
{
- struct xsave_struct *fx = &fpu->state->xsave;
int err;
asm volatile("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n\t"
@@ -69,27 +78,21 @@ static inline int xsave_user(struct xsave_struct __user *buf)
* Clear the xsave header first, so that reserved fields are
* initialized to zero.
*/
- err = __clear_user(&buf->xsave_hdr,
- sizeof(struct xsave_hdr_struct));
+ err = __clear_user(&buf->xsave_hdr, sizeof(buf->xsave_hdr));
if (unlikely(err))
return -EFAULT;
- __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
- "2:\n"
+ __asm__ __volatile__(ASM_STAC "\n"
+ "1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
+ "2: " ASM_CLAC "\n"
".section .fixup,\"ax\"\n"
"3: movl $-1,%[err]\n"
" jmp 2b\n"
".previous\n"
- ".section __ex_table,\"a\"\n"
- _ASM_ALIGN "\n"
- _ASM_PTR "1b,3b\n"
- ".previous"
+ _ASM_EXTABLE(1b,3b)
: [err] "=r" (err)
: "D" (buf), "a" (-1), "d" (-1), "0" (0)
: "memory");
- if (unlikely(err) && __clear_user(buf, xstate_size))
- err = -EFAULT;
- /* No need to clear here because the caller clears USED_MATH */
return err;
}
@@ -100,16 +103,14 @@ static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
u32 lmask = mask;
u32 hmask = mask >> 32;
- __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
- "2:\n"
+ __asm__ __volatile__(ASM_STAC "\n"
+ "1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
+ "2: " ASM_CLAC "\n"
".section .fixup,\"ax\"\n"
"3: movl $-1,%[err]\n"
" jmp 2b\n"
".previous\n"
- ".section __ex_table,\"a\"\n"
- _ASM_ALIGN "\n"
- _ASM_PTR "1b,3b\n"
- ".previous"
+ _ASM_EXTABLE(1b,3b)
: [err] "=r" (err)
: "D" (xstate), "a" (lmask), "d" (hmask), "0" (0)
: "memory"); /* memory required? */