path: root/arch/powerpc/kernel
author    Paul Mackerras <paulus@samba.org>   2011-06-29 00:21:34 +0000
committer Avi Kivity <avi@redhat.com>         2011-07-12 13:16:54 +0300
commit    de56a948b9182fbcf92cb8212f114de096c2d574 (patch)
tree      633ab73672aa2543b683686fc8fb023629c5f8f8 /arch/powerpc/kernel
parent    3c42bf8a717cb636e0ed2ed77194669e2ac3ed56 (diff)
KVM: PPC: Add support for Book3S processors in hypervisor mode
This adds support for KVM running on 64-bit Book 3S processors, specifically POWER7, in hypervisor mode. Using hypervisor mode means that the guest can use the processor's supervisor mode. That means that the guest can execute privileged instructions and access privileged registers itself without trapping to the host. This gives excellent performance, but does mean that KVM cannot emulate a processor architecture other than the one that the hardware implements.

This code assumes that the guest is running paravirtualized using the PAPR (Power Architecture Platform Requirements) interface, which is the interface that IBM's PowerVM hypervisor uses. That means that existing Linux distributions that run on IBM pSeries machines will also run under KVM without modification. In order to communicate the PAPR hypercalls to qemu, this adds a new KVM_EXIT_PAPR_HCALL exit code to include/linux/kvm.h.

Currently the choice between book3s_hv support and book3s_pr support (i.e. the existing code, which runs the guest in user mode) has to be made at kernel configuration time, so a given kernel binary can only do one or the other.

This new book3s_hv code doesn't support MMIO emulation at present. Since we are running paravirtualized guests, this isn't a serious restriction.

With the guest running in supervisor mode, most exceptions go straight to the guest. We will never get data or instruction storage or segment interrupts, alignment interrupts, decrementer interrupts, program interrupts, single-step interrupts, etc., coming to the hypervisor from the guest. Therefore this introduces a new KVMTEST_PR macro for the exception entry path so that we don't have to do the KVM test on entry to those exception handlers. We do, however, get hypervisor decrementer, hypervisor data storage, hypervisor instruction storage, and hypervisor emulation assist interrupts, so we have to handle those.

In hypervisor mode, real-mode accesses can access all of RAM, not just a limited amount. Therefore we put all the guest state in the vcpu.arch and use the shadow_vcpu in the PACA only for temporary scratch space. We allocate the vcpu with kzalloc rather than vzalloc, and we don't use anything in the kvmppc_vcpu_book3s struct, so we don't allocate it. We don't have a shared page with the guest, but we still need a kvm_vcpu_arch_shared struct to store the values of various registers, so we include one in the vcpu_arch struct.

The POWER7 processor has a restriction that all threads in a core have to be in the same partition. MMU-on kernel code counts as a partition (partition 0), so we have to do a partition switch on every entry to and exit from the guest. At present we require the host and guest to run in single-thread mode because of this hardware restriction.

This code allocates a hashed page table for the guest and initializes it with HPTEs for the guest's Virtual Real Memory Area (VRMA). We require that the guest memory is allocated using 16MB huge pages, in order to simplify the low-level memory management. This also means that we can get away without tracking paging activity in the host for now, since huge pages can't be paged or swapped.

This also adds a few new exports needed by the book3s_hv code.

Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>
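As a rough illustration of the new exit code mentioned above (the kvm.h change itself is not part of this diff, which is limited to arch/powerpc/kernel): when the kernel cannot handle a PAPR hypercall itself, KVM_RUN returns to userspace with exit_reason set to KVM_EXIT_PAPR_HCALL, and the VMM fills in a return value before re-entering the guest. The sketch below assumes the papr_hcall layout (nr, ret, args[9]) added elsewhere in this series; handle_hcall() is a hypothetical dispatcher, and error handling is omitted.

/* Hedged sketch of userspace handling for KVM_EXIT_PAPR_HCALL.
 * The run->papr_hcall layout is assumed from the kvm.h side of this
 * series; handle_hcall() is a hypothetical helper, not a real API. */
#include <sys/ioctl.h>
#include <linux/kvm.h>

#define H_SUCCESS	0	/* PAPR return code for a successful hcall */

static __u64 handle_hcall(__u64 nr, __u64 *args)
{
	(void)nr; (void)args;	/* a real VMM dispatches on nr here */
	return H_SUCCESS;
}

static void run_vcpu(int vcpu_fd, struct kvm_run *run)
{
	for (;;) {
		if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
			break;
		if (run->exit_reason == KVM_EXIT_PAPR_HCALL) {
			/* Emulate the hypercall, store the return value,
			 * and re-enter the guest. */
			run->papr_hcall.ret =
				handle_hcall(run->papr_hcall.nr,
					     run->papr_hcall.args);
			continue;
		}
		break;	/* other exit reasons not handled in this sketch */
	}
}

The guest issues these hypercalls with the sc 1 instruction defined by PAPR; whatever the VMM writes into ret is what the guest sees as the hcall's return code.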
Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c     79
-rw-r--r--  arch/powerpc/kernel/exceptions-64s.S  60
-rw-r--r--  arch/powerpc/kernel/process.c          3
-rw-r--r--  arch/powerpc/kernel/setup-common.c     3
-rw-r--r--  arch/powerpc/kernel/smp.c              1
5 files changed, 118 insertions, 28 deletions
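Most of the churn below is in asm-offsets.c, which exists so that assembly code can address C structure fields by name: the file is compiled to assembly at build time and the emitted marker lines are scraped into asm-offsets.h, giving the VCPU_* and HSTATE_* constants that the real-mode entry/exit code uses as load/store displacements. Here is a minimal sketch of that mechanism; the DEFINE body is reconstructed from memory of the era's include/linux/kbuild.h and should be treated as an assumption, and demo_vcpu is purely illustrative.

/* Sketch of the asm-offsets mechanism; DEFINE is an assumed
 * reconstruction of include/linux/kbuild.h, not taken from this patch. */
#include <stddef.h>

#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))

struct demo_vcpu {
	unsigned long gpr[32];
	unsigned long pc;
};

int main(void)
{
	/* Compiling this file with -S emits a "->DEMO_VCPU_PC <offset> ..."
	 * marker; a sed script turns it into
	 * "#define DEMO_VCPU_PC <offset>" in asm-offsets.h. */
	DEFINE(DEMO_VCPU_PC, offsetof(struct demo_vcpu, pc));
	return 0;
}

Assembly such as the new book3s_hv_rmhandlers.S can then load guest state with plain displacements, e.g. ld r5, VCPU_PC(r4) with r4 holding the kvm_vcpu pointer.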
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index dabfb7346f3..936267462ca 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -187,6 +187,7 @@ int main(void)
DEFINE(LPPACASRR1, offsetof(struct lppaca, saved_srr1));
DEFINE(LPPACAANYINT, offsetof(struct lppaca, int_dword.any_int));
DEFINE(LPPACADECRINT, offsetof(struct lppaca, int_dword.fields.decr_int));
+ DEFINE(LPPACA_PMCINUSE, offsetof(struct lppaca, pmcregs_in_use));
DEFINE(LPPACA_DTLIDX, offsetof(struct lppaca, dtl_idx));
DEFINE(PACA_DTL_RIDX, offsetof(struct paca_struct, dtl_ridx));
#endif /* CONFIG_PPC_STD_MMU_64 */
@@ -392,6 +393,29 @@ int main(void)
DEFINE(VCPU_HOST_PID, offsetof(struct kvm_vcpu, arch.host_pid));
DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr));
DEFINE(VCPU_VRSAVE, offsetof(struct kvm_vcpu, arch.vrsave));
+ DEFINE(VCPU_FPRS, offsetof(struct kvm_vcpu, arch.fpr));
+ DEFINE(VCPU_FPSCR, offsetof(struct kvm_vcpu, arch.fpscr));
+#ifdef CONFIG_ALTIVEC
+ DEFINE(VCPU_VRS, offsetof(struct kvm_vcpu, arch.vr));
+ DEFINE(VCPU_VSCR, offsetof(struct kvm_vcpu, arch.vscr));
+#endif
+#ifdef CONFIG_VSX
+ DEFINE(VCPU_VSRS, offsetof(struct kvm_vcpu, arch.vsr));
+#endif
+ DEFINE(VCPU_XER, offsetof(struct kvm_vcpu, arch.xer));
+ DEFINE(VCPU_CTR, offsetof(struct kvm_vcpu, arch.ctr));
+ DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr));
+ DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr));
+ DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.pc));
+#ifdef CONFIG_KVM_BOOK3S_64_HV
+ DEFINE(VCPU_MSR, offsetof(struct kvm_vcpu, arch.shregs.msr));
+ DEFINE(VCPU_SRR0, offsetof(struct kvm_vcpu, arch.shregs.srr0));
+ DEFINE(VCPU_SRR1, offsetof(struct kvm_vcpu, arch.shregs.srr1));
+ DEFINE(VCPU_SPRG0, offsetof(struct kvm_vcpu, arch.shregs.sprg0));
+ DEFINE(VCPU_SPRG1, offsetof(struct kvm_vcpu, arch.shregs.sprg1));
+ DEFINE(VCPU_SPRG2, offsetof(struct kvm_vcpu, arch.shregs.sprg2));
+ DEFINE(VCPU_SPRG3, offsetof(struct kvm_vcpu, arch.shregs.sprg3));
+#endif
DEFINE(VCPU_SPRG4, offsetof(struct kvm_vcpu, arch.sprg4));
DEFINE(VCPU_SPRG5, offsetof(struct kvm_vcpu, arch.sprg5));
DEFINE(VCPU_SPRG6, offsetof(struct kvm_vcpu, arch.sprg6));
@@ -403,17 +427,60 @@ int main(void)
DEFINE(VCPU_SHADOW_MSR, offsetof(struct kvm_vcpu, arch.shadow_msr));
/* book3s */
+#ifdef CONFIG_KVM_BOOK3S_64_HV
+ DEFINE(KVM_LPID, offsetof(struct kvm, arch.lpid));
+ DEFINE(KVM_SDR1, offsetof(struct kvm, arch.sdr1));
+ DEFINE(KVM_HOST_LPID, offsetof(struct kvm, arch.host_lpid));
+ DEFINE(KVM_HOST_LPCR, offsetof(struct kvm, arch.host_lpcr));
+ DEFINE(KVM_HOST_SDR1, offsetof(struct kvm, arch.host_sdr1));
+ DEFINE(KVM_TLBIE_LOCK, offsetof(struct kvm, arch.tlbie_lock));
+ DEFINE(KVM_ONLINE_CPUS, offsetof(struct kvm, online_vcpus.counter));
+ DEFINE(KVM_LAST_VCPU, offsetof(struct kvm, arch.last_vcpu));
+ DEFINE(VCPU_DSISR, offsetof(struct kvm_vcpu, arch.shregs.dsisr));
+ DEFINE(VCPU_DAR, offsetof(struct kvm_vcpu, arch.shregs.dar));
+#endif
#ifdef CONFIG_PPC_BOOK3S
+ DEFINE(VCPU_KVM, offsetof(struct kvm_vcpu, kvm));
+ DEFINE(VCPU_VCPUID, offsetof(struct kvm_vcpu, vcpu_id));
DEFINE(VCPU_HOST_RETIP, offsetof(struct kvm_vcpu, arch.host_retip));
DEFINE(VCPU_HOST_MSR, offsetof(struct kvm_vcpu, arch.host_msr));
+ DEFINE(VCPU_PURR, offsetof(struct kvm_vcpu, arch.purr));
+ DEFINE(VCPU_SPURR, offsetof(struct kvm_vcpu, arch.spurr));
+ DEFINE(VCPU_DSCR, offsetof(struct kvm_vcpu, arch.dscr));
+ DEFINE(VCPU_AMR, offsetof(struct kvm_vcpu, arch.amr));
+ DEFINE(VCPU_UAMOR, offsetof(struct kvm_vcpu, arch.uamor));
+ DEFINE(VCPU_CTRL, offsetof(struct kvm_vcpu, arch.ctrl));
+ DEFINE(VCPU_DABR, offsetof(struct kvm_vcpu, arch.dabr));
DEFINE(VCPU_TRAMPOLINE_LOWMEM, offsetof(struct kvm_vcpu, arch.trampoline_lowmem));
DEFINE(VCPU_TRAMPOLINE_ENTER, offsetof(struct kvm_vcpu, arch.trampoline_enter));
DEFINE(VCPU_HIGHMEM_HANDLER, offsetof(struct kvm_vcpu, arch.highmem_handler));
DEFINE(VCPU_RMCALL, offsetof(struct kvm_vcpu, arch.rmcall));
DEFINE(VCPU_HFLAGS, offsetof(struct kvm_vcpu, arch.hflags));
+ DEFINE(VCPU_DEC, offsetof(struct kvm_vcpu, arch.dec));
+ DEFINE(VCPU_DEC_EXPIRES, offsetof(struct kvm_vcpu, arch.dec_expires));
+ DEFINE(VCPU_LPCR, offsetof(struct kvm_vcpu, arch.lpcr));
+ DEFINE(VCPU_MMCR, offsetof(struct kvm_vcpu, arch.mmcr));
+ DEFINE(VCPU_PMC, offsetof(struct kvm_vcpu, arch.pmc));
+ DEFINE(VCPU_SLB, offsetof(struct kvm_vcpu, arch.slb));
+ DEFINE(VCPU_SLB_MAX, offsetof(struct kvm_vcpu, arch.slb_max));
+ DEFINE(VCPU_SLB_NR, offsetof(struct kvm_vcpu, arch.slb_nr));
+ DEFINE(VCPU_LAST_CPU, offsetof(struct kvm_vcpu, arch.last_cpu));
+ DEFINE(VCPU_FAULT_DSISR, offsetof(struct kvm_vcpu, arch.fault_dsisr));
+ DEFINE(VCPU_FAULT_DAR, offsetof(struct kvm_vcpu, arch.fault_dar));
+ DEFINE(VCPU_LAST_INST, offsetof(struct kvm_vcpu, arch.last_inst));
+ DEFINE(VCPU_TRAP, offsetof(struct kvm_vcpu, arch.trap));
+ DEFINE(VCPU_SVCPU, offsetof(struct kvmppc_vcpu_book3s, shadow_vcpu) -
+ offsetof(struct kvmppc_vcpu_book3s, vcpu));
+ DEFINE(VCPU_SLB_E, offsetof(struct kvmppc_slb, orige));
+ DEFINE(VCPU_SLB_V, offsetof(struct kvmppc_slb, origv));
+ DEFINE(VCPU_SLB_SIZE, sizeof(struct kvmppc_slb));
#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_KVM_BOOK3S_PR
# define SVCPU_FIELD(x, f) DEFINE(x, offsetof(struct paca_struct, shadow_vcpu.f))
+#else
+# define SVCPU_FIELD(x, f)
+#endif
# define HSTATE_FIELD(x, f) DEFINE(x, offsetof(struct paca_struct, kvm_hstate.f))
#else /* 32-bit */
# define SVCPU_FIELD(x, f) DEFINE(x, offsetof(struct kvmppc_book3s_shadow_vcpu, f))
@@ -453,11 +520,23 @@ int main(void)
HSTATE_FIELD(HSTATE_HOST_R1, host_r1);
HSTATE_FIELD(HSTATE_HOST_R2, host_r2);
+ HSTATE_FIELD(HSTATE_HOST_MSR, host_msr);
HSTATE_FIELD(HSTATE_VMHANDLER, vmhandler);
HSTATE_FIELD(HSTATE_SCRATCH0, scratch0);
HSTATE_FIELD(HSTATE_SCRATCH1, scratch1);
HSTATE_FIELD(HSTATE_IN_GUEST, in_guest);
+#ifdef CONFIG_KVM_BOOK3S_64_HV
+ HSTATE_FIELD(HSTATE_KVM_VCPU, kvm_vcpu);
+ HSTATE_FIELD(HSTATE_MMCR, host_mmcr);
+ HSTATE_FIELD(HSTATE_PMC, host_pmc);
+ HSTATE_FIELD(HSTATE_PURR, host_purr);
+ HSTATE_FIELD(HSTATE_SPURR, host_spurr);
+ HSTATE_FIELD(HSTATE_DSCR, host_dscr);
+ HSTATE_FIELD(HSTATE_DABR, dabr);
+ HSTATE_FIELD(HSTATE_DECEXP, dec_expires);
+#endif /* CONFIG_KVM_BOOK3S_64_HV */
+
#else /* CONFIG_PPC_BOOK3S */
DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr));
DEFINE(VCPU_XER, offsetof(struct kvm_vcpu, arch.xer));
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 6da00550afe..163c041cec2 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -87,14 +87,14 @@ data_access_not_stab:
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB)
#endif
EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common, EXC_STD,
- KVMTEST, 0x300)
+ KVMTEST_PR, 0x300)
. = 0x380
.globl data_access_slb_pSeries
data_access_slb_pSeries:
HMT_MEDIUM
SET_SCRATCH0(r13)
- EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST, 0x380)
+ EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST_PR, 0x380)
std r3,PACA_EXSLB+EX_R3(r13)
mfspr r3,SPRN_DAR
#ifdef __DISABLED__
@@ -125,7 +125,7 @@ data_access_slb_pSeries:
instruction_access_slb_pSeries:
HMT_MEDIUM
SET_SCRATCH0(r13)
- EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST, 0x480)
+ EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST_PR, 0x480)
std r3,PACA_EXSLB+EX_R3(r13)
mfspr r3,SPRN_SRR0 /* SRR0 is faulting address */
#ifdef __DISABLED__
@@ -153,32 +153,32 @@ instruction_access_slb_pSeries:
hardware_interrupt_pSeries:
hardware_interrupt_hv:
BEGIN_FTR_SECTION
- _MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt,
- EXC_STD, SOFTEN_TEST)
- KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x500)
- FTR_SECTION_ELSE
_MASKABLE_EXCEPTION_PSERIES(0x502, hardware_interrupt,
EXC_HV, SOFTEN_TEST_HV)
KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x502)
- ALT_FTR_SECTION_END_IFCLR(CPU_FTR_HVMODE_206)
+ FTR_SECTION_ELSE
+ _MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt,
+ EXC_STD, SOFTEN_TEST_PR)
+ KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x500)
+ ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE_206)
STD_EXCEPTION_PSERIES(0x600, 0x600, alignment)
- KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x600)
+ KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x600)
STD_EXCEPTION_PSERIES(0x700, 0x700, program_check)
- KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x700)
+ KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x700)
STD_EXCEPTION_PSERIES(0x800, 0x800, fp_unavailable)
- KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x800)
+ KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x800)
MASKABLE_EXCEPTION_PSERIES(0x900, 0x900, decrementer)
MASKABLE_EXCEPTION_HV(0x980, 0x982, decrementer)
STD_EXCEPTION_PSERIES(0xa00, 0xa00, trap_0a)
- KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xa00)
+ KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xa00)
STD_EXCEPTION_PSERIES(0xb00, 0xb00, trap_0b)
- KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xb00)
+ KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xb00)
. = 0xc00
.globl system_call_pSeries
@@ -219,7 +219,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
b .
STD_EXCEPTION_PSERIES(0xd00, 0xd00, single_step)
- KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xd00)
+ KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xd00)
/* At 0xe??? we have a bunch of hypervisor exceptions, we branch
* out of line to handle them
@@ -254,23 +254,23 @@ vsx_unavailable_pSeries_1:
#ifdef CONFIG_CBE_RAS
STD_EXCEPTION_HV(0x1200, 0x1202, cbe_system_error)
- KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1202)
+ KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_HV, 0x1202)
#endif /* CONFIG_CBE_RAS */
STD_EXCEPTION_PSERIES(0x1300, 0x1300, instruction_breakpoint)
- KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x1300)
+ KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_STD, 0x1300)
#ifdef CONFIG_CBE_RAS
STD_EXCEPTION_HV(0x1600, 0x1602, cbe_maintenance)
- KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1602)
+ KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_HV, 0x1602)
#endif /* CONFIG_CBE_RAS */
STD_EXCEPTION_PSERIES(0x1700, 0x1700, altivec_assist)
- KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x1700)
+ KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x1700)
#ifdef CONFIG_CBE_RAS
STD_EXCEPTION_HV(0x1800, 0x1802, cbe_thermal)
- KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1802)
+ KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_HV, 0x1802)
#endif /* CONFIG_CBE_RAS */
. = 0x3000
@@ -297,7 +297,7 @@ data_access_check_stab:
mfspr r9,SPRN_DSISR
srdi r10,r10,60
rlwimi r10,r9,16,0x20
-#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
+#ifdef CONFIG_KVM_BOOK3S_PR
lbz r9,HSTATE_IN_GUEST(r13)
rlwimi r10,r9,8,0x300
#endif
@@ -316,11 +316,11 @@ do_stab_bolted_pSeries:
EXCEPTION_PROLOG_PSERIES_1(.do_stab_bolted, EXC_STD)
#endif /* CONFIG_POWER4_ONLY */
- KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x300)
- KVM_HANDLER_SKIP(PACA_EXSLB, EXC_STD, 0x380)
- KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x400)
- KVM_HANDLER(PACA_EXSLB, EXC_STD, 0x480)
- KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x900)
+ KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_STD, 0x300)
+ KVM_HANDLER_PR_SKIP(PACA_EXSLB, EXC_STD, 0x380)
+ KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x400)
+ KVM_HANDLER_PR(PACA_EXSLB, EXC_STD, 0x480)
+ KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x900)
KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x982)
.align 7
@@ -336,11 +336,11 @@ do_stab_bolted_pSeries:
/* moved from 0xf00 */
STD_EXCEPTION_PSERIES(., 0xf00, performance_monitor)
- KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xf00)
+ KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf00)
STD_EXCEPTION_PSERIES(., 0xf20, altivec_unavailable)
- KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xf20)
+ KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf20)
STD_EXCEPTION_PSERIES(., 0xf40, vsx_unavailable)
- KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xf40)
+ KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf40)
/*
* An interrupt came in while soft-disabled; clear EE in SRR1,
@@ -417,7 +417,11 @@ slb_miss_user_pseries:
/* KVM's trampoline code needs to be close to the interrupt handlers */
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
+#ifdef CONFIG_KVM_BOOK3S_PR
#include "../kvm/book3s_rmhandlers.S"
+#else
+#include "../kvm/book3s_hv_rmhandlers.S"
+#endif
#endif
.align 7
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 60ac2a9251d..ec2d0edeb13 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -96,6 +96,7 @@ void flush_fp_to_thread(struct task_struct *tsk)
preempt_enable();
}
}
+EXPORT_SYMBOL_GPL(flush_fp_to_thread);
void enable_kernel_fp(void)
{
@@ -145,6 +146,7 @@ void flush_altivec_to_thread(struct task_struct *tsk)
preempt_enable();
}
}
+EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
@@ -186,6 +188,7 @@ void flush_vsx_to_thread(struct task_struct *tsk)
preempt_enable();
}
}
+EXPORT_SYMBOL_GPL(flush_vsx_to_thread);
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 79fca2651b6..22051ef04bd 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -375,6 +375,9 @@ void __init check_for_initrd(void)
int threads_per_core, threads_shift;
cpumask_t threads_core_mask;
+EXPORT_SYMBOL_GPL(threads_per_core);
+EXPORT_SYMBOL_GPL(threads_shift);
+EXPORT_SYMBOL_GPL(threads_core_mask);
static void __init cpu_init_thread_core_maps(int tpc)
{
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 8ebc6700b98..09a85a9045d 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -243,6 +243,7 @@ void smp_send_reschedule(int cpu)
if (likely(smp_ops))
smp_ops->message_pass(cpu, PPC_MSG_RESCHEDULE);
}
+EXPORT_SYMBOL_GPL(smp_send_reschedule);
void arch_send_call_function_single_ipi(int cpu)
{