Diffstat (limited to 'arch/powerpc/kernel/kvm.c')
-rw-r--r--	arch/powerpc/kernel/kvm.c	394
1 file changed, 270 insertions(+), 124 deletions(-)
diff --git a/arch/powerpc/kernel/kvm.c b/arch/powerpc/kernel/kvm.c
index b06bdae0406..33aa4ddf597 100644
--- a/arch/powerpc/kernel/kvm.c
+++ b/arch/powerpc/kernel/kvm.c
@@ -1,5 +1,6 @@
 /*
  * Copyright (C) 2010 SUSE Linux Products GmbH. All rights reserved.
+ * Copyright 2010-2011 Freescale Semiconductor, Inc.
  *
  * Authors:
  *     Alexander Graf <agraf@suse.de>
@@ -20,6 +21,7 @@
 #include <linux/kvm_host.h>
 #include <linux/init.h>
+#include <linux/export.h>
 #include <linux/kvm_para.h>
 #include <linux/slab.h>
 #include <linux/of.h>
@@ -28,6 +30,8 @@
 #include <asm/sections.h>
 #include <asm/cacheflush.h>
 #include <asm/disassemble.h>
+#include <asm/ppc-opcode.h>
+#include <asm/epapr_hcalls.h>
 
 #define KVM_MAGIC_PAGE		(-4096L)
 #define magic_var(x) KVM_MAGIC_PAGE + offsetof(struct kvm_vcpu_arch_shared, x)
@@ -40,41 +44,37 @@
 #define KVM_INST_B		0x48000000
 #define KVM_INST_B_MASK		0x03ffffff
 #define KVM_INST_B_MAX		0x01ffffff
+#define KVM_INST_LI		0x38000000
 
 #define KVM_MASK_RT		0x03e00000
 #define KVM_RT_30		0x03c00000
 #define KVM_MASK_RB		0x0000f800
 #define KVM_INST_MFMSR		0x7c0000a6
-#define KVM_INST_MFSPR_SPRG0	0x7c1042a6
-#define KVM_INST_MFSPR_SPRG1	0x7c1142a6
-#define KVM_INST_MFSPR_SPRG2	0x7c1242a6
-#define KVM_INST_MFSPR_SPRG3	0x7c1342a6
-#define KVM_INST_MFSPR_SRR0	0x7c1a02a6
-#define KVM_INST_MFSPR_SRR1	0x7c1b02a6
-#define KVM_INST_MFSPR_DAR	0x7c1302a6
-#define KVM_INST_MFSPR_DSISR	0x7c1202a6
-
-#define KVM_INST_MTSPR_SPRG0	0x7c1043a6
-#define KVM_INST_MTSPR_SPRG1	0x7c1143a6
-#define KVM_INST_MTSPR_SPRG2	0x7c1243a6
-#define KVM_INST_MTSPR_SPRG3	0x7c1343a6
-#define KVM_INST_MTSPR_SRR0	0x7c1a03a6
-#define KVM_INST_MTSPR_SRR1	0x7c1b03a6
-#define KVM_INST_MTSPR_DAR	0x7c1303a6
-#define KVM_INST_MTSPR_DSISR	0x7c1203a6
+
+#define SPR_FROM		0
+#define SPR_TO			0x100
+
+#define KVM_INST_SPR(sprn, moveto) (0x7c0002a6 | \
+				    (((sprn) & 0x1f) << 16) | \
+				    (((sprn) & 0x3e0) << 6) | \
+				    (moveto))
+
+#define KVM_INST_MFSPR(sprn)	KVM_INST_SPR(sprn, SPR_FROM)
+#define KVM_INST_MTSPR(sprn)	KVM_INST_SPR(sprn, SPR_TO)
 
 #define KVM_INST_TLBSYNC	0x7c00046c
 #define KVM_INST_MTMSRD_L0	0x7c000164
 #define KVM_INST_MTMSRD_L1	0x7c010164
 #define KVM_INST_MTMSR		0x7c000124
+#define KVM_INST_WRTEE		0x7c000106
 #define KVM_INST_WRTEEI_0	0x7c000146
 #define KVM_INST_WRTEEI_1	0x7c008146
 #define KVM_INST_MTSRIN		0x7c0001e4
 
 static bool kvm_patching_worked = true;
-static char kvm_tmp[1024 * 1024];
+char kvm_tmp[1024 * 1024];
 static int kvm_tmp_index;
 
 static inline void kvm_patch_ins(u32 *inst, u32 new_inst)
@@ -131,7 +131,6 @@ static void kvm_patch_ins_b(u32 *inst, int addr)
 	/* On relocatable kernels interrupts handlers and our code
 	   can be in different regions, so we don't patch them */
-	extern u32 __end_interrupts;
 	if ((ulong)inst < (ulong)&__end_interrupts)
 		return;
 #endif
 
@@ -270,26 +269,27 @@ static void kvm_patch_ins_mtmsr(u32 *inst, u32 rt)
 
 #ifdef CONFIG_BOOKE
 
-extern u32 kvm_emulate_wrteei_branch_offs;
-extern u32 kvm_emulate_wrteei_ee_offs;
-extern u32 kvm_emulate_wrteei_len;
-extern u32 kvm_emulate_wrteei[];
+extern u32 kvm_emulate_wrtee_branch_offs;
+extern u32 kvm_emulate_wrtee_reg_offs;
+extern u32 kvm_emulate_wrtee_orig_ins_offs;
+extern u32 kvm_emulate_wrtee_len;
+extern u32 kvm_emulate_wrtee[];
 
-static void kvm_patch_ins_wrteei(u32 *inst)
+static void kvm_patch_ins_wrtee(u32 *inst, u32 rt, int imm_one)
 {
 	u32 *p;
 	int distance_start;
 	int distance_end;
 	ulong next_inst;
 
-	p = kvm_alloc(kvm_emulate_wrteei_len * 4);
+	p = kvm_alloc(kvm_emulate_wrtee_len * 4);
 	if (!p)
 		return;
 
 	/* Find out where we are and put everything there */
 	distance_start = (ulong)p - (ulong)inst;
 	next_inst = ((ulong)inst + 4);
-	distance_end = next_inst - (ulong)&p[kvm_emulate_wrteei_branch_offs];
+	distance_end = next_inst - (ulong)&p[kvm_emulate_wrtee_branch_offs];
 
 	/* Make sure we only write valid b instructions */
 	if (distance_start > KVM_INST_B_MAX) {
@@ -298,10 +298,65 @@ static void kvm_patch_ins_wrteei(u32 *inst)
 	}
 
 	/* Modify the chunk to fit the invocation */
-	memcpy(p, kvm_emulate_wrteei, kvm_emulate_wrteei_len * 4);
-	p[kvm_emulate_wrteei_branch_offs] |= distance_end & KVM_INST_B_MASK;
-	p[kvm_emulate_wrteei_ee_offs] |= (*inst & MSR_EE);
-	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_wrteei_len * 4);
+	memcpy(p, kvm_emulate_wrtee, kvm_emulate_wrtee_len * 4);
+	p[kvm_emulate_wrtee_branch_offs] |= distance_end & KVM_INST_B_MASK;
+
+	if (imm_one) {
+		p[kvm_emulate_wrtee_reg_offs] =
+			KVM_INST_LI | __PPC_RT(R30) | MSR_EE;
+	} else {
+		/* Make clobbered registers work too */
+		switch (get_rt(rt)) {
+		case 30:
+			kvm_patch_ins_ll(&p[kvm_emulate_wrtee_reg_offs],
+					 magic_var(scratch2), KVM_RT_30);
+			break;
+		case 31:
+			kvm_patch_ins_ll(&p[kvm_emulate_wrtee_reg_offs],
+					 magic_var(scratch1), KVM_RT_30);
+			break;
+		default:
+			p[kvm_emulate_wrtee_reg_offs] |= rt;
+			break;
+		}
+	}
+
+	p[kvm_emulate_wrtee_orig_ins_offs] = *inst;
+	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_wrtee_len * 4);
+
+	/* Patch the invocation */
+	kvm_patch_ins_b(inst, distance_start);
+}
+
+extern u32 kvm_emulate_wrteei_0_branch_offs;
+extern u32 kvm_emulate_wrteei_0_len;
+extern u32 kvm_emulate_wrteei_0[];
+
+static void kvm_patch_ins_wrteei_0(u32 *inst)
+{
+	u32 *p;
+	int distance_start;
+	int distance_end;
+	ulong next_inst;
+
+	p = kvm_alloc(kvm_emulate_wrteei_0_len * 4);
+	if (!p)
+		return;
+
+	/* Find out where we are and put everything there */
+	distance_start = (ulong)p - (ulong)inst;
+	next_inst = ((ulong)inst + 4);
+	distance_end = next_inst - (ulong)&p[kvm_emulate_wrteei_0_branch_offs];
+
+	/* Make sure we only write valid b instructions */
+	if (distance_start > KVM_INST_B_MAX) {
+		kvm_patching_worked = false;
+		return;
+	}
+
+	memcpy(p, kvm_emulate_wrteei_0, kvm_emulate_wrteei_0_len * 4);
+	p[kvm_emulate_wrteei_0_branch_offs] |= distance_end & KVM_INST_B_MASK;
+	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_wrteei_0_len * 4);
 
 	/* Patch the invocation */
 	kvm_patch_ins_b(inst, distance_start);
@@ -358,13 +413,13 @@ static void kvm_map_magic_page(void *data)
 {
 	u32 *features = data;
 
-	ulong in[8];
+	ulong in[8] = {0};
 	ulong out[8];
 
 	in[0] = KVM_MAGIC_PAGE;
-	in[1] = KVM_MAGIC_PAGE;
+	in[1] = KVM_MAGIC_PAGE | MAGIC_PAGE_FLAG_NOT_MAPPED_NX;
 
-	kvm_hypercall(in, out, HC_VENDOR_KVM | KVM_HC_PPC_MAP_MAGIC_PAGE);
+	epapr_hypercall(in, out, KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE));
 
 	*features = out[0];
 }
@@ -380,56 +435,191 @@ static void kvm_check_ins(u32 *inst, u32 features)
 	case KVM_INST_MFMSR:
 		kvm_patch_ins_ld(inst, magic_var(msr), inst_rt);
 		break;
-	case KVM_INST_MFSPR_SPRG0:
+	case KVM_INST_MFSPR(SPRN_SPRG0):
 		kvm_patch_ins_ld(inst, magic_var(sprg0), inst_rt);
 		break;
-	case KVM_INST_MFSPR_SPRG1:
+	case KVM_INST_MFSPR(SPRN_SPRG1):
 		kvm_patch_ins_ld(inst, magic_var(sprg1), inst_rt);
 		break;
-	case KVM_INST_MFSPR_SPRG2:
+	case KVM_INST_MFSPR(SPRN_SPRG2):
 		kvm_patch_ins_ld(inst, magic_var(sprg2), inst_rt);
 		break;
-	case KVM_INST_MFSPR_SPRG3:
+	case KVM_INST_MFSPR(SPRN_SPRG3):
 		kvm_patch_ins_ld(inst, magic_var(sprg3), inst_rt);
 		break;
-	case KVM_INST_MFSPR_SRR0:
+	case KVM_INST_MFSPR(SPRN_SRR0):
 		kvm_patch_ins_ld(inst, magic_var(srr0), inst_rt);
 		break;
-	case KVM_INST_MFSPR_SRR1:
+	case KVM_INST_MFSPR(SPRN_SRR1):
 		kvm_patch_ins_ld(inst, magic_var(srr1), inst_rt);
 		break;
-	case KVM_INST_MFSPR_DAR:
+#ifdef CONFIG_BOOKE
+	case KVM_INST_MFSPR(SPRN_DEAR):
+#else
+	case KVM_INST_MFSPR(SPRN_DAR):
+#endif
 		kvm_patch_ins_ld(inst, magic_var(dar), inst_rt);
 		break;
-	case KVM_INST_MFSPR_DSISR:
+	case KVM_INST_MFSPR(SPRN_DSISR):
 		kvm_patch_ins_lwz(inst, magic_var(dsisr), inst_rt);
 		break;
+#ifdef CONFIG_PPC_BOOK3E_MMU
+	case KVM_INST_MFSPR(SPRN_MAS0):
+		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+			kvm_patch_ins_lwz(inst, magic_var(mas0), inst_rt);
+		break;
+	case KVM_INST_MFSPR(SPRN_MAS1):
+		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+			kvm_patch_ins_lwz(inst, magic_var(mas1), inst_rt);
+		break;
+	case KVM_INST_MFSPR(SPRN_MAS2):
+		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+			kvm_patch_ins_ld(inst, magic_var(mas2), inst_rt);
+		break;
+	case KVM_INST_MFSPR(SPRN_MAS3):
+		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+			kvm_patch_ins_lwz(inst, magic_var(mas7_3) + 4, inst_rt);
+		break;
+	case KVM_INST_MFSPR(SPRN_MAS4):
+		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+			kvm_patch_ins_lwz(inst, magic_var(mas4), inst_rt);
+		break;
+	case KVM_INST_MFSPR(SPRN_MAS6):
+		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+			kvm_patch_ins_lwz(inst, magic_var(mas6), inst_rt);
+		break;
+	case KVM_INST_MFSPR(SPRN_MAS7):
+		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+			kvm_patch_ins_lwz(inst, magic_var(mas7_3), inst_rt);
+		break;
+#endif /* CONFIG_PPC_BOOK3E_MMU */
+
+	case KVM_INST_MFSPR(SPRN_SPRG4):
+#ifdef CONFIG_BOOKE
+	case KVM_INST_MFSPR(SPRN_SPRG4R):
+#endif
+		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+			kvm_patch_ins_ld(inst, magic_var(sprg4), inst_rt);
+		break;
+	case KVM_INST_MFSPR(SPRN_SPRG5):
+#ifdef CONFIG_BOOKE
+	case KVM_INST_MFSPR(SPRN_SPRG5R):
+#endif
+		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+			kvm_patch_ins_ld(inst, magic_var(sprg5), inst_rt);
+		break;
+	case KVM_INST_MFSPR(SPRN_SPRG6):
+#ifdef CONFIG_BOOKE
+	case KVM_INST_MFSPR(SPRN_SPRG6R):
+#endif
+		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+			kvm_patch_ins_ld(inst, magic_var(sprg6), inst_rt);
+		break;
+	case KVM_INST_MFSPR(SPRN_SPRG7):
+#ifdef CONFIG_BOOKE
+	case KVM_INST_MFSPR(SPRN_SPRG7R):
+#endif
+		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+			kvm_patch_ins_ld(inst, magic_var(sprg7), inst_rt);
+		break;
+
+#ifdef CONFIG_BOOKE
+	case KVM_INST_MFSPR(SPRN_ESR):
+		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+			kvm_patch_ins_lwz(inst, magic_var(esr), inst_rt);
+		break;
+#endif
+
+	case KVM_INST_MFSPR(SPRN_PIR):
+		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+			kvm_patch_ins_lwz(inst, magic_var(pir), inst_rt);
+		break;
+
+
 	/* Stores */
-	case KVM_INST_MTSPR_SPRG0:
+	case KVM_INST_MTSPR(SPRN_SPRG0):
 		kvm_patch_ins_std(inst, magic_var(sprg0), inst_rt);
 		break;
-	case KVM_INST_MTSPR_SPRG1:
+	case KVM_INST_MTSPR(SPRN_SPRG1):
 		kvm_patch_ins_std(inst, magic_var(sprg1), inst_rt);
 		break;
-	case KVM_INST_MTSPR_SPRG2:
+	case KVM_INST_MTSPR(SPRN_SPRG2):
 		kvm_patch_ins_std(inst, magic_var(sprg2), inst_rt);
 		break;
-	case KVM_INST_MTSPR_SPRG3:
+	case KVM_INST_MTSPR(SPRN_SPRG3):
 		kvm_patch_ins_std(inst, magic_var(sprg3), inst_rt);
 		break;
-	case KVM_INST_MTSPR_SRR0:
+	case KVM_INST_MTSPR(SPRN_SRR0):
 		kvm_patch_ins_std(inst, magic_var(srr0), inst_rt);
 		break;
-	case KVM_INST_MTSPR_SRR1:
+	case KVM_INST_MTSPR(SPRN_SRR1):
 		kvm_patch_ins_std(inst, magic_var(srr1), inst_rt);
 		break;
-	case KVM_INST_MTSPR_DAR:
+#ifdef CONFIG_BOOKE
+	case KVM_INST_MTSPR(SPRN_DEAR):
+#else
+	case KVM_INST_MTSPR(SPRN_DAR):
+#endif
 		kvm_patch_ins_std(inst, magic_var(dar), inst_rt);
 		break;
-	case KVM_INST_MTSPR_DSISR:
+	case KVM_INST_MTSPR(SPRN_DSISR):
 		kvm_patch_ins_stw(inst, magic_var(dsisr), inst_rt);
 		break;
+#ifdef CONFIG_PPC_BOOK3E_MMU
+	case KVM_INST_MTSPR(SPRN_MAS0):
+		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+			kvm_patch_ins_stw(inst, magic_var(mas0), inst_rt);
+		break;
+	case KVM_INST_MTSPR(SPRN_MAS1):
+		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+			kvm_patch_ins_stw(inst, magic_var(mas1), inst_rt);
+		break;
+	case KVM_INST_MTSPR(SPRN_MAS2):
+		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+			kvm_patch_ins_std(inst, magic_var(mas2), inst_rt);
+		break;
+	case KVM_INST_MTSPR(SPRN_MAS3):
+		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+			kvm_patch_ins_stw(inst, magic_var(mas7_3) + 4, inst_rt);
+		break;
+	case KVM_INST_MTSPR(SPRN_MAS4):
+		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+			kvm_patch_ins_stw(inst, magic_var(mas4), inst_rt);
+		break;
+	case KVM_INST_MTSPR(SPRN_MAS6):
+		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+			kvm_patch_ins_stw(inst, magic_var(mas6), inst_rt);
+		break;
+	case KVM_INST_MTSPR(SPRN_MAS7):
+		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+			kvm_patch_ins_stw(inst, magic_var(mas7_3), inst_rt);
+		break;
+#endif /* CONFIG_PPC_BOOK3E_MMU */
+
+	case KVM_INST_MTSPR(SPRN_SPRG4):
+		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+			kvm_patch_ins_std(inst, magic_var(sprg4), inst_rt);
+		break;
+	case KVM_INST_MTSPR(SPRN_SPRG5):
+		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+			kvm_patch_ins_std(inst, magic_var(sprg5), inst_rt);
+		break;
+	case KVM_INST_MTSPR(SPRN_SPRG6):
+		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+			kvm_patch_ins_std(inst, magic_var(sprg6), inst_rt);
+		break;
+	case KVM_INST_MTSPR(SPRN_SPRG7):
+		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+			kvm_patch_ins_std(inst, magic_var(sprg7), inst_rt);
+		break;
+
+#ifdef CONFIG_BOOKE
+	case KVM_INST_MTSPR(SPRN_ESR):
+		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+			kvm_patch_ins_stw(inst, magic_var(esr), inst_rt);
+		break;
+#endif
 
 	/* Nops */
 	case KVM_INST_TLBSYNC:
@@ -444,6 +634,11 @@ static void kvm_check_ins(u32 *inst, u32 features)
 	case KVM_INST_MTMSRD_L0:
 		kvm_patch_ins_mtmsr(inst, inst_rt);
 		break;
+#ifdef CONFIG_BOOKE
+	case KVM_INST_WRTEE:
+		kvm_patch_ins_wrtee(inst, inst_rt, 0);
+		break;
+#endif
 	}
 
 	switch (inst_no_rt & ~KVM_MASK_RB) {
@@ -461,13 +656,19 @@ static void kvm_check_ins(u32 *inst, u32 features)
 	switch (_inst) {
 #ifdef CONFIG_BOOKE
 	case KVM_INST_WRTEEI_0:
+		kvm_patch_ins_wrteei_0(inst);
+		break;
+
 	case KVM_INST_WRTEEI_1:
-		kvm_patch_ins_wrteei(inst);
+		kvm_patch_ins_wrtee(inst, 0, 1);
 		break;
 #endif
 	}
 }
 
+extern u32 kvm_template_start[];
+extern u32 kvm_template_end[];
+
 static void kvm_use_magic_page(void)
 {
 	u32 *p;
@@ -488,87 +689,32 @@ static void kvm_use_magic_page(void)
 	start = (void*)_stext;
 	end = (void*)_etext;
 
-	for (p = start; p < end; p++)
+	/*
+	 * Being interrupted in the middle of patching would
+	 * be bad for SPRG4-7, which KVM can't keep in sync
+	 * with emulated accesses because reads don't trap.
+	 */
+	local_irq_disable();
+
+	for (p = start; p < end; p++) {
+		/* Avoid patching the template code */
+		if (p >= kvm_template_start && p < kvm_template_end) {
+			p = kvm_template_end - 1;
+			continue;
+		}
 		kvm_check_ins(p, features);
+	}
+
+	local_irq_enable();
 
 	printk(KERN_INFO "KVM: Live patching for a fast VM %s\n",
 			 kvm_patching_worked ? "worked" : "failed");
 }
 
-unsigned long kvm_hypercall(unsigned long *in,
-			    unsigned long *out,
-			    unsigned long nr)
-{
-	unsigned long register r0 asm("r0");
-	unsigned long register r3 asm("r3") = in[0];
-	unsigned long register r4 asm("r4") = in[1];
-	unsigned long register r5 asm("r5") = in[2];
-	unsigned long register r6 asm("r6") = in[3];
-	unsigned long register r7 asm("r7") = in[4];
-	unsigned long register r8 asm("r8") = in[5];
-	unsigned long register r9 asm("r9") = in[6];
-	unsigned long register r10 asm("r10") = in[7];
-	unsigned long register r11 asm("r11") = nr;
-	unsigned long register r12 asm("r12");
-
-	asm volatile("bl	kvm_hypercall_start"
-		     : "=r"(r0), "=r"(r3), "=r"(r4), "=r"(r5), "=r"(r6),
-		       "=r"(r7), "=r"(r8), "=r"(r9), "=r"(r10), "=r"(r11),
-		       "=r"(r12)
-		     : "r"(r3), "r"(r4), "r"(r5), "r"(r6), "r"(r7), "r"(r8),
-		       "r"(r9), "r"(r10), "r"(r11)
-		     : "memory", "cc", "xer", "ctr", "lr");
-
-	out[0] = r4;
-	out[1] = r5;
-	out[2] = r6;
-	out[3] = r7;
-	out[4] = r8;
-	out[5] = r9;
-	out[6] = r10;
-	out[7] = r11;
-
-	return r3;
-}
-EXPORT_SYMBOL_GPL(kvm_hypercall);
-
-static int kvm_para_setup(void)
-{
-	extern u32 kvm_hypercall_start;
-	struct device_node *hyper_node;
-	u32 *insts;
-	int len, i;
-
-	hyper_node = of_find_node_by_path("/hypervisor");
-	if (!hyper_node)
-		return -1;
-
-	insts = (u32*)of_get_property(hyper_node, "hcall-instructions", &len);
-	if (len % 4)
-		return -1;
-	if (len > (4 * 4))
-		return -1;
-
-	for (i = 0; i < (len / 4); i++)
-		kvm_patch_ins(&(&kvm_hypercall_start)[i], insts[i]);
-
-	return 0;
-}
-
 static __init void kvm_free_tmp(void)
 {
-	unsigned long start, end;
-
-	start = (ulong)&kvm_tmp[kvm_tmp_index + (PAGE_SIZE - 1)] & PAGE_MASK;
-	end = (ulong)&kvm_tmp[ARRAY_SIZE(kvm_tmp)] & PAGE_MASK;
-
-	/* Free the tmp space we don't need */
-	for (; start < end; start += PAGE_SIZE) {
-		ClearPageReserved(virt_to_page(start));
-		init_page_count(virt_to_page(start));
-		free_page(start);
-		totalram_pages++;
-	}
+	free_reserved_area(&kvm_tmp[kvm_tmp_index],
+			   &kvm_tmp[ARRAY_SIZE(kvm_tmp)], -1, NULL);
 }
 
 static int __init kvm_guest_init(void)
@@ -576,7 +722,7 @@ static int __init kvm_guest_init(void)
 	if (!kvm_para_available())
 		goto free_tmp;
 
-	if (kvm_para_setup())
+	if (!epapr_paravirt_enabled)
 		goto free_tmp;
 
 	if (kvm_para_has_feature(KVM_FEATURE_MAGIC_PAGE))
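Note: the KVM_INST_SPR() macro introduced above replaces the table of hard-coded mfspr/mtspr opcodes that the patch deletes. A PowerPC mfspr/mtspr stores the 10-bit SPR number with its two 5-bit halves swapped, which is exactly what the two shift-and-mask terms rebuild; and because the macro folds to a compile-time constant, the case labels in kvm_check_ins() remain legal C. A standalone sanity check (a hypothetical test harness, not part of the patch) that the macro reproduces the deleted constants, using the architected SPR numbers SPRG0 = 272 and SRR0 = 26:

#include <assert.h>
#include <stdio.h>

#define SPR_FROM	0
#define SPR_TO		0x100

/* Same expression as the patch: SPR number split into two 5-bit halves */
#define KVM_INST_SPR(sprn, moveto) (0x7c0002a6 | \
				    (((sprn) & 0x1f) << 16) | \
				    (((sprn) & 0x3e0) << 6) | \
				    (moveto))

#define KVM_INST_MFSPR(sprn)	KVM_INST_SPR(sprn, SPR_FROM)
#define KVM_INST_MTSPR(sprn)	KVM_INST_SPR(sprn, SPR_TO)

int main(void)
{
	/* Constants removed by the patch, rebuilt from the SPR numbers */
	assert(KVM_INST_MFSPR(272) == 0x7c1042a6);	/* KVM_INST_MFSPR_SPRG0 */
	assert(KVM_INST_MTSPR(272) == 0x7c1043a6);	/* KVM_INST_MTSPR_SPRG0 */
	assert(KVM_INST_MFSPR(26) == 0x7c1a02a6);	/* KVM_INST_MFSPR_SRR0 */
	assert(KVM_INST_MTSPR(26) == 0x7c1a03a6);	/* KVM_INST_MTSPR_SRR0 */
	printf("mfspr/mtspr encodings match\n");
	return 0;
}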
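Note: the wrtee/wrteei trampolines are only installed when the branch distance fits. An unconditional PowerPC b instruction carries a 26-bit displacement field, which is why kvm_patch_ins_wrtee() bails out with kvm_patching_worked = false once distance_start exceeds KVM_INST_B_MAX, and why distance_end is masked with KVM_INST_B_MASK before being OR-ed into the branch. A minimal standalone sketch of that encoding, under the patch's constants (ppc_inst_b is a made-up helper name, not a kernel API):

#include <assert.h>
#include <stdint.h>

#define KVM_INST_B	0x48000000u
#define KVM_INST_B_MASK	0x03ffffffu	/* LI field plus the AA/LK bits */
#define KVM_INST_B_MAX	0x01ffffffu	/* largest encodable forward distance */

/* Encode "b target" as a relative branch; offset is the byte distance
 * from the branch instruction itself and must be word-aligned, so the
 * low AA/LK bits stay clear. */
static uint32_t ppc_inst_b(int32_t offset)
{
	return KVM_INST_B | ((uint32_t)offset & KVM_INST_B_MASK);
}

int main(void)
{
	assert(ppc_inst_b(4) == 0x48000004);	/* b .+4 */
	assert(ppc_inst_b(-4) == 0x4bfffffc);	/* b .-4: sign bits fold into LI */
	return 0;
}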
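Note: the ld/lwz/std/stw patching works because of where the magic page sits. KVM_MAGIC_PAGE is -4096, the highest page of the address space, so every magic_var() offset lands in [-4096, -1] and fits the signed 16-bit displacement of a PowerPC load or store with a zero base register; that is what lets a trapping mfspr be rewritten as a single ordinary load. A sketch of the address arithmetic (the struct below is a mock with illustrative fields only; the real layout is struct kvm_vcpu_arch_shared in asm/kvm_para.h):

#include <stdio.h>
#include <stddef.h>

/* Mock layout; field names and order are illustrative, not the real ABI. */
struct kvm_vcpu_arch_shared {
	unsigned long scratch1;
	unsigned long scratch2;
	unsigned long sprg0;
	unsigned long msr;
};

#define KVM_MAGIC_PAGE	(-4096L)
#define magic_var(x) (KVM_MAGIC_PAGE + (long)offsetof(struct kvm_vcpu_arch_shared, x))

int main(void)
{
	/* Each offset stays within [-4096, -1], reachable through the
	 * signed 16-bit displacement of ld/lwz with base register 0. */
	printf("magic_var(msr) = %ld\n", magic_var(msr));	/* -4072 on LP64 */
	return 0;
}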
