diff options
Diffstat (limited to 'arch/powerpc/kernel/paca.c')
| -rw-r--r-- | arch/powerpc/kernel/paca.c | 49 | 
1 file changed, 38 insertions(+), 11 deletions(-)
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c index 3fc16e3beb9..d6e195e8cd4 100644 --- a/arch/powerpc/kernel/paca.c +++ b/arch/powerpc/kernel/paca.c @@ -46,7 +46,7 @@ struct lppaca lppaca[] = {  static struct lppaca *extra_lppacas;  static long __initdata lppaca_size; -static void allocate_lppacas(int nr_cpus, unsigned long limit) +static void __init allocate_lppacas(int nr_cpus, unsigned long limit)  {  	if (nr_cpus <= NR_LPPACAS)  		return; @@ -57,7 +57,7 @@ static void allocate_lppacas(int nr_cpus, unsigned long limit)  						 PAGE_SIZE, limit));  } -static struct lppaca *new_lppaca(int cpu) +static struct lppaca * __init new_lppaca(int cpu)  {  	struct lppaca *lp; @@ -70,7 +70,7 @@ static struct lppaca *new_lppaca(int cpu)  	return lp;  } -static void free_lppacas(void) +static void __init free_lppacas(void)  {  	long new_size = 0, nr; @@ -98,13 +98,32 @@ static inline void free_lppacas(void) { }  /*   * 3 persistent SLBs are registered here.  The buffer will be zero   * initially, hence will all be invaild until we actually write them. + * + * If you make the number of persistent SLB entries dynamic, please also + * update PR KVM to flush and restore them accordingly.   */ -struct slb_shadow slb_shadow[] __cacheline_aligned = { -	[0 ... 
(NR_CPUS-1)] = { -		.persistent = cpu_to_be32(SLB_NUM_BOLTED), -		.buffer_length = cpu_to_be32(sizeof(struct slb_shadow)), -	}, -}; +static struct slb_shadow *slb_shadow; + +static void __init allocate_slb_shadows(int nr_cpus, int limit) +{ +	int size = PAGE_ALIGN(sizeof(struct slb_shadow) * nr_cpus); +	slb_shadow = __va(memblock_alloc_base(size, PAGE_SIZE, limit)); +	memset(slb_shadow, 0, size); +} + +static struct slb_shadow * __init init_slb_shadow(int cpu) +{ +	struct slb_shadow *s = &slb_shadow[cpu]; + +	s->persistent = cpu_to_be32(SLB_NUM_BOLTED); +	s->buffer_length = cpu_to_be32(sizeof(*s)); + +	return s; +} + +#else /* CONFIG_PPC_STD_MMU_64 */ + +static void __init allocate_slb_shadows(int nr_cpus, int limit) { }  #endif /* CONFIG_PPC_STD_MMU_64 */ @@ -136,14 +155,20 @@ void __init initialise_paca(struct paca_struct *new_paca, int cpu)  	new_paca->paca_index = cpu;  	new_paca->kernel_toc = kernel_toc;  	new_paca->kernelbase = (unsigned long) _stext; -	new_paca->kernel_msr = MSR_KERNEL; +	/* Only set MSR:IR/DR when MMU is initialized */ +	new_paca->kernel_msr = MSR_KERNEL & ~(MSR_IR | MSR_DR);  	new_paca->hw_cpu_id = 0xffff;  	new_paca->kexec_state = KEXEC_STATE_NONE;  	new_paca->__current = &init_task;  	new_paca->data_offset = 0xfeeeeeeeeeeeeeeeULL;  #ifdef CONFIG_PPC_STD_MMU_64 -	new_paca->slb_shadow_ptr = &slb_shadow[cpu]; +	new_paca->slb_shadow_ptr = init_slb_shadow(cpu);  #endif /* CONFIG_PPC_STD_MMU_64 */ + +#ifdef CONFIG_PPC_BOOK3E +	/* For now -- if we have threads this will be adjusted later */ +	new_paca->tcd_ptr = &new_paca->tcd; +#endif  }  /* Put the paca pointer into r13 and SPRG_PACA */ @@ -190,6 +215,8 @@ void __init allocate_pacas(void)  	allocate_lppacas(nr_cpu_ids, limit); +	allocate_slb_shadows(nr_cpu_ids, limit); +  	/* Can't use for_each_*_cpu, as they aren't functional yet */  	for (cpu = 0; cpu < nr_cpu_ids; cpu++)  		initialise_paca(&paca[cpu], cpu);  | 
