Diffstat (limited to 'arch')
-rw-r--r--  arch/ia64/kernel/efi.c   |  46
-rw-r--r--  arch/ia64/kernel/setup.c |   6
-rw-r--r--  arch/ia64/mm/tlb.c       | 125
3 files changed, 160 insertions, 17 deletions
diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c
index 728d7247a1a..003cd09b073 100644
--- a/arch/ia64/kernel/efi.c
+++ b/arch/ia64/kernel/efi.c
@@ -37,6 +37,7 @@
 #include <asm/pgtable.h>
 #include <asm/processor.h>
 #include <asm/mca.h>
+#include <asm/tlbflush.h>
 
 #define EFI_DEBUG	0
 
@@ -403,6 +404,41 @@ efi_get_pal_addr (void)
 	return NULL;
 }
 
+
+static u8 __init palo_checksum(u8 *buffer, u32 length)
+{
+	u8 sum = 0;
+	u8 *end = buffer + length;
+
+	while (buffer < end)
+		sum = (u8) (sum + *(buffer++));
+
+	return sum;
+}
+
+/*
+ * Parse and handle PALO table which is published at:
+ * http://www.dig64.org/home/DIG64_PALO_R1_0.pdf
+ */
+static void __init handle_palo(unsigned long palo_phys)
+{
+	struct palo_table *palo = __va(palo_phys);
+	u8  checksum;
+
+	if (strncmp(palo->signature, PALO_SIG, sizeof(PALO_SIG) - 1)) {
+		printk(KERN_INFO "PALO signature incorrect.\n");
+		return;
+	}
+
+	checksum = palo_checksum((u8 *)palo, palo->length);
+	if (checksum) {
+		printk(KERN_INFO "PALO checksum incorrect.\n");
+		return;
+	}
+
+	setup_ptcg_sem(palo->max_tlb_purges, 1);
+}
+
 void
 efi_map_pal_code (void)
 {
@@ -432,6 +468,7 @@ efi_init (void)
 	u64 efi_desc_size;
 	char *cp, vendor[100] = "unknown";
 	int i;
+	unsigned long palo_phys;
 
 	/*
 	 * It's too early to be able to use the standard kernel command line
@@ -496,6 +533,8 @@ efi_init (void)
 	efi.hcdp       = EFI_INVALID_TABLE_ADDR;
 	efi.uga        = EFI_INVALID_TABLE_ADDR;
 
+	palo_phys      = EFI_INVALID_TABLE_ADDR;
+
 	for (i = 0; i < (int) efi.systab->nr_tables; i++) {
 		if (efi_guidcmp(config_tables[i].guid, MPS_TABLE_GUID) == 0) {
 			efi.mps = config_tables[i].table;
@@ -515,10 +554,17 @@ efi_init (void)
 		} else if (efi_guidcmp(config_tables[i].guid, HCDP_TABLE_GUID) == 0) {
 			efi.hcdp = config_tables[i].table;
 			printk(" HCDP=0x%lx", config_tables[i].table);
+		} else if (efi_guidcmp(config_tables[i].guid,
+			 PROCESSOR_ABSTRACTION_LAYER_OVERWRITE_GUID) == 0) {
+			palo_phys = config_tables[i].table;
+			printk(" PALO=0x%lx", config_tables[i].table);
 		}
 	}
 	printk("\n");
 
+	if (palo_phys != EFI_INVALID_TABLE_ADDR)
+		handle_palo(palo_phys);
+
 	runtime = __va(efi.systab->runtime);
 	efi.get_time = phys_get_time;
 	efi.set_time = phys_set_time;
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 4aa9eaea76c..1cbd26340d8 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -59,6 +59,7 @@
 #include <asm/setup.h>
 #include <asm/smp.h>
 #include <asm/system.h>
+#include <asm/tlbflush.h>
 #include <asm/unistd.h>
 #include <asm/hpsim.h>
 
@@ -946,9 +947,10 @@ cpu_init (void)
 #endif
 
 	/* set ia64_ctx.max_rid to the maximum RID that is supported by all CPUs: */
-	if (ia64_pal_vm_summary(NULL, &vmi) == 0)
+	if (ia64_pal_vm_summary(NULL, &vmi) == 0) {
 		max_ctx = (1U << (vmi.pal_vm_info_2_s.rid_size - 3)) - 1;
-	else {
+		setup_ptcg_sem(vmi.pal_vm_info_2_s.max_purges, 0);
+	} else {
 		printk(KERN_WARNING "cpu_init: PAL VM summary failed, assuming 18 RID bits\n");
 		max_ctx = (1U << 15) - 1;	/* use architected minimum */
 	}
diff --git a/arch/ia64/mm/tlb.c b/arch/ia64/mm/tlb.c
index 655da240d13..d41d6076ed0 100644
--- a/arch/ia64/mm/tlb.c
+++ b/arch/ia64/mm/tlb.c
@@ -11,6 +11,9 @@
  * Rohit Seth <rohit.seth@intel.com>
  * Ken Chen <kenneth.w.chen@intel.com>
  * Christophe de Dinechin <ddd@hp.com>: Avoid ptc.e on memory allocation
+ * Copyright (C) 2007 Intel Corp
+ *	Fenghua Yu <fenghua.yu@intel.com>
+ *	Add multiple ptc.g/ptc.ga instruction support in global tlb purge.
  */
 #include <linux/module.h>
 #include <linux/init.h>
@@ -26,6 +29,7 @@
 #include <asm/pal.h>
 #include <asm/tlbflush.h>
 #include <asm/dma.h>
+#include <asm/sal.h>
 
 static struct {
 	unsigned long mask;	/* mask of supported purge page-sizes */
@@ -84,14 +88,104 @@ wrap_mmu_context (struct mm_struct *mm)
 	local_flush_tlb_all();
 }
 
+/*
+ * Implement "spinaphores" ... like counting semaphores, but they
+ * spin instead of sleeping.  If there are ever any other users for
+ * this primitive it can be moved up to a spinaphore.h header.
+ */
+struct spinaphore {
+	atomic_t	cur;
+};
+
+static inline void spinaphore_init(struct spinaphore *ss, int val)
+{
+	atomic_set(&ss->cur, val);
+}
+
+static inline void down_spin(struct spinaphore *ss)
+{
+	while (unlikely(!atomic_add_unless(&ss->cur, -1, 0)))
+		while (atomic_read(&ss->cur) == 0)
+			cpu_relax();
+}
+
+static inline void up_spin(struct spinaphore *ss)
+{
+	atomic_add(1, &ss->cur);
+}
+
+static struct spinaphore ptcg_sem;
+static u16 nptcg = 1;
+static int need_ptcg_sem = 1;
+static int toolatetochangeptcgsem = 0;
+
+/*
+ * Maximum number of simultaneous ptc.g purges in the system can
+ * be defined by PAL_VM_SUMMARY (in which case we should take
+ * the smallest value for any cpu in the system) or by the PAL
+ * override table (in which case we should ignore the value from
+ * PAL_VM_SUMMARY).
+ *
+ * Complicating the logic here is the fact that num_possible_cpus()
+ * isn't fully setup until we start bringing cpus online.
+ */
+void
+setup_ptcg_sem(int max_purges, int from_palo)
+{
+	static int have_palo;
+	static int firstcpu = 1;
+
+	if (toolatetochangeptcgsem) {
+		BUG_ON(max_purges < nptcg);
+		return;
+	}
+
+	if (from_palo) {
+		have_palo = 1;
+
+		/* In PALO max_purges == 0 really means it! */
+		if (max_purges == 0)
+			panic("Whoa! Platform does not support global TLB purges.\n");
+		nptcg = max_purges;
+		if (nptcg == PALO_MAX_TLB_PURGES) {
+			need_ptcg_sem = 0;
+			return;
+		}
+		goto resetsema;
+	}
+	if (have_palo) {
+		if (nptcg != PALO_MAX_TLB_PURGES)
+			need_ptcg_sem = (num_possible_cpus() > nptcg);
+		return;
+	}
+
+	/* In PAL_VM_SUMMARY max_purges == 0 actually means 1 */
+	if (max_purges == 0) max_purges = 1;
+
+	if (firstcpu) {
+		nptcg = max_purges;
+		firstcpu = 0;
+	}
+	if (max_purges < nptcg)
+		nptcg = max_purges;
+	if (nptcg == PAL_MAX_PURGES) {
+		need_ptcg_sem = 0;
+		return;
+	} else
+		need_ptcg_sem = (num_possible_cpus() > nptcg);
+
+resetsema:
+	spinaphore_init(&ptcg_sem, max_purges);
+}
+
 void
 ia64_global_tlb_purge (struct mm_struct *mm, unsigned long start,
 		       unsigned long end, unsigned long nbits)
 {
-	static DEFINE_SPINLOCK(ptcg_lock);
-
 	struct mm_struct *active_mm = current->active_mm;
 
+	toolatetochangeptcgsem = 1;
+
 	if (mm != active_mm) {
 		/* Restore region IDs for mm */
 		if (mm && active_mm) {
@@ -102,19 +196,20 @@ ia64_global_tlb_purge (struct mm_struct *mm, unsigned long start,
 		}
 	}
 
-	/* HW requires global serialization of ptc.ga.  */
-	spin_lock(&ptcg_lock);
-	{
-		do {
-			/*
-			 * Flush ALAT entries also.
-			 */
-			ia64_ptcga(start, (nbits<<2));
-			ia64_srlz_i();
-			start += (1UL << nbits);
-		} while (start < end);
-	}
-	spin_unlock(&ptcg_lock);
+	if (need_ptcg_sem)
+		down_spin(&ptcg_sem);
+
+	do {
+		/*
+		 * Flush ALAT entries also.
+		 */
+		ia64_ptcga(start, (nbits << 2));
+		ia64_srlz_i();
+		start += (1UL << nbits);
+	} while (start < end);
+
+	if (need_ptcg_sem)
+		up_spin(&ptcg_sem);
 
         if (mm != active_mm) {
                 activate_context(active_mm);
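Note on the PALO handling added to efi.c above: handle_palo() accepts the table only when the signature matches PALO_SIG and the byte-wise sum of the entire table, including the stored checksum byte, is zero modulo 256. The stand-alone sketch below illustrates that checksum rule; the 8-byte buffer and its layout are made up for illustration and are not the real PALO table format.

/*
 * Sketch of the PALO-style 8-bit checksum rule: the sum of every byte
 * in the table, including the checksum byte itself, must be zero
 * (mod 256).  The table bytes here are invented for illustration.
 */
#include <stdint.h>
#include <stdio.h>

static uint8_t sum8(const uint8_t *buf, uint32_t len)
{
	uint8_t sum = 0;

	while (len--)
		sum = (uint8_t)(sum + *buf++);
	return sum;
}

int main(void)
{
	/* Hypothetical 8-byte table; the last byte will hold the checksum. */
	uint8_t table[8] = { 'P', 'A', 'L', 'O', 1, 0, 8, 0 };

	/* Pick the checksum byte so the total wraps to zero. */
	table[7] = (uint8_t)(0 - sum8(table, sizeof(table)));

	printf("checksum %s\n", sum8(table, sizeof(table)) == 0 ? "ok" : "bad");
	return 0;
}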
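The "spinaphore" introduced in tlb.c is a counting semaphore whose waiters spin instead of sleeping, sized to the number of ptc.g purges the platform allows in flight. For experimenting with the idea outside the kernel, here is a minimal user-space sketch, assuming C11 <stdatomic.h> as a stand-in for the kernel's atomic_t, atomic_add_unless() and cpu_relax(); it is illustrative only, not the kernel code.

/*
 * User-space analog of the patch's spinaphore: a counting semaphore
 * whose down() busy-waits instead of sleeping.
 */
#include <stdatomic.h>
#include <stdio.h>

struct spinaphore {
	atomic_int cur;			/* remaining free slots */
};

static void spinaphore_init(struct spinaphore *ss, int val)
{
	atomic_init(&ss->cur, val);
}

static void down_spin(struct spinaphore *ss)
{
	for (;;) {
		int old = atomic_load(&ss->cur);

		/* Take a slot only if one is free, mirroring
		 * atomic_add_unless(&ss->cur, -1, 0) in the patch. */
		if (old > 0 &&
		    atomic_compare_exchange_weak(&ss->cur, &old, old - 1))
			return;
		/* Otherwise spin; the kernel uses cpu_relax() here. */
	}
}

static void up_spin(struct spinaphore *ss)
{
	atomic_fetch_add(&ss->cur, 1);
}

int main(void)
{
	struct spinaphore sem;

	spinaphore_init(&sem, 2);	/* e.g. platform allows 2 purges in flight */
	down_spin(&sem);
	down_spin(&sem);
	up_spin(&sem);
	up_spin(&sem);
	printf("slots free: %d\n", atomic_load(&sem.cur));
	return 0;
}

The compare-and-swap loop plays the role of atomic_add_unless(): the count is decremented only while it is non-zero, so at most the configured number of holders pass down_spin() at once.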
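setup_ptcg_sem() ultimately reduces to a simple rule: serialize ptc.g with the spinaphore only when more CPUs could issue purges than the platform allows outstanding, and never when the platform advertises an unlimited count. A tiny sketch of that decision follows; MAX_PURGES_UNLIMITED and the CPU counts are illustrative stand-ins for PAL_MAX_PURGES/PALO_MAX_TLB_PURGES and num_possible_cpus(), not the real constants.

/* Sketch of the need_ptcg_sem decision, with invented constants. */
#include <stdio.h>

#define MAX_PURGES_UNLIMITED	0xFFFF	/* stand-in for the "unlimited" marker */

static int need_serialization(unsigned int possible_cpus, unsigned int nptcg)
{
	if (nptcg == MAX_PURGES_UNLIMITED)
		return 0;			/* hardware imposes no limit */
	return possible_cpus > nptcg;		/* more purgers than slots */
}

int main(void)
{
	printf("%d\n", need_serialization(8, 2));			/* 1: semaphore needed */
	printf("%d\n", need_serialization(2, 4));			/* 0: enough slots */
	printf("%d\n", need_serialization(64, MAX_PURGES_UNLIMITED));	/* 0: unlimited */
	return 0;
}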
