Diffstat (limited to 'arch/arm/kernel')
34 files changed, 1479 insertions, 1128 deletions
| diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile index 7ad2d5cf700..5bbec7b8183 100644 --- a/arch/arm/kernel/Makefile +++ b/arch/arm/kernel/Makefile @@ -19,9 +19,10 @@ obj-y		:= elf.o entry-armv.o entry-common.o irq.o opcodes.o \  		   process.o ptrace.o return_address.o sched_clock.o \  		   setup.o signal.o stacktrace.o sys_arm.o time.o traps.o -obj-$(CONFIG_DEPRECATED_PARAM_STRUCT) += compat.o +obj-$(CONFIG_ATAGS)		+= atags_parse.o +obj-$(CONFIG_ATAGS_PROC)	+= atags_proc.o +obj-$(CONFIG_DEPRECATED_PARAM_STRUCT) += atags_compat.o -obj-$(CONFIG_LEDS)		+= leds.o  obj-$(CONFIG_OC_ETM)		+= etm.o  obj-$(CONFIG_CPU_IDLE)		+= cpuidle.o  obj-$(CONFIG_ISA_DMA_API)	+= dma.o @@ -52,7 +53,6 @@ test-kprobes-objs		+= kprobes-test-thumb.o  else  test-kprobes-objs		+= kprobes-test-arm.o  endif -obj-$(CONFIG_ATAGS_PROC)	+= atags.o  obj-$(CONFIG_OABI_COMPAT)	+= sys_oabi-compat.o  obj-$(CONFIG_ARM_THUMBEE)	+= thumbee.o  obj-$(CONFIG_KGDB)		+= kgdb.o @@ -69,8 +69,7 @@ obj-$(CONFIG_CPU_XSC3)		+= xscale-cp0.o  obj-$(CONFIG_CPU_MOHAWK)	+= xscale-cp0.o  obj-$(CONFIG_CPU_PJ4)		+= pj4-cp0.o  obj-$(CONFIG_IWMMXT)		+= iwmmxt.o -obj-$(CONFIG_CPU_HAS_PMU)	+= pmu.o -obj-$(CONFIG_HW_PERF_EVENTS)	+= perf_event.o +obj-$(CONFIG_HW_PERF_EVENTS)	+= perf_event.o perf_event_cpu.o  AFLAGS_iwmmxt.o			:= -Wa,-mcpu=iwmmxt  obj-$(CONFIG_ARM_CPU_TOPOLOGY)  += topology.o @@ -82,4 +81,6 @@ head-y			:= head$(MMUEXT).o  obj-$(CONFIG_DEBUG_LL)	+= debug.o  obj-$(CONFIG_EARLY_PRINTK)	+= early_printk.o +obj-$(CONFIG_ARM_VIRT_EXT)	+= hyp-stub.o +  extra-y := $(head-y) vmlinux.lds diff --git a/arch/arm/kernel/arch_timer.c b/arch/arm/kernel/arch_timer.c index cf258807160..c8ef20747ee 100644 --- a/arch/arm/kernel/arch_timer.c +++ b/arch/arm/kernel/arch_timer.c @@ -21,18 +21,28 @@  #include <linux/io.h>  #include <asm/cputype.h> +#include <asm/delay.h>  #include <asm/localtimer.h>  #include <asm/arch_timer.h>  #include <asm/system_info.h>  #include <asm/sched_clock.h>  static unsigned long arch_timer_rate; -static int arch_timer_ppi; -static int arch_timer_ppi2; + +enum ppi_nr { +	PHYS_SECURE_PPI, +	PHYS_NONSECURE_PPI, +	VIRT_PPI, +	HYP_PPI, +	MAX_TIMER_PPI +}; + +static int arch_timer_ppi[MAX_TIMER_PPI];  static struct clock_event_device __percpu **arch_timer_evt; +static struct delay_timer arch_delay_timer; -extern void init_current_timer_delay(unsigned long freq); +static bool arch_timer_use_virtual = true;  /*   * Architected system timer support. @@ -46,50 +56,104 @@ extern void init_current_timer_delay(unsigned long freq);  #define ARCH_TIMER_REG_FREQ		1  #define ARCH_TIMER_REG_TVAL		2 -static void arch_timer_reg_write(int reg, u32 val) +#define ARCH_TIMER_PHYS_ACCESS		0 +#define ARCH_TIMER_VIRT_ACCESS		1 + +/* + * These register accessors are marked inline so the compiler can + * nicely work out which register we want, and chuck away the rest of + * the code. At least it does so with a recent GCC (4.6.3). 
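The accessor pattern introduced above relies on constant propagation: because `access` and `reg` are compile-time constants at every call site, the compiler keeps only the one mcr path that matters and discards the rest. A minimal user-space sketch of the same technique, with plain arrays standing in for the cp15 timer registers (all names here are invented for illustration):

```c
#include <stdio.h>

#define ACCESS_PHYS 0
#define ACCESS_VIRT 1
#define REG_CTRL    0
#define REG_TVAL    1

/* Stand-ins for the physical/virtual timer registers. */
static unsigned int fake_phys[2], fake_virt[2];

static inline void reg_write(const int access, const int reg, unsigned int val)
{
	/* Both arguments are constants at each call site, so the
	 * compiler can fold this dispatch down to a single store. */
	if (access == ACCESS_PHYS)
		fake_phys[reg] = val;
	else if (access == ACCESS_VIRT)
		fake_virt[reg] = val;
}

int main(void)
{
	reg_write(ACCESS_PHYS, REG_CTRL, 1);
	reg_write(ACCESS_VIRT, REG_TVAL, 99);
	printf("phys ctrl=%u virt tval=%u\n",
	       fake_phys[REG_CTRL], fake_virt[REG_TVAL]);
	return 0;
}
```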
+ */ +static inline void arch_timer_reg_write(const int access, const int reg, u32 val)  { -	switch (reg) { -	case ARCH_TIMER_REG_CTRL: -		asm volatile("mcr p15, 0, %0, c14, c2, 1" : : "r" (val)); -		break; -	case ARCH_TIMER_REG_TVAL: -		asm volatile("mcr p15, 0, %0, c14, c2, 0" : : "r" (val)); -		break; +	if (access == ARCH_TIMER_PHYS_ACCESS) { +		switch (reg) { +		case ARCH_TIMER_REG_CTRL: +			asm volatile("mcr p15, 0, %0, c14, c2, 1" : : "r" (val)); +			break; +		case ARCH_TIMER_REG_TVAL: +			asm volatile("mcr p15, 0, %0, c14, c2, 0" : : "r" (val)); +			break; +		} +	} + +	if (access == ARCH_TIMER_VIRT_ACCESS) { +		switch (reg) { +		case ARCH_TIMER_REG_CTRL: +			asm volatile("mcr p15, 0, %0, c14, c3, 1" : : "r" (val)); +			break; +		case ARCH_TIMER_REG_TVAL: +			asm volatile("mcr p15, 0, %0, c14, c3, 0" : : "r" (val)); +			break; +		}  	}  	isb();  } -static u32 arch_timer_reg_read(int reg) +static inline u32 arch_timer_reg_read(const int access, const int reg)  { -	u32 val; +	u32 val = 0; + +	if (access == ARCH_TIMER_PHYS_ACCESS) { +		switch (reg) { +		case ARCH_TIMER_REG_CTRL: +			asm volatile("mrc p15, 0, %0, c14, c2, 1" : "=r" (val)); +			break; +		case ARCH_TIMER_REG_TVAL: +			asm volatile("mrc p15, 0, %0, c14, c2, 0" : "=r" (val)); +			break; +		case ARCH_TIMER_REG_FREQ: +			asm volatile("mrc p15, 0, %0, c14, c0, 0" : "=r" (val)); +			break; +		} +	} -	switch (reg) { -	case ARCH_TIMER_REG_CTRL: -		asm volatile("mrc p15, 0, %0, c14, c2, 1" : "=r" (val)); -		break; -	case ARCH_TIMER_REG_FREQ: -		asm volatile("mrc p15, 0, %0, c14, c0, 0" : "=r" (val)); -		break; -	case ARCH_TIMER_REG_TVAL: -		asm volatile("mrc p15, 0, %0, c14, c2, 0" : "=r" (val)); -		break; -	default: -		BUG(); +	if (access == ARCH_TIMER_VIRT_ACCESS) { +		switch (reg) { +		case ARCH_TIMER_REG_CTRL: +			asm volatile("mrc p15, 0, %0, c14, c3, 1" : "=r" (val)); +			break; +		case ARCH_TIMER_REG_TVAL: +			asm volatile("mrc p15, 0, %0, c14, c3, 0" : "=r" (val)); +			break; +		}  	}  	return val;  } -static irqreturn_t arch_timer_handler(int irq, void *dev_id) +static inline cycle_t arch_timer_counter_read(const int access)  { -	struct clock_event_device *evt = *(struct clock_event_device **)dev_id; -	unsigned long ctrl; +	cycle_t cval = 0; + +	if (access == ARCH_TIMER_PHYS_ACCESS) +		asm volatile("mrrc p15, 0, %Q0, %R0, c14" : "=r" (cval)); + +	if (access == ARCH_TIMER_VIRT_ACCESS) +		asm volatile("mrrc p15, 1, %Q0, %R0, c14" : "=r" (cval)); + +	return cval; +} + +static inline cycle_t arch_counter_get_cntpct(void) +{ +	return arch_timer_counter_read(ARCH_TIMER_PHYS_ACCESS); +} -	ctrl = arch_timer_reg_read(ARCH_TIMER_REG_CTRL); +static inline cycle_t arch_counter_get_cntvct(void) +{ +	return arch_timer_counter_read(ARCH_TIMER_VIRT_ACCESS); +} + +static irqreturn_t inline timer_handler(const int access, +					struct clock_event_device *evt) +{ +	unsigned long ctrl; +	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL);  	if (ctrl & ARCH_TIMER_CTRL_IT_STAT) {  		ctrl |= ARCH_TIMER_CTRL_IT_MASK; -		arch_timer_reg_write(ARCH_TIMER_REG_CTRL, ctrl); +		arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl);  		evt->event_handler(evt);  		return IRQ_HANDLED;  	} @@ -97,63 +161,100 @@ static irqreturn_t arch_timer_handler(int irq, void *dev_id)  	return IRQ_NONE;  } -static void arch_timer_disable(void) +static irqreturn_t arch_timer_handler_virt(int irq, void *dev_id)  { -	unsigned long ctrl; +	struct clock_event_device *evt = *(struct clock_event_device **)dev_id; -	ctrl = arch_timer_reg_read(ARCH_TIMER_REG_CTRL); -	ctrl 
&= ~ARCH_TIMER_CTRL_ENABLE; -	arch_timer_reg_write(ARCH_TIMER_REG_CTRL, ctrl); +	return timer_handler(ARCH_TIMER_VIRT_ACCESS, evt);  } -static void arch_timer_set_mode(enum clock_event_mode mode, -				struct clock_event_device *clk) +static irqreturn_t arch_timer_handler_phys(int irq, void *dev_id)  { +	struct clock_event_device *evt = *(struct clock_event_device **)dev_id; + +	return timer_handler(ARCH_TIMER_PHYS_ACCESS, evt); +} + +static inline void timer_set_mode(const int access, int mode) +{ +	unsigned long ctrl;  	switch (mode) {  	case CLOCK_EVT_MODE_UNUSED:  	case CLOCK_EVT_MODE_SHUTDOWN: -		arch_timer_disable(); +		ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL); +		ctrl &= ~ARCH_TIMER_CTRL_ENABLE; +		arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl);  		break;  	default:  		break;  	}  } -static int arch_timer_set_next_event(unsigned long evt, -				     struct clock_event_device *unused) +static void arch_timer_set_mode_virt(enum clock_event_mode mode, +				     struct clock_event_device *clk)  { -	unsigned long ctrl; +	timer_set_mode(ARCH_TIMER_VIRT_ACCESS, mode); +} -	ctrl = arch_timer_reg_read(ARCH_TIMER_REG_CTRL); +static void arch_timer_set_mode_phys(enum clock_event_mode mode, +				     struct clock_event_device *clk) +{ +	timer_set_mode(ARCH_TIMER_PHYS_ACCESS, mode); +} + +static inline void set_next_event(const int access, unsigned long evt) +{ +	unsigned long ctrl; +	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL);  	ctrl |= ARCH_TIMER_CTRL_ENABLE;  	ctrl &= ~ARCH_TIMER_CTRL_IT_MASK; +	arch_timer_reg_write(access, ARCH_TIMER_REG_TVAL, evt); +	arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl); +} -	arch_timer_reg_write(ARCH_TIMER_REG_TVAL, evt); -	arch_timer_reg_write(ARCH_TIMER_REG_CTRL, ctrl); +static int arch_timer_set_next_event_virt(unsigned long evt, +					  struct clock_event_device *unused) +{ +	set_next_event(ARCH_TIMER_VIRT_ACCESS, evt); +	return 0; +} +static int arch_timer_set_next_event_phys(unsigned long evt, +					  struct clock_event_device *unused) +{ +	set_next_event(ARCH_TIMER_PHYS_ACCESS, evt);  	return 0;  }  static int __cpuinit arch_timer_setup(struct clock_event_device *clk)  { -	/* Be safe... 
*/ -	arch_timer_disable(); -  	clk->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP;  	clk->name = "arch_sys_timer";  	clk->rating = 450; -	clk->set_mode = arch_timer_set_mode; -	clk->set_next_event = arch_timer_set_next_event; -	clk->irq = arch_timer_ppi; +	if (arch_timer_use_virtual) { +		clk->irq = arch_timer_ppi[VIRT_PPI]; +		clk->set_mode = arch_timer_set_mode_virt; +		clk->set_next_event = arch_timer_set_next_event_virt; +	} else { +		clk->irq = arch_timer_ppi[PHYS_SECURE_PPI]; +		clk->set_mode = arch_timer_set_mode_phys; +		clk->set_next_event = arch_timer_set_next_event_phys; +	} + +	clk->set_mode(CLOCK_EVT_MODE_SHUTDOWN, NULL);  	clockevents_config_and_register(clk, arch_timer_rate,  					0xf, 0x7fffffff);  	*__this_cpu_ptr(arch_timer_evt) = clk; -	enable_percpu_irq(clk->irq, 0); -	if (arch_timer_ppi2) -		enable_percpu_irq(arch_timer_ppi2, 0); +	if (arch_timer_use_virtual) +		enable_percpu_irq(arch_timer_ppi[VIRT_PPI], 0); +	else { +		enable_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI], 0); +		if (arch_timer_ppi[PHYS_NONSECURE_PPI]) +			enable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI], 0); +	}  	return 0;  } @@ -173,8 +274,8 @@ static int arch_timer_available(void)  		return -ENXIO;  	if (arch_timer_rate == 0) { -		arch_timer_reg_write(ARCH_TIMER_REG_CTRL, 0); -		freq = arch_timer_reg_read(ARCH_TIMER_REG_FREQ); +		freq = arch_timer_reg_read(ARCH_TIMER_PHYS_ACCESS, +					   ARCH_TIMER_REG_FREQ);  		/* Check the timer frequency. */  		if (freq == 0) { @@ -185,52 +286,57 @@ static int arch_timer_available(void)  		arch_timer_rate = freq;  	} -	pr_info_once("Architected local timer running at %lu.%02luMHz.\n", -		     arch_timer_rate / 1000000, (arch_timer_rate / 10000) % 100); +	pr_info_once("Architected local timer running at %lu.%02luMHz (%s).\n", +		     arch_timer_rate / 1000000, (arch_timer_rate / 10000) % 100, +		     arch_timer_use_virtual ? "virt" : "phys");  	return 0;  } -static inline cycle_t arch_counter_get_cntpct(void) +static u32 notrace arch_counter_get_cntpct32(void)  { -	u32 cvall, cvalh; - -	asm volatile("mrrc p15, 0, %0, %1, c14" : "=r" (cvall), "=r" (cvalh)); +	cycle_t cnt = arch_counter_get_cntpct(); -	return ((cycle_t) cvalh << 32) | cvall; -} - -static inline cycle_t arch_counter_get_cntvct(void) -{ -	u32 cvall, cvalh; - -	asm volatile("mrrc p15, 1, %0, %1, c14" : "=r" (cvall), "=r" (cvalh)); - -	return ((cycle_t) cvalh << 32) | cvall; +	/* +	 * The sched_clock infrastructure only knows about counters +	 * with at most 32bits. Forget about the upper 24 bits for the +	 * time being... +	 */ +	return (u32)cnt;  }  static u32 notrace arch_counter_get_cntvct32(void)  { -	cycle_t cntvct = arch_counter_get_cntvct(); +	cycle_t cnt = arch_counter_get_cntvct();  	/*  	 * The sched_clock infrastructure only knows about counters  	 * with at most 32bits. Forget about the upper 24 bits for the  	 * time being...  	 */ -	return (u32)(cntvct & (u32)~0); +	return (u32)cnt;  }  static cycle_t arch_counter_read(struct clocksource *cs)  { +	/* +	 * Always use the physical counter for the clocksource. +	 * CNTHCTL.PL1PCTEN must be set to 1. +	 */  	return arch_counter_get_cntpct();  } -int read_current_timer(unsigned long *timer_val) +static unsigned long arch_timer_read_current_timer(void)  { -	if (!arch_timer_rate) -		return -ENXIO; -	*timer_val = arch_counter_get_cntpct(); -	return 0; +	return arch_counter_get_cntpct(); +} + +static cycle_t arch_counter_read_cc(const struct cyclecounter *cc) +{ +	/* +	 * Always use the physical counter for the clocksource. 
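The pr_info_once() above formats the rate using integer arithmetic only: rate/1000000 yields whole MHz and (rate/10000)%100 the two decimal digits. A quick stand-alone check, assuming a hypothetical 24.576 MHz CNTFRQ value:

```c
#include <stdio.h>

int main(void)
{
	unsigned long rate = 24576000;	/* assumed CNTFRQ, for illustration */

	/* Same expressions as the pr_info_once() format arguments above. */
	printf("%lu.%02luMHz\n", rate / 1000000, (rate / 10000) % 100);
	return 0;	/* prints "24.57MHz" */
}
```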
+	 * CNTHCTL.PL1PCTEN must be set to 1. +	 */ +	return arch_counter_get_cntpct();  }  static struct clocksource clocksource_counter = { @@ -241,14 +347,32 @@ static struct clocksource clocksource_counter = {  	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,  }; +static struct cyclecounter cyclecounter = { +	.read	= arch_counter_read_cc, +	.mask	= CLOCKSOURCE_MASK(56), +}; + +static struct timecounter timecounter; + +struct timecounter *arch_timer_get_timecounter(void) +{ +	return &timecounter; +} +  static void __cpuinit arch_timer_stop(struct clock_event_device *clk)  {  	pr_debug("arch_timer_teardown disable IRQ%d cpu #%d\n",  		 clk->irq, smp_processor_id()); -	disable_percpu_irq(clk->irq); -	if (arch_timer_ppi2) -		disable_percpu_irq(arch_timer_ppi2); -	arch_timer_set_mode(CLOCK_EVT_MODE_UNUSED, clk); + +	if (arch_timer_use_virtual) +		disable_percpu_irq(arch_timer_ppi[VIRT_PPI]); +	else { +		disable_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI]); +		if (arch_timer_ppi[PHYS_NONSECURE_PPI]) +			disable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI]); +	} + +	clk->set_mode(CLOCK_EVT_MODE_UNUSED, clk);  }  static struct local_timer_ops arch_timer_ops __cpuinitdata = { @@ -261,36 +385,48 @@ static struct clock_event_device arch_timer_global_evt;  static int __init arch_timer_register(void)  {  	int err; +	int ppi;  	err = arch_timer_available();  	if (err) -		return err; +		goto out;  	arch_timer_evt = alloc_percpu(struct clock_event_device *); -	if (!arch_timer_evt) -		return -ENOMEM; +	if (!arch_timer_evt) { +		err = -ENOMEM; +		goto out; +	}  	clocksource_register_hz(&clocksource_counter, arch_timer_rate); +	cyclecounter.mult = clocksource_counter.mult; +	cyclecounter.shift = clocksource_counter.shift; +	timecounter_init(&timecounter, &cyclecounter, +			 arch_counter_get_cntpct()); + +	if (arch_timer_use_virtual) { +		ppi = arch_timer_ppi[VIRT_PPI]; +		err = request_percpu_irq(ppi, arch_timer_handler_virt, +					 "arch_timer", arch_timer_evt); +	} else { +		ppi = arch_timer_ppi[PHYS_SECURE_PPI]; +		err = request_percpu_irq(ppi, arch_timer_handler_phys, +					 "arch_timer", arch_timer_evt); +		if (!err && arch_timer_ppi[PHYS_NONSECURE_PPI]) { +			ppi = arch_timer_ppi[PHYS_NONSECURE_PPI]; +			err = request_percpu_irq(ppi, arch_timer_handler_phys, +						 "arch_timer", arch_timer_evt); +			if (err) +				free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI], +						arch_timer_evt); +		} +	} -	err = request_percpu_irq(arch_timer_ppi, arch_timer_handler, -				 "arch_timer", arch_timer_evt);  	if (err) {  		pr_err("arch_timer: can't register interrupt %d (%d)\n", -		       arch_timer_ppi, err); +		       ppi, err);  		goto out_free;  	} -	if (arch_timer_ppi2) { -		err = request_percpu_irq(arch_timer_ppi2, arch_timer_handler, -					 "arch_timer", arch_timer_evt); -		if (err) { -			pr_err("arch_timer: can't register interrupt %d (%d)\n", -			       arch_timer_ppi2, err); -			arch_timer_ppi2 = 0; -			goto out_free_irq; -		} -	} -  	err = local_timer_register(&arch_timer_ops);  	if (err) {  		/* @@ -302,21 +438,29 @@ static int __init arch_timer_register(void)  		arch_timer_global_evt.cpumask = cpumask_of(0);  		err = arch_timer_setup(&arch_timer_global_evt);  	} -  	if (err)  		goto out_free_irq; -	init_current_timer_delay(arch_timer_rate); +	/* Use the architected timer for the delay loop. 
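CLOCKSOURCE_MASK(56) above reflects the architected counter's 56-bit width, while sched_clock is handed only the low 32 bits, as the truncation comments explain. Rough wrap-around figures, assuming a hypothetical 50 MHz rate (the rate is an assumption for illustration, not from the patch):

```c
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t rate = 50000000;	/* assumed 50 MHz timer rate */

	/* Seconds before a 32-bit view of the counter wraps... */
	printf("32-bit wrap: ~%llu s\n",
	       (unsigned long long)((1ULL << 32) / rate));
	/* ...versus days before the full 56-bit counter wraps. */
	printf("56-bit wrap: ~%llu days\n",
	       (unsigned long long)((1ULL << 56) / rate / 86400));
	return 0;
}
```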
*/ +	arch_delay_timer.read_current_timer = &arch_timer_read_current_timer; +	arch_delay_timer.freq = arch_timer_rate; +	register_current_timer_delay(&arch_delay_timer);  	return 0;  out_free_irq: -	free_percpu_irq(arch_timer_ppi, arch_timer_evt); -	if (arch_timer_ppi2) -		free_percpu_irq(arch_timer_ppi2, arch_timer_evt); +	if (arch_timer_use_virtual) +		free_percpu_irq(arch_timer_ppi[VIRT_PPI], arch_timer_evt); +	else { +		free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI], +				arch_timer_evt); +		if (arch_timer_ppi[PHYS_NONSECURE_PPI]) +			free_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI], +					arch_timer_evt); +	}  out_free:  	free_percpu(arch_timer_evt); - +out:  	return err;  } @@ -329,6 +473,7 @@ int __init arch_timer_of_register(void)  {  	struct device_node *np;  	u32 freq; +	int i;  	np = of_find_matching_node(NULL, arch_timer_of_match);  	if (!np) { @@ -340,22 +485,40 @@ int __init arch_timer_of_register(void)  	if (!of_property_read_u32(np, "clock-frequency", &freq))  		arch_timer_rate = freq; -	arch_timer_ppi = irq_of_parse_and_map(np, 0); -	arch_timer_ppi2 = irq_of_parse_and_map(np, 1); -	pr_info("arch_timer: found %s irqs %d %d\n", -		np->name, arch_timer_ppi, arch_timer_ppi2); +	for (i = PHYS_SECURE_PPI; i < MAX_TIMER_PPI; i++) +		arch_timer_ppi[i] = irq_of_parse_and_map(np, i); + +	/* +	 * If no interrupt provided for virtual timer, we'll have to +	 * stick to the physical timer. It'd better be accessible... +	 */ +	if (!arch_timer_ppi[VIRT_PPI]) { +		arch_timer_use_virtual = false; + +		if (!arch_timer_ppi[PHYS_SECURE_PPI] || +		    !arch_timer_ppi[PHYS_NONSECURE_PPI]) { +			pr_warn("arch_timer: No interrupt available, giving up\n"); +			return -EINVAL; +		} +	}  	return arch_timer_register();  }  int __init arch_timer_sched_clock_init(void)  { +	u32 (*cnt32)(void);  	int err;  	err = arch_timer_available();  	if (err)  		return err; -	setup_sched_clock(arch_counter_get_cntvct32, 32, arch_timer_rate); +	if (arch_timer_use_virtual) +		cnt32 = arch_counter_get_cntvct32; +	else +		cnt32 = arch_counter_get_cntpct32; + +	setup_sched_clock(cnt32, 32, arch_timer_rate);  	return 0;  } diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c index 1429d8989fb..c985b481192 100644 --- a/arch/arm/kernel/asm-offsets.c +++ b/arch/arm/kernel/asm-offsets.c @@ -59,10 +59,12 @@ int main(void)    DEFINE(TI_USED_CP,		offsetof(struct thread_info, used_cp));    DEFINE(TI_TP_VALUE,		offsetof(struct thread_info, tp_value));    DEFINE(TI_FPSTATE,		offsetof(struct thread_info, fpstate)); +#ifdef CONFIG_VFP    DEFINE(TI_VFPSTATE,		offsetof(struct thread_info, vfpstate));  #ifdef CONFIG_SMP    DEFINE(VFP_CPU,		offsetof(union vfp_state, hard.cpu));  #endif +#endif  #ifdef CONFIG_ARM_THUMBEE    DEFINE(TI_THUMBEE_STATE,	offsetof(struct thread_info, thumbee_state));  #endif diff --git a/arch/arm/kernel/atags.h b/arch/arm/kernel/atags.h index e5f028d214a..9edc9692332 100644 --- a/arch/arm/kernel/atags.h +++ b/arch/arm/kernel/atags.h @@ -3,3 +3,17 @@ extern void save_atags(struct tag *tags);  #else  static inline void save_atags(struct tag *tags) { }  #endif + +void convert_to_tag_list(struct tag *tags); + +#ifdef CONFIG_ATAGS +struct machine_desc *setup_machine_tags(phys_addr_t __atags_pointer, unsigned int machine_nr); +#else +static inline struct machine_desc * +setup_machine_tags(phys_addr_t __atags_pointer, unsigned int machine_nr) +{ +	early_print("no ATAGS support: can't continue\n"); +	while (true); +	unreachable(); +} +#endif diff --git a/arch/arm/kernel/compat.c 
b/arch/arm/kernel/atags_compat.c index 925652318b8..5236ad38f41 100644 --- a/arch/arm/kernel/compat.c +++ b/arch/arm/kernel/atags_compat.c @@ -1,5 +1,5 @@  /* - *  linux/arch/arm/kernel/compat.c + *  linux/arch/arm/kernel/atags_compat.c   *   *  Copyright (C) 2001 Russell King   * @@ -26,7 +26,7 @@  #include <asm/mach/arch.h> -#include "compat.h" +#include "atags.h"  /*   * Usage: diff --git a/arch/arm/kernel/atags_parse.c b/arch/arm/kernel/atags_parse.c new file mode 100644 index 00000000000..14512e6931d --- /dev/null +++ b/arch/arm/kernel/atags_parse.c @@ -0,0 +1,238 @@ +/* + * Tag parsing. + * + * Copyright (C) 1995-2001 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +/* + * This is the traditional way of passing data to the kernel at boot time.  Rather + * than passing a fixed inflexible structure to the kernel, we pass a list + * of variable-sized tags to the kernel.  The first tag must be a ATAG_CORE + * tag for the list to be recognised (to distinguish the tagged list from + * a param_struct).  The list is terminated with a zero-length tag (this tag + * is not parsed in any way). + */ + +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/fs.h> +#include <linux/root_dev.h> +#include <linux/screen_info.h> + +#include <asm/setup.h> +#include <asm/system_info.h> +#include <asm/page.h> +#include <asm/mach/arch.h> + +#include "atags.h" + +static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE; + +#ifndef MEM_SIZE +#define MEM_SIZE	(16*1024*1024) +#endif + +static struct { +	struct tag_header hdr1; +	struct tag_core   core; +	struct tag_header hdr2; +	struct tag_mem32  mem; +	struct tag_header hdr3; +} default_tags __initdata = { +	{ tag_size(tag_core), ATAG_CORE }, +	{ 1, PAGE_SIZE, 0xff }, +	{ tag_size(tag_mem32), ATAG_MEM }, +	{ MEM_SIZE }, +	{ 0, ATAG_NONE } +}; + +static int __init parse_tag_core(const struct tag *tag) +{ +	if (tag->hdr.size > 2) { +		if ((tag->u.core.flags & 1) == 0) +			root_mountflags &= ~MS_RDONLY; +		ROOT_DEV = old_decode_dev(tag->u.core.rootdev); +	} +	return 0; +} + +__tagtable(ATAG_CORE, parse_tag_core); + +static int __init parse_tag_mem32(const struct tag *tag) +{ +	return arm_add_memory(tag->u.mem.start, tag->u.mem.size); +} + +__tagtable(ATAG_MEM, parse_tag_mem32); + +#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE) +static int __init parse_tag_videotext(const struct tag *tag) +{ +	screen_info.orig_x            = tag->u.videotext.x; +	screen_info.orig_y            = tag->u.videotext.y; +	screen_info.orig_video_page   = tag->u.videotext.video_page; +	screen_info.orig_video_mode   = tag->u.videotext.video_mode; +	screen_info.orig_video_cols   = tag->u.videotext.video_cols; +	screen_info.orig_video_ega_bx = tag->u.videotext.video_ega_bx; +	screen_info.orig_video_lines  = tag->u.videotext.video_lines; +	screen_info.orig_video_isVGA  = tag->u.videotext.video_isvga; +	screen_info.orig_video_points = tag->u.videotext.video_points; +	return 0; +} + +__tagtable(ATAG_VIDEOTEXT, parse_tag_videotext); +#endif + +#ifdef CONFIG_BLK_DEV_RAM +static int __init parse_tag_ramdisk(const struct tag *tag) +{ +	extern int rd_size, rd_image_start, rd_prompt, rd_doload; + +	rd_image_start = tag->u.ramdisk.start; +	rd_doload = (tag->u.ramdisk.flags & 1) == 0; +	rd_prompt = (tag->u.ramdisk.flags & 2) == 0; + +	if (tag->u.ramdisk.size) +		rd_size = 
tag->u.ramdisk.size; + +	return 0; +} + +__tagtable(ATAG_RAMDISK, parse_tag_ramdisk); +#endif + +static int __init parse_tag_serialnr(const struct tag *tag) +{ +	system_serial_low = tag->u.serialnr.low; +	system_serial_high = tag->u.serialnr.high; +	return 0; +} + +__tagtable(ATAG_SERIAL, parse_tag_serialnr); + +static int __init parse_tag_revision(const struct tag *tag) +{ +	system_rev = tag->u.revision.rev; +	return 0; +} + +__tagtable(ATAG_REVISION, parse_tag_revision); + +static int __init parse_tag_cmdline(const struct tag *tag) +{ +#if defined(CONFIG_CMDLINE_EXTEND) +	strlcat(default_command_line, " ", COMMAND_LINE_SIZE); +	strlcat(default_command_line, tag->u.cmdline.cmdline, +		COMMAND_LINE_SIZE); +#elif defined(CONFIG_CMDLINE_FORCE) +	pr_warning("Ignoring tag cmdline (using the default kernel command line)\n"); +#else +	strlcpy(default_command_line, tag->u.cmdline.cmdline, +		COMMAND_LINE_SIZE); +#endif +	return 0; +} + +__tagtable(ATAG_CMDLINE, parse_tag_cmdline); + +/* + * Scan the tag table for this tag, and call its parse function. + * The tag table is built by the linker from all the __tagtable + * declarations. + */ +static int __init parse_tag(const struct tag *tag) +{ +	extern struct tagtable __tagtable_begin, __tagtable_end; +	struct tagtable *t; + +	for (t = &__tagtable_begin; t < &__tagtable_end; t++) +		if (tag->hdr.tag == t->tag) { +			t->parse(tag); +			break; +		} + +	return t < &__tagtable_end; +} + +/* + * Parse all tags in the list, checking both the global and architecture + * specific tag tables. + */ +static void __init parse_tags(const struct tag *t) +{ +	for (; t->hdr.size; t = tag_next(t)) +		if (!parse_tag(t)) +			printk(KERN_WARNING +				"Ignoring unrecognised tag 0x%08x\n", +				t->hdr.tag); +} + +static void __init squash_mem_tags(struct tag *tag) +{ +	for (; tag->hdr.size; tag = tag_next(tag)) +		if (tag->hdr.tag == ATAG_MEM) +			tag->hdr.tag = ATAG_NONE; +} + +struct machine_desc * __init setup_machine_tags(phys_addr_t __atags_pointer, +						unsigned int machine_nr) +{ +	struct tag *tags = (struct tag *)&default_tags; +	struct machine_desc *mdesc = NULL, *p; +	char *from = default_command_line; + +	default_tags.mem.start = PHYS_OFFSET; + +	/* +	 * locate machine in the list of supported machines. +	 */ +	for_each_machine_desc(p) +		if (machine_nr == p->nr) { +			printk("Machine: %s\n", p->name); +			mdesc = p; +			break; +		} + +	if (!mdesc) { +		early_print("\nError: unrecognized/unsupported machine ID" +			    " (r1 = 0x%08x).\n\n", machine_nr); +		dump_machine_table(); /* does not return */ +	} + +	if (__atags_pointer) +		tags = phys_to_virt(__atags_pointer); +	else if (mdesc->atag_offset) +		tags = (void *)(PAGE_OFFSET + mdesc->atag_offset); + +#if defined(CONFIG_DEPRECATED_PARAM_STRUCT) +	/* +	 * If we have the old style parameters, convert them to +	 * a tag list. 
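parse_tags() above walks the list with tag_next(), which advances by hdr.size 32-bit words (header included) until it reaches the zero-sized ATAG_NONE terminator. A stand-alone model of that walk, using simplified payload-less tags rather than the kernel's full structs:

```c
#include <stdio.h>
#include <stdint.h>

struct tag_header { uint32_t size; uint32_t tag; };

#define ATAG_NONE 0x00000000
#define ATAG_CORE 0x54410001
#define ATAG_MEM  0x54410002

/* As in the kernel: size counts 32-bit words, header included. */
#define tag_next(t) ((struct tag_header *)((uint32_t *)(t) + (t)->size))

int main(void)
{
	/* Hand-built list: two payload-less tags plus the terminator. */
	uint32_t list[] = {
		2, ATAG_CORE,
		2, ATAG_MEM,
		0, ATAG_NONE,
	};
	struct tag_header *t = (struct tag_header *)list;

	for (; t->size; t = tag_next(t))
		printf("tag 0x%08x, %u words\n", t->tag, (unsigned)t->size);
	return 0;
}
```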
+	 */ +	if (tags->hdr.tag != ATAG_CORE) +		convert_to_tag_list(tags); +#endif +	if (tags->hdr.tag != ATAG_CORE) { +		early_print("Warning: Neither atags nor dtb found\n"); +		tags = (struct tag *)&default_tags; +	} + +	if (mdesc->fixup) +		mdesc->fixup(tags, &from, &meminfo); + +	if (tags->hdr.tag == ATAG_CORE) { +		if (meminfo.nr_banks != 0) +			squash_mem_tags(tags); +		save_atags(tags); +		parse_tags(tags); +	} + +	/* parse_early_param needs a boot_command_line */ +	strlcpy(boot_command_line, from, COMMAND_LINE_SIZE); + +	return mdesc; +} diff --git a/arch/arm/kernel/atags.c b/arch/arm/kernel/atags_proc.c index 42a1a1415fa..42a1a1415fa 100644 --- a/arch/arm/kernel/atags.c +++ b/arch/arm/kernel/atags_proc.c diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c index 9cf16b83bbb..9b722612553 100644 --- a/arch/arm/kernel/bios32.c +++ b/arch/arm/kernel/bios32.c @@ -13,6 +13,7 @@  #include <linux/io.h>  #include <asm/mach-types.h> +#include <asm/mach/map.h>  #include <asm/mach/pci.h>  static int debug_pci; @@ -414,6 +415,38 @@ static int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)  	return irq;  } +static int __init pcibios_init_resources(int busnr, struct pci_sys_data *sys) +{ +	int ret; +	struct pci_host_bridge_window *window; + +	if (list_empty(&sys->resources)) { +		pci_add_resource_offset(&sys->resources, +			 &iomem_resource, sys->mem_offset); +	} + +	list_for_each_entry(window, &sys->resources, list) { +		if (resource_type(window->res) == IORESOURCE_IO) +			return 0; +	} + +	sys->io_res.start = (busnr * SZ_64K) ?  : pcibios_min_io; +	sys->io_res.end = (busnr + 1) * SZ_64K - 1; +	sys->io_res.flags = IORESOURCE_IO; +	sys->io_res.name = sys->io_res_name; +	sprintf(sys->io_res_name, "PCI%d I/O", busnr); + +	ret = request_resource(&ioport_resource, &sys->io_res); +	if (ret) { +		pr_err("PCI: unable to allocate I/O port region (%d)\n", ret); +		return ret; +	} +	pci_add_resource_offset(&sys->resources, &sys->io_res, +				sys->io_offset); + +	return 0; +} +  static void __init pcibios_init_hw(struct hw_pci *hw, struct list_head *head)  {  	struct pci_sys_data *sys = NULL; @@ -436,11 +469,10 @@ static void __init pcibios_init_hw(struct hw_pci *hw, struct list_head *head)  		ret = hw->setup(nr, sys);  		if (ret > 0) { -			if (list_empty(&sys->resources)) { -				pci_add_resource_offset(&sys->resources, -					 &ioport_resource, sys->io_offset); -				pci_add_resource_offset(&sys->resources, -					 &iomem_resource, sys->mem_offset); +			ret = pcibios_init_resources(nr, sys); +			if (ret)  { +				kfree(sys); +				break;  			}  			if (hw->scan) @@ -618,3 +650,15 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,  	return 0;  } + +void __init pci_map_io_early(unsigned long pfn) +{ +	struct map_desc pci_io_desc = { +		.virtual	= PCI_IO_VIRT_BASE, +		.type		= MT_DEVICE, +		.length		= SZ_64K, +	}; + +	pci_io_desc.pfn = pfn; +	iotable_init(&pci_io_desc, 1); +} diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S index e337879595e..831cd38c8d9 100644 --- a/arch/arm/kernel/calls.S +++ b/arch/arm/kernel/calls.S @@ -20,7 +20,7 @@  		CALL(sys_creat)  		CALL(sys_link)  /* 10 */	CALL(sys_unlink) -		CALL(sys_execve_wrapper) +		CALL(sys_execve)  		CALL(sys_chdir)  		CALL(OBSOLETE(sys_time))	/* used by libc4 */  		CALL(sys_mknod) diff --git a/arch/arm/kernel/compat.h b/arch/arm/kernel/compat.h deleted file mode 100644 index 39264ab1b9c..00000000000 --- a/arch/arm/kernel/compat.h +++ /dev/null @@ -1,11 +0,0 @@ -/* - *  linux/arch/arm/kernel/compat.h - * 
- *  Copyright (C) 2001 Russell King - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. -*/ - -extern void convert_to_tag_list(struct tag *tags); diff --git a/arch/arm/kernel/debug.S b/arch/arm/kernel/debug.S index c45522c3678..66f711b2e0e 100644 --- a/arch/arm/kernel/debug.S +++ b/arch/arm/kernel/debug.S @@ -20,90 +20,9 @@   * references to these in a production kernel!   */ -#if defined(CONFIG_DEBUG_ICEDCC) -		@@ debug using ARM EmbeddedICE DCC channel - -		.macro	addruart, rp, rv, tmp -		.endm - -#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K) || defined(CONFIG_CPU_V7) - -		.macro	senduart, rd, rx -		mcr	p14, 0, \rd, c0, c5, 0 -		.endm - -		.macro	busyuart, rd, rx -1001: -		mrc	p14, 0, \rx, c0, c1, 0 -		tst	\rx, #0x20000000 -		beq	1001b -		.endm - -		.macro	waituart, rd, rx -		mov	\rd, #0x2000000 -1001: -		subs	\rd, \rd, #1 -		bmi	1002f -		mrc	p14, 0, \rx, c0, c1, 0 -		tst	\rx, #0x20000000 -		bne	1001b -1002: -		.endm - -#elif defined(CONFIG_CPU_XSCALE) - -		.macro	senduart, rd, rx -		mcr	p14, 0, \rd, c8, c0, 0 -		.endm - -		.macro	busyuart, rd, rx -1001: -		mrc	p14, 0, \rx, c14, c0, 0 -		tst	\rx, #0x10000000 -		beq	1001b -		.endm - -		.macro	waituart, rd, rx -		mov	\rd, #0x10000000 -1001: -		subs	\rd, \rd, #1 -		bmi	1002f -		mrc	p14, 0, \rx, c14, c0, 0 -		tst	\rx, #0x10000000 -		bne	1001b -1002: -		.endm - -#else - -		.macro	senduart, rd, rx -		mcr	p14, 0, \rd, c1, c0, 0 -		.endm - -		.macro	busyuart, rd, rx -1001: -		mrc	p14, 0, \rx, c0, c0, 0 -		tst	\rx, #2 -		beq	1001b - -		.endm - -		.macro	waituart, rd, rx -		mov	\rd, #0x2000000 -1001: -		subs	\rd, \rd, #1 -		bmi	1002f -		mrc	p14, 0, \rx, c0, c0, 0 -		tst	\rx, #2 -		bne	1001b -1002: -		.endm - -#endif	/* CONFIG_CPU_V6 */ - -#elif !defined(CONFIG_DEBUG_SEMIHOSTING) -#include <mach/debug-macro.S> -#endif	/* CONFIG_DEBUG_ICEDCC */ +#if !defined(CONFIG_DEBUG_SEMIHOSTING) +#include CONFIG_DEBUG_LL_INCLUDE +#endif  #ifdef CONFIG_MMU  		.macro	addruart_current, rx, tmp1, tmp2 diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S index 978eac57e04..34711757ba5 100644 --- a/arch/arm/kernel/entry-common.S +++ b/arch/arm/kernel/entry-common.S @@ -86,14 +86,26 @@ ENDPROC(ret_to_user)   */  ENTRY(ret_from_fork)  	bl	schedule_tail -	get_thread_info tsk -	mov	why, #1 +	cmp	r5, #0 +	movne	r0, r4 +	adrne	lr, BSYM(1f) +	movne	pc, r5 +1:	get_thread_info tsk  	b	ret_slow_syscall  ENDPROC(ret_from_fork)  	.equ NR_syscalls,0  #define CALL(x) .equ NR_syscalls,NR_syscalls+1  #include "calls.S" + +/* + * Ensure that the system call table is equal to __NR_syscalls, + * which is the value the rest of the system sees + */ +.ifne NR_syscalls - __NR_syscalls +.error "__NR_syscalls is not equal to the size of the syscall table" +.endif +  #undef CALL  #define CALL(x) .long x @@ -508,11 +520,6 @@ sys_vfork_wrapper:  		b	sys_vfork  ENDPROC(sys_vfork_wrapper) -sys_execve_wrapper: -		add	r3, sp, #S_OFF -		b	sys_execve -ENDPROC(sys_execve_wrapper) -  sys_clone_wrapper:  		add	ip, sp, #S_OFF  		str	ip, [sp, #4] diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S index 3db960e20cb..4eee351f466 100644 --- a/arch/arm/kernel/head.S +++ b/arch/arm/kernel/head.S @@ -23,8 +23,8 @@  #include <asm/thread_info.h>  #include <asm/pgtable.h> -#ifdef CONFIG_DEBUG_LL -#include <mach/debug-macro.S> +#if defined(CONFIG_DEBUG_LL) && !defined(CONFIG_DEBUG_SEMIHOSTING) +#include CONFIG_DEBUG_LL_INCLUDE  
#endif  /* @@ -83,8 +83,12 @@ ENTRY(stext)   THUMB(	.thumb			)	@ switch to Thumb now.   THUMB(1:			) -	setmode	PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9 @ ensure svc mode -						@ and irqs disabled +#ifdef CONFIG_ARM_VIRT_EXT +	bl	__hyp_stub_install +#endif +	@ ensure svc mode and all interrupts masked +	safe_svcmode_maskall r9 +  	mrc	p15, 0, r9, c0, c0		@ get processor id  	bl	__lookup_processor_type		@ r5=procinfo r9=cpuid  	movs	r10, r5				@ invalid processor (r5=0)? @@ -326,7 +330,11 @@ ENTRY(secondary_startup)  	 * the processor type - there is no need to check the machine type  	 * as it has already been validated by the primary processor.  	 */ -	setmode	PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9 +#ifdef CONFIG_ARM_VIRT_EXT +	bl	__hyp_stub_install +#endif +	safe_svcmode_maskall r9 +  	mrc	p15, 0, r9, c0, c0		@ get processor id  	bl	__lookup_processor_type  	movs	r10, r5				@ invalid processor? diff --git a/arch/arm/kernel/hyp-stub.S b/arch/arm/kernel/hyp-stub.S new file mode 100644 index 00000000000..65b2417aebc --- /dev/null +++ b/arch/arm/kernel/hyp-stub.S @@ -0,0 +1,223 @@ +/* + * Copyright (c) 2012 Linaro Limited. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + */ + +#include <linux/init.h> +#include <linux/linkage.h> +#include <asm/assembler.h> +#include <asm/virt.h> + +#ifndef ZIMAGE +/* + * For the kernel proper, we need to find out the CPU boot mode long after + * boot, so we need to store it in a writable variable. + * + * This is not in .bss, because we set it sufficiently early that the boot-time + * zeroing of .bss would clobber it. + */ +.data +ENTRY(__boot_cpu_mode) +	.long	0 +.text + +	/* +	 * Save the primary CPU boot mode. Requires 3 scratch registers. +	 */ +	.macro	store_primary_cpu_mode	reg1, reg2, reg3 +	mrs	\reg1, cpsr +	and	\reg1, \reg1, #MODE_MASK +	adr	\reg2, .L__boot_cpu_mode_offset +	ldr	\reg3, [\reg2] +	str	\reg1, [\reg2, \reg3] +	.endm + +	/* +	 * Compare the current mode with the one saved on the primary CPU. +	 * If they don't match, record that fact. The Z bit indicates +	 * if there's a match or not. +	 * Requires 3 additionnal scratch registers. +	 */ +	.macro	compare_cpu_mode_with_primary mode, reg1, reg2, reg3 +	adr	\reg2, .L__boot_cpu_mode_offset +	ldr	\reg3, [\reg2] +	ldr	\reg1, [\reg2, \reg3] +	cmp	\mode, \reg1		@ matches primary CPU boot mode? +	orrne	r7, r7, #BOOT_CPU_MODE_MISMATCH +	strne	r7, [r5, r6]		@ record what happened and give up +	.endm + +#else	/* ZIMAGE */ + +	.macro	store_primary_cpu_mode	reg1:req, reg2:req, reg3:req +	.endm + +/* + * The zImage loader only runs on one CPU, so we don't bother with mult-CPU + * consistency checking: + */ +	.macro	compare_cpu_mode_with_primary mode, reg1, reg2, reg3 +	cmp	\mode, \mode +	.endm + +#endif /* ZIMAGE */ + +/* + * Hypervisor stub installation functions. + * + * These must be called with the MMU and D-cache off. 
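Both safe_svcmode_maskall and the stub key off the CPSR mode field. The encodings are architectural (SVC = 0x13 and HYP = 0x1a under MODE_MASK = 0x1f); a small model of the mode check the stub performs, with an invented sample CPSR value:

```c
#include <stdio.h>
#include <stdint.h>

#define MODE_MASK 0x1f	/* architectural CPSR.M field */
#define SVC_MODE  0x13
#define HYP_MODE  0x1a

int main(void)
{
	uint32_t cpsr = 0x600001da;	/* invented sample value */
	uint32_t mode = cpsr & MODE_MASK;

	printf("booted in %s mode\n",
	       mode == HYP_MODE ? "HYP" :
	       mode == SVC_MODE ? "SVC" : "other");
	return 0;
}
```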
+ * They are not ABI compliant and are only intended to be called from the kernel + * entry points in head.S. + */ +@ Call this from the primary CPU +ENTRY(__hyp_stub_install) +	store_primary_cpu_mode	r4, r5, r6 +ENDPROC(__hyp_stub_install) + +	@ fall through... + +@ Secondary CPUs should call here +ENTRY(__hyp_stub_install_secondary) +	mrs	r4, cpsr +	and	r4, r4, #MODE_MASK + +	/* +	 * If the secondary has booted with a different mode, give up +	 * immediately. +	 */ +	compare_cpu_mode_with_primary	r4, r5, r6, r7 +	bxne	lr + +	/* +	 * Once we have given up on one CPU, we do not try to install the +	 * stub hypervisor on the remaining ones: because the saved boot mode +	 * is modified, it can't compare equal to the CPSR mode field any +	 * more. +	 * +	 * Otherwise... +	 */ + +	cmp	r4, #HYP_MODE +	bxne	lr			@ give up if the CPU is not in HYP mode + +/* + * Configure HSCTLR to set correct exception endianness/instruction set + * state etc. + * Turn off all traps + * Eventually, CPU-specific code might be needed -- assume not for now + * + * This code relies on the "eret" instruction to synchronize the + * various coprocessor accesses. + */ +	@ Now install the hypervisor stub: +	adr	r7, __hyp_stub_vectors +	mcr	p15, 4, r7, c12, c0, 0	@ set hypervisor vector base (HVBAR) + +	@ Disable all traps, so we don't get any nasty surprise +	mov	r7, #0 +	mcr	p15, 4, r7, c1, c1, 0	@ HCR +	mcr	p15, 4, r7, c1, c1, 2	@ HCPTR +	mcr	p15, 4, r7, c1, c1, 3	@ HSTR + +THUMB(	orr	r7, #(1 << 30)	)	@ HSCTLR.TE +#ifdef CONFIG_CPU_BIG_ENDIAN +	orr	r7, #(1 << 9)		@ HSCTLR.EE +#endif +	mcr	p15, 4, r7, c1, c0, 0	@ HSCTLR + +	mrc	p15, 4, r7, c1, c1, 1	@ HDCR +	and	r7, #0x1f		@ Preserve HPMN +	mcr	p15, 4, r7, c1, c1, 1	@ HDCR + +#if !defined(ZIMAGE) && defined(CONFIG_ARM_ARCH_TIMER) +	@ make CNTP_* and CNTPCT accessible from PL1 +	mrc	p15, 0, r7, c0, c1, 1	@ ID_PFR1 +	lsr	r7, #16 +	and	r7, #0xf +	cmp	r7, #1 +	bne	1f +	mrc	p15, 4, r7, c14, c1, 0	@ CNTHCTL +	orr	r7, r7, #3		@ PL1PCEN | PL1PCTEN +	mcr	p15, 4, r7, c14, c1, 0	@ CNTHCTL +1: +#endif + +	bic	r7, r4, #MODE_MASK +	orr	r7, r7, #SVC_MODE +THUMB(	orr	r7, r7, #PSR_T_BIT	) +	msr	spsr_cxsf, r7		@ This is SPSR_hyp. + +	__MSR_ELR_HYP(14)		@ msr elr_hyp, lr +	__ERET				@ return, switching to SVC mode +					@ The boot CPU mode is left in r4. +ENDPROC(__hyp_stub_install_secondary) + +__hyp_stub_do_trap: +	cmp	r0, #-1 +	mrceq	p15, 4, r0, c12, c0, 0	@ get HVBAR +	mcrne	p15, 4, r0, c12, c0, 0	@ set HVBAR +	__ERET +ENDPROC(__hyp_stub_do_trap) + +/* + * __hyp_set_vectors: Call this after boot to set the initial hypervisor + * vectors as part of hypervisor installation.  On an SMP system, this should + * be called on each CPU. + * + * r0 must be the physical address of the new vector table (which must lie in + * the bottom 4GB of physical address space. + * + * r0 must be 32-byte aligned. + * + * Before calling this, you must check that the stub hypervisor is installed + * everywhere, by waiting for any secondary CPUs to be brought up and then + * checking that BOOT_CPU_MODE_HAVE_HYP(__boot_cpu_mode) is true. + * + * If not, there is a pre-existing hypervisor, some CPUs failed to boot, or + * something else went wrong... in such cases, trying to install a new + * hypervisor is unlikely to work as desired. + * + * When you call into your shiny new hypervisor, sp_hyp will contain junk, + * so you will need to set that to something sensible at the new hypervisor's + * initialisation entry point. 
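__hyp_stub_do_trap above implements a two-call protocol: r0 == -1 reads the current vector base back, while any other value installs it, which is exactly how the two entry points that follow share one trap. A user-space model of that dispatch, with a plain variable standing in for the cp15 HVBAR register:

```c
#include <stdio.h>
#include <stdint.h>

static uint32_t hvbar;	/* stand-in for the cp15 HVBAR register */

static uint32_t hyp_stub_do_trap_model(uint32_t r0)
{
	if (r0 == (uint32_t)-1)
		return hvbar;	/* __hyp_get_vectors path */
	hvbar = r0;		/* __hyp_set_vectors path */
	return r0;
}

int main(void)
{
	hyp_stub_do_trap_model(0x80002000);	/* 32-byte aligned address */
	printf("HVBAR = 0x%08x\n", hyp_stub_do_trap_model((uint32_t)-1));
	return 0;
}
```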
+ */ +ENTRY(__hyp_get_vectors) +	mov	r0, #-1 +ENDPROC(__hyp_get_vectors) +	@ fall through +ENTRY(__hyp_set_vectors) +	__HVC(0) +	bx	lr +ENDPROC(__hyp_set_vectors) + +#ifndef ZIMAGE +.align 2 +.L__boot_cpu_mode_offset: +	.long	__boot_cpu_mode - . +#endif + +.align 5 +__hyp_stub_vectors: +__hyp_stub_reset:	W(b)	. +__hyp_stub_und:		W(b)	. +__hyp_stub_svc:		W(b)	. +__hyp_stub_pabort:	W(b)	. +__hyp_stub_dabort:	W(b)	. +__hyp_stub_trap:	W(b)	__hyp_stub_do_trap +__hyp_stub_irq:		W(b)	. +__hyp_stub_fiq:		W(b)	. +ENDPROC(__hyp_stub_vectors) + diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c index 16cedb42c0c..896165096d6 100644 --- a/arch/arm/kernel/irq.c +++ b/arch/arm/kernel/irq.c @@ -34,6 +34,7 @@  #include <linux/list.h>  #include <linux/kallsyms.h>  #include <linux/proc_fs.h> +#include <linux/export.h>  #include <asm/exception.h>  #include <asm/mach/arch.h> @@ -109,6 +110,7 @@ void set_irq_flags(unsigned int irq, unsigned int iflags)  	/* Order is clear bits in "clr" then set bits in "set" */  	irq_modify_status(irq, clr, set & ~clr);  } +EXPORT_SYMBOL_GPL(set_irq_flags);  void __init init_IRQ(void)  { diff --git a/arch/arm/kernel/kprobes-test-arm.c b/arch/arm/kernel/kprobes-test-arm.c index 38c1a3b103a..83931290506 100644 --- a/arch/arm/kernel/kprobes-test-arm.c +++ b/arch/arm/kernel/kprobes-test-arm.c @@ -366,7 +366,9 @@ void kprobe_arm_test_cases(void)  	TEST_UNSUPPORTED(".word 0xe04f0392 @ umaal r0, pc, r2, r3")  	TEST_UNSUPPORTED(".word 0xe0500090 @ undef")  	TEST_UNSUPPORTED(".word 0xe05fff9f @ undef") +#endif +#if __LINUX_ARM_ARCH__ >= 7  	TEST_RRR(  "mls		r0, r",1, VAL1,", r",2, VAL2,", r",3,  VAL3,"")  	TEST_RRR(  "mlshi	r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"")  	TEST_RR(   "mls		lr, r",1, VAL2,", r",2, VAL3,", r13") @@ -456,6 +458,8 @@ void kprobe_arm_test_cases(void)  	TEST_UNSUPPORTED(".word	0xe1700090") /* Unallocated space */  #if __LINUX_ARM_ARCH__ >= 6  	TEST_UNSUPPORTED("ldrex	r2, [sp]") +#endif +#if (__LINUX_ARM_ARCH__ >= 7) || defined(CONFIG_CPU_32v6K)  	TEST_UNSUPPORTED("strexd	r0, r2, r3, [sp]")  	TEST_UNSUPPORTED("ldrexd	r2, r3, [sp]")  	TEST_UNSUPPORTED("strexb	r0, r2, [sp]") diff --git a/arch/arm/kernel/leds.c b/arch/arm/kernel/leds.c deleted file mode 100644 index 1911dae19e4..00000000000 --- a/arch/arm/kernel/leds.c +++ /dev/null @@ -1,121 +0,0 @@ -/* - * LED support code, ripped out of arch/arm/kernel/time.c - * - *  Copyright (C) 1994-2001 Russell King - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- */ -#include <linux/export.h> -#include <linux/init.h> -#include <linux/device.h> -#include <linux/syscore_ops.h> -#include <linux/string.h> - -#include <asm/leds.h> - -static void dummy_leds_event(led_event_t evt) -{ -} - -void (*leds_event)(led_event_t) = dummy_leds_event; - -struct leds_evt_name { -	const char	name[8]; -	int		on; -	int		off; -}; - -static const struct leds_evt_name evt_names[] = { -	{ "amber", led_amber_on, led_amber_off }, -	{ "blue",  led_blue_on,  led_blue_off  }, -	{ "green", led_green_on, led_green_off }, -	{ "red",   led_red_on,   led_red_off   }, -}; - -static ssize_t leds_store(struct device *dev, -			struct device_attribute *attr, -			const char *buf, size_t size) -{ -	int ret = -EINVAL, len = strcspn(buf, " "); - -	if (len > 0 && buf[len] == '\0') -		len--; - -	if (strncmp(buf, "claim", len) == 0) { -		leds_event(led_claim); -		ret = size; -	} else if (strncmp(buf, "release", len) == 0) { -		leds_event(led_release); -		ret = size; -	} else { -		int i; - -		for (i = 0; i < ARRAY_SIZE(evt_names); i++) { -			if (strlen(evt_names[i].name) != len || -			    strncmp(buf, evt_names[i].name, len) != 0) -				continue; -			if (strncmp(buf+len, " on", 3) == 0) { -				leds_event(evt_names[i].on); -				ret = size; -			} else if (strncmp(buf+len, " off", 4) == 0) { -				leds_event(evt_names[i].off); -				ret = size; -			} -			break; -		} -	} -	return ret; -} - -static DEVICE_ATTR(event, 0200, NULL, leds_store); - -static struct bus_type leds_subsys = { -	.name		= "leds", -	.dev_name	= "leds", -}; - -static struct device leds_device = { -	.id		= 0, -	.bus		= &leds_subsys, -}; - -static int leds_suspend(void) -{ -	leds_event(led_stop); -	return 0; -} - -static void leds_resume(void) -{ -	leds_event(led_start); -} - -static void leds_shutdown(void) -{ -	leds_event(led_halted); -} - -static struct syscore_ops leds_syscore_ops = { -	.shutdown	= leds_shutdown, -	.suspend	= leds_suspend, -	.resume		= leds_resume, -}; - -static int __init leds_init(void) -{ -	int ret; -	ret = subsys_system_register(&leds_subsys, NULL); -	if (ret == 0) -		ret = device_register(&leds_device); -	if (ret == 0) -		ret = device_create_file(&leds_device, &dev_attr_event); -	if (ret == 0) -		register_syscore_ops(&leds_syscore_ops); -	return ret; -} - -device_initcall(leds_init); - -EXPORT_SYMBOL(leds_event); diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c index dfcdb9f7c12..8ef8c933780 100644 --- a/arch/arm/kernel/machine_kexec.c +++ b/arch/arm/kernel/machine_kexec.c @@ -8,7 +8,9 @@  #include <linux/reboot.h>  #include <linux/io.h>  #include <linux/irq.h> +#include <linux/memblock.h>  #include <asm/pgtable.h> +#include <linux/of_fdt.h>  #include <asm/pgalloc.h>  #include <asm/mmu_context.h>  #include <asm/cacheflush.h> @@ -32,6 +34,28 @@ static atomic_t waiting_for_crash_ipi;  int machine_kexec_prepare(struct kimage *image)  { +	struct kexec_segment *current_segment; +	__be32 header; +	int i, err; + +	/* +	 * No segment at default ATAGs address. try to locate +	 * a dtb using magic. 
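The loop that follows identifies a DTB segment by its first word: the flattened-device-tree magic OF_DT_HEADER (0xd00dfeed), which is always stored big-endian. A stand-alone version of that sniff test:

```c
#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define OF_DT_HEADER 0xd00dfeed	/* FDT magic, big-endian in memory */

static int looks_like_dtb(const void *buf)
{
	uint8_t b[4];
	uint32_t header;

	memcpy(b, buf, 4);	/* first 32-bit word of the segment */
	header = (uint32_t)b[0] << 24 | b[1] << 16 | b[2] << 8 | b[3];
	return header == OF_DT_HEADER;	/* the be32_to_cpu() comparison */
}

int main(void)
{
	static const uint8_t fdt[]  = { 0xd0, 0x0d, 0xfe, 0xed };
	static const uint8_t misc[] = { 0x00, 0x00, 0xa0, 0xe1 };

	printf("fdt: %d, misc: %d\n",
	       looks_like_dtb(fdt), looks_like_dtb(misc));
	return 0;
}
```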
+	 */ +	for (i = 0; i < image->nr_segments; i++) { +		current_segment = &image->segment[i]; + +		if (!memblock_is_region_memory(current_segment->mem, +					       current_segment->memsz)) +			return -EINVAL; + +		err = get_user(header, (__be32*)current_segment->buf); +		if (err) +			return err; + +		if (be32_to_cpu(header) == OF_DT_HEADER) +			kexec_boot_atags = current_segment->mem; +	}  	return 0;  } @@ -122,7 +146,9 @@ void machine_kexec(struct kimage *image)  	kexec_start_address = image->start;  	kexec_indirection_page = page_list;  	kexec_mach_type = machine_arch_type; -	kexec_boot_atags = image->start - KEXEC_ARM_ZIMAGE_OFFSET + KEXEC_ARM_ATAGS_OFFSET; +	if (!kexec_boot_atags) +		kexec_boot_atags = image->start - KEXEC_ARM_ZIMAGE_OFFSET + KEXEC_ARM_ATAGS_OFFSET; +  	/* copy our kernel relocation code to the control code page */  	memcpy(reboot_code_buffer, diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c index ab243b87118..53c0304b734 100644 --- a/arch/arm/kernel/perf_event.c +++ b/arch/arm/kernel/perf_event.c @@ -12,68 +12,15 @@   */  #define pr_fmt(fmt) "hw perfevents: " fmt -#include <linux/bitmap.h> -#include <linux/interrupt.h>  #include <linux/kernel.h> -#include <linux/export.h> -#include <linux/perf_event.h>  #include <linux/platform_device.h> -#include <linux/spinlock.h> +#include <linux/pm_runtime.h>  #include <linux/uaccess.h> -#include <asm/cputype.h> -#include <asm/irq.h>  #include <asm/irq_regs.h>  #include <asm/pmu.h>  #include <asm/stacktrace.h> -/* - * ARMv6 supports a maximum of 3 events, starting from index 0. If we add - * another platform that supports more, we need to increase this to be the - * largest of all platforms. - * - * ARMv7 supports up to 32 events: - *  cycle counter CCNT + 31 events counters CNT0..30. - *  Cortex-A8 has 1+4 counters, Cortex-A9 has 1+6 counters. - */ -#define ARMPMU_MAX_HWEVENTS		32 - -static DEFINE_PER_CPU(struct perf_event * [ARMPMU_MAX_HWEVENTS], hw_events); -static DEFINE_PER_CPU(unsigned long [BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)], used_mask); -static DEFINE_PER_CPU(struct pmu_hw_events, cpu_hw_events); - -#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu)) - -/* Set at runtime when we know what CPU type we are. */ -static struct arm_pmu *cpu_pmu; - -const char *perf_pmu_name(void) -{ -	if (!cpu_pmu) -		return NULL; - -	return cpu_pmu->pmu.name; -} -EXPORT_SYMBOL_GPL(perf_pmu_name); - -int perf_num_counters(void) -{ -	int max_events = 0; - -	if (cpu_pmu != NULL) -		max_events = cpu_pmu->num_events; - -	return max_events; -} -EXPORT_SYMBOL_GPL(perf_num_counters); - -#define HW_OP_UNSUPPORTED		0xFFFF - -#define C(_x) \ -	PERF_COUNT_HW_CACHE_##_x - -#define CACHE_OP_UNSUPPORTED		0xFFFF -  static int  armpmu_map_cache_event(const unsigned (*cache_map)  				      [PERF_COUNT_HW_CACHE_MAX] @@ -104,7 +51,7 @@ armpmu_map_cache_event(const unsigned (*cache_map)  }  static int -armpmu_map_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config) +armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)  {  	int mapping = (*event_map)[config];  	return mapping == HW_OP_UNSUPPORTED ? 
-ENOENT : mapping; @@ -116,19 +63,20 @@ armpmu_map_raw_event(u32 raw_event_mask, u64 config)  	return (int)(config & raw_event_mask);  } -static int map_cpu_event(struct perf_event *event, -			 const unsigned (*event_map)[PERF_COUNT_HW_MAX], -			 const unsigned (*cache_map) -					[PERF_COUNT_HW_CACHE_MAX] -					[PERF_COUNT_HW_CACHE_OP_MAX] -					[PERF_COUNT_HW_CACHE_RESULT_MAX], -			 u32 raw_event_mask) +int +armpmu_map_event(struct perf_event *event, +		 const unsigned (*event_map)[PERF_COUNT_HW_MAX], +		 const unsigned (*cache_map) +				[PERF_COUNT_HW_CACHE_MAX] +				[PERF_COUNT_HW_CACHE_OP_MAX] +				[PERF_COUNT_HW_CACHE_RESULT_MAX], +		 u32 raw_event_mask)  {  	u64 config = event->attr.config;  	switch (event->attr.type) {  	case PERF_TYPE_HARDWARE: -		return armpmu_map_event(event_map, config); +		return armpmu_map_hw_event(event_map, config);  	case PERF_TYPE_HW_CACHE:  		return armpmu_map_cache_event(cache_map, config);  	case PERF_TYPE_RAW: @@ -148,6 +96,10 @@ armpmu_event_set_period(struct perf_event *event,  	s64 period = hwc->sample_period;  	int ret = 0; +	/* The period may have been changed by PERF_EVENT_IOC_PERIOD */ +	if (unlikely(period != hwc->last_period)) +		left = period - (hwc->last_period - left); +  	if (unlikely(left <= -period)) {  		left = period;  		local64_set(&hwc->period_left, left); @@ -222,7 +174,6 @@ armpmu_stop(struct perf_event *event, int flags)  	 */  	if (!(hwc->state & PERF_HES_STOPPED)) {  		armpmu->disable(hwc, hwc->idx); -		barrier(); /* why? */  		armpmu_event_update(event, hwc, hwc->idx);  		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;  	} @@ -350,99 +301,41 @@ validate_group(struct perf_event *event)  	return 0;  } -static irqreturn_t armpmu_platform_irq(int irq, void *dev) +static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)  {  	struct arm_pmu *armpmu = (struct arm_pmu *) dev;  	struct platform_device *plat_device = armpmu->plat_device;  	struct arm_pmu_platdata *plat = dev_get_platdata(&plat_device->dev); -	return plat->handle_irq(irq, dev, armpmu->handle_irq); +	if (plat && plat->handle_irq) +		return plat->handle_irq(irq, dev, armpmu->handle_irq); +	else +		return armpmu->handle_irq(irq, dev);  }  static void  armpmu_release_hardware(struct arm_pmu *armpmu)  { -	int i, irq, irqs; -	struct platform_device *pmu_device = armpmu->plat_device; -	struct arm_pmu_platdata *plat = -		dev_get_platdata(&pmu_device->dev); - -	irqs = min(pmu_device->num_resources, num_possible_cpus()); - -	for (i = 0; i < irqs; ++i) { -		if (!cpumask_test_and_clear_cpu(i, &armpmu->active_irqs)) -			continue; -		irq = platform_get_irq(pmu_device, i); -		if (irq >= 0) { -			if (plat && plat->disable_irq) -				plat->disable_irq(irq); -			free_irq(irq, armpmu); -		} -	} - -	release_pmu(armpmu->type); +	armpmu->free_irq(); +	pm_runtime_put_sync(&armpmu->plat_device->dev);  }  static int  armpmu_reserve_hardware(struct arm_pmu *armpmu)  { -	struct arm_pmu_platdata *plat; -	irq_handler_t handle_irq; -	int i, err, irq, irqs; +	int err;  	struct platform_device *pmu_device = armpmu->plat_device;  	if (!pmu_device)  		return -ENODEV; -	err = reserve_pmu(armpmu->type); +	pm_runtime_get_sync(&pmu_device->dev); +	err = armpmu->request_irq(armpmu_dispatch_irq);  	if (err) { -		pr_warning("unable to reserve pmu\n"); +		armpmu_release_hardware(armpmu);  		return err;  	} -	plat = dev_get_platdata(&pmu_device->dev); -	if (plat && plat->handle_irq) -		handle_irq = armpmu_platform_irq; -	else -		handle_irq = armpmu->handle_irq; - -	irqs = min(pmu_device->num_resources, 
num_possible_cpus()); -	if (irqs < 1) { -		pr_err("no irqs for PMUs defined\n"); -		return -ENODEV; -	} - -	for (i = 0; i < irqs; ++i) { -		err = 0; -		irq = platform_get_irq(pmu_device, i); -		if (irq < 0) -			continue; - -		/* -		 * If we have a single PMU interrupt that we can't shift, -		 * assume that we're running on a uniprocessor machine and -		 * continue. Otherwise, continue without this interrupt. -		 */ -		if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) { -			pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n", -				    irq, i); -			continue; -		} - -		err = request_irq(irq, handle_irq, -				  IRQF_DISABLED | IRQF_NOBALANCING, -				  "arm-pmu", armpmu); -		if (err) { -			pr_err("unable to request IRQ%d for ARM PMU counters\n", -				irq); -			armpmu_release_hardware(armpmu); -			return err; -		} else if (plat && plat->enable_irq) -			plat->enable_irq(irq); - -		cpumask_set_cpu(i, &armpmu->active_irqs); -	} -  	return 0;  } @@ -581,6 +474,32 @@ static void armpmu_disable(struct pmu *pmu)  	armpmu->stop();  } +#ifdef CONFIG_PM_RUNTIME +static int armpmu_runtime_resume(struct device *dev) +{ +	struct arm_pmu_platdata *plat = dev_get_platdata(dev); + +	if (plat && plat->runtime_resume) +		return plat->runtime_resume(dev); + +	return 0; +} + +static int armpmu_runtime_suspend(struct device *dev) +{ +	struct arm_pmu_platdata *plat = dev_get_platdata(dev); + +	if (plat && plat->runtime_suspend) +		return plat->runtime_suspend(dev); + +	return 0; +} +#endif + +const struct dev_pm_ops armpmu_dev_pm_ops = { +	SET_RUNTIME_PM_OPS(armpmu_runtime_suspend, armpmu_runtime_resume, NULL) +}; +  static void __init armpmu_init(struct arm_pmu *armpmu)  {  	atomic_set(&armpmu->active_events, 0); @@ -598,174 +517,14 @@ static void __init armpmu_init(struct arm_pmu *armpmu)  	};  } -int __init armpmu_register(struct arm_pmu *armpmu, char *name, int type) +int armpmu_register(struct arm_pmu *armpmu, char *name, int type)  {  	armpmu_init(armpmu); +	pr_info("enabled with %s PMU driver, %d counters available\n", +			armpmu->name, armpmu->num_events);  	return perf_pmu_register(&armpmu->pmu, name, type);  } -/* Include the PMU-specific implementations. */ -#include "perf_event_xscale.c" -#include "perf_event_v6.c" -#include "perf_event_v7.c" - -/* - * Ensure the PMU has sane values out of reset. - * This requires SMP to be available, so exists as a separate initcall. - */ -static int __init -cpu_pmu_reset(void) -{ -	if (cpu_pmu && cpu_pmu->reset) -		return on_each_cpu(cpu_pmu->reset, NULL, 1); -	return 0; -} -arch_initcall(cpu_pmu_reset); - -/* - * PMU platform driver and devicetree bindings. 
- */ -static struct of_device_id armpmu_of_device_ids[] = { -	{.compatible = "arm,cortex-a9-pmu"}, -	{.compatible = "arm,cortex-a8-pmu"}, -	{.compatible = "arm,arm1136-pmu"}, -	{.compatible = "arm,arm1176-pmu"}, -	{}, -}; - -static struct platform_device_id armpmu_plat_device_ids[] = { -	{.name = "arm-pmu"}, -	{}, -}; - -static int __devinit armpmu_device_probe(struct platform_device *pdev) -{ -	if (!cpu_pmu) -		return -ENODEV; - -	cpu_pmu->plat_device = pdev; -	return 0; -} - -static struct platform_driver armpmu_driver = { -	.driver		= { -		.name	= "arm-pmu", -		.of_match_table = armpmu_of_device_ids, -	}, -	.probe		= armpmu_device_probe, -	.id_table	= armpmu_plat_device_ids, -}; - -static int __init register_pmu_driver(void) -{ -	return platform_driver_register(&armpmu_driver); -} -device_initcall(register_pmu_driver); - -static struct pmu_hw_events *armpmu_get_cpu_events(void) -{ -	return &__get_cpu_var(cpu_hw_events); -} - -static void __init cpu_pmu_init(struct arm_pmu *armpmu) -{ -	int cpu; -	for_each_possible_cpu(cpu) { -		struct pmu_hw_events *events = &per_cpu(cpu_hw_events, cpu); -		events->events = per_cpu(hw_events, cpu); -		events->used_mask = per_cpu(used_mask, cpu); -		raw_spin_lock_init(&events->pmu_lock); -	} -	armpmu->get_hw_events = armpmu_get_cpu_events; -	armpmu->type = ARM_PMU_DEVICE_CPU; -} - -/* - * PMU hardware loses all context when a CPU goes offline. - * When a CPU is hotplugged back in, since some hardware registers are - * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading - * junk values out of them. - */ -static int __cpuinit pmu_cpu_notify(struct notifier_block *b, -					unsigned long action, void *hcpu) -{ -	if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING) -		return NOTIFY_DONE; - -	if (cpu_pmu && cpu_pmu->reset) -		cpu_pmu->reset(NULL); - -	return NOTIFY_OK; -} - -static struct notifier_block __cpuinitdata pmu_cpu_notifier = { -	.notifier_call = pmu_cpu_notify, -}; - -/* - * CPU PMU identification and registration. - */ -static int __init -init_hw_perf_events(void) -{ -	unsigned long cpuid = read_cpuid_id(); -	unsigned long implementor = (cpuid & 0xFF000000) >> 24; -	unsigned long part_number = (cpuid & 0xFFF0); - -	/* ARM Ltd CPUs. */ -	if (0x41 == implementor) { -		switch (part_number) { -		case 0xB360:	/* ARM1136 */ -		case 0xB560:	/* ARM1156 */ -		case 0xB760:	/* ARM1176 */ -			cpu_pmu = armv6pmu_init(); -			break; -		case 0xB020:	/* ARM11mpcore */ -			cpu_pmu = armv6mpcore_pmu_init(); -			break; -		case 0xC080:	/* Cortex-A8 */ -			cpu_pmu = armv7_a8_pmu_init(); -			break; -		case 0xC090:	/* Cortex-A9 */ -			cpu_pmu = armv7_a9_pmu_init(); -			break; -		case 0xC050:	/* Cortex-A5 */ -			cpu_pmu = armv7_a5_pmu_init(); -			break; -		case 0xC0F0:	/* Cortex-A15 */ -			cpu_pmu = armv7_a15_pmu_init(); -			break; -		case 0xC070:	/* Cortex-A7 */ -			cpu_pmu = armv7_a7_pmu_init(); -			break; -		} -	/* Intel CPUs [xscale]. */ -	} else if (0x69 == implementor) { -		part_number = (cpuid >> 13) & 0x7; -		switch (part_number) { -		case 1: -			cpu_pmu = xscale1pmu_init(); -			break; -		case 2: -			cpu_pmu = xscale2pmu_init(); -			break; -		} -	} - -	if (cpu_pmu) { -		pr_info("enabled with %s PMU driver, %d counters available\n", -			cpu_pmu->name, cpu_pmu->num_events); -		cpu_pmu_init(cpu_pmu); -		register_cpu_notifier(&pmu_cpu_notifier); -		armpmu_register(cpu_pmu, cpu_pmu->name, PERF_TYPE_RAW); -	} else { -		pr_info("no hardware support available\n"); -	} - -	return 0; -} -early_initcall(init_hw_perf_events); -  /*   * Callchain handling code.   
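After this refactor the generic file keeps only the armpmu plumbing; a CPU-specific driver (now in perf_event_cpu.c, shown next) fills in a struct arm_pmu and hands it to armpmu_register(). Schematically, and only as a sketch under assumed callback names rather than a complete kernel module, a backend does something like:

```c
/* Sketch of the registration flow, not buildable on its own. */
static struct arm_pmu my_pmu = {
	.name		= "hypothetical-pmu",
	.num_events	= 4,
	/* .handle_irq, .enable, .disable, ... supplied by the backend */
};

static int __init my_pmu_driver_init(void)
{
	return armpmu_register(&my_pmu, "hypothetical-pmu", PERF_TYPE_RAW);
}
```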
*/ diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c new file mode 100644 index 00000000000..8d7d8d4de9d --- /dev/null +++ b/arch/arm/kernel/perf_event_cpu.c @@ -0,0 +1,295 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + * Copyright (C) 2012 ARM Limited + * + * Author: Will Deacon <will.deacon@arm.com> + */ +#define pr_fmt(fmt) "CPU PMU: " fmt + +#include <linux/bitmap.h> +#include <linux/export.h> +#include <linux/kernel.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/spinlock.h> + +#include <asm/cputype.h> +#include <asm/irq_regs.h> +#include <asm/pmu.h> + +/* Set at runtime when we know what CPU type we are. */ +static struct arm_pmu *cpu_pmu; + +static DEFINE_PER_CPU(struct perf_event * [ARMPMU_MAX_HWEVENTS], hw_events); +static DEFINE_PER_CPU(unsigned long [BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)], used_mask); +static DEFINE_PER_CPU(struct pmu_hw_events, cpu_hw_events); + +/* + * Despite the names, these two functions are CPU-specific and are used + * by the OProfile/perf code. + */ +const char *perf_pmu_name(void) +{ +	if (!cpu_pmu) +		return NULL; + +	return cpu_pmu->pmu.name; +} +EXPORT_SYMBOL_GPL(perf_pmu_name); + +int perf_num_counters(void) +{ +	int max_events = 0; + +	if (cpu_pmu != NULL) +		max_events = cpu_pmu->num_events; + +	return max_events; +} +EXPORT_SYMBOL_GPL(perf_num_counters); + +/* Include the PMU-specific implementations. */ +#include "perf_event_xscale.c" +#include "perf_event_v6.c" +#include "perf_event_v7.c" + +static struct pmu_hw_events *cpu_pmu_get_cpu_events(void) +{ +	return &__get_cpu_var(cpu_hw_events); +} + +static void cpu_pmu_free_irq(void) +{ +	int i, irq, irqs; +	struct platform_device *pmu_device = cpu_pmu->plat_device; + +	irqs = min(pmu_device->num_resources, num_possible_cpus()); + +	for (i = 0; i < irqs; ++i) { +		if (!cpumask_test_and_clear_cpu(i, &cpu_pmu->active_irqs)) +			continue; +		irq = platform_get_irq(pmu_device, i); +		if (irq >= 0) +			free_irq(irq, cpu_pmu); +	} +} + +static int cpu_pmu_request_irq(irq_handler_t handler) +{ +	int i, err, irq, irqs; +	struct platform_device *pmu_device = cpu_pmu->plat_device; + +	if (!pmu_device) +		return -ENODEV; + +	irqs = min(pmu_device->num_resources, num_possible_cpus()); +	if (irqs < 1) { +		pr_err("no irqs for PMUs defined\n"); +		return -ENODEV; +	} + +	for (i = 0; i < irqs; ++i) { +		err = 0; +		irq = platform_get_irq(pmu_device, i); +		if (irq < 0) +			continue; + +		/* +		 * If we have a single PMU interrupt that we can't shift, +		 * assume that we're running on a uniprocessor machine and +		 * continue. Otherwise, continue without this interrupt. 
+		 */ +		if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) { +			pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n", +				    irq, i); +			continue; +		} + +		err = request_irq(irq, handler, IRQF_NOBALANCING, "arm-pmu", +				  cpu_pmu); +		if (err) { +			pr_err("unable to request IRQ%d for ARM PMU counters\n", +				irq); +			return err; +		} + +		cpumask_set_cpu(i, &cpu_pmu->active_irqs); +	} + +	return 0; +} + +static void __devinit cpu_pmu_init(struct arm_pmu *cpu_pmu) +{ +	int cpu; +	for_each_possible_cpu(cpu) { +		struct pmu_hw_events *events = &per_cpu(cpu_hw_events, cpu); +		events->events = per_cpu(hw_events, cpu); +		events->used_mask = per_cpu(used_mask, cpu); +		raw_spin_lock_init(&events->pmu_lock); +	} + +	cpu_pmu->get_hw_events	= cpu_pmu_get_cpu_events; +	cpu_pmu->request_irq	= cpu_pmu_request_irq; +	cpu_pmu->free_irq	= cpu_pmu_free_irq; + +	/* Ensure the PMU has sane values out of reset. */ +	if (cpu_pmu && cpu_pmu->reset) +		on_each_cpu(cpu_pmu->reset, NULL, 1); +} + +/* + * PMU hardware loses all context when a CPU goes offline. + * When a CPU is hotplugged back in, since some hardware registers are + * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading + * junk values out of them. + */ +static int __cpuinit cpu_pmu_notify(struct notifier_block *b, +				    unsigned long action, void *hcpu) +{ +	if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING) +		return NOTIFY_DONE; + +	if (cpu_pmu && cpu_pmu->reset) +		cpu_pmu->reset(NULL); + +	return NOTIFY_OK; +} + +static struct notifier_block __cpuinitdata cpu_pmu_hotplug_notifier = { +	.notifier_call = cpu_pmu_notify, +}; + +/* + * PMU platform driver and devicetree bindings. + */ +static struct of_device_id __devinitdata cpu_pmu_of_device_ids[] = { +	{.compatible = "arm,cortex-a15-pmu",	.data = armv7_a15_pmu_init}, +	{.compatible = "arm,cortex-a9-pmu",	.data = armv7_a9_pmu_init}, +	{.compatible = "arm,cortex-a8-pmu",	.data = armv7_a8_pmu_init}, +	{.compatible = "arm,cortex-a7-pmu",	.data = armv7_a7_pmu_init}, +	{.compatible = "arm,cortex-a5-pmu",	.data = armv7_a5_pmu_init}, +	{.compatible = "arm,arm11mpcore-pmu",	.data = armv6mpcore_pmu_init}, +	{.compatible = "arm,arm1176-pmu",	.data = armv6pmu_init}, +	{.compatible = "arm,arm1136-pmu",	.data = armv6pmu_init}, +	{}, +}; + +static struct platform_device_id __devinitdata cpu_pmu_plat_device_ids[] = { +	{.name = "arm-pmu"}, +	{}, +}; + +/* + * CPU PMU identification and probing. + */ +static struct arm_pmu *__devinit probe_current_pmu(void) +{ +	struct arm_pmu *pmu = NULL; +	int cpu = get_cpu(); +	unsigned long cpuid = read_cpuid_id(); +	unsigned long implementor = (cpuid & 0xFF000000) >> 24; +	unsigned long part_number = (cpuid & 0xFFF0); + +	pr_info("probing PMU on CPU %d\n", cpu); + +	/* ARM Ltd CPUs. */ +	if (0x41 == implementor) { +		switch (part_number) { +		case 0xB360:	/* ARM1136 */ +		case 0xB560:	/* ARM1156 */ +		case 0xB760:	/* ARM1176 */ +			pmu = armv6pmu_init(); +			break; +		case 0xB020:	/* ARM11mpcore */ +			pmu = armv6mpcore_pmu_init(); +			break; +		case 0xC080:	/* Cortex-A8 */ +			pmu = armv7_a8_pmu_init(); +			break; +		case 0xC090:	/* Cortex-A9 */ +			pmu = armv7_a9_pmu_init(); +			break; +		case 0xC050:	/* Cortex-A5 */ +			pmu = armv7_a5_pmu_init(); +			break; +		case 0xC0F0:	/* Cortex-A15 */ +			pmu = armv7_a15_pmu_init(); +			break; +		case 0xC070:	/* Cortex-A7 */ +			pmu = armv7_a7_pmu_init(); +			break; +		} +	/* Intel CPUs [xscale]. 
*/ +	} else if (0x69 == implementor) { +		part_number = (cpuid >> 13) & 0x7; +		switch (part_number) { +		case 1: +			pmu = xscale1pmu_init(); +			break; +		case 2: +			pmu = xscale2pmu_init(); +			break; +		} +	} + +	put_cpu(); +	return pmu; +} + +static int __devinit cpu_pmu_device_probe(struct platform_device *pdev) +{ +	const struct of_device_id *of_id; +	struct arm_pmu *(*init_fn)(void); +	struct device_node *node = pdev->dev.of_node; + +	if (cpu_pmu) { +		pr_info("attempt to register multiple PMU devices!"); +		return -ENOSPC; +	} + +	if (node && (of_id = of_match_node(cpu_pmu_of_device_ids, pdev->dev.of_node))) { +		init_fn = of_id->data; +		cpu_pmu = init_fn(); +	} else { +		cpu_pmu = probe_current_pmu(); +	} + +	if (!cpu_pmu) +		return -ENODEV; + +	cpu_pmu->plat_device = pdev; +	cpu_pmu_init(cpu_pmu); +	register_cpu_notifier(&cpu_pmu_hotplug_notifier); +	armpmu_register(cpu_pmu, cpu_pmu->name, PERF_TYPE_RAW); + +	return 0; +} + +static struct platform_driver cpu_pmu_driver = { +	.driver		= { +		.name	= "arm-pmu", +		.pm	= &armpmu_dev_pm_ops, +		.of_match_table = cpu_pmu_of_device_ids, +	}, +	.probe		= cpu_pmu_device_probe, +	.id_table	= cpu_pmu_plat_device_ids, +}; + +static int __init register_pmu_driver(void) +{ +	return platform_driver_register(&cpu_pmu_driver); +} +device_initcall(register_pmu_driver); diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c index c90fcb2b696..6ccc0797174 100644 --- a/arch/arm/kernel/perf_event_v6.c +++ b/arch/arm/kernel/perf_event_v6.c @@ -645,7 +645,7 @@ armv6mpcore_pmu_disable_event(struct hw_perf_event *hwc,  static int armv6_map_event(struct perf_event *event)  { -	return map_cpu_event(event, &armv6_perf_map, +	return armpmu_map_event(event, &armv6_perf_map,  				&armv6_perf_cache_map, 0xFF);  } @@ -664,7 +664,7 @@ static struct arm_pmu armv6pmu = {  	.max_period		= (1LLU << 32) - 1,  }; -static struct arm_pmu *__init armv6pmu_init(void) +static struct arm_pmu *__devinit armv6pmu_init(void)  {  	return &armv6pmu;  } @@ -679,7 +679,7 @@ static struct arm_pmu *__init armv6pmu_init(void)  static int armv6mpcore_map_event(struct perf_event *event)  { -	return map_cpu_event(event, &armv6mpcore_perf_map, +	return armpmu_map_event(event, &armv6mpcore_perf_map,  				&armv6mpcore_perf_cache_map, 0xFF);  } @@ -698,17 +698,17 @@ static struct arm_pmu armv6mpcore_pmu = {  	.max_period		= (1LLU << 32) - 1,  }; -static struct arm_pmu *__init armv6mpcore_pmu_init(void) +static struct arm_pmu *__devinit armv6mpcore_pmu_init(void)  {  	return &armv6mpcore_pmu;  }  #else -static struct arm_pmu *__init armv6pmu_init(void) +static struct arm_pmu *__devinit armv6pmu_init(void)  {  	return NULL;  } -static struct arm_pmu *__init armv6mpcore_pmu_init(void) +static struct arm_pmu *__devinit armv6mpcore_pmu_init(void)  {  	return NULL;  } diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c index f04070bd218..bd4b090ebcf 100644 --- a/arch/arm/kernel/perf_event_v7.c +++ b/arch/arm/kernel/perf_event_v7.c @@ -1204,31 +1204,31 @@ static void armv7pmu_reset(void *info)  static int armv7_a8_map_event(struct perf_event *event)  { -	return map_cpu_event(event, &armv7_a8_perf_map, +	return armpmu_map_event(event, &armv7_a8_perf_map,  				&armv7_a8_perf_cache_map, 0xFF);  }  static int armv7_a9_map_event(struct perf_event *event)  { -	return map_cpu_event(event, &armv7_a9_perf_map, +	return armpmu_map_event(event, &armv7_a9_perf_map,  				&armv7_a9_perf_cache_map, 0xFF);  }  static int armv7_a5_map_event(struct perf_event 
*event)  { -	return map_cpu_event(event, &armv7_a5_perf_map, +	return armpmu_map_event(event, &armv7_a5_perf_map,  				&armv7_a5_perf_cache_map, 0xFF);  }  static int armv7_a15_map_event(struct perf_event *event)  { -	return map_cpu_event(event, &armv7_a15_perf_map, +	return armpmu_map_event(event, &armv7_a15_perf_map,  				&armv7_a15_perf_cache_map, 0xFF);  }  static int armv7_a7_map_event(struct perf_event *event)  { -	return map_cpu_event(event, &armv7_a7_perf_map, +	return armpmu_map_event(event, &armv7_a7_perf_map,  				&armv7_a7_perf_cache_map, 0xFF);  } @@ -1245,7 +1245,7 @@ static struct arm_pmu armv7pmu = {  	.max_period		= (1LLU << 32) - 1,  }; -static u32 __init armv7_read_num_pmnc_events(void) +static u32 __devinit armv7_read_num_pmnc_events(void)  {  	u32 nb_cnt; @@ -1256,7 +1256,7 @@ static u32 __init armv7_read_num_pmnc_events(void)  	return nb_cnt + 1;  } -static struct arm_pmu *__init armv7_a8_pmu_init(void) +static struct arm_pmu *__devinit armv7_a8_pmu_init(void)  {  	armv7pmu.name		= "ARMv7 Cortex-A8";  	armv7pmu.map_event	= armv7_a8_map_event; @@ -1264,7 +1264,7 @@ static struct arm_pmu *__init armv7_a8_pmu_init(void)  	return &armv7pmu;  } -static struct arm_pmu *__init armv7_a9_pmu_init(void) +static struct arm_pmu *__devinit armv7_a9_pmu_init(void)  {  	armv7pmu.name		= "ARMv7 Cortex-A9";  	armv7pmu.map_event	= armv7_a9_map_event; @@ -1272,7 +1272,7 @@ static struct arm_pmu *__init armv7_a9_pmu_init(void)  	return &armv7pmu;  } -static struct arm_pmu *__init armv7_a5_pmu_init(void) +static struct arm_pmu *__devinit armv7_a5_pmu_init(void)  {  	armv7pmu.name		= "ARMv7 Cortex-A5";  	armv7pmu.map_event	= armv7_a5_map_event; @@ -1280,7 +1280,7 @@ static struct arm_pmu *__init armv7_a5_pmu_init(void)  	return &armv7pmu;  } -static struct arm_pmu *__init armv7_a15_pmu_init(void) +static struct arm_pmu *__devinit armv7_a15_pmu_init(void)  {  	armv7pmu.name		= "ARMv7 Cortex-A15";  	armv7pmu.map_event	= armv7_a15_map_event; @@ -1289,7 +1289,7 @@ static struct arm_pmu *__init armv7_a15_pmu_init(void)  	return &armv7pmu;  } -static struct arm_pmu *__init armv7_a7_pmu_init(void) +static struct arm_pmu *__devinit armv7_a7_pmu_init(void)  {  	armv7pmu.name		= "ARMv7 Cortex-A7";  	armv7pmu.map_event	= armv7_a7_map_event; @@ -1298,27 +1298,27 @@ static struct arm_pmu *__init armv7_a7_pmu_init(void)  	return &armv7pmu;  }  #else -static struct arm_pmu *__init armv7_a8_pmu_init(void) +static struct arm_pmu *__devinit armv7_a8_pmu_init(void)  {  	return NULL;  } -static struct arm_pmu *__init armv7_a9_pmu_init(void) +static struct arm_pmu *__devinit armv7_a9_pmu_init(void)  {  	return NULL;  } -static struct arm_pmu *__init armv7_a5_pmu_init(void) +static struct arm_pmu *__devinit armv7_a5_pmu_init(void)  {  	return NULL;  } -static struct arm_pmu *__init armv7_a15_pmu_init(void) +static struct arm_pmu *__devinit armv7_a15_pmu_init(void)  {  	return NULL;  } -static struct arm_pmu *__init armv7_a7_pmu_init(void) +static struct arm_pmu *__devinit armv7_a7_pmu_init(void)  {  	return NULL;  } diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c index f759fe0bab6..426e19f380a 100644 --- a/arch/arm/kernel/perf_event_xscale.c +++ b/arch/arm/kernel/perf_event_xscale.c @@ -430,7 +430,7 @@ xscale1pmu_write_counter(int counter, u32 val)  static int xscale_map_event(struct perf_event *event)  { -	return map_cpu_event(event, &xscale_perf_map, +	return armpmu_map_event(event, &xscale_perf_map,  				&xscale_perf_cache_map, 0xFF);  } @@ -449,7 +449,7 @@ static 
struct arm_pmu xscale1pmu = {  	.max_period	= (1LLU << 32) - 1,  }; -static struct arm_pmu *__init xscale1pmu_init(void) +static struct arm_pmu *__devinit xscale1pmu_init(void)  {  	return &xscale1pmu;  } @@ -816,17 +816,17 @@ static struct arm_pmu xscale2pmu = {  	.max_period	= (1LLU << 32) - 1,  }; -static struct arm_pmu *__init xscale2pmu_init(void) +static struct arm_pmu *__devinit xscale2pmu_init(void)  {  	return &xscale2pmu;  }  #else -static struct arm_pmu *__init xscale1pmu_init(void) +static struct arm_pmu *__devinit xscale1pmu_init(void)  {  	return NULL;  } -static struct arm_pmu *__init xscale2pmu_init(void) +static struct arm_pmu *__devinit xscale2pmu_init(void)  {  	return NULL;  } diff --git a/arch/arm/kernel/pmu.c b/arch/arm/kernel/pmu.c deleted file mode 100644 index 2334bf8a650..00000000000 --- a/arch/arm/kernel/pmu.c +++ /dev/null @@ -1,36 +0,0 @@ -/* - *  linux/arch/arm/kernel/pmu.c - * - *  Copyright (C) 2009 picoChip Designs Ltd, Jamie Iles - *  Copyright (C) 2010 ARM Ltd, Will Deacon - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - */ - -#include <linux/err.h> -#include <linux/kernel.h> -#include <linux/module.h> - -#include <asm/pmu.h> - -/* - * PMU locking to ensure mutual exclusion between different subsystems. - */ -static unsigned long pmu_lock[BITS_TO_LONGS(ARM_NUM_PMU_DEVICES)]; - -int -reserve_pmu(enum arm_pmu_type type) -{ -	return test_and_set_bit_lock(type, pmu_lock) ? -EBUSY : 0; -} -EXPORT_SYMBOL_GPL(reserve_pmu); - -void -release_pmu(enum arm_pmu_type type) -{ -	clear_bit_unlock(type, pmu_lock); -} -EXPORT_SYMBOL_GPL(release_pmu); diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index 693b744fd57..90084a6de35 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c @@ -31,9 +31,9 @@  #include <linux/random.h>  #include <linux/hw_breakpoint.h>  #include <linux/cpuidle.h> +#include <linux/leds.h>  #include <asm/cacheflush.h> -#include <asm/leds.h>  #include <asm/processor.h>  #include <asm/thread_notify.h>  #include <asm/stacktrace.h> @@ -189,7 +189,7 @@ void cpu_idle(void)  	while (1) {  		tick_nohz_idle_enter();  		rcu_idle_enter(); -		leds_event(led_idle_start); +		ledtrig_cpu(CPU_LED_IDLE_START);  		while (!need_resched()) {  #ifdef CONFIG_HOTPLUG_CPU  			if (cpu_is_offline(smp_processor_id())) @@ -220,7 +220,7 @@ void cpu_idle(void)  			} else  				local_irq_enable();  		} -		leds_event(led_idle_end); +		ledtrig_cpu(CPU_LED_IDLE_END);  		rcu_idle_exit();  		tick_nohz_idle_exit();  		schedule_preempt_disabled(); @@ -381,13 +381,20 @@ copy_thread(unsigned long clone_flags, unsigned long stack_start,  	struct thread_info *thread = task_thread_info(p);  	struct pt_regs *childregs = task_pt_regs(p); -	*childregs = *regs; -	childregs->ARM_r0 = 0; -	childregs->ARM_sp = stack_start; -  	memset(&thread->cpu_context, 0, sizeof(struct cpu_context_save)); -	thread->cpu_context.sp = (unsigned long)childregs; + +	if (likely(regs)) { +		*childregs = *regs; +		childregs->ARM_r0 = 0; +		childregs->ARM_sp = stack_start; +	} else { +		memset(childregs, 0, sizeof(struct pt_regs)); +		thread->cpu_context.r4 = stk_sz; +		thread->cpu_context.r5 = stack_start; +		childregs->ARM_cpsr = SVC_MODE; +	}  	thread->cpu_context.pc = (unsigned long)ret_from_fork; +	thread->cpu_context.sp = (unsigned long)childregs;  	clear_ptrace_hw_breakpoint(p); @@ -423,63 +430,6 @@ int dump_fpu (struct pt_regs 
*regs, struct user_fp *fp)  }  EXPORT_SYMBOL(dump_fpu); -/* - * Shuffle the argument into the correct register before calling the - * thread function.  r4 is the thread argument, r5 is the pointer to - * the thread function, and r6 points to the exit function. - */ -extern void kernel_thread_helper(void); -asm(	".pushsection .text\n" -"	.align\n" -"	.type	kernel_thread_helper, #function\n" -"kernel_thread_helper:\n" -#ifdef CONFIG_TRACE_IRQFLAGS -"	bl	trace_hardirqs_on\n" -#endif -"	msr	cpsr_c, r7\n" -"	mov	r0, r4\n" -"	mov	lr, r6\n" -"	mov	pc, r5\n" -"	.size	kernel_thread_helper, . - kernel_thread_helper\n" -"	.popsection"); - -#ifdef CONFIG_ARM_UNWIND -extern void kernel_thread_exit(long code); -asm(	".pushsection .text\n" -"	.align\n" -"	.type	kernel_thread_exit, #function\n" -"kernel_thread_exit:\n" -"	.fnstart\n" -"	.cantunwind\n" -"	bl	do_exit\n" -"	nop\n" -"	.fnend\n" -"	.size	kernel_thread_exit, . - kernel_thread_exit\n" -"	.popsection"); -#else -#define kernel_thread_exit	do_exit -#endif - -/* - * Create a kernel thread. - */ -pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags) -{ -	struct pt_regs regs; - -	memset(&regs, 0, sizeof(regs)); - -	regs.ARM_r4 = (unsigned long)arg; -	regs.ARM_r5 = (unsigned long)fn; -	regs.ARM_r6 = (unsigned long)kernel_thread_exit; -	regs.ARM_r7 = SVC_MODE | PSR_ENDSTATE | PSR_ISETSTATE; -	regs.ARM_pc = (unsigned long)kernel_thread_helper; -	regs.ARM_cpsr = regs.ARM_r7 | PSR_I_BIT; - -	return do_fork(flags|CLONE_VM|CLONE_UNTRACED, 0, &regs, 0, NULL, NULL); -} -EXPORT_SYMBOL(kernel_thread); -  unsigned long get_wchan(struct task_struct *p)  {  	struct stackframe frame; diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c index 3e0fc5f7ed4..739db3a1b2d 100644 --- a/arch/arm/kernel/ptrace.c +++ b/arch/arm/kernel/ptrace.c @@ -30,6 +30,9 @@  #include <asm/pgtable.h>  #include <asm/traps.h> +#define CREATE_TRACE_POINTS +#include <trace/events/syscalls.h> +  #define REG_PC	15  #define REG_PSR	16  /* @@ -918,11 +921,11 @@ static int ptrace_syscall_trace(struct pt_regs *regs, int scno,  {  	unsigned long ip; +	current_thread_info()->syscall = scno; +  	if (!test_thread_flag(TIF_SYSCALL_TRACE))  		return scno; -	current_thread_info()->syscall = scno; -  	/*  	 * IP is used to denote syscall entry/exit:  	 * IP = 0 -> entry, =1 -> exit  	 */ @@ -941,15 +944,19 @@ static int ptrace_syscall_trace(struct pt_regs *regs, int scno,  asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno)  { -	int ret = ptrace_syscall_trace(regs, scno, PTRACE_SYSCALL_ENTER); +	scno = ptrace_syscall_trace(regs, scno, PTRACE_SYSCALL_ENTER); +	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT)) +		trace_sys_enter(regs, scno);  	audit_syscall_entry(AUDIT_ARCH_ARM, scno, regs->ARM_r0, regs->ARM_r1,  			    regs->ARM_r2, regs->ARM_r3); -	return ret; +	return scno;  }  asmlinkage int syscall_trace_exit(struct pt_regs *regs, int scno)  { -	int ret = ptrace_syscall_trace(regs, scno, PTRACE_SYSCALL_EXIT); +	scno = ptrace_syscall_trace(regs, scno, PTRACE_SYSCALL_EXIT); +	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT)) +		trace_sys_exit(regs, scno);  	audit_syscall_exit(regs);  -	return ret; +	return scno;  } diff --git a/arch/arm/kernel/sched_clock.c b/arch/arm/kernel/sched_clock.c index f4515393248..fc6692e2b60 100644 --- a/arch/arm/kernel/sched_clock.c +++ b/arch/arm/kernel/sched_clock.c @@ -9,6 +9,7 @@  #include <linux/init.h>  #include <linux/jiffies.h>  #include <linux/kernel.h> +#include <linux/moduleparam.h>  #include <linux/sched.h>  #include <linux/syscore_ops.h>  
#include <linux/timer.h> @@ -27,6 +28,9 @@ struct clock_data {  static void sched_clock_poll(unsigned long wrap_ticks);  static DEFINE_TIMER(sched_clock_timer, sched_clock_poll, 0, 0); +static int irqtime = -1; + +core_param(irqtime, irqtime, int, 0400);  static struct clock_data cd = {  	.mult	= NSEC_PER_SEC / HZ, @@ -103,13 +107,6 @@ static void sched_clock_poll(unsigned long wrap_ticks)  	update_sched_clock();  } -void __init setup_sched_clock_needs_suspend(u32 (*read)(void), int bits, -		unsigned long rate) -{ -	setup_sched_clock(read, bits, rate); -	cd.needs_suspend = true; -} -  void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate)  {  	unsigned long r, w; @@ -157,6 +154,10 @@ void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate)  	 */  	cd.epoch_ns = 0; +	/* Enable IRQ time accounting if we have a fast enough sched_clock */ +	if (irqtime > 0 || (irqtime == -1 && rate >= 1000000)) +		enable_sched_clock_irqtime(); +  	pr_debug("Registered %pF as sched_clock source\n", read);  } @@ -181,18 +182,15 @@ void __init sched_clock_postinit(void)  static int sched_clock_suspend(void)  {  	sched_clock_poll(sched_clock_timer.data); -	if (cd.needs_suspend) -		cd.suspended = true; +	cd.suspended = true;  	return 0;  }  static void sched_clock_resume(void)  { -	if (cd.needs_suspend) { -		cd.epoch_cyc = read_sched_clock(); -		cd.epoch_cyc_copy = cd.epoch_cyc; -		cd.suspended = false; -	} +	cd.epoch_cyc = read_sched_clock(); +	cd.epoch_cyc_copy = cd.epoch_cyc; +	cd.suspended = false;  }  static struct syscore_ops sched_clock_ops = { diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index a81dcecc734..da1d1aa20ad 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c @@ -21,11 +21,9 @@  #include <linux/init.h>  #include <linux/kexec.h>  #include <linux/of_fdt.h> -#include <linux/root_dev.h>  #include <linux/cpu.h>  #include <linux/interrupt.h>  #include <linux/smp.h> -#include <linux/fs.h>  #include <linux/proc_fs.h>  #include <linux/memblock.h>  #include <linux/bug.h> @@ -55,16 +53,11 @@  #include <asm/traps.h>  #include <asm/unwind.h>  #include <asm/memblock.h> +#include <asm/virt.h> -#if defined(CONFIG_DEPRECATED_PARAM_STRUCT) -#include "compat.h" -#endif  #include "atags.h"  #include "tcm.h" -#ifndef MEM_SIZE -#define MEM_SIZE	(16*1024*1024) -#endif  #if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)  char fpe_type[8]; @@ -145,7 +138,6 @@ static const char *machine_name;  static char __initdata cmd_line[COMMAND_LINE_SIZE];  struct machine_desc *machine_desc __initdata; -static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;  static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };  #define ENDIANNESS ((char)endian_test.l) @@ -583,21 +575,6 @@ static int __init early_mem(char *p)  }  early_param("mem", early_mem); -static void __init -setup_ramdisk(int doload, int prompt, int image_start, unsigned int rd_sz) -{ -#ifdef CONFIG_BLK_DEV_RAM -	extern int rd_size, rd_image_start, rd_prompt, rd_doload; - -	rd_image_start = image_start; -	rd_prompt = prompt; -	rd_doload = doload; - -	if (rd_sz) -		rd_size = rd_sz; -#endif -} -  static void __init request_standard_resources(struct machine_desc *mdesc)  {  	struct memblock_region *region; @@ -643,35 +620,6 @@ static void __init request_standard_resources(struct machine_desc *mdesc)  		request_resource(&ioport_resource, &lp2);  } -/* - *  Tag parsing. 
- * - * This is the new way of passing data to the kernel at boot time.  Rather - * than passing a fixed inflexible structure to the kernel, we pass a list - * of variable-sized tags to the kernel.  The first tag must be a ATAG_CORE - * tag for the list to be recognised (to distinguish the tagged list from - * a param_struct).  The list is terminated with a zero-length tag (this tag - * is not parsed in any way). - */ -static int __init parse_tag_core(const struct tag *tag) -{ -	if (tag->hdr.size > 2) { -		if ((tag->u.core.flags & 1) == 0) -			root_mountflags &= ~MS_RDONLY; -		ROOT_DEV = old_decode_dev(tag->u.core.rootdev); -	} -	return 0; -} - -__tagtable(ATAG_CORE, parse_tag_core); - -static int __init parse_tag_mem32(const struct tag *tag) -{ -	return arm_add_memory(tag->u.mem.start, tag->u.mem.size); -} - -__tagtable(ATAG_MEM, parse_tag_mem32); -  #if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)  struct screen_info screen_info = {   .orig_video_lines	= 30, @@ -681,117 +629,8 @@ struct screen_info screen_info = {   .orig_video_isVGA	= 1,   .orig_video_points	= 8  }; - -static int __init parse_tag_videotext(const struct tag *tag) -{ -	screen_info.orig_x            = tag->u.videotext.x; -	screen_info.orig_y            = tag->u.videotext.y; -	screen_info.orig_video_page   = tag->u.videotext.video_page; -	screen_info.orig_video_mode   = tag->u.videotext.video_mode; -	screen_info.orig_video_cols   = tag->u.videotext.video_cols; -	screen_info.orig_video_ega_bx = tag->u.videotext.video_ega_bx; -	screen_info.orig_video_lines  = tag->u.videotext.video_lines; -	screen_info.orig_video_isVGA  = tag->u.videotext.video_isvga; -	screen_info.orig_video_points = tag->u.videotext.video_points; -	return 0; -} - -__tagtable(ATAG_VIDEOTEXT, parse_tag_videotext);  #endif -static int __init parse_tag_ramdisk(const struct tag *tag) -{ -	setup_ramdisk((tag->u.ramdisk.flags & 1) == 0, -		      (tag->u.ramdisk.flags & 2) == 0, -		      tag->u.ramdisk.start, tag->u.ramdisk.size); -	return 0; -} - -__tagtable(ATAG_RAMDISK, parse_tag_ramdisk); - -static int __init parse_tag_serialnr(const struct tag *tag) -{ -	system_serial_low = tag->u.serialnr.low; -	system_serial_high = tag->u.serialnr.high; -	return 0; -} - -__tagtable(ATAG_SERIAL, parse_tag_serialnr); - -static int __init parse_tag_revision(const struct tag *tag) -{ -	system_rev = tag->u.revision.rev; -	return 0; -} - -__tagtable(ATAG_REVISION, parse_tag_revision); - -static int __init parse_tag_cmdline(const struct tag *tag) -{ -#if defined(CONFIG_CMDLINE_EXTEND) -	strlcat(default_command_line, " ", COMMAND_LINE_SIZE); -	strlcat(default_command_line, tag->u.cmdline.cmdline, -		COMMAND_LINE_SIZE); -#elif defined(CONFIG_CMDLINE_FORCE) -	pr_warning("Ignoring tag cmdline (using the default kernel command line)\n"); -#else -	strlcpy(default_command_line, tag->u.cmdline.cmdline, -		COMMAND_LINE_SIZE); -#endif -	return 0; -} - -__tagtable(ATAG_CMDLINE, parse_tag_cmdline); - -/* - * Scan the tag table for this tag, and call its parse function. - * The tag table is built by the linker from all the __tagtable - * declarations. - */ -static int __init parse_tag(const struct tag *tag) -{ -	extern struct tagtable __tagtable_begin, __tagtable_end; -	struct tagtable *t; - -	for (t = &__tagtable_begin; t < &__tagtable_end; t++) -		if (tag->hdr.tag == t->tag) { -			t->parse(tag); -			break; -		} - -	return t < &__tagtable_end; -} - -/* - * Parse all tags in the list, checking both the global and architecture - * specific tag tables. 
- */ -static void __init parse_tags(const struct tag *t) -{ -	for (; t->hdr.size; t = tag_next(t)) -		if (!parse_tag(t)) -			printk(KERN_WARNING -				"Ignoring unrecognised tag 0x%08x\n", -				t->hdr.tag); -} - -/* - * This holds our defaults. - */ -static struct init_tags { -	struct tag_header hdr1; -	struct tag_core   core; -	struct tag_header hdr2; -	struct tag_mem32  mem; -	struct tag_header hdr3; -} init_tags __initdata = { -	{ tag_size(tag_core), ATAG_CORE }, -	{ 1, PAGE_SIZE, 0xff }, -	{ tag_size(tag_mem32), ATAG_MEM }, -	{ MEM_SIZE }, -	{ 0, ATAG_NONE } -}; -  static int __init customize_machine(void)  {  	/* customizes platform devices, or adds new ones */ @@ -858,78 +697,6 @@ static void __init reserve_crashkernel(void)  static inline void reserve_crashkernel(void) {}  #endif /* CONFIG_KEXEC */ -static void __init squash_mem_tags(struct tag *tag) -{ -	for (; tag->hdr.size; tag = tag_next(tag)) -		if (tag->hdr.tag == ATAG_MEM) -			tag->hdr.tag = ATAG_NONE; -} - -static struct machine_desc * __init setup_machine_tags(unsigned int nr) -{ -	struct tag *tags = (struct tag *)&init_tags; -	struct machine_desc *mdesc = NULL, *p; -	char *from = default_command_line; - -	init_tags.mem.start = PHYS_OFFSET; - -	/* -	 * locate machine in the list of supported machines. -	 */ -	for_each_machine_desc(p) -		if (nr == p->nr) { -			printk("Machine: %s\n", p->name); -			mdesc = p; -			break; -		} - -	if (!mdesc) { -		early_print("\nError: unrecognized/unsupported machine ID" -			" (r1 = 0x%08x).\n\n", nr); -		dump_machine_table(); /* does not return */ -	} - -	if (__atags_pointer) -		tags = phys_to_virt(__atags_pointer); -	else if (mdesc->atag_offset) -		tags = (void *)(PAGE_OFFSET + mdesc->atag_offset); - -#if defined(CONFIG_DEPRECATED_PARAM_STRUCT) -	/* -	 * If we have the old style parameters, convert them to -	 * a tag list. -	 */ -	if (tags->hdr.tag != ATAG_CORE) -		convert_to_tag_list(tags); -#endif - -	if (tags->hdr.tag != ATAG_CORE) { -#if defined(CONFIG_OF) -		/* -		 * If CONFIG_OF is set, then assume this is a reasonably -		 * modern system that should pass boot parameters -		 */ -		early_print("Warning: Neither atags nor dtb found\n"); -#endif -		tags = (struct tag *)&init_tags; -	} - -	if (mdesc->fixup) -		mdesc->fixup(tags, &from, &meminfo); - -	if (tags->hdr.tag == ATAG_CORE) { -		if (meminfo.nr_banks != 0) -			squash_mem_tags(tags); -		save_atags(tags); -		parse_tags(tags); -	} - -	/* parse_early_param needs a boot_command_line */ -	strlcpy(boot_command_line, from, COMMAND_LINE_SIZE); - -	return mdesc; -} -  static int __init meminfo_cmp(const void *_a, const void *_b)  {  	const struct membank *a = _a, *b = _b; @@ -937,6 +704,21 @@ static int __init meminfo_cmp(const void *_a, const void *_b)  	return cmp < 0 ? -1 : cmp > 0 ? 
1 : 0;  } +void __init hyp_mode_check(void) +{ +#ifdef CONFIG_ARM_VIRT_EXT +	if (is_hyp_mode_available()) { +		pr_info("CPU: All CPU(s) started in HYP mode.\n"); +		pr_info("CPU: Virtualization extensions available.\n"); +	} else if (is_hyp_mode_mismatched()) { +		pr_warn("CPU: WARNING: CPU(s) started in wrong/inconsistent modes (primary CPU mode 0x%x)\n", +			__boot_cpu_mode & MODE_MASK); +		pr_warn("CPU: This may indicate a broken bootloader or firmware.\n"); +	} else +		pr_info("CPU: All CPU(s) started in SVC mode.\n"); +#endif +} +  void __init setup_arch(char **cmdline_p)  {  	struct machine_desc *mdesc; @@ -944,7 +726,7 @@ void __init setup_arch(char **cmdline_p)  	setup_processor();  	mdesc = setup_machine_fdt(__atags_pointer);  	if (!mdesc) -		mdesc = setup_machine_tags(machine_arch_type); +		mdesc = setup_machine_tags(__atags_pointer, machine_arch_type);  	machine_desc = mdesc;  	machine_name = mdesc->name; @@ -977,9 +759,15 @@ void __init setup_arch(char **cmdline_p)  	unflatten_device_tree();  #ifdef CONFIG_SMP -	if (is_smp()) +	if (is_smp()) { +		smp_set_ops(mdesc->smp);  		smp_init_cpus(); +	}  #endif + +	if (!is_smp()) +		hyp_mode_check(); +  	reserve_crashkernel();  	tcm_init(); diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c index f27789e4e38..56f72d257eb 100644 --- a/arch/arm/kernel/signal.c +++ b/arch/arm/kernel/signal.c @@ -10,7 +10,6 @@  #include <linux/errno.h>  #include <linux/signal.h>  #include <linux/personality.h> -#include <linux/freezer.h>  #include <linux/uaccess.h>  #include <linux/tracehook.h> diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index ebd8ad274d7..fbc8b2623d8 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c @@ -19,14 +19,15 @@  #include <linux/mm.h>  #include <linux/err.h>  #include <linux/cpu.h> -#include <linux/smp.h>  #include <linux/seq_file.h>  #include <linux/irq.h>  #include <linux/percpu.h>  #include <linux/clockchips.h>  #include <linux/completion.h> +#include <linux/cpufreq.h>  #include <linux/atomic.h> +#include <asm/smp.h>  #include <asm/cacheflush.h>  #include <asm/cpu.h>  #include <asm/cputype.h> @@ -42,6 +43,8 @@  #include <asm/ptrace.h>  #include <asm/localtimer.h>  #include <asm/smp_plat.h> +#include <asm/virt.h> +#include <asm/mach/arch.h>  /*   * as from 2.5, kernels no longer have an init_tasks structure @@ -50,8 +53,15 @@   */  struct secondary_data secondary_data; +/* + * control for which core is the next to come out of the secondary + * boot "holding pen" + */ +volatile int __cpuinitdata pen_release = -1; +  enum ipi_msg_type { -	IPI_TIMER = 2, +	IPI_WAKEUP, +	IPI_TIMER,  	IPI_RESCHEDULE,  	IPI_CALL_FUNC,  	IPI_CALL_FUNC_SINGLE, @@ -60,6 +70,14 @@ enum ipi_msg_type {  static DECLARE_COMPLETION(cpu_running); +static struct smp_operations smp_ops; + +void __init smp_set_ops(struct smp_operations *ops) +{ +	if (ops) +		smp_ops = *ops; +}; +  int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)  {  	int ret; @@ -100,13 +118,64 @@ int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)  	return ret;  } +/* platform specific SMP operations */ +void __init smp_init_cpus(void) +{ +	if (smp_ops.smp_init_cpus) +		smp_ops.smp_init_cpus(); +} + +static void __init platform_smp_prepare_cpus(unsigned int max_cpus) +{ +	if (smp_ops.smp_prepare_cpus) +		smp_ops.smp_prepare_cpus(max_cpus); +} + +static void __cpuinit platform_secondary_init(unsigned int cpu) +{ +	if (smp_ops.smp_secondary_init) +		smp_ops.smp_secondary_init(cpu); +} + +int __cpuinit boot_secondary(unsigned 
int cpu, struct task_struct *idle) +{ +	if (smp_ops.smp_boot_secondary) +		return smp_ops.smp_boot_secondary(cpu, idle); +	return -ENOSYS; +} +  #ifdef CONFIG_HOTPLUG_CPU  static void percpu_timer_stop(void); +static int platform_cpu_kill(unsigned int cpu) +{ +	if (smp_ops.cpu_kill) +		return smp_ops.cpu_kill(cpu); +	return 1; +} + +static void platform_cpu_die(unsigned int cpu) +{ +	if (smp_ops.cpu_die) +		smp_ops.cpu_die(cpu); +} + +static int platform_cpu_disable(unsigned int cpu) +{ +	if (smp_ops.cpu_disable) +		return smp_ops.cpu_disable(cpu); + +	/* +	 * By default, allow disabling all CPUs except the first one, +	 * since this is special on a lot of platforms, e.g. because +	 * of clock tick interrupts. +	 */ +	return cpu == 0 ? -EPERM : 0; +}  /*   * __cpu_disable runs on the processor to be shutdown.   */ -int __cpu_disable(void) +int __cpuinit __cpu_disable(void)  {  	unsigned int cpu = smp_processor_id();  	int ret; @@ -134,8 +203,11 @@ int __cpu_disable(void)  	/*  	 * Flush user cache and TLB mappings, and then remove this CPU  	 * from the vm mask set of all processes. +	 * +	 * Caches are flushed to the Level of Unification Inner Shareable +	 * to write-back dirty lines to unified caches shared by all CPUs.  	 */ -	flush_cache_all(); +	flush_cache_louis();  	local_flush_tlb_all();  	clear_tasks_mm_cpumask(cpu); @@ -149,7 +221,7 @@ static DECLARE_COMPLETION(cpu_died);   * called on the thread which is asking for a CPU to be shutdown -   * waits until shutdown has completed, or it is timed out.   */ -void __cpu_die(unsigned int cpu) +void __cpuinit __cpu_die(unsigned int cpu)  {  	if (!wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000))) {  		pr_err("CPU%u: cpu didn't die\n", cpu); @@ -222,18 +294,24 @@ static void percpu_timer_setup(void);  asmlinkage void __cpuinit secondary_start_kernel(void)  {  	struct mm_struct *mm = &init_mm; -	unsigned int cpu = smp_processor_id(); +	unsigned int cpu; + +	/* +	 * The identity mapping is uncached (strongly ordered), so +	 * switch away from it before attempting any exclusive accesses. +	 */ +	cpu_switch_mm(mm->pgd, mm); +	enter_lazy_tlb(mm, current); +	local_flush_tlb_all();  	/*  	 * All kernel threads share the same mm context; grab a  	 * reference and switch to it.  	 
*/ +	cpu = smp_processor_id();  	atomic_inc(&mm->mm_count);  	current->active_mm = mm;  	cpumask_set_cpu(cpu, mm_cpumask(mm)); -	cpu_switch_mm(mm->pgd, mm); -	enter_lazy_tlb(mm, current); -	local_flush_tlb_all();  	printk("CPU%u: Booted secondary processor\n", cpu); @@ -287,6 +365,8 @@ void __init smp_cpus_done(unsigned int max_cpus)  	       num_online_cpus(),  	       bogosum / (500000/HZ),  	       (bogosum / (5000/HZ)) % 100); + +	hyp_mode_check();  }  void __init smp_prepare_boot_cpu(void) @@ -347,7 +427,8 @@ void arch_send_call_function_single_ipi(int cpu)  }  static const char *ipi_types[NR_IPI] = { -#define S(x,s)	[x - IPI_TIMER] = s +#define S(x,s)	[x] = s +	S(IPI_WAKEUP, "CPU wakeup interrupts"),  	S(IPI_TIMER, "Timer broadcast interrupts"),  	S(IPI_RESCHEDULE, "Rescheduling interrupts"),  	S(IPI_CALL_FUNC, "Function call interrupts"), @@ -500,10 +581,13 @@ void handle_IPI(int ipinr, struct pt_regs *regs)  	unsigned int cpu = smp_processor_id();  	struct pt_regs *old_regs = set_irq_regs(regs); -	if (ipinr >= IPI_TIMER && ipinr < IPI_TIMER + NR_IPI) -		__inc_irq_stat(cpu, ipi_irqs[ipinr - IPI_TIMER]); +	if (ipinr < NR_IPI) +		__inc_irq_stat(cpu, ipi_irqs[ipinr]);  	switch (ipinr) { +	case IPI_WAKEUP: +		break; +  	case IPI_TIMER:  		irq_enter();  		ipi_timer(); @@ -584,3 +668,56 @@ int setup_profiling_timer(unsigned int multiplier)  {  	return -EINVAL;  } + +#ifdef CONFIG_CPU_FREQ + +static DEFINE_PER_CPU(unsigned long, l_p_j_ref); +static DEFINE_PER_CPU(unsigned long, l_p_j_ref_freq); +static unsigned long global_l_p_j_ref; +static unsigned long global_l_p_j_ref_freq; + +static int cpufreq_callback(struct notifier_block *nb, +					unsigned long val, void *data) +{ +	struct cpufreq_freqs *freq = data; +	int cpu = freq->cpu; + +	if (freq->flags & CPUFREQ_CONST_LOOPS) +		return NOTIFY_OK; + +	if (!per_cpu(l_p_j_ref, cpu)) { +		per_cpu(l_p_j_ref, cpu) = +			per_cpu(cpu_data, cpu).loops_per_jiffy; +		per_cpu(l_p_j_ref_freq, cpu) = freq->old; +		if (!global_l_p_j_ref) { +			global_l_p_j_ref = loops_per_jiffy; +			global_l_p_j_ref_freq = freq->old; +		} +	} + +	if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) || +	    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) || +	    (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) { +		loops_per_jiffy = cpufreq_scale(global_l_p_j_ref, +						global_l_p_j_ref_freq, +						freq->new); +		per_cpu(cpu_data, cpu).loops_per_jiffy = +			cpufreq_scale(per_cpu(l_p_j_ref, cpu), +					per_cpu(l_p_j_ref_freq, cpu), +					freq->new); +	} +	return NOTIFY_OK; +} + +static struct notifier_block cpufreq_notifier = { +	.notifier_call  = cpufreq_callback, +}; + +static int __init register_cpufreq_notifier(void) +{ +	return cpufreq_register_notifier(&cpufreq_notifier, +						CPUFREQ_TRANSITION_NOTIFIER); +} +core_initcall(register_cpufreq_notifier); + +#endif diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c index e1f906989bb..b22d700fea2 100644 --- a/arch/arm/kernel/smp_twd.c +++ b/arch/arm/kernel/smp_twd.c @@ -42,10 +42,10 @@ static void twd_set_mode(enum clock_event_mode mode,  	switch (mode) {  	case CLOCK_EVT_MODE_PERIODIC: -		/* timer load already set up */  		ctrl = TWD_TIMER_CONTROL_ENABLE | TWD_TIMER_CONTROL_IT_ENABLE  			| TWD_TIMER_CONTROL_PERIODIC; -		__raw_writel(twd_timer_rate / HZ, twd_base + TWD_TIMER_LOAD); +		__raw_writel(DIV_ROUND_CLOSEST(twd_timer_rate, HZ), +			twd_base + TWD_TIMER_LOAD);  		break;  	case CLOCK_EVT_MODE_ONESHOT:  		/* period set, and timer enabled in 'next_event' hook */ diff --git 
a/arch/arm/kernel/suspend.c b/arch/arm/kernel/suspend.c index 1794cc3b0f1..358bca3a995 100644 --- a/arch/arm/kernel/suspend.c +++ b/arch/arm/kernel/suspend.c @@ -17,6 +17,8 @@ extern void cpu_resume_mmu(void);   */  void __cpu_suspend_save(u32 *ptr, u32 ptrsz, u32 sp, u32 *save_ptr)  { +	u32 *ctx = ptr; +  	*save_ptr = virt_to_phys(ptr);  	/* This must correspond to the LDM in cpu_resume() assembly */ @@ -26,7 +28,20 @@ void __cpu_suspend_save(u32 *ptr, u32 ptrsz, u32 sp, u32 *save_ptr)  	cpu_do_suspend(ptr); -	flush_cache_all(); +	flush_cache_louis(); + +	/* +	 * flush_cache_louis does not guarantee that +	 * save_ptr and ptr are cleaned to main memory, +	 * just up to the Level of Unification Inner Shareable. +	 * Since the context pointer and context itself +	 * are to be retrieved with the MMU off that +	 * data must be cleaned from all cache levels +	 * to main memory using "area" cache primitives. +	*/ +	__cpuc_flush_dcache_area(ctx, ptrsz); +	__cpuc_flush_dcache_area(save_ptr, sizeof(*save_ptr)); +  	outer_clean_range(*save_ptr, *save_ptr + ptrsz);  	outer_clean_range(virt_to_phys(save_ptr),  			  virt_to_phys(save_ptr) + sizeof(*save_ptr)); diff --git a/arch/arm/kernel/sys_arm.c b/arch/arm/kernel/sys_arm.c index 76cbb055dd0..c2a898aa57a 100644 --- a/arch/arm/kernel/sys_arm.c +++ b/arch/arm/kernel/sys_arm.c @@ -59,69 +59,6 @@ asmlinkage int sys_vfork(struct pt_regs *regs)  	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->ARM_sp, regs, 0, NULL, NULL);  } -/* sys_execve() executes a new program. - * This is called indirectly via a small wrapper - */ -asmlinkage int sys_execve(const char __user *filenamei, -			  const char __user *const __user *argv, -			  const char __user *const __user *envp, struct pt_regs *regs) -{ -	int error; -	char * filename; - -	filename = getname(filenamei); -	error = PTR_ERR(filename); -	if (IS_ERR(filename)) -		goto out; -	error = do_execve(filename, argv, envp, regs); -	putname(filename); -out: -	return error; -} - -int kernel_execve(const char *filename, -		  const char *const argv[], -		  const char *const envp[]) -{ -	struct pt_regs regs; -	int ret; - -	memset(&regs, 0, sizeof(struct pt_regs)); -	ret = do_execve(filename, -			(const char __user *const __user *)argv, -			(const char __user *const __user *)envp, &regs); -	if (ret < 0) -		goto out; - -	/* -	 * Save argc to the register structure for userspace. -	 */ -	regs.ARM_r0 = ret; - -	/* -	 * We were successful.  We won't be returning to our caller, but -	 * instead to user space by manipulating the kernel stack. -	 */ -	asm(	"add	r0, %0, %1\n\t" -		"mov	r1, %2\n\t" -		"mov	r2, %3\n\t" -		"bl	memmove\n\t"	/* copy regs to top of stack */ -		"mov	r8, #0\n\t"	/* not a syscall */ -		"mov	r9, %0\n\t"	/* thread structure */ -		"mov	sp, r0\n\t"	/* reposition stack pointer */ -		"b	ret_to_user" -		: -		: "r" (current_thread_info()), -		  "Ir" (THREAD_START_SP - sizeof(regs)), -		  "r" (&regs), -		  "Ir" (sizeof(regs)) -		: "r0", "r1", "r2", "r3", "r8", "r9", "ip", "lr", "memory"); - - out: -	return ret; -} -EXPORT_SYMBOL(kernel_execve); -  /*   * Since loff_t is a 64 bit type we avoid a lot of ABI hassle   * with a different argument ordering. 
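The suspend.c hunk above hinges on a subtle cache-maintenance ordering: flush_cache_louis() only cleans to the Level of Unification Inner Shareable, yet cpu_resume() later reads both the context pointer and the context itself with the MMU and caches off, so that data must actually reach main memory. A condensed sketch of the ordering, reusing the calls shown in the hunk (the wrapper name is hypothetical and the header list is an assumption, not the file's exact includes):

#include <linux/types.h>	/* u32 */
#include <asm/cacheflush.h>	/* flush_cache_louis(), __cpuc_flush_dcache_area() */
#include <asm/memory.h>		/* virt_to_phys() */
#include <asm/outercache.h>	/* outer_clean_range() */
#include <asm/proc-fns.h>	/* cpu_do_suspend() (assumed header) */

/* Hypothetical condensation of __cpu_suspend_save(): ctx holds the CPU
 * context, save_ptr is the slot cpu_resume() reads with the MMU off. */
static void suspend_save_sketch(u32 *ctx, u32 ctxsz, u32 *save_ptr)
{
	*save_ptr = virt_to_phys(ctx);		/* physical pointer for the resume path */

	cpu_do_suspend(ctx);			/* dump CPU state into ctx */

	flush_cache_louis();			/* clean to LoUIS: enough for other CPUs,
						 * not guaranteed to reach DRAM */

	/* Resume runs with caches off, so push the context and its pointer
	 * all the way to main memory with "area" primitives... */
	__cpuc_flush_dcache_area(ctx, ctxsz);
	__cpuc_flush_dcache_area(save_ptr, sizeof(*save_ptr));

	/* ...and clean the outer (e.g. PL310) cache by physical range too. */
	outer_clean_range(*save_ptr, *save_ptr + ctxsz);
	outer_clean_range(virt_to_phys(save_ptr),
			  virt_to_phys(save_ptr) + sizeof(*save_ptr));
}

The ordering matters: cleaning the inner levels first means the later outer_clean_range() operates on data that has already left the CPU-local caches.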
diff --git a/arch/arm/kernel/time.c b/arch/arm/kernel/time.c index af2afb01967..09be0c3c906 100644 --- a/arch/arm/kernel/time.c +++ b/arch/arm/kernel/time.c @@ -25,7 +25,6 @@  #include <linux/timer.h>  #include <linux/irq.h> -#include <asm/leds.h>  #include <asm/thread_info.h>  #include <asm/sched_clock.h>  #include <asm/stacktrace.h> @@ -80,21 +79,6 @@ u32 arch_gettimeoffset(void)  }  #endif /* CONFIG_ARCH_USES_GETTIMEOFFSET */ -#ifdef CONFIG_LEDS_TIMER -static inline void do_leds(void) -{ -	static unsigned int count = HZ/2; - -	if (--count == 0) { -		count = HZ/2; -		leds_event(led_timer); -	} -} -#else -#define	do_leds() -#endif - -  #ifndef CONFIG_GENERIC_CLOCKEVENTS  /*   * Kernel system timer support. @@ -102,7 +86,6 @@ static inline void do_leds(void)  void timer_tick(void)  {  	profile_tick(CPU_PROFILING); -	do_leds();  	xtime_update(1);  #ifndef CONFIG_SMP  	update_process_times(user_mode(get_irq_regs()));
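A pattern worth calling out from the smp.c hunks earlier in this series: per-platform SMP hooks stop being link-time functions and become a struct smp_operations that the machine descriptor supplies and smp_set_ops() copies by value at boot, with each wrapper tolerating an unset callback (platform_cpu_disable() even falls back to a sensible default). A minimal user-space sketch of that delegation pattern, with hypothetical names throughout (this is not the kernel's API, just the shape of it):

#include <errno.h>
#include <stdio.h>

struct smp_ops_sketch {
	void (*init_cpus)(void);
	int  (*cpu_disable)(unsigned int cpu);	/* optional; NULL means "use the default" */
};

static struct smp_ops_sketch active_ops;	/* single static copy, filled in once at startup */

static void set_ops_sketch(const struct smp_ops_sketch *ops)
{
	if (ops)
		active_ops = *ops;		/* copied by value, as smp_set_ops() does */
}

static int cpu_disable_sketch(unsigned int cpu)
{
	if (active_ops.cpu_disable)
		return active_ops.cpu_disable(cpu);
	/* Default mirrors platform_cpu_disable(): CPU0 is special on many
	 * boards (clock tick interrupts), so refuse to take it offline. */
	return cpu == 0 ? -EPERM : 0;
}

static void board_init_cpus(void)
{
	puts("board: enumerating CPUs");
}

int main(void)
{
	/* A board provides only the hooks it needs; cpu_disable stays NULL. */
	struct smp_ops_sketch board = { .init_cpus = board_init_cpus };

	set_ops_sketch(&board);
	if (active_ops.init_cpus)
		active_ops.init_cpus();
	printf("disable cpu0 -> %d, cpu1 -> %d\n",
	       cpu_disable_sketch(0), cpu_disable_sketch(1));
	return 0;
}

Copying the ops struct by value, rather than keeping a pointer, lets the source structure live in init-only memory that can be discarded after boot; the NULL checks in every wrapper are what make each callback genuinely optional.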
