Diffstat (limited to 'arch/arm64/kernel/process.c')
-rw-r--r--  arch/arm64/kernel/process.c | 158
1 file changed, 89 insertions(+), 69 deletions(-)
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index cb0956bc96e..43b7c34f92c 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -20,6 +20,7 @@
#include <stdarg.h>
+#include <linux/compat.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/kernel.h>
@@ -45,9 +46,10 @@
#include <asm/compat.h>
#include <asm/cacheflush.h>
+#include <asm/fpsimd.h>
+#include <asm/mmu_context.h>
#include <asm/processor.h>
#include <asm/stacktrace.h>
-#include <asm/fpsimd.h>
static void setup_restart(void)
{
@@ -70,8 +72,17 @@ static void setup_restart(void)
void soft_restart(unsigned long addr)
{
+ typedef void (*phys_reset_t)(unsigned long);
+ phys_reset_t phys_reset;
+
setup_restart();
- cpu_reset(addr);
+
+ /* Switch to the identity mapping */
+ phys_reset = (phys_reset_t)virt_to_phys(cpu_reset);
+ phys_reset(addr);
+
+ /* Should never get here */
+ BUG();
}
/*
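Editorial note on soft_restart(): the function now reaches cpu_reset() through its physical address because cpu_reset() disables the MMU, after which the kernel's virtual mapping of that code can no longer be executed; setup_restart() is expected to have installed an identity (virtual == physical) mapping covering it first. A minimal sketch of the pattern, assuming such a mapping is already in place; identity_mapped_op() and call_via_identity_map() are illustrative names, not kernel APIs:

#include <linux/types.h>
#include <asm/memory.h>         /* virt_to_phys() */

typedef void (*phys_op_t)(unsigned long);

/* Hypothetical routine that must run from the identity mapping. */
extern void identity_mapped_op(unsigned long arg);

static void call_via_identity_map(unsigned long arg)
{
        /*
         * Branch to the routine's physical alias rather than its
         * kernel virtual address; once the MMU goes down, only the
         * identity-mapped address keeps working.
         */
        phys_op_t op = (phys_op_t)virt_to_phys(identity_mapped_op);

        op(arg);                /* does not return */
}

The BUG() after the call documents the same assumption from the other side: control must never come back.
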
@@ -80,14 +91,13 @@ void soft_restart(unsigned long addr)
void (*pm_power_off)(void);
EXPORT_SYMBOL_GPL(pm_power_off);
-void (*pm_restart)(const char *cmd);
-EXPORT_SYMBOL_GPL(pm_restart);
-
+void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
+EXPORT_SYMBOL_GPL(arm_pm_restart);
/*
* This is our default idle handler.
*/
-static void default_idle(void)
+void arch_cpu_idle(void)
{
/*
* This should do all the clock switching and wait for interrupt
@@ -97,79 +107,73 @@ static void default_idle(void)
local_irq_enable();
}
-void (*pm_idle)(void) = default_idle;
-EXPORT_SYMBOL_GPL(pm_idle);
-
-/*
- * The idle thread, has rather strange semantics for calling pm_idle,
- * but this is what x86 does and we need to do the same, so that
- * things like cpuidle get called in the same way. The only difference
- * is that we always respect 'hlt_counter' to prevent low power idle.
- */
-void cpu_idle(void)
+#ifdef CONFIG_HOTPLUG_CPU
+void arch_cpu_idle_dead(void)
{
- local_fiq_enable();
-
- /* endless idle loop with no priority at all */
- while (1) {
- tick_nohz_idle_enter();
- rcu_idle_enter();
- while (!need_resched()) {
- /*
- * We need to disable interrupts here to ensure
- * we don't miss a wakeup call.
- */
- local_irq_disable();
- if (!need_resched()) {
- stop_critical_timings();
- pm_idle();
- start_critical_timings();
- /*
- * pm_idle functions should always return
- * with IRQs enabled.
- */
- WARN_ON(irqs_disabled());
- } else {
- local_irq_enable();
- }
- }
- rcu_idle_exit();
- tick_nohz_idle_exit();
- schedule_preempt_disabled();
- }
+ cpu_die();
}
+#endif
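
The idle rework replaces the arch-private cpu_idle() loop (the deleted lines above) with the generic scheduler idle loop's arch_cpu_idle() hook, which is why the new hook ends in local_irq_enable(): the generic loop calls it with interrupts disabled and requires them re-enabled on return. A paraphrased sketch of the caller's side of that contract, loosely following the era's loop in kernel/cpu/idle.c rather than quoting it:

#include <linux/cpu.h>          /* arch_cpu_idle() */
#include <linux/sched.h>        /* need_resched(), schedule_preempt_disabled() */
#include <linux/irqflags.h>     /* local_irq_disable()/local_irq_enable() */

static void generic_idle_loop_sketch(void)
{
        while (1) {
                while (!need_resched()) {
                        local_irq_disable();
                        if (need_resched()) {
                                local_irq_enable();
                                continue;
                        }
                        arch_cpu_idle();  /* must return with IRQs on */
                }
                schedule_preempt_disabled();
        }
}
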
+/*
+ * Called by kexec, immediately prior to machine_kexec().
+ *
+ * This must completely disable all secondary CPUs; simply causing those CPUs
+ * to execute e.g. a RAM-based pin loop is not sufficient. This allows the
+ * kexec'd kernel to use any and all RAM as it sees fit, without having to
+ * avoid any code or data used by any SW CPU pin loop. The CPU hotplug
+ * functionality embodied in disable_nonboot_cpus() is used to achieve this.
+ */
void machine_shutdown(void)
{
-#ifdef CONFIG_SMP
- smp_send_stop();
-#endif
+ disable_nonboot_cpus();
}
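
Concretely, the comment's requirement is met by call order: the kexec path runs machine_shutdown() before machine_kexec(), so every secondary CPU is genuinely offline, not parked in a RAM-based pin loop, before the incoming kernel starts reusing memory. A paraphrased ordering sketch (see kernel_kexec() in kernel/kexec.c for the real sequence):

#include <linux/kexec.h>        /* struct kimage, machine_kexec() */
#include <linux/reboot.h>       /* machine_shutdown() */

static void kexec_order_sketch(struct kimage *image)
{
        machine_shutdown();     /* hot-unplugs every secondary CPU */
        machine_kexec(image);   /* may now treat all RAM as free */
}
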
+/*
+ * Halting simply requires that the secondary CPUs stop performing any
+ * activity (executing tasks, handling interrupts). smp_send_stop()
+ * achieves this.
+ */
void machine_halt(void)
{
- machine_shutdown();
+ local_irq_disable();
+ smp_send_stop();
while (1);
}
+/*
+ * Power-off simply requires that the secondary CPUs stop performing any
+ * activity (executing tasks, handling interrupts). smp_send_stop()
+ * achieves this. When the system power is turned off, it will take all CPUs
+ * with it.
+ */
void machine_power_off(void)
{
- machine_shutdown();
+ local_irq_disable();
+ smp_send_stop();
if (pm_power_off)
pm_power_off();
}
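
pm_power_off is the hook a platform or PMIC driver fills in once it knows how to cut power; machine_power_off() quiesces the secondaries and then calls it if set. A sketch of a typical registration, where the driver, its name, and the register write are hypothetical:

#include <linux/pm.h>           /* extern void (*pm_power_off)(void); */

/* Hypothetical PMIC callback; must not return once power is cut. */
static void my_pmic_power_off(void)
{
        /* write the PMIC's system-off register here */
}

static int my_pmic_probe(void)
{
        if (!pm_power_off)
                pm_power_off = my_pmic_power_off;
        return 0;
}
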
+/*
+ * Restart requires that the secondary CPUs stop performing any activity
+ * while the primary CPU resets the system. Systems with a single CPU can
+ * use soft_restart() as their machine descriptor's .restart hook, since that
+ * will cause the only available CPU to reset. Systems with multiple CPUs must
+ * provide a HW restart implementation, to ensure that all CPUs reset at once.
+ * This is required so that any code running after reset on the primary CPU
+ * doesn't have to co-ordinate with other CPUs to ensure they aren't still
+ * executing pre-reset code and using RAM that the primary CPU's code wishes
+ * to use. Implementing such co-ordination would be essentially impossible.
+ */
void machine_restart(char *cmd)
{
- machine_shutdown();
-
/* Disable interrupts first */
local_irq_disable();
- local_fiq_disable();
+ smp_send_stop();
/* Now call the architecture specific reboot code. */
- if (pm_restart)
- pm_restart(cmd);
+ if (arm_pm_restart)
+ arm_pm_restart(reboot_mode, cmd);
/*
* Whoops - the architecture was unable to reboot.
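
The renamed arm_pm_restart hook follows the 32-bit ARM convention: it stays NULL until firmware or platform code that can reset the entire SoC assigns it, which is what satisfies the all-CPUs-at-once requirement spelled out before machine_restart(). A hedged registration sketch: my_soc_restart() and the reset-controller poke are hypothetical, and the extern mirrors the declaration above rather than naming its real header:

#include <linux/init.h>
#include <linux/reboot.h>       /* enum reboot_mode */

extern void (*arm_pm_restart)(enum reboot_mode reboot_mode,
                              const char *cmd);

/* Hypothetical: hit a SoC-level reset register so all CPUs reset. */
static void my_soc_restart(enum reboot_mode reboot_mode, const char *cmd)
{
        /* poke the watchdog/reset controller here */
}

static int __init my_soc_reset_init(void)
{
        arm_pm_restart = my_soc_restart;
        return 0;
}
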
@@ -180,19 +184,26 @@ void machine_restart(char *cmd)
void __show_regs(struct pt_regs *regs)
{
- int i;
+ int i, top_reg;
+ u64 lr, sp;
- printk("CPU: %d %s (%s %.*s)\n",
- raw_smp_processor_id(), print_tainted(),
- init_utsname()->release,
- (int)strcspn(init_utsname()->version, " "),
- init_utsname()->version);
+ if (compat_user_mode(regs)) {
+ lr = regs->compat_lr;
+ sp = regs->compat_sp;
+ top_reg = 12;
+ } else {
+ lr = regs->regs[30];
+ sp = regs->sp;
+ top_reg = 29;
+ }
+
+ show_regs_print_info(KERN_DEFAULT);
print_symbol("PC is at %s\n", instruction_pointer(regs));
- print_symbol("LR is at %s\n", regs->regs[30]);
+ print_symbol("LR is at %s\n", lr);
printk("pc : [<%016llx>] lr : [<%016llx>] pstate: %08llx\n",
- regs->pc, regs->regs[30], regs->pstate);
- printk("sp : %016llx\n", regs->sp);
- for (i = 29; i >= 0; i--) {
+ regs->pc, lr, regs->pstate);
+ printk("sp : %016llx\n", sp);
+ for (i = top_reg; i >= 0; i--) {
printk("x%-2d: %016llx ", i, regs->regs[i]);
if (i % 2 == 0)
printk("\n");
@@ -203,7 +214,6 @@ void __show_regs(struct pt_regs *regs)
void show_regs(struct pt_regs * regs)
{
printk("\n");
- printk("Pid: %d, comm: %20s\n", task_pid_nr(current), current->comm);
__show_regs(regs);
}
@@ -226,7 +236,7 @@ void release_thread(struct task_struct *dead_task)
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
- fpsimd_save_state(&current->thread.fpsimd_state);
+ fpsimd_preserve_current_state();
*dst = *src;
return 0;
}
@@ -315,6 +325,13 @@ struct task_struct *__switch_to(struct task_struct *prev,
fpsimd_thread_switch(next);
tls_thread_switch(next);
hw_breakpoint_thread_switch(next);
+ contextidr_thread_switch(next);
+
+ /*
+ * Complete any pending TLB or cache maintenance on this CPU in case
+ * the thread migrates to a different CPU.
+ */
+ dsb(ish);
/* the actual thread switch */
last = cpu_switch_to(prev, next);
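
The dsb(ish) added above closes a migration window: the architecture only guarantees that a DSB completes cache and TLB maintenance issued by the CPU that executes it, so maintenance a thread starts on one CPU cannot later be finished by a DSB that same thread runs on another CPU. A sketch of the guarantee being bought here, with the hazard spelled out in comments; the function name is illustrative:

#include <asm/barrier.h>        /* dsb() */

/*
 * Hazard without the barrier in __switch_to():
 *
 *   CPU0: thread T issues TLB/cache maintenance (completes async)
 *   CPU0: T is preempted and migrated
 *   CPU1: T executes dsb(ish) itself; that cannot complete the
 *         operations issued on CPU0, so T may observe its own
 *         maintenance as unfinished.
 *
 * Executing the barrier on the outgoing CPU closes the window.
 */
static void complete_local_maintenance_sketch(void)
{
        dsb(ish);       /* all maintenance issued by this CPU is done */
}
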
@@ -325,6 +342,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
unsigned long get_wchan(struct task_struct *p)
{
struct stackframe frame;
+ unsigned long stack_page;
int count = 0;
if (!p || p == current || p->state == TASK_RUNNING)
return 0;
@@ -332,9 +350,11 @@ unsigned long get_wchan(struct task_struct *p)
frame.fp = thread_saved_fp(p);
frame.sp = thread_saved_sp(p);
frame.pc = thread_saved_pc(p);
+ stack_page = (unsigned long)task_stack_page(p);
do {
- int ret = unwind_frame(&frame);
- if (ret < 0)
+ if (frame.sp < stack_page ||
+ frame.sp >= stack_page + THREAD_SIZE ||
+ unwind_frame(&frame))
return 0;
if (!in_sched_functions(frame.pc))
return frame.pc;
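
Finally, the new bounds check makes get_wchan()'s frame-pointer walk defensive: a saved frame is followed only while frame.sp still points into the target task's stack, so a corrupted or concurrently-changing stack ends the walk instead of being dereferenced. The guard in isolation; frame_sp_on_task_stack() is an illustrative name, not a kernel helper:

#include <linux/sched.h>        /* struct task_struct, task_stack_page() */
#include <linux/thread_info.h>  /* THREAD_SIZE */

static bool frame_sp_on_task_stack(struct task_struct *p, unsigned long sp)
{
        unsigned long stack_page = (unsigned long)task_stack_page(p);

        /* reject frames whose saved sp has left p's stack */
        return sp >= stack_page && sp < stack_page + THREAD_SIZE;
}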