Diffstat (limited to 'drivers/acpi/processor_idle.c')
| -rw-r--r-- | drivers/acpi/processor_idle.c | 1500 |
1 file changed, 809 insertions(+), 691 deletions(-)
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index 161db4acfb9..3dca36d4ad2 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c @@ -3,7 +3,7 @@ * * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com> * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> - * Copyright (C) 2004 Dominik Brodowski <linux@brodo.de> + * Copyright (C) 2004, 2005 Dominik Brodowski <linux@brodo.de> * Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> * - Added processor hotplug support * Copyright (C) 2005 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com> @@ -28,51 +28,51 @@ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ -#include <linux/kernel.h> #include <linux/module.h> -#include <linux/init.h> -#include <linux/cpufreq.h> -#include <linux/proc_fs.h> -#include <linux/seq_file.h> #include <linux/acpi.h> #include <linux/dmi.h> -#include <linux/moduleparam.h> -#include <linux/sched.h> /* need_resched() */ +#include <linux/sched.h> /* need_resched() */ +#include <linux/clockchips.h> +#include <linux/cpuidle.h> +#include <linux/syscore_ops.h> +#include <acpi/processor.h> -#include <asm/io.h> -#include <asm/uaccess.h> +/* + * Include the apic definitions for x86 to have the APIC timer related defines + * available also for UP (on SMP it gets magically included via linux/smp.h). + * asm/acpi.h is not an option, as it would require more include magic. Also + * creating an empty asm-ia64/apic.h would just trade pest vs. cholera. + */ +#ifdef CONFIG_X86 +#include <asm/apic.h> +#endif -#include <acpi/acpi_bus.h> -#include <acpi/processor.h> +#define PREFIX "ACPI: " -#define ACPI_PROCESSOR_COMPONENT 0x01000000 #define ACPI_PROCESSOR_CLASS "processor" -#define ACPI_PROCESSOR_DRIVER_NAME "ACPI Processor Driver" #define _COMPONENT ACPI_PROCESSOR_COMPONENT -ACPI_MODULE_NAME("acpi_processor") -#define ACPI_PROCESSOR_FILE_POWER "power" -#define US_TO_PM_TIMER_TICKS(t) ((t * (PM_TIMER_FREQUENCY/1000)) / 1000) -#define C2_OVERHEAD 4 /* 1us (3.579 ticks per us) */ -#define C3_OVERHEAD 4 /* 1us (3.579 ticks per us) */ -static void (*pm_idle_save) (void); -module_param(max_cstate, uint, 0644); - -static unsigned int nocst = 0; +ACPI_MODULE_NAME("processor_idle"); + +static unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER; +module_param(max_cstate, uint, 0000); +static unsigned int nocst __read_mostly; module_param(nocst, uint, 0000); +static int bm_check_disable __read_mostly; +module_param(bm_check_disable, uint, 0000); -/* - * bm_history -- bit-mask with a bit per jiffy of bus-master activity - * 1000 HZ: 0xFFFFFFFF: 32 jiffies = 32ms - * 800 HZ: 0xFFFFFFFF: 32 jiffies = 40ms - * 100 HZ: 0x0000000F: 4 jiffies = 40ms - * reduce history for more aggressive entry into C3 - */ -static unsigned int bm_history = - (HZ >= 800 ? 
0xFFFFFFFF : ((1U << (HZ / 25)) - 1)); -module_param(bm_history, uint, 0644); -/* -------------------------------------------------------------------------- - Power Management - -------------------------------------------------------------------------- */ +static unsigned int latency_factor __read_mostly = 2; +module_param(latency_factor, uint, 0644); + +static DEFINE_PER_CPU(struct cpuidle_device *, acpi_cpuidle_device); + +static DEFINE_PER_CPU(struct acpi_processor_cx * [CPUIDLE_STATE_MAX], + acpi_cstate); + +static int disabled_by_idle_boot_param(void) +{ + return boot_option_idle_override == IDLE_POLL || + boot_option_idle_override == IDLE_HALT; +} /* * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3. @@ -80,7 +80,7 @@ module_param(bm_history, uint, 0644); * * To skip this limit, boot/load with a large max_cstate limit. */ -static int set_max_cstate(struct dmi_system_id *id) +static int set_max_cstate(const struct dmi_system_id *id) { if (max_cstate > ACPI_PROCESSOR_MAX_POWER) return 0; @@ -94,509 +94,270 @@ static int set_max_cstate(struct dmi_system_id *id) return 0; } -static struct dmi_system_id __initdata processor_power_dmi_table[] = { - {set_max_cstate, "IBM ThinkPad R40e", { - DMI_MATCH(DMI_BIOS_VENDOR, - "IBM"), - DMI_MATCH(DMI_BIOS_VERSION, - "1SET60WW")}, - (void *)1}, - {set_max_cstate, "Medion 41700", { - DMI_MATCH(DMI_BIOS_VENDOR, - "Phoenix Technologies LTD"), - DMI_MATCH(DMI_BIOS_VERSION, - "R01-A1J")}, (void *)1}, - {set_max_cstate, "Clevo 5600D", { - DMI_MATCH(DMI_BIOS_VENDOR, - "Phoenix Technologies LTD"), - DMI_MATCH(DMI_BIOS_VERSION, - "SHE845M0.86C.0013.D.0302131307")}, +static struct dmi_system_id processor_power_dmi_table[] = { + { set_max_cstate, "Clevo 5600D", { + DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"), + DMI_MATCH(DMI_BIOS_VERSION,"SHE845M0.86C.0013.D.0302131307")}, (void *)2}, + { set_max_cstate, "Pavilion zv5000", { + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), + DMI_MATCH(DMI_PRODUCT_NAME,"Pavilion zv5000 (DS502A#ABA)")}, + (void *)1}, + { set_max_cstate, "Asus L8400B", { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."), + DMI_MATCH(DMI_PRODUCT_NAME,"L8400B series Notebook PC")}, + (void *)1}, {}, }; -static inline u32 ticks_elapsed(u32 t1, u32 t2) -{ - if (t2 >= t1) - return (t2 - t1); - else if (!acpi_fadt.tmr_val_ext) - return (((0x00FFFFFF - t1) + t2) & 0x00FFFFFF); - else - return ((0xFFFFFFFF - t1) + t2); -} -static void -acpi_processor_power_activate(struct acpi_processor *pr, - struct acpi_processor_cx *new) +/* + * Callers should disable interrupts before the call and enable + * interrupts after return. + */ +static void acpi_safe_halt(void) { - struct acpi_processor_cx *old; - - if (!pr || !new) - return; - - old = pr->power.state; - - if (old) - old->promotion.count = 0; - new->demotion.count = 0; - - /* Cleanup from old state. */ - if (old) { - switch (old->type) { - case ACPI_STATE_C3: - /* Disable bus master reload */ - if (new->type != ACPI_STATE_C3 && pr->flags.bm_check) - acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0, - ACPI_MTX_DO_NOT_LOCK); - break; - } - } - - /* Prepare to use new state. 
*/ - switch (new->type) { - case ACPI_STATE_C3: - /* Enable bus master reload */ - if (old->type != ACPI_STATE_C3 && pr->flags.bm_check) - acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1, - ACPI_MTX_DO_NOT_LOCK); - break; + if (!tif_need_resched()) { + safe_halt(); + local_irq_disable(); } - - pr->power.state = new; - - return; } -static atomic_t c3_cpu_count; +#ifdef ARCH_APICTIMER_STOPS_ON_C3 -static void acpi_processor_idle(void) +/* + * Some BIOS implementations switch to C3 in the published C2 state. + * This seems to be a common problem on AMD boxen, but other vendors + * are affected too. We pick the most conservative approach: we assume + * that the local APIC stops in both C2 and C3. + */ +static void lapic_timer_check_state(int state, struct acpi_processor *pr, + struct acpi_processor_cx *cx) { - struct acpi_processor *pr = NULL; - struct acpi_processor_cx *cx = NULL; - struct acpi_processor_cx *next_state = NULL; - int sleep_ticks = 0; - u32 t1, t2 = 0; - - pr = processors[raw_smp_processor_id()]; - if (!pr) - return; - - /* - * Interrupts must be disabled during bus mastering calculations and - * for C2/C3 transitions. - */ - local_irq_disable(); + struct acpi_processor_power *pwr = &pr->power; + u8 type = local_apic_timer_c2_ok ? ACPI_STATE_C3 : ACPI_STATE_C2; - /* - * Check whether we truly need to go idle, or should - * reschedule: - */ - if (unlikely(need_resched())) { - local_irq_enable(); + if (cpu_has(&cpu_data(pr->id), X86_FEATURE_ARAT)) return; - } - cx = pr->power.state; - if (!cx) - goto easy_out; + if (amd_e400_c1e_detected) + type = ACPI_STATE_C1; /* - * Check BM Activity - * ----------------- - * Check for bus mastering activity (if required), record, and check - * for demotion. + * Check, if one of the previous states already marked the lapic + * unstable */ - if (pr->flags.bm_check) { - u32 bm_status = 0; - unsigned long diff = jiffies - pr->power.bm_check_timestamp; - - if (diff > 32) - diff = 32; - - while (diff) { - /* if we didn't get called, assume there was busmaster activity */ - diff--; - if (diff) - pr->power.bm_activity |= 0x1; - pr->power.bm_activity <<= 1; - } - - acpi_get_register(ACPI_BITREG_BUS_MASTER_STATUS, - &bm_status, ACPI_MTX_DO_NOT_LOCK); - if (bm_status) { - pr->power.bm_activity++; - acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS, - 1, ACPI_MTX_DO_NOT_LOCK); - } - /* - * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect - * the true state of bus mastering activity; forcing us to - * manually check the BMIDEA bit of each IDE channel. - */ - else if (errata.piix4.bmisx) { - if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01) - || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01)) - pr->power.bm_activity++; - } - - pr->power.bm_check_timestamp = jiffies; - - /* - * Apply bus mastering demotion policy. Automatically demote - * to avoid a faulty transition. Note that the processor - * won't enter a low-power state during this call (to this - * funciton) but should upon the next. - * - * TBD: A better policy might be to fallback to the demotion - * state (use it for this quantum only) istead of - * demoting -- and rely on duration as our sole demotion - * qualification. This may, however, introduce DMA - * issues (e.g. floppy DMA transfer overrun/underrun). 
- */ - if (pr->power.bm_activity & cx->demotion.threshold.bm) { - local_irq_enable(); - next_state = cx->demotion.state; - goto end; - } - } - - cx->usage++; + if (pwr->timer_broadcast_on_state < state) + return; - /* - * Sleep: - * ------ - * Invoke the current Cx state to put the processor to sleep. - */ - switch (cx->type) { + if (cx->type >= type) + pr->power.timer_broadcast_on_state = state; +} - case ACPI_STATE_C1: - /* - * Invoke C1. - * Use the appropriate idle routine, the one that would - * be used without acpi C-states. - */ - if (pm_idle_save) - pm_idle_save(); - else - safe_halt(); - /* - * TBD: Can't get time duration while in C1, as resumes - * go to an ISR rather than here. Need to instrument - * base interrupt handler. - */ - sleep_ticks = 0xFFFFFFFF; - break; +static void __lapic_timer_propagate_broadcast(void *arg) +{ + struct acpi_processor *pr = (struct acpi_processor *) arg; + unsigned long reason; - case ACPI_STATE_C2: - /* Get start time (ticks) */ - t1 = inl(acpi_fadt.xpm_tmr_blk.address); - /* Invoke C2 */ - inb(cx->address); - /* Dummy op - must do something useless after P_LVL2 read */ - t2 = inl(acpi_fadt.xpm_tmr_blk.address); - /* Get end time (ticks) */ - t2 = inl(acpi_fadt.xpm_tmr_blk.address); - /* Re-enable interrupts */ - local_irq_enable(); - /* Compute time (ticks) that we were actually asleep */ - sleep_ticks = - ticks_elapsed(t1, t2) - cx->latency_ticks - C2_OVERHEAD; - break; - - case ACPI_STATE_C3: - - if (pr->flags.bm_check) { - if (atomic_inc_return(&c3_cpu_count) == - num_online_cpus()) { - /* - * All CPUs are trying to go to C3 - * Disable bus master arbitration - */ - acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1, - ACPI_MTX_DO_NOT_LOCK); - } - } else { - /* SMP with no shared cache... Invalidate cache */ - ACPI_FLUSH_CPU_CACHE(); - } + reason = pr->power.timer_broadcast_on_state < INT_MAX ? + CLOCK_EVT_NOTIFY_BROADCAST_ON : CLOCK_EVT_NOTIFY_BROADCAST_OFF; - /* Get start time (ticks) */ - t1 = inl(acpi_fadt.xpm_tmr_blk.address); - /* Invoke C3 */ - inb(cx->address); - /* Dummy op - must do something useless after P_LVL3 read */ - t2 = inl(acpi_fadt.xpm_tmr_blk.address); - /* Get end time (ticks) */ - t2 = inl(acpi_fadt.xpm_tmr_blk.address); - if (pr->flags.bm_check) { - /* Enable bus master arbitration */ - atomic_dec(&c3_cpu_count); - acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0, - ACPI_MTX_DO_NOT_LOCK); - } + clockevents_notify(reason, &pr->id); +} - /* Re-enable interrupts */ - local_irq_enable(); - /* Compute time (ticks) that we were actually asleep */ - sleep_ticks = - ticks_elapsed(t1, t2) - cx->latency_ticks - C3_OVERHEAD; - break; +static void lapic_timer_propagate_broadcast(struct acpi_processor *pr) +{ + smp_call_function_single(pr->id, __lapic_timer_propagate_broadcast, + (void *)pr, 1); +} - default: - local_irq_enable(); - return; - } +/* Power(C) State timer broadcast control */ +static void lapic_timer_state_broadcast(struct acpi_processor *pr, + struct acpi_processor_cx *cx, + int broadcast) +{ + int state = cx - pr->power.states; - next_state = pr->power.state; + if (state >= pr->power.timer_broadcast_on_state) { + unsigned long reason; - /* - * Promotion? - * ---------- - * Track the number of longs (time asleep is greater than threshold) - * and promote when the count threshold is reached. Note that bus - * mastering activity may prevent promotions. - * Do not promote above max_cstate. 
- */ - if (cx->promotion.state && - ((cx->promotion.state - pr->power.states) <= max_cstate)) { - if (sleep_ticks > cx->promotion.threshold.ticks) { - cx->promotion.count++; - cx->demotion.count = 0; - if (cx->promotion.count >= - cx->promotion.threshold.count) { - if (pr->flags.bm_check) { - if (! - (pr->power.bm_activity & cx-> - promotion.threshold.bm)) { - next_state = - cx->promotion.state; - goto end; - } - } else { - next_state = cx->promotion.state; - goto end; - } - } - } + reason = broadcast ? CLOCK_EVT_NOTIFY_BROADCAST_ENTER : + CLOCK_EVT_NOTIFY_BROADCAST_EXIT; + clockevents_notify(reason, &pr->id); } +} - /* - * Demotion? - * --------- - * Track the number of shorts (time asleep is less than time threshold) - * and demote when the usage threshold is reached. - */ - if (cx->demotion.state) { - if (sleep_ticks < cx->demotion.threshold.ticks) { - cx->demotion.count++; - cx->promotion.count = 0; - if (cx->demotion.count >= cx->demotion.threshold.count) { - next_state = cx->demotion.state; - goto end; - } - } - } +#else - end: - /* - * Demote if current state exceeds max_cstate - */ - if ((pr->power.state - pr->power.states) > max_cstate) { - if (cx->demotion.state) - next_state = cx->demotion.state; - } +static void lapic_timer_check_state(int state, struct acpi_processor *pr, + struct acpi_processor_cx *cstate) { } +static void lapic_timer_propagate_broadcast(struct acpi_processor *pr) { } +static void lapic_timer_state_broadcast(struct acpi_processor *pr, + struct acpi_processor_cx *cx, + int broadcast) +{ +} - /* - * New Cx State? - * ------------- - * If we're going to start using a new Cx state we must clean up - * from the previous and prepare to use the new. - */ - if (next_state != pr->power.state) - acpi_processor_power_activate(pr, next_state); +#endif - return; +#ifdef CONFIG_PM_SLEEP +static u32 saved_bm_rld; - easy_out: - /* do C1 instead of busy loop */ - if (pm_idle_save) - pm_idle_save(); - else - safe_halt(); - return; +static int acpi_processor_suspend(void) +{ + acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &saved_bm_rld); + return 0; } -static int acpi_processor_set_power_policy(struct acpi_processor *pr) +static void acpi_processor_resume(void) { - unsigned int i; - unsigned int state_is_set = 0; - struct acpi_processor_cx *lower = NULL; - struct acpi_processor_cx *higher = NULL; - struct acpi_processor_cx *cx; - - ACPI_FUNCTION_TRACE("acpi_processor_set_power_policy"); - - if (!pr) - return_VALUE(-EINVAL); - - /* - * This function sets the default Cx state policy (OS idle handler). - * Our scheme is to promote quickly to C2 but more conservatively - * to C3. We're favoring C2 for its characteristics of low latency - * (quick response), good power savings, and ability to allow bus - * mastering activity. Note that the Cx state policy is completely - * customizable and can be altered dynamically. 
- */ - - /* startup state */ - for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) { - cx = &pr->power.states[i]; - if (!cx->valid) - continue; - - if (!state_is_set) - pr->power.state = cx; - state_is_set++; - break; - } + u32 resumed_bm_rld = 0; - if (!state_is_set) - return_VALUE(-ENODEV); + acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &resumed_bm_rld); + if (resumed_bm_rld == saved_bm_rld) + return; - /* demotion */ - for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) { - cx = &pr->power.states[i]; - if (!cx->valid) - continue; + acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, saved_bm_rld); +} - if (lower) { - cx->demotion.state = lower; - cx->demotion.threshold.ticks = cx->latency_ticks; - cx->demotion.threshold.count = 1; - if (cx->type == ACPI_STATE_C3) - cx->demotion.threshold.bm = bm_history; - } +static struct syscore_ops acpi_processor_syscore_ops = { + .suspend = acpi_processor_suspend, + .resume = acpi_processor_resume, +}; - lower = cx; - } +void acpi_processor_syscore_init(void) +{ + register_syscore_ops(&acpi_processor_syscore_ops); +} - /* promotion */ - for (i = (ACPI_PROCESSOR_MAX_POWER - 1); i > 0; i--) { - cx = &pr->power.states[i]; - if (!cx->valid) - continue; +void acpi_processor_syscore_exit(void) +{ + unregister_syscore_ops(&acpi_processor_syscore_ops); +} +#endif /* CONFIG_PM_SLEEP */ - if (higher) { - cx->promotion.state = higher; - cx->promotion.threshold.ticks = cx->latency_ticks; - if (cx->type >= ACPI_STATE_C2) - cx->promotion.threshold.count = 4; - else - cx->promotion.threshold.count = 10; - if (higher->type == ACPI_STATE_C3) - cx->promotion.threshold.bm = bm_history; - } +#if defined(CONFIG_X86) +static void tsc_check_state(int state) +{ + switch (boot_cpu_data.x86_vendor) { + case X86_VENDOR_AMD: + case X86_VENDOR_INTEL: + /* + * AMD Fam10h TSC will tick in all + * C/P/S0/S1 states when this bit is set. + */ + if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC)) + return; - higher = cx; + /*FALL THROUGH*/ + default: + /* TSC could halt in idle, so notify users */ + if (state > ACPI_STATE_C1) + mark_tsc_unstable("TSC halts in idle"); } - - return_VALUE(0); } +#else +static void tsc_check_state(int state) { return; } +#endif static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr) { - int i; - - ACPI_FUNCTION_TRACE("acpi_processor_get_power_info_fadt"); - - if (!pr) - return_VALUE(-EINVAL); if (!pr->pblk) - return_VALUE(-ENODEV); - - for (i = 0; i < ACPI_PROCESSOR_MAX_POWER; i++) - memset(pr->power.states, 0, sizeof(struct acpi_processor_cx)); + return -ENODEV; /* if info is obtained from pblk/fadt, type equals state */ - pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1; pr->power.states[ACPI_STATE_C2].type = ACPI_STATE_C2; pr->power.states[ACPI_STATE_C3].type = ACPI_STATE_C3; - /* the C0 state only exists as a filler in our array, - * and all processors need to support C1 */ - pr->power.states[ACPI_STATE_C0].valid = 1; - pr->power.states[ACPI_STATE_C1].valid = 1; +#ifndef CONFIG_HOTPLUG_CPU + /* + * Check for P_LVL2_UP flag before entering C2 and above on + * an SMP system. 
+ */ + if ((num_online_cpus() > 1) && + !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED)) + return -ENODEV; +#endif /* determine C2 and C3 address from pblk */ pr->power.states[ACPI_STATE_C2].address = pr->pblk + 4; pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5; /* determine latencies from FADT */ - pr->power.states[ACPI_STATE_C2].latency = acpi_fadt.plvl2_lat; - pr->power.states[ACPI_STATE_C3].latency = acpi_fadt.plvl3_lat; + pr->power.states[ACPI_STATE_C2].latency = acpi_gbl_FADT.c2_latency; + pr->power.states[ACPI_STATE_C3].latency = acpi_gbl_FADT.c3_latency; + + /* + * FADT specified C2 latency must be less than or equal to + * 100 microseconds. + */ + if (acpi_gbl_FADT.c2_latency > ACPI_PROCESSOR_MAX_C2_LATENCY) { + ACPI_DEBUG_PRINT((ACPI_DB_INFO, + "C2 latency too large [%d]\n", acpi_gbl_FADT.c2_latency)); + /* invalidate C2 */ + pr->power.states[ACPI_STATE_C2].address = 0; + } + + /* + * FADT supplied C3 latency must be less than or equal to + * 1000 microseconds. + */ + if (acpi_gbl_FADT.c3_latency > ACPI_PROCESSOR_MAX_C3_LATENCY) { + ACPI_DEBUG_PRINT((ACPI_DB_INFO, + "C3 latency too large [%d]\n", acpi_gbl_FADT.c3_latency)); + /* invalidate C3 */ + pr->power.states[ACPI_STATE_C3].address = 0; + } ACPI_DEBUG_PRINT((ACPI_DB_INFO, "lvl2[0x%08x] lvl3[0x%08x]\n", pr->power.states[ACPI_STATE_C2].address, pr->power.states[ACPI_STATE_C3].address)); - return_VALUE(0); + return 0; } -static int acpi_processor_get_power_info_default_c1(struct acpi_processor *pr) +static int acpi_processor_get_power_info_default(struct acpi_processor *pr) { - int i; - - ACPI_FUNCTION_TRACE("acpi_processor_get_power_info_default_c1"); - - for (i = 0; i < ACPI_PROCESSOR_MAX_POWER; i++) - memset(&(pr->power.states[i]), 0, - sizeof(struct acpi_processor_cx)); - - /* if info is obtained from pblk/fadt, type equals state */ - pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1; - pr->power.states[ACPI_STATE_C2].type = ACPI_STATE_C2; - pr->power.states[ACPI_STATE_C3].type = ACPI_STATE_C3; - - /* the C0 state only exists as a filler in our array, - * and all processors need to support C1 */ + if (!pr->power.states[ACPI_STATE_C1].valid) { + /* set the first C-State to C1 */ + /* all processors need to support C1 */ + pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1; + pr->power.states[ACPI_STATE_C1].valid = 1; + pr->power.states[ACPI_STATE_C1].entry_method = ACPI_CSTATE_HALT; + } + /* the C0 state only exists as a filler in our array */ pr->power.states[ACPI_STATE_C0].valid = 1; - pr->power.states[ACPI_STATE_C1].valid = 1; - - return_VALUE(0); + return 0; } static int acpi_processor_get_power_info_cst(struct acpi_processor *pr) { acpi_status status = 0; - acpi_integer count; + u64 count; + int current_count; int i; struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; union acpi_object *cst; - ACPI_FUNCTION_TRACE("acpi_processor_get_power_info_cst"); if (nocst) - return_VALUE(-ENODEV); + return -ENODEV; - pr->power.count = 0; - for (i = 0; i < ACPI_PROCESSOR_MAX_POWER; i++) - memset(&(pr->power.states[i]), 0, - sizeof(struct acpi_processor_cx)); + current_count = 0; status = acpi_evaluate_object(pr->handle, "_CST", NULL, &buffer); if (ACPI_FAILURE(status)) { ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No _CST, giving up\n")); - return_VALUE(-ENODEV); + return -ENODEV; } - cst = (union acpi_object *)buffer.pointer; + cst = buffer.pointer; /* There must be at least 2 elements */ if (!cst || (cst->type != ACPI_TYPE_PACKAGE) || cst->package.count < 2) { - ACPI_DEBUG_PRINT((ACPI_DB_ERROR, - "not enough elements 
in _CST\n")); + printk(KERN_ERR PREFIX "not enough elements in _CST\n"); status = -EFAULT; goto end; } @@ -605,22 +366,11 @@ static int acpi_processor_get_power_info_cst(struct acpi_processor *pr) /* Validate number of power states. */ if (count < 1 || count != cst->package.count - 1) { - ACPI_DEBUG_PRINT((ACPI_DB_ERROR, - "count given by _CST is not valid\n")); + printk(KERN_ERR PREFIX "count given by _CST is not valid\n"); status = -EFAULT; goto end; } - /* We support up to ACPI_PROCESSOR_MAX_POWER. */ - if (count > ACPI_PROCESSOR_MAX_POWER) { - printk(KERN_WARNING - "Limiting number of power states to max (%d)\n", - ACPI_PROCESSOR_MAX_POWER); - printk(KERN_WARNING - "Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n"); - count = ACPI_PROCESSOR_MAX_POWER; - } - /* Tell driver that at least _CST is supported. */ pr->flags.has_cst = 1; @@ -632,14 +382,14 @@ static int acpi_processor_get_power_info_cst(struct acpi_processor *pr) memset(&cx, 0, sizeof(cx)); - element = (union acpi_object *)&(cst->package.elements[i]); + element = &(cst->package.elements[i]); if (element->type != ACPI_TYPE_PACKAGE) continue; if (element->package.count != 4) continue; - obj = (union acpi_object *)&(element->package.elements[0]); + obj = &(element->package.elements[0]); if (obj->type != ACPI_TYPE_BUFFER) continue; @@ -650,98 +400,112 @@ static int acpi_processor_get_power_info_cst(struct acpi_processor *pr) (reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)) continue; - cx.address = (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) ? - 0 : reg->address; - /* There should be an easy way to extract an integer... */ - obj = (union acpi_object *)&(element->package.elements[1]); + obj = &(element->package.elements[1]); if (obj->type != ACPI_TYPE_INTEGER) continue; cx.type = obj->integer.value; + /* + * Some buggy BIOSes won't list C1 in _CST - + * Let acpi_processor_get_power_info_default() handle them later + */ + if (i == 1 && cx.type != ACPI_STATE_C1) + current_count++; + + cx.address = reg->address; + cx.index = current_count + 1; + + cx.entry_method = ACPI_CSTATE_SYSTEMIO; + if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) { + if (acpi_processor_ffh_cstate_probe + (pr->id, &cx, reg) == 0) { + cx.entry_method = ACPI_CSTATE_FFH; + } else if (cx.type == ACPI_STATE_C1) { + /* + * C1 is a special case where FIXED_HARDWARE + * can be handled in non-MWAIT way as well. + * In that case, save this _CST entry info. + * Otherwise, ignore this info and continue. + */ + cx.entry_method = ACPI_CSTATE_HALT; + snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT"); + } else { + continue; + } + if (cx.type == ACPI_STATE_C1 && + (boot_option_idle_override == IDLE_NOMWAIT)) { + /* + * In most cases the C1 space_id obtained from + * _CST object is FIXED_HARDWARE access mode. + * But when the option of idle=halt is added, + * the entry_method type should be changed from + * CSTATE_FFH to CSTATE_HALT. + * When the option of idle=nomwait is added, + * the C1 entry_method type should be + * CSTATE_HALT. 
+ */ + cx.entry_method = ACPI_CSTATE_HALT; + snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT"); + } + } else { + snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI IOPORT 0x%x", + cx.address); + } - if ((cx.type != ACPI_STATE_C1) && - (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO)) - continue; - - if ((cx.type < ACPI_STATE_C1) || (cx.type > ACPI_STATE_C3)) - continue; + if (cx.type == ACPI_STATE_C1) { + cx.valid = 1; + } - obj = (union acpi_object *)&(element->package.elements[2]); + obj = &(element->package.elements[2]); if (obj->type != ACPI_TYPE_INTEGER) continue; cx.latency = obj->integer.value; - obj = (union acpi_object *)&(element->package.elements[3]); + obj = &(element->package.elements[3]); if (obj->type != ACPI_TYPE_INTEGER) continue; - cx.power = obj->integer.value; + current_count++; + memcpy(&(pr->power.states[current_count]), &cx, sizeof(cx)); - (pr->power.count)++; - memcpy(&(pr->power.states[pr->power.count]), &cx, sizeof(cx)); + /* + * We support total ACPI_PROCESSOR_MAX_POWER - 1 + * (From 1 through ACPI_PROCESSOR_MAX_POWER - 1) + */ + if (current_count >= (ACPI_PROCESSOR_MAX_POWER - 1)) { + printk(KERN_WARNING + "Limiting number of power states to max (%d)\n", + ACPI_PROCESSOR_MAX_POWER); + printk(KERN_WARNING + "Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n"); + break; + } } ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d power states\n", - pr->power.count)); + current_count)); /* Validate number of power states discovered */ - if (pr->power.count < 2) - status = -ENODEV; + if (current_count < 2) + status = -EFAULT; end: - acpi_os_free(buffer.pointer); - - return_VALUE(status); -} - -static void acpi_processor_power_verify_c2(struct acpi_processor_cx *cx) -{ - ACPI_FUNCTION_TRACE("acpi_processor_get_power_verify_c2"); - - if (!cx->address) - return_VOID; - - /* - * C2 latency must be less than or equal to 100 - * microseconds. - */ - else if (cx->latency > ACPI_PROCESSOR_MAX_C2_LATENCY) { - ACPI_DEBUG_PRINT((ACPI_DB_INFO, - "latency too large [%d]\n", cx->latency)); - return_VOID; - } + kfree(buffer.pointer); - /* - * Otherwise we've met all of our C2 requirements. - * Normalize the C2 latency to expidite policy - */ - cx->valid = 1; - cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency); - - return_VOID; + return status; } static void acpi_processor_power_verify_c3(struct acpi_processor *pr, struct acpi_processor_cx *cx) { - static int bm_check_flag; + static int bm_check_flag = -1; + static int bm_control_flag = -1; - ACPI_FUNCTION_TRACE("acpi_processor_get_power_verify_c3"); if (!cx->address) - return_VOID; - - /* - * C3 latency must be less than or equal to 1000 - * microseconds. 
- */ - else if (cx->latency > ACPI_PROCESSOR_MAX_C3_LATENCY) { - ACPI_DEBUG_PRINT((ACPI_DB_INFO, - "latency too large [%d]\n", cx->latency)); - return_VOID; - } + return; /* * PIIX4 Erratum #18: We don't support C3 when Type-F (fast) @@ -753,38 +517,44 @@ static void acpi_processor_power_verify_c3(struct acpi_processor *pr, else if (errata.piix4.fdma) { ACPI_DEBUG_PRINT((ACPI_DB_INFO, "C3 not supported on PIIX4 with Type-F DMA\n")); - return_VOID; + return; } /* All the logic here assumes flags.bm_check is same across all CPUs */ - if (!bm_check_flag) { + if (bm_check_flag == -1) { /* Determine whether bm_check is needed based on CPU */ acpi_processor_power_init_bm_check(&(pr->flags), pr->id); bm_check_flag = pr->flags.bm_check; + bm_control_flag = pr->flags.bm_control; } else { pr->flags.bm_check = bm_check_flag; + pr->flags.bm_control = bm_control_flag; } if (pr->flags.bm_check) { - /* bus mastering control is necessary */ if (!pr->flags.bm_control) { - ACPI_DEBUG_PRINT((ACPI_DB_INFO, - "C3 support requires bus mastering control\n")); - return_VOID; + if (pr->flags.has_cst != 1) { + /* bus mastering control is necessary */ + ACPI_DEBUG_PRINT((ACPI_DB_INFO, + "C3 support requires BM control\n")); + return; + } else { + /* Here we enter C3 without bus mastering */ + ACPI_DEBUG_PRINT((ACPI_DB_INFO, + "C3 support without BM control\n")); + } } } else { /* * WBINVD should be set in fadt, for C3 state to be * supported on when bm_check is not required. */ - if (acpi_fadt.wb_invd != 1) { + if (!(acpi_gbl_FADT.flags & ACPI_FADT_WBINVD)) { ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Cache invalidation should work properly" " for C3 to be enabled on SMP systems\n")); - return_VOID; + return; } - acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, - 0, ACPI_MTX_DO_NOT_LOCK); } /* @@ -794,9 +564,18 @@ static void acpi_processor_power_verify_c3(struct acpi_processor *pr, * use this in our C3 policy */ cx->valid = 1; - cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency); - return_VOID; + /* + * On older chipsets, BM_RLD needs to be set + * in order for Bus Master activity to wake the + * system from C3. Newer chipsets handle DMA + * during C3 automatically and BM_RLD is a NOP. + * In either case, the proper way to + * handle BM_RLD is to set it and leave it set. 
+ */ + acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, 1); + + return; } static int acpi_processor_power_verify(struct acpi_processor *pr) @@ -804,7 +583,9 @@ static int acpi_processor_power_verify(struct acpi_processor *pr) unsigned int i; unsigned int working = 0; - for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) { + pr->power.timer_broadcast_on_state = INT_MAX; + + for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) { struct acpi_processor_cx *cx = &pr->power.states[i]; switch (cx->type) { @@ -813,18 +594,25 @@ static int acpi_processor_power_verify(struct acpi_processor *pr) break; case ACPI_STATE_C2: - acpi_processor_power_verify_c2(cx); + if (!cx->address) + break; + cx->valid = 1; break; case ACPI_STATE_C3: acpi_processor_power_verify_c3(pr, cx); break; } + if (!cx->valid) + continue; - if (cx->valid) - working++; + lapic_timer_check_state(i, pr, cx); + tsc_check_state(cx->type); + working++; } + lapic_timer_propagate_broadcast(pr); + return (working); } @@ -833,29 +621,23 @@ static int acpi_processor_get_power_info(struct acpi_processor *pr) unsigned int i; int result; - ACPI_FUNCTION_TRACE("acpi_processor_get_power_info"); /* NOTE: the idle thread may not be running while calling * this function */ + /* Zero initialize all the C-states info. */ + memset(pr->power.states, 0, sizeof(pr->power.states)); + result = acpi_processor_get_power_info_cst(pr); - if ((result) || (acpi_processor_power_verify(pr) < 2)) { + if (result == -ENODEV) result = acpi_processor_get_power_info_fadt(pr); - if ((result) || (acpi_processor_power_verify(pr) < 2)) - result = acpi_processor_get_power_info_default_c1(pr); - } - /* - * Set Default Policy - * ------------------ - * Now that we know which states are supported, set the default - * policy. Note that this policy can be changed dynamically - * (e.g. encourage deeper sleeps to conserve battery life when - * not on AC). - */ - result = acpi_processor_set_power_policy(pr); if (result) - return_VALUE(result); + return result; + + acpi_processor_get_power_info_default(pr); + + pr->power.count = acpi_processor_power_verify(pr); /* * if one state of type C2 or C3 is available, mark this @@ -864,134 +646,482 @@ static int acpi_processor_get_power_info(struct acpi_processor *pr) for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) { if (pr->power.states[i].valid) { pr->power.count = i; - pr->flags.power = 1; + if (pr->power.states[i].type >= ACPI_STATE_C2) + pr->flags.power = 1; } } - return_VALUE(0); + return 0; } -int acpi_processor_cst_has_changed(struct acpi_processor *pr) +/** + * acpi_idle_bm_check - checks if bus master activity was detected + */ +static int acpi_idle_bm_check(void) { - int result = 0; + u32 bm_status = 0; - ACPI_FUNCTION_TRACE("acpi_processor_cst_has_changed"); + if (bm_check_disable) + return 0; - if (!pr) - return_VALUE(-EINVAL); + acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status); + if (bm_status) + acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_STATUS, 1); + /* + * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect + * the true state of bus mastering activity; forcing us to + * manually check the BMIDEA bit of each IDE channel. 
+ */ + else if (errata.piix4.bmisx) { + if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01) + || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01)) + bm_status = 1; + } + return bm_status; +} - if (nocst) { - return_VALUE(-ENODEV); +/** + * acpi_idle_do_entry - a helper function that does C2 and C3 type entry + * @cx: cstate data + * + * Caller disables interrupt before call and enables interrupt after return. + */ +static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx) +{ + /* Don't trace irqs off for idle */ + stop_critical_timings(); + if (cx->entry_method == ACPI_CSTATE_FFH) { + /* Call into architectural FFH based C-state */ + acpi_processor_ffh_cstate_enter(cx); + } else if (cx->entry_method == ACPI_CSTATE_HALT) { + acpi_safe_halt(); + } else { + /* IO port based C-state */ + inb(cx->address); + /* Dummy wait op - must do something useless after P_LVL2 read + because chipsets cannot guarantee that STPCLK# signal + gets asserted in time to freeze execution properly. */ + inl(acpi_gbl_FADT.xpm_timer_block.address); } + start_critical_timings(); +} - if (!pr->flags.power_setup_done) - return_VALUE(-ENODEV); +/** + * acpi_idle_enter_c1 - enters an ACPI C1 state-type + * @dev: the target CPU + * @drv: cpuidle driver containing cpuidle state info + * @index: index of target state + * + * This is equivalent to the HALT instruction. + */ +static int acpi_idle_enter_c1(struct cpuidle_device *dev, + struct cpuidle_driver *drv, int index) +{ + struct acpi_processor *pr; + struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu); + + pr = __this_cpu_read(processors); - /* Fall back to the default idle loop */ - pm_idle = pm_idle_save; - synchronize_sched(); /* Relies on interrupts forcing exit from idle. */ + if (unlikely(!pr)) + return -EINVAL; - pr->flags.power = 0; - result = acpi_processor_get_power_info(pr); - if ((pr->flags.power == 1) && (pr->flags.power_setup_done)) - pm_idle = acpi_processor_idle; + lapic_timer_state_broadcast(pr, cx, 1); + acpi_idle_do_entry(cx); - return_VALUE(result); + lapic_timer_state_broadcast(pr, cx, 0); + + return index; } -/* proc interface */ -static int acpi_processor_power_seq_show(struct seq_file *seq, void *offset) +/** + * acpi_idle_play_dead - enters an ACPI state for long-term idle (i.e. off-lining) + * @dev: the target CPU + * @index: the index of suggested state + */ +static int acpi_idle_play_dead(struct cpuidle_device *dev, int index) { - struct acpi_processor *pr = (struct acpi_processor *)seq->private; - unsigned int i; + struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu); - ACPI_FUNCTION_TRACE("acpi_processor_power_seq_show"); + ACPI_FLUSH_CPU_CACHE(); - if (!pr) - goto end; + while (1) { - seq_printf(seq, "active state: C%zd\n" - "max_cstate: C%d\n" - "bus master activity: %08x\n", - pr->power.state ? pr->power.state - pr->power.states : 0, - max_cstate, (unsigned)pr->power.bm_activity); + if (cx->entry_method == ACPI_CSTATE_HALT) + safe_halt(); + else if (cx->entry_method == ACPI_CSTATE_SYSTEMIO) { + inb(cx->address); + /* See comment in acpi_idle_do_entry() */ + inl(acpi_gbl_FADT.xpm_timer_block.address); + } else + return -ENODEV; + } - seq_puts(seq, "states:\n"); + /* Never reached */ + return 0; +} - for (i = 1; i <= pr->power.count; i++) { - seq_printf(seq, " %cC%d: ", - (&pr->power.states[i] == - pr->power.state ? 
'*' : ' '), i); +/** + * acpi_idle_enter_simple - enters an ACPI state without BM handling + * @dev: the target CPU + * @drv: cpuidle driver with cpuidle state information + * @index: the index of suggested state + */ +static int acpi_idle_enter_simple(struct cpuidle_device *dev, + struct cpuidle_driver *drv, int index) +{ + struct acpi_processor *pr; + struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu); - if (!pr->power.states[i].valid) { - seq_puts(seq, "<not supported>\n"); - continue; + pr = __this_cpu_read(processors); + + if (unlikely(!pr)) + return -EINVAL; + +#ifdef CONFIG_HOTPLUG_CPU + if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) && + !pr->flags.has_cst && + !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED)) + return acpi_idle_enter_c1(dev, drv, CPUIDLE_DRIVER_STATE_START); +#endif + + /* + * Must be done before busmaster disable as we might need to + * access HPET ! + */ + lapic_timer_state_broadcast(pr, cx, 1); + + if (cx->type == ACPI_STATE_C3) + ACPI_FLUSH_CPU_CACHE(); + + /* Tell the scheduler that we are going deep-idle: */ + sched_clock_idle_sleep_event(); + acpi_idle_do_entry(cx); + + sched_clock_idle_wakeup_event(0); + + lapic_timer_state_broadcast(pr, cx, 0); + return index; +} + +static int c3_cpu_count; +static DEFINE_RAW_SPINLOCK(c3_lock); + +/** + * acpi_idle_enter_bm - enters C3 with proper BM handling + * @dev: the target CPU + * @drv: cpuidle driver containing state data + * @index: the index of suggested state + * + * If BM is detected, the deepest non-C3 idle state is entered instead. + */ +static int acpi_idle_enter_bm(struct cpuidle_device *dev, + struct cpuidle_driver *drv, int index) +{ + struct acpi_processor *pr; + struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu); + + pr = __this_cpu_read(processors); + + if (unlikely(!pr)) + return -EINVAL; + +#ifdef CONFIG_HOTPLUG_CPU + if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) && + !pr->flags.has_cst && + !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED)) + return acpi_idle_enter_c1(dev, drv, CPUIDLE_DRIVER_STATE_START); +#endif + + if (!cx->bm_sts_skip && acpi_idle_bm_check()) { + if (drv->safe_state_index >= 0) { + return drv->states[drv->safe_state_index].enter(dev, + drv, drv->safe_state_index); + } else { + acpi_safe_halt(); + return -EBUSY; } + } - switch (pr->power.states[i].type) { - case ACPI_STATE_C1: - seq_printf(seq, "type[C1] "); + acpi_unlazy_tlb(smp_processor_id()); + + /* Tell the scheduler that we are going deep-idle: */ + sched_clock_idle_sleep_event(); + /* + * Must be done before busmaster disable as we might need to + * access HPET ! + */ + lapic_timer_state_broadcast(pr, cx, 1); + + /* + * disable bus master + * bm_check implies we need ARB_DIS + * !bm_check implies we need cache flush + * bm_control implies whether we can do ARB_DIS + * + * That leaves a case where bm_check is set and bm_control is + * not set. In that case we cannot do much, we enter C3 + * without doing anything. 
+ */ + if (pr->flags.bm_check && pr->flags.bm_control) { + raw_spin_lock(&c3_lock); + c3_cpu_count++; + /* Disable bus master arbitration when all CPUs are in C3 */ + if (c3_cpu_count == num_online_cpus()) + acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1); + raw_spin_unlock(&c3_lock); + } else if (!pr->flags.bm_check) { + ACPI_FLUSH_CPU_CACHE(); + } + + acpi_idle_do_entry(cx); + + /* Re-enable bus master arbitration */ + if (pr->flags.bm_check && pr->flags.bm_control) { + raw_spin_lock(&c3_lock); + acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0); + c3_cpu_count--; + raw_spin_unlock(&c3_lock); + } + + sched_clock_idle_wakeup_event(0); + + lapic_timer_state_broadcast(pr, cx, 0); + return index; +} + +struct cpuidle_driver acpi_idle_driver = { + .name = "acpi_idle", + .owner = THIS_MODULE, +}; + +/** + * acpi_processor_setup_cpuidle_cx - prepares and configures CPUIDLE + * device i.e. per-cpu data + * + * @pr: the ACPI processor + * @dev : the cpuidle device + */ +static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr, + struct cpuidle_device *dev) +{ + int i, count = CPUIDLE_DRIVER_STATE_START; + struct acpi_processor_cx *cx; + + if (!pr->flags.power_setup_done) + return -EINVAL; + + if (pr->flags.power == 0) { + return -EINVAL; + } + + if (!dev) + return -EINVAL; + + dev->cpu = pr->id; + + if (max_cstate == 0) + max_cstate = 1; + + for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) { + cx = &pr->power.states[i]; + + if (!cx->valid) + continue; + + per_cpu(acpi_cstate[count], dev->cpu) = cx; + + count++; + if (count == CPUIDLE_STATE_MAX) break; - case ACPI_STATE_C2: - seq_printf(seq, "type[C2] "); + } + + if (!count) + return -EINVAL; + + return 0; +} + +/** + * acpi_processor_setup_cpuidle states- prepares and configures cpuidle + * global state data i.e. idle routines + * + * @pr: the ACPI processor + */ +static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr) +{ + int i, count = CPUIDLE_DRIVER_STATE_START; + struct acpi_processor_cx *cx; + struct cpuidle_state *state; + struct cpuidle_driver *drv = &acpi_idle_driver; + + if (!pr->flags.power_setup_done) + return -EINVAL; + + if (pr->flags.power == 0) + return -EINVAL; + + drv->safe_state_index = -1; + for (i = 0; i < CPUIDLE_STATE_MAX; i++) { + drv->states[i].name[0] = '\0'; + drv->states[i].desc[0] = '\0'; + } + + if (max_cstate == 0) + max_cstate = 1; + + for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) { + cx = &pr->power.states[i]; + + if (!cx->valid) + continue; + + state = &drv->states[count]; + snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i); + strncpy(state->desc, cx->desc, CPUIDLE_DESC_LEN); + state->exit_latency = cx->latency; + state->target_residency = cx->latency * latency_factor; + + state->flags = 0; + switch (cx->type) { + case ACPI_STATE_C1: + if (cx->entry_method == ACPI_CSTATE_FFH) + state->flags |= CPUIDLE_FLAG_TIME_VALID; + + state->enter = acpi_idle_enter_c1; + state->enter_dead = acpi_idle_play_dead; + drv->safe_state_index = count; break; - case ACPI_STATE_C3: - seq_printf(seq, "type[C3] "); + + case ACPI_STATE_C2: + state->flags |= CPUIDLE_FLAG_TIME_VALID; + state->enter = acpi_idle_enter_simple; + state->enter_dead = acpi_idle_play_dead; + drv->safe_state_index = count; break; - default: - seq_printf(seq, "type[--] "); + + case ACPI_STATE_C3: + state->flags |= CPUIDLE_FLAG_TIME_VALID; + state->enter = pr->flags.bm_check ? 
+ acpi_idle_enter_bm : + acpi_idle_enter_simple; break; } - if (pr->power.states[i].promotion.state) - seq_printf(seq, "promotion[C%zd] ", - (pr->power.states[i].promotion.state - - pr->power.states)); - else - seq_puts(seq, "promotion[--] "); - - if (pr->power.states[i].demotion.state) - seq_printf(seq, "demotion[C%zd] ", - (pr->power.states[i].demotion.state - - pr->power.states)); - else - seq_puts(seq, "demotion[--] "); - - seq_printf(seq, "latency[%03d] usage[%08d]\n", - pr->power.states[i].latency, - pr->power.states[i].usage); + count++; + if (count == CPUIDLE_STATE_MAX) + break; } - end: - return_VALUE(0); + drv->state_count = count; + + if (!count) + return -EINVAL; + + return 0; } -static int acpi_processor_power_open_fs(struct inode *inode, struct file *file) +int acpi_processor_hotplug(struct acpi_processor *pr) { - return single_open(file, acpi_processor_power_seq_show, - PDE(inode)->data); + int ret = 0; + struct cpuidle_device *dev; + + if (disabled_by_idle_boot_param()) + return 0; + + if (nocst) + return -ENODEV; + + if (!pr->flags.power_setup_done) + return -ENODEV; + + dev = per_cpu(acpi_cpuidle_device, pr->id); + cpuidle_pause_and_lock(); + cpuidle_disable_device(dev); + acpi_processor_get_power_info(pr); + if (pr->flags.power) { + acpi_processor_setup_cpuidle_cx(pr, dev); + ret = cpuidle_enable_device(dev); + } + cpuidle_resume_and_unlock(); + + return ret; } -static struct file_operations acpi_processor_power_fops = { - .open = acpi_processor_power_open_fs, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; +int acpi_processor_cst_has_changed(struct acpi_processor *pr) +{ + int cpu; + struct acpi_processor *_pr; + struct cpuidle_device *dev; + + if (disabled_by_idle_boot_param()) + return 0; + + if (nocst) + return -ENODEV; + + if (!pr->flags.power_setup_done) + return -ENODEV; + + /* + * FIXME: Design the ACPI notification to make it once per + * system instead of once per-cpu. This condition is a hack + * to make the code that updates C-States be called once. 
+ */ + + if (pr->id == 0 && cpuidle_get_driver() == &acpi_idle_driver) { + + cpuidle_pause_and_lock(); + /* Protect against cpu-hotplug */ + get_online_cpus(); + + /* Disable all cpuidle devices */ + for_each_online_cpu(cpu) { + _pr = per_cpu(processors, cpu); + if (!_pr || !_pr->flags.power_setup_done) + continue; + dev = per_cpu(acpi_cpuidle_device, cpu); + cpuidle_disable_device(dev); + } + + /* Populate Updated C-state information */ + acpi_processor_get_power_info(pr); + acpi_processor_setup_cpuidle_states(pr); + + /* Enable all cpuidle devices */ + for_each_online_cpu(cpu) { + _pr = per_cpu(processors, cpu); + if (!_pr || !_pr->flags.power_setup_done) + continue; + acpi_processor_get_power_info(_pr); + if (_pr->flags.power) { + dev = per_cpu(acpi_cpuidle_device, cpu); + acpi_processor_setup_cpuidle_cx(_pr, dev); + cpuidle_enable_device(dev); + } + } + put_online_cpus(); + cpuidle_resume_and_unlock(); + } -int acpi_processor_power_init(struct acpi_processor *pr, - struct acpi_device *device) + return 0; +} + +static int acpi_processor_registered; + +int acpi_processor_power_init(struct acpi_processor *pr) { acpi_status status = 0; - static int first_run = 0; - struct proc_dir_entry *entry = NULL; - unsigned int i; + int retval; + struct cpuidle_device *dev; + static int first_run; - ACPI_FUNCTION_TRACE("acpi_processor_power_init"); + if (disabled_by_idle_boot_param()) + return 0; if (!first_run) { dmi_check_system(processor_power_dmi_table); + max_cstate = acpi_processor_cstate_check(max_cstate); if (max_cstate < ACPI_C_STATES_MAX) printk(KERN_NOTICE "ACPI: processor limited to max C-state %d\n", @@ -999,81 +1129,69 @@ int acpi_processor_power_init(struct acpi_processor *pr, first_run++; } - if (!pr) - return_VALUE(-EINVAL); - - if (acpi_fadt.cst_cnt && !nocst) { + if (acpi_gbl_FADT.cst_control && !nocst) { status = - acpi_os_write_port(acpi_fadt.smi_cmd, acpi_fadt.cst_cnt, 8); + acpi_os_write_port(acpi_gbl_FADT.smi_command, acpi_gbl_FADT.cst_control, 8); if (ACPI_FAILURE(status)) { - ACPI_DEBUG_PRINT((ACPI_DB_ERROR, - "Notifying BIOS of _CST ability failed\n")); + ACPI_EXCEPTION((AE_INFO, status, + "Notifying BIOS of _CST ability failed")); } } - acpi_processor_power_init_pdc(&(pr->power), pr->id); - acpi_processor_set_pdc(pr, pr->power.pdc); acpi_processor_get_power_info(pr); + pr->flags.power_setup_done = 1; /* * Install the idle handler if processor power management is supported. * Note that we use previously set idle handler will be used on * platforms that only support C1. 
*/ - if ((pr->flags.power) && (!boot_option_idle_override)) { - printk(KERN_INFO PREFIX "CPU%d (power states:", pr->id); - for (i = 1; i <= pr->power.count; i++) - if (pr->power.states[i].valid) - printk(" C%d[C%d]", i, - pr->power.states[i].type); - printk(")\n"); - - if (pr->id == 0) { - pm_idle_save = pm_idle; - pm_idle = acpi_processor_idle; + if (pr->flags.power) { + /* Register acpi_idle_driver if not already registered */ + if (!acpi_processor_registered) { + acpi_processor_setup_cpuidle_states(pr); + retval = cpuidle_register_driver(&acpi_idle_driver); + if (retval) + return retval; + printk(KERN_DEBUG "ACPI: %s registered with cpuidle\n", + acpi_idle_driver.name); } - } - /* 'power' [R] */ - entry = create_proc_entry(ACPI_PROCESSOR_FILE_POWER, - S_IRUGO, acpi_device_dir(device)); - if (!entry) - ACPI_DEBUG_PRINT((ACPI_DB_ERROR, - "Unable to create '%s' fs entry\n", - ACPI_PROCESSOR_FILE_POWER)); - else { - entry->proc_fops = &acpi_processor_power_fops; - entry->data = acpi_driver_data(device); - entry->owner = THIS_MODULE; - } + dev = kzalloc(sizeof(*dev), GFP_KERNEL); + if (!dev) + return -ENOMEM; + per_cpu(acpi_cpuidle_device, pr->id) = dev; - pr->flags.power_setup_done = 1; + acpi_processor_setup_cpuidle_cx(pr, dev); - return_VALUE(0); + /* Register per-cpu cpuidle_device. Cpuidle driver + * must already be registered before registering device + */ + retval = cpuidle_register_device(dev); + if (retval) { + if (acpi_processor_registered == 0) + cpuidle_unregister_driver(&acpi_idle_driver); + return retval; + } + acpi_processor_registered++; + } + return 0; } -int acpi_processor_power_exit(struct acpi_processor *pr, - struct acpi_device *device) +int acpi_processor_power_exit(struct acpi_processor *pr) { - ACPI_FUNCTION_TRACE("acpi_processor_power_exit"); - - pr->flags.power_setup_done = 0; + struct cpuidle_device *dev = per_cpu(acpi_cpuidle_device, pr->id); - if (acpi_device_dir(device)) - remove_proc_entry(ACPI_PROCESSOR_FILE_POWER, - acpi_device_dir(device)); - - /* Unregister the idle handler when processor #0 is removed. */ - if (pr->id == 0) { - pm_idle = pm_idle_save; + if (disabled_by_idle_boot_param()) + return 0; - /* - * We are about to unload the current idle thread pm callback - * (pm_idle), Wait for all processors to update cached/local - * copies of pm_idle before proceeding. - */ - cpu_idle_wait(); + if (pr->flags.power) { + cpuidle_unregister_device(dev); + acpi_processor_registered--; + if (acpi_processor_registered == 0) + cpuidle_unregister_driver(&acpi_idle_driver); } - return_VALUE(0); + pr->flags.power_setup_done = 0; + return 0; } |
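
At a glance, the net effect of this diff: the old implementation installed its own pm_idle handler (acpi_processor_idle) with hand-rolled promotion/demotion heuristics and a /proc "power" file, while the new code registers an "acpi_idle" driver plus one cpuidle_device per CPU with the generic cpuidle framework, which then owns C-state selection. The sketch below is condensed from the acpi_processor_power_init() hunk above purely as an orientation aid, not the full patched function: the DMI/max_cstate handling, _CST evaluation and the per-state table setup (acpi_processor_setup_cpuidle_states()/_cx()) are elided, and the function name carries a _sketch suffix to mark it as illustrative. Kernel build context is assumed; all identifiers other than the sketch function name appear in the hunks above.

/*
 * Condensed registration flow introduced by this diff: the driver is
 * registered once, then every processor that reports usable C-states
 * gets a per-CPU cpuidle device. Setup of the actual state tables and
 * most error handling are elided here.
 */
#include <linux/module.h>
#include <linux/cpuidle.h>
#include <linux/slab.h>
#include <acpi/processor.h>

static DEFINE_PER_CPU(struct cpuidle_device *, acpi_cpuidle_device);
static int acpi_processor_registered;

struct cpuidle_driver acpi_idle_driver = {
	.name  = "acpi_idle",
	.owner = THIS_MODULE,
};

static int acpi_processor_power_init_sketch(struct acpi_processor *pr)
{
	struct cpuidle_device *dev;
	int retval;

	if (!pr->flags.power)	/* only CPUs with a usable C2/C3 register */
		return 0;

	/* One-time driver registration, shared by all CPUs. */
	if (!acpi_processor_registered) {
		retval = cpuidle_register_driver(&acpi_idle_driver);
		if (retval)
			return retval;
	}

	/* Per-CPU device, remembered so later _CST updates can find it. */
	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;
	dev->cpu = pr->id;
	per_cpu(acpi_cpuidle_device, pr->id) = dev;

	retval = cpuidle_register_device(dev);
	if (retval) {
		if (acpi_processor_registered == 0)
			cpuidle_unregister_driver(&acpi_idle_driver);
		return retval;
	}
	acpi_processor_registered++;
	return 0;
}

The per-CPU pointer stored in acpi_cpuidle_device is what the patched acpi_processor_hotplug() and acpi_processor_cst_has_changed() later look up so they can disable the device, refresh the C-state data after a _CST change, and re-enable it.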
