Diffstat (limited to 'arch/ppc64/kernel/iSeries_setup.c')
-rw-r--r-- | arch/ppc64/kernel/iSeries_setup.c | 119
1 files changed, 86 insertions, 33 deletions
diff --git a/arch/ppc64/kernel/iSeries_setup.c b/arch/ppc64/kernel/iSeries_setup.c
index 86966ce76b5..077c82fc9f3 100644
--- a/arch/ppc64/kernel/iSeries_setup.c
+++ b/arch/ppc64/kernel/iSeries_setup.c
@@ -24,7 +24,6 @@
 #include <linux/smp.h>
 #include <linux/param.h>
 #include <linux/string.h>
-#include <linux/bootmem.h>
 #include <linux/initrd.h>
 #include <linux/seq_file.h>
 #include <linux/kdev_t.h>
@@ -676,7 +675,6 @@ static void __init iSeries_bolt_kernel(unsigned long saddr, unsigned long eaddr)
  */
 static void __init iSeries_setup_arch(void)
 {
-	void *eventStack;
 	unsigned procIx = get_paca()->lppaca.dyn_hv_phys_proc_index;
 
 	/* Add an eye catcher and the systemcfg layout version number */
@@ -685,24 +683,7 @@ static void __init iSeries_setup_arch(void)
 	systemcfg->version.minor = SYSTEMCFG_MINOR;
 
 	/* Setup the Lp Event Queue */
-
-	/* Allocate a page for the Event Stack
-	 * The hypervisor wants the absolute real address, so
-	 * we subtract out the KERNELBASE and add in the
-	 * absolute real address of the kernel load area
-	 */
-	eventStack = alloc_bootmem_pages(LpEventStackSize);
-	memset(eventStack, 0, LpEventStackSize);
-
-	/* Invoke the hypervisor to initialize the event stack */
-	HvCallEvent_setLpEventStack(0, eventStack, LpEventStackSize);
-
-	/* Initialize fields in our Lp Event Queue */
-	xItLpQueue.xSlicEventStackPtr = (char *)eventStack;
-	xItLpQueue.xSlicCurEventPtr = (char *)eventStack;
-	xItLpQueue.xSlicLastValidEventPtr = (char *)eventStack +
-					(LpEventStackSize - LpEventMaxSize);
-	xItLpQueue.xIndex = 0;
+	setup_hvlpevent_queue();
 
 	/* Compute processor frequency */
 	procFreqHz = ((1UL << 34) * 1000000) /
@@ -853,27 +834,91 @@ static int __init iSeries_src_init(void)
 
 late_initcall(iSeries_src_init);
 
-static int set_spread_lpevents(char *str)
+static inline void process_iSeries_events(void)
 {
-	unsigned long i;
-	unsigned long val = simple_strtoul(str, NULL, 0);
+	asm volatile ("li 0,0x5555; sc" : : : "r0", "r3");
+}
+
+static void yield_shared_processor(void)
+{
+	unsigned long tb;
+
+	HvCall_setEnabledInterrupts(HvCall_MaskIPI |
+				    HvCall_MaskLpEvent |
+				    HvCall_MaskLpProd |
+				    HvCall_MaskTimeout);
+
+	tb = get_tb();
+	/* Compute future tb value when yield should expire */
+	HvCall_yieldProcessor(HvCall_YieldTimed, tb+tb_ticks_per_jiffy);
 
 	/*
-	 * The parameter is the number of processors to share in processing
-	 * lp events.
+	 * The decrementer stops during the yield.  Force a fake decrementer
+	 * here and let the timer_interrupt code sort out the actual time.
 	 */
-	if (( val > 0) && (val <= NR_CPUS)) {
-		for (i = 1; i < val; ++i)
-			paca[i].lpqueue_ptr = paca[0].lpqueue_ptr;
+	get_paca()->lppaca.int_dword.fields.decr_int = 1;
+	process_iSeries_events();
+}
 
-		printk("lpevent processing spread over %ld processors\n", val);
-	} else {
-		printk("invalid spread_lpevents %ld\n", val);
+static int iseries_shared_idle(void)
+{
+	while (1) {
+		while (!need_resched() && !hvlpevent_is_pending()) {
+			local_irq_disable();
+			ppc64_runlatch_off();
+
+			/* Recheck with irqs off */
+			if (!need_resched() && !hvlpevent_is_pending())
+				yield_shared_processor();
+
+			HMT_medium();
+			local_irq_enable();
+		}
+
+		ppc64_runlatch_on();
+
+		if (hvlpevent_is_pending())
+			process_iSeries_events();
+
+		schedule();
+	}
+
+	return 0;
+}
+
+static int iseries_dedicated_idle(void)
+{
+	long oldval;
+
+	while (1) {
+		oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED);
+
+		if (!oldval) {
+			set_thread_flag(TIF_POLLING_NRFLAG);
+
+			while (!need_resched()) {
+				ppc64_runlatch_off();
+				HMT_low();
+
+				if (hvlpevent_is_pending()) {
+					HMT_medium();
+					ppc64_runlatch_on();
+					process_iSeries_events();
+				}
+			}
+
+			HMT_medium();
+			clear_thread_flag(TIF_POLLING_NRFLAG);
+		} else {
+			set_need_resched();
+		}
+
+		ppc64_runlatch_on();
+		schedule();
 	}
 
-	return 1;
+	return 0;
 }
-__setup("spread_lpevents=", set_spread_lpevents);
 
 #ifndef CONFIG_PCI
 void __init iSeries_init_IRQ(void) { }
@@ -900,5 +945,13 @@ void __init iSeries_early_setup(void)
 	ppc_md.get_rtc_time = iSeries_get_rtc_time;
 	ppc_md.calibrate_decr = iSeries_calibrate_decr;
 	ppc_md.progress = iSeries_progress;
+
+	if (get_paca()->lppaca.shared_proc) {
+		ppc_md.idle_loop = iseries_shared_idle;
+		printk(KERN_INFO "Using shared processor idle loop\n");
+	} else {
+		ppc_md.idle_loop = iseries_dedicated_idle;
+		printk(KERN_INFO "Using dedicated idle loop\n");
+	}
 }
 