Diffstat (limited to 'arch/s390/kernel/setup.c')
 -rw-r--r--   arch/s390/kernel/setup.c   1271
 1 file changed, 666 insertions(+), 605 deletions(-)
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index 31e7b19348b..1e2264b46e4 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -1,8 +1,6 @@ /* - * arch/s390/kernel/setup.c - * * S390 version - * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Copyright IBM Corp. 1999, 2012 * Author(s): Hartmut Penner (hp@de.ibm.com), * Martin Schwidefsky (schwidefsky@de.ibm.com) * @@ -14,32 +12,42 @@ * This file handles the architecture-dependent parts of initialization */ +#define KMSG_COMPONENT "setup" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + #include <linux/errno.h> -#include <linux/module.h> +#include <linux/export.h> #include <linux/sched.h> #include <linux/kernel.h> +#include <linux/memblock.h> #include <linux/mm.h> #include <linux/stddef.h> #include <linux/unistd.h> #include <linux/ptrace.h> -#include <linux/slab.h> #include <linux/user.h> -#include <linux/a.out.h> #include <linux/tty.h> #include <linux/ioport.h> #include <linux/delay.h> -#include <linux/config.h> #include <linux/init.h> #include <linux/initrd.h> #include <linux/bootmem.h> #include <linux/root_dev.h> #include <linux/console.h> -#include <linux/seq_file.h> #include <linux/kernel_stat.h> #include <linux/device.h> - -#include <asm/uaccess.h> -#include <asm/system.h> +#include <linux/notifier.h> +#include <linux/pfn.h> +#include <linux/ctype.h> +#include <linux/reboot.h> +#include <linux/topology.h> +#include <linux/ftrace.h> +#include <linux/kexec.h> +#include <linux/crash_dump.h> +#include <linux/memory.h> +#include <linux/compat.h> + +#include <asm/ipl.h> +#include <asm/facility.h> #include <asm/smp.h> #include <asm/mmu_context.h> #include <asm/cpcmd.h> @@ -47,114 +55,58 @@ #include <asm/irq.h> #include <asm/page.h> #include <asm/ptrace.h> +#include <asm/sections.h> +#include <asm/ebcdic.h> +#include <asm/kvm_virtio.h> +#include <asm/diag.h> +#include <asm/os_info.h> +#include <asm/sclp.h> +#include "entry.h" /* * Machine setup.. */ unsigned int console_mode = 0; -unsigned int console_devno = -1; -unsigned int console_irq = -1; -unsigned long memory_size = 0; -unsigned long machine_flags = 0; -struct { - unsigned long addr, size, type; -} memory_chunk[MEMORY_CHUNKS] = { { 0 } }; -#define CHUNK_READ_WRITE 0 -#define CHUNK_READ_ONLY 1 -volatile int __cpu_logical_map[NR_CPUS]; /* logical cpu to cpu address */ -unsigned long __initdata zholes_size[MAX_NR_ZONES]; -static unsigned long __initdata memory_end; +EXPORT_SYMBOL(console_mode); -/* - * Setup options - */ -extern int _text,_etext, _edata, _end; +unsigned int console_devno = -1; +EXPORT_SYMBOL(console_devno); -/* - * This is set up by the setup-routine at boot-time - * for S390 need to find out, what we have to setup - * using address 0x10400 ... 
- */ +unsigned int console_irq = -1; +EXPORT_SYMBOL(console_irq); -#include <asm/setup.h> +unsigned long elf_hwcap = 0; +char elf_platform[ELF_PLATFORM_SIZE]; -static char command_line[COMMAND_LINE_SIZE] = { 0, }; +int __initdata memory_end_set; +unsigned long __initdata memory_end; +unsigned long __initdata max_physmem_end; -static struct resource code_resource = { - .name = "Kernel code", - .start = (unsigned long) &_text, - .end = (unsigned long) &_etext - 1, - .flags = IORESOURCE_BUSY | IORESOURCE_MEM, -}; +unsigned long VMALLOC_START; +EXPORT_SYMBOL(VMALLOC_START); -static struct resource data_resource = { - .name = "Kernel data", - .start = (unsigned long) &_etext, - .end = (unsigned long) &_edata - 1, - .flags = IORESOURCE_BUSY | IORESOURCE_MEM, -}; +unsigned long VMALLOC_END; +EXPORT_SYMBOL(VMALLOC_END); -/* - * cpu_init() initializes state that is per-CPU. - */ -void __devinit cpu_init (void) -{ - int addr = hard_smp_processor_id(); +struct page *vmemmap; +EXPORT_SYMBOL(vmemmap); - /* - * Store processor id in lowcore (used e.g. in timer_interrupt) - */ - asm volatile ("stidp %0": "=m" (S390_lowcore.cpu_data.cpu_id)); - S390_lowcore.cpu_data.cpu_addr = addr; +#ifdef CONFIG_64BIT +unsigned long MODULES_VADDR; +unsigned long MODULES_END; +#endif - /* - * Force FPU initialization: - */ - clear_thread_flag(TIF_USEDFPU); - clear_used_math(); - - atomic_inc(&init_mm.mm_count); - current->active_mm = &init_mm; - if (current->mm) - BUG(); - enter_lazy_tlb(&init_mm, current); -} +/* An array with a pointer to the lowcore of every CPU. */ +struct _lowcore *lowcore_ptr[NR_CPUS]; +EXPORT_SYMBOL(lowcore_ptr); /* - * VM halt and poweroff setup routines + * This is set up by the setup-routine at boot-time + * for S390 need to find out, what we have to setup + * using address 0x10400 ... */ -char vmhalt_cmd[128] = ""; -char vmpoff_cmd[128] = ""; -static inline void strncpy_skip_quote(char *dst, char *src, int n) -{ - int sx, dx; - - dx = 0; - for (sx = 0; src[sx] != 0; sx++) { - if (src[sx] == '"') continue; - dst[dx++] = src[sx]; - if (dx >= n) break; - } -} - -static int __init vmhalt_setup(char *str) -{ - strncpy_skip_quote(vmhalt_cmd, str, 127); - vmhalt_cmd[127] = 0; - return 1; -} - -__setup("vmhalt=", vmhalt_setup); - -static int __init vmpoff_setup(char *str) -{ - strncpy_skip_quote(vmpoff_cmd, str, 127); - vmpoff_cmd[127] = 0; - return 1; -} - -__setup("vmpoff=", vmpoff_setup); +#include <asm/setup.h> /* * condev= and conmode= setup parameter. 
@@ -174,9 +126,24 @@ static int __init condev_setup(char *str) __setup("condev=", condev_setup); +static void __init set_preferred_console(void) +{ + if (MACHINE_IS_KVM) { + if (sclp_has_vt220()) + add_preferred_console("ttyS", 1, NULL); + else if (sclp_has_linemode()) + add_preferred_console("ttyS", 0, NULL); + else + add_preferred_console("hvc", 0, NULL); + } else if (CONSOLE_IS_3215 || CONSOLE_IS_SCLP) + add_preferred_console("ttyS", 0, NULL); + else if (CONSOLE_IS_3270) + add_preferred_console("tty3270", 0, NULL); +} + static int __init conmode_setup(char *str) { -#if defined(CONFIG_SCLP_CONSOLE) +#if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE) if (strncmp(str, "hwc", 4) == 0 || strncmp(str, "sclp", 5) == 0) SET_CONSOLE_SCLP; #endif @@ -188,6 +155,7 @@ static int __init conmode_setup(char *str) if (strncmp(str, "3270", 5) == 0) SET_CONSOLE_3270; #endif + set_preferred_console(); return 1; } @@ -199,11 +167,11 @@ static void __init conmode_default(void) char *ptr; if (MACHINE_IS_VM) { - __cpcmd("QUERY CONSOLE", query_buffer, 1024, NULL); + cpcmd("QUERY CONSOLE", query_buffer, 1024, NULL); console_devno = simple_strtoul(query_buffer + 5, NULL, 16); ptr = strstr(query_buffer, "SUBCHANNEL ="); console_irq = simple_strtoul(ptr + 13, NULL, 16); - __cpcmd("QUERY TERM", query_buffer, 1024, NULL); + cpcmd("QUERY TERM", query_buffer, 1024, NULL); ptr = strstr(query_buffer, "CONMODE"); /* * Set the conmode to 3215 so that the device recognition @@ -212,9 +180,9 @@ static void __init conmode_default(void) * 3215 and the 3270 driver will try to access the console * device (3215 as console and 3270 as normal tty). */ - __cpcmd("TERM CONMODE 3215", NULL, 0, NULL); + cpcmd("TERM CONMODE 3215", NULL, 0, NULL); if (ptr == NULL) { -#if defined(CONFIG_SCLP_CONSOLE) +#if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE) SET_CONSOLE_SCLP; #endif return; @@ -224,7 +192,7 @@ static void __init conmode_default(void) SET_CONSOLE_3270; #elif defined(CONFIG_TN3215_CONSOLE) SET_CONSOLE_3215; -#elif defined(CONFIG_SCLP_CONSOLE) +#elif defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE) SET_CONSOLE_SCLP; #endif } else if (strncmp(ptr + 8, "3215", 4) == 0) { @@ -232,65 +200,30 @@ static void __init conmode_default(void) SET_CONSOLE_3215; #elif defined(CONFIG_TN3270_CONSOLE) SET_CONSOLE_3270; -#elif defined(CONFIG_SCLP_CONSOLE) +#elif defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE) SET_CONSOLE_SCLP; #endif } - } else if (MACHINE_IS_P390) { -#if defined(CONFIG_TN3215_CONSOLE) - SET_CONSOLE_3215; -#elif defined(CONFIG_TN3270_CONSOLE) - SET_CONSOLE_3270; -#endif } else { -#if defined(CONFIG_SCLP_CONSOLE) +#if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE) SET_CONSOLE_SCLP; #endif } } -#ifdef CONFIG_SMP -extern void machine_restart_smp(char *); -extern void machine_halt_smp(void); -extern void machine_power_off_smp(void); - -void (*_machine_restart)(char *command) = machine_restart_smp; -void (*_machine_halt)(void) = machine_halt_smp; -void (*_machine_power_off)(void) = machine_power_off_smp; -#else -/* - * Reboot, halt and power_off routines for non SMP. 
- */ -extern void reipl(unsigned long devno); -extern void reipl_diag(void); -static void do_machine_restart_nonsmp(char * __unused) -{ - reipl_diag(); - - if (MACHINE_IS_VM) - cpcmd ("IPL", NULL, 0); - else - reipl (0x10000 | S390_lowcore.ipl_device); -} - -static void do_machine_halt_nonsmp(void) +#ifdef CONFIG_CRASH_DUMP +static void __init setup_zfcpdump(void) { - if (MACHINE_IS_VM && strlen(vmhalt_cmd) > 0) - cpcmd(vmhalt_cmd, NULL, 0); - signal_processor(smp_processor_id(), sigp_stop_and_store_status); + if (ipl_info.type != IPL_TYPE_FCP_DUMP) + return; + if (OLDMEM_BASE) + return; + strcat(boot_command_line, " cio_ignore=all,!ipldev,!condev"); + console_loglevel = 2; } - -static void do_machine_power_off_nonsmp(void) -{ - if (MACHINE_IS_VM && strlen(vmpoff_cmd) > 0) - cpcmd(vmpoff_cmd, NULL, 0); - signal_processor(smp_processor_id(), sigp_stop_and_store_status); -} - -void (*_machine_restart)(char *command) = do_machine_restart_nonsmp; -void (*_machine_halt)(void) = do_machine_halt_nonsmp; -void (*_machine_power_off)(void) = do_machine_power_off_nonsmp; -#endif +#else +static inline void setup_zfcpdump(void) {} +#endif /* CONFIG_CRASH_DUMP */ /* * Reboot, halt and power_off stubs. They just call _machine_restart, @@ -299,575 +232,703 @@ void (*_machine_power_off)(void) = do_machine_power_off_nonsmp; void machine_restart(char *command) { - console_unblank(); + if ((!in_interrupt() && !in_atomic()) || oops_in_progress) + /* + * Only unblank the console if we are called in enabled + * context or a bust_spinlocks cleared the way for us. + */ + console_unblank(); _machine_restart(command); } void machine_halt(void) { - console_unblank(); + if (!in_interrupt() || oops_in_progress) + /* + * Only unblank the console if we are called in enabled + * context or a bust_spinlocks cleared the way for us. + */ + console_unblank(); _machine_halt(); } void machine_power_off(void) { - console_unblank(); + if (!in_interrupt() || oops_in_progress) + /* + * Only unblank the console if we are called in enabled + * context or a bust_spinlocks cleared the way for us. + */ + console_unblank(); _machine_power_off(); } -static void __init -add_memory_hole(unsigned long start, unsigned long end) +/* + * Dummy power off function. 
+ */ +void (*pm_power_off)(void) = machine_power_off; +EXPORT_SYMBOL_GPL(pm_power_off); + +static int __init early_parse_mem(char *p) { - unsigned long dma_pfn = MAX_DMA_ADDRESS >> PAGE_SHIFT; - - if (end <= dma_pfn) - zholes_size[ZONE_DMA] += end - start + 1; - else if (start > dma_pfn) - zholes_size[ZONE_NORMAL] += end - start + 1; - else { - zholes_size[ZONE_DMA] += dma_pfn - start + 1; - zholes_size[ZONE_NORMAL] += end - dma_pfn; - } + memory_end = memparse(p, &p); + memory_end &= PAGE_MASK; + memory_end_set = 1; + return 0; } +early_param("mem", early_parse_mem); -static void __init -parse_cmdline_early(char **cmdline_p) +static int __init parse_vmalloc(char *arg) { - char c = ' ', cn, *to = command_line, *from = COMMAND_LINE; - unsigned long delay = 0; - - /* Save unparsed command line copy for /proc/cmdline */ - memcpy(saved_command_line, COMMAND_LINE, COMMAND_LINE_SIZE); - saved_command_line[COMMAND_LINE_SIZE-1] = '\0'; - - for (;;) { - /* - * "mem=XXX[kKmM]" sets memsize - */ - if (c == ' ' && strncmp(from, "mem=", 4) == 0) { - memory_end = simple_strtoul(from+4, &from, 0); - if ( *from == 'K' || *from == 'k' ) { - memory_end = memory_end << 10; - from++; - } else if ( *from == 'M' || *from == 'm' ) { - memory_end = memory_end << 20; - from++; - } - } - /* - * "ipldelay=XXX[sm]" sets ipl delay in seconds or minutes - */ - if (c == ' ' && strncmp(from, "ipldelay=", 9) == 0) { - delay = simple_strtoul(from+9, &from, 0); - if (*from == 's' || *from == 'S') { - delay = delay*1000000; - from++; - } else if (*from == 'm' || *from == 'M') { - delay = delay*60*1000000; - from++; - } - /* now wait for the requested amount of time */ - udelay(delay); - } - cn = *(from++); - if (!cn) - break; - if (cn == '\n') - cn = ' '; /* replace newlines with space */ - if (cn == 0x0d) - cn = ' '; /* replace 0x0d with space */ - if (cn == ' ' && c == ' ') - continue; /* remove additional spaces */ - c = cn; - if (to - command_line >= COMMAND_LINE_SIZE) - break; - *(to++) = c; - } - if (c == ' ' && to > command_line) to--; - *to = '\0'; - *cmdline_p = command_line; + if (!arg) + return -EINVAL; + VMALLOC_END = (memparse(arg, &arg) + PAGE_SIZE - 1) & PAGE_MASK; + return 0; } +early_param("vmalloc", parse_vmalloc); -static void __init -setup_lowcore(void) +void *restart_stack __attribute__((__section__(".data"))); + +static void __init setup_lowcore(void) { struct _lowcore *lc; - int lc_pages; /* * Setup lowcore for boot cpu */ - lc_pages = sizeof(void *) == 8 ? 
2 : 1; - lc = (struct _lowcore *) - __alloc_bootmem(lc_pages * PAGE_SIZE, lc_pages * PAGE_SIZE, 0); - memset(lc, 0, lc_pages * PAGE_SIZE); - lc->restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY; + BUILD_BUG_ON(sizeof(struct _lowcore) != LC_PAGES * 4096); + lc = __alloc_bootmem_low(LC_PAGES * PAGE_SIZE, LC_PAGES * PAGE_SIZE, 0); + lc->restart_psw.mask = PSW_KERNEL_BITS; lc->restart_psw.addr = PSW_ADDR_AMODE | (unsigned long) restart_int_handler; - lc->external_new_psw.mask = PSW_KERNEL_BITS; + lc->external_new_psw.mask = PSW_KERNEL_BITS | + PSW_MASK_DAT | PSW_MASK_MCHECK; lc->external_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) ext_int_handler; - lc->svc_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_IO | PSW_MASK_EXT; + lc->svc_new_psw.mask = PSW_KERNEL_BITS | + PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK; lc->svc_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) system_call; - lc->program_new_psw.mask = PSW_KERNEL_BITS; + lc->program_new_psw.mask = PSW_KERNEL_BITS | + PSW_MASK_DAT | PSW_MASK_MCHECK; lc->program_new_psw.addr = - PSW_ADDR_AMODE | (unsigned long)pgm_check_handler; - lc->mcck_new_psw.mask = - PSW_KERNEL_BITS & ~PSW_MASK_MCHECK & ~PSW_MASK_DAT; + PSW_ADDR_AMODE | (unsigned long) pgm_check_handler; + lc->mcck_new_psw.mask = PSW_KERNEL_BITS; lc->mcck_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) mcck_int_handler; - lc->io_new_psw.mask = PSW_KERNEL_BITS; + lc->io_new_psw.mask = PSW_KERNEL_BITS | + PSW_MASK_DAT | PSW_MASK_MCHECK; lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler; - lc->ipl_device = S390_lowcore.ipl_device; - lc->jiffy_timer = -1LL; - lc->kernel_stack = ((unsigned long) &init_thread_union) + THREAD_SIZE; + lc->clock_comparator = -1ULL; + lc->kernel_stack = ((unsigned long) &init_thread_union) + + THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs); lc->async_stack = (unsigned long) - __alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0) + ASYNC_SIZE; + __alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0) + + ASYNC_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs); lc->panic_stack = (unsigned long) - __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0) + PAGE_SIZE; + __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0) + + PAGE_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs); lc->current_task = (unsigned long) init_thread_union.thread_info.task; lc->thread_info = (unsigned long) &init_thread_union; -#ifndef CONFIG_ARCH_S390X + lc->machine_flags = S390_lowcore.machine_flags; + lc->stfl_fac_list = S390_lowcore.stfl_fac_list; + memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list, + MAX_FACILITY_BIT/8); +#ifndef CONFIG_64BIT if (MACHINE_HAS_IEEE) { lc->extended_save_area_addr = (__u32) - __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0); + __alloc_bootmem_low(PAGE_SIZE, PAGE_SIZE, 0); /* enable extended save area */ - ctl_set_bit(14, 29); + __ctl_set_bit(14, 29); } +#else + lc->vdso_per_cpu_data = (unsigned long) &lc->paste[0]; #endif + lc->sync_enter_timer = S390_lowcore.sync_enter_timer; + lc->async_enter_timer = S390_lowcore.async_enter_timer; + lc->exit_timer = S390_lowcore.exit_timer; + lc->user_timer = S390_lowcore.user_timer; + lc->system_timer = S390_lowcore.system_timer; + lc->steal_timer = S390_lowcore.steal_timer; + lc->last_update_timer = S390_lowcore.last_update_timer; + lc->last_update_clock = S390_lowcore.last_update_clock; + lc->ftrace_func = S390_lowcore.ftrace_func; + + restart_stack = __alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0); + restart_stack += ASYNC_SIZE; + + /* + * Set up PSW restart to call ipl.c:do_restart(). 
Copy the relevant + * restart data to the absolute zero lowcore. This is necessary if + * PSW restart is done on an offline CPU that has lowcore zero. + */ + lc->restart_stack = (unsigned long) restart_stack; + lc->restart_fn = (unsigned long) do_restart; + lc->restart_data = 0; + lc->restart_source = -1UL; + + /* Setup absolute zero lowcore */ + mem_assign_absolute(S390_lowcore.restart_stack, lc->restart_stack); + mem_assign_absolute(S390_lowcore.restart_fn, lc->restart_fn); + mem_assign_absolute(S390_lowcore.restart_data, lc->restart_data); + mem_assign_absolute(S390_lowcore.restart_source, lc->restart_source); + mem_assign_absolute(S390_lowcore.restart_psw, lc->restart_psw); + +#ifdef CONFIG_SMP + lc->spinlock_lockval = arch_spin_lockval(0); +#endif + set_prefix((u32)(unsigned long) lc); + lowcore_ptr[0] = lc; } -static void __init -setup_resources(void) -{ - struct resource *res; - int i; +static struct resource code_resource = { + .name = "Kernel code", + .flags = IORESOURCE_BUSY | IORESOURCE_MEM, +}; + +static struct resource data_resource = { + .name = "Kernel data", + .flags = IORESOURCE_BUSY | IORESOURCE_MEM, +}; - for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) { - res = alloc_bootmem_low(sizeof(struct resource)); +static struct resource bss_resource = { + .name = "Kernel bss", + .flags = IORESOURCE_BUSY | IORESOURCE_MEM, +}; + +static struct resource __initdata *standard_resources[] = { + &code_resource, + &data_resource, + &bss_resource, +}; + +static void __init setup_resources(void) +{ + struct resource *res, *std_res, *sub_res; + struct memblock_region *reg; + int j; + + code_resource.start = (unsigned long) &_text; + code_resource.end = (unsigned long) &_etext - 1; + data_resource.start = (unsigned long) &_etext; + data_resource.end = (unsigned long) &_edata - 1; + bss_resource.start = (unsigned long) &__bss_start; + bss_resource.end = (unsigned long) &__bss_stop - 1; + + for_each_memblock(memory, reg) { + res = alloc_bootmem_low(sizeof(*res)); res->flags = IORESOURCE_BUSY | IORESOURCE_MEM; - switch (memory_chunk[i].type) { - case CHUNK_READ_WRITE: - res->name = "System RAM"; - break; - case CHUNK_READ_ONLY: - res->name = "System ROM"; - res->flags |= IORESOURCE_READONLY; - break; - default: - res->name = "reserved"; - } - res->start = memory_chunk[i].addr; - res->end = memory_chunk[i].addr + memory_chunk[i].size - 1; + + res->name = "System RAM"; + res->start = reg->base; + res->end = reg->base + reg->size - 1; request_resource(&iomem_resource, res); - request_resource(res, &code_resource); - request_resource(res, &data_resource); + + for (j = 0; j < ARRAY_SIZE(standard_resources); j++) { + std_res = standard_resources[j]; + if (std_res->start < res->start || + std_res->start > res->end) + continue; + if (std_res->end > res->end) { + sub_res = alloc_bootmem_low(sizeof(*sub_res)); + *sub_res = *std_res; + sub_res->end = res->end; + std_res->start = res->end + 1; + request_resource(res, sub_res); + } else { + request_resource(res, std_res); + } + } } } -static void __init -setup_memory(void) +static void __init setup_memory_end(void) { - unsigned long bootmap_size; - unsigned long start_pfn, end_pfn, init_pfn; - unsigned long last_rw_end; - int i; - - /* - * partially used pages are not usable - thus - * we are rounding upwards: - */ - start_pfn = (__pa(&_end) + PAGE_SIZE - 1) >> PAGE_SHIFT; - end_pfn = max_pfn = memory_end >> PAGE_SHIFT; - - /* Initialize storage key for kernel pages */ - for (init_pfn = 0 ; init_pfn < start_pfn; init_pfn++) - 
page_set_storage_key(init_pfn << PAGE_SHIFT, PAGE_DEFAULT_KEY); + unsigned long vmax, vmalloc_size, tmp; + + /* Choose kernel address space layout: 2, 3, or 4 levels. */ +#ifdef CONFIG_64BIT + vmalloc_size = VMALLOC_END ?: (128UL << 30) - MODULES_LEN; + tmp = (memory_end ?: max_physmem_end) / PAGE_SIZE; + tmp = tmp * (sizeof(struct page) + PAGE_SIZE) + vmalloc_size; + if (tmp <= (1UL << 42)) + vmax = 1UL << 42; /* 3-level kernel page table */ + else + vmax = 1UL << 53; /* 4-level kernel page table */ + /* module area is at the end of the kernel address space. */ + MODULES_END = vmax; + MODULES_VADDR = MODULES_END - MODULES_LEN; + VMALLOC_END = MODULES_VADDR; +#else + vmalloc_size = VMALLOC_END ?: 96UL << 20; + vmax = 1UL << 31; /* 2-level kernel page table */ + /* vmalloc area is at the end of the kernel address space. */ + VMALLOC_END = vmax; +#endif + VMALLOC_START = vmax - vmalloc_size; + + /* Split remaining virtual space between 1:1 mapping & vmemmap array */ + tmp = VMALLOC_START / (PAGE_SIZE + sizeof(struct page)); + /* vmemmap contains a multiple of PAGES_PER_SECTION struct pages */ + tmp = SECTION_ALIGN_UP(tmp); + tmp = VMALLOC_START - tmp * sizeof(struct page); + tmp &= ~((vmax >> 11) - 1); /* align to page table level */ + tmp = min(tmp, 1UL << MAX_PHYSMEM_BITS); + vmemmap = (struct page *) tmp; + + /* Take care that memory_end is set and <= vmemmap */ + memory_end = min(memory_end ?: max_physmem_end, tmp); + max_pfn = max_low_pfn = PFN_DOWN(memory_end); + memblock_remove(memory_end, ULONG_MAX); + + pr_notice("Max memory size: %luMB\n", memory_end >> 20); +} - /* - * Initialize the boot-time allocator (with low memory only): - */ - bootmap_size = init_bootmem(start_pfn, end_pfn); +static void __init setup_vmcoreinfo(void) +{ + mem_assign_absolute(S390_lowcore.vmcore_info, paddr_vmcoreinfo_note()); +} - /* - * Register RAM areas with the bootmem allocator. - */ - last_rw_end = start_pfn; - - for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) { - unsigned long start_chunk, end_chunk; - - if (memory_chunk[i].type != CHUNK_READ_WRITE) - continue; - start_chunk = (memory_chunk[i].addr + PAGE_SIZE - 1); - start_chunk >>= PAGE_SHIFT; - end_chunk = (memory_chunk[i].addr + memory_chunk[i].size); - end_chunk >>= PAGE_SHIFT; - if (start_chunk < start_pfn) - start_chunk = start_pfn; - if (end_chunk > end_pfn) - end_chunk = end_pfn; - if (start_chunk < end_chunk) { - /* Initialize storage key for RAM pages */ - for (init_pfn = start_chunk ; init_pfn < end_chunk; - init_pfn++) - page_set_storage_key(init_pfn << PAGE_SHIFT, - PAGE_DEFAULT_KEY); - free_bootmem(start_chunk << PAGE_SHIFT, - (end_chunk - start_chunk) << PAGE_SHIFT); - if (last_rw_end < start_chunk) - add_memory_hole(last_rw_end, start_chunk - 1); - last_rw_end = end_chunk; - } - } +#ifdef CONFIG_CRASH_DUMP - psw_set_key(PAGE_DEFAULT_KEY); +/* + * When kdump is enabled, we have to ensure that no memory from + * the area [0 - crashkernel memory size] and + * [crashk_res.start - crashk_res.end] is set offline. 
+ */ +static int kdump_mem_notifier(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct memory_notify *arg = data; + + if (arg->start_pfn < PFN_DOWN(resource_size(&crashk_res))) + return NOTIFY_BAD; + if (arg->start_pfn > PFN_DOWN(crashk_res.end)) + return NOTIFY_OK; + if (arg->start_pfn + arg->nr_pages - 1 < PFN_DOWN(crashk_res.start)) + return NOTIFY_OK; + return NOTIFY_BAD; +} - if (last_rw_end < end_pfn - 1) - add_memory_hole(last_rw_end, end_pfn - 1); +static struct notifier_block kdump_mem_nb = { + .notifier_call = kdump_mem_notifier, +}; - /* - * Reserve the bootmem bitmap itself as well. We do this in two - * steps (first step was init_bootmem()) because this catches - * the (very unlikely) case of us accidentally initializing the - * bootmem allocator with an invalid RAM area. - */ - reserve_bootmem(start_pfn << PAGE_SHIFT, bootmap_size); +#endif -#ifdef CONFIG_BLK_DEV_INITRD - if (INITRD_START) { - if (INITRD_START + INITRD_SIZE <= memory_end) { - reserve_bootmem(INITRD_START, INITRD_SIZE); - initrd_start = INITRD_START; - initrd_end = initrd_start + INITRD_SIZE; - } else { - printk("initrd extends beyond end of memory " - "(0x%08lx > 0x%08lx)\ndisabling initrd\n", - initrd_start + INITRD_SIZE, memory_end); - initrd_start = initrd_end = 0; - } +/* + * Make sure that the area behind memory_end is protected + */ +static void reserve_memory_end(void) +{ +#ifdef CONFIG_CRASH_DUMP + if (ipl_info.type == IPL_TYPE_FCP_DUMP && + !OLDMEM_BASE && sclp_get_hsa_size()) { + memory_end = sclp_get_hsa_size(); + memory_end &= PAGE_MASK; + memory_end_set = 1; } #endif + if (!memory_end_set) + return; + memblock_reserve(memory_end, ULONG_MAX); } /* - * Setup function called from init/main.c just after the banner - * was printed. + * Make sure that oldmem, where the dump is stored, is protected */ +static void reserve_oldmem(void) +{ +#ifdef CONFIG_CRASH_DUMP + if (OLDMEM_BASE) + /* Forget all memory above the running kdump system */ + memblock_reserve(OLDMEM_SIZE, (phys_addr_t)ULONG_MAX); +#endif +} -void __init -setup_arch(char **cmdline_p) +/* + * Make sure that oldmem, where the dump is stored, is protected + */ +static void remove_oldmem(void) { - /* - * print what head.S has found out about the machine - */ -#ifndef CONFIG_ARCH_S390X - printk((MACHINE_IS_VM) ? - "We are running under VM (31 bit mode)\n" : - "We are running native (31 bit mode)\n"); - printk((MACHINE_HAS_IEEE) ? - "This machine has an IEEE fpu\n" : - "This machine has no IEEE fpu\n"); -#else /* CONFIG_ARCH_S390X */ - printk((MACHINE_IS_VM) ? - "We are running under VM (64 bit mode)\n" : - "We are running native (64 bit mode)\n"); -#endif /* CONFIG_ARCH_S390X */ +#ifdef CONFIG_CRASH_DUMP + if (OLDMEM_BASE) + /* Forget all memory above the running kdump system */ + memblock_remove(OLDMEM_SIZE, (phys_addr_t)ULONG_MAX); +#endif +} - ROOT_DEV = Root_RAM0; -#ifndef CONFIG_ARCH_S390X - memory_end = memory_size & ~0x400000UL; /* align memory end to 4MB */ - /* - * We need some free virtual space to be able to do vmalloc. - * On a machine with 2GB memory we make sure that we have at - * least 128 MB free space for vmalloc. 
- */ - if (memory_end > 1920*1024*1024) - memory_end = 1920*1024*1024; -#else /* CONFIG_ARCH_S390X */ - memory_end = memory_size & ~0x200000UL; /* detected in head.s */ -#endif /* CONFIG_ARCH_S390X */ +/* + * Reserve memory for kdump kernel to be loaded with kexec + */ +static void __init reserve_crashkernel(void) +{ +#ifdef CONFIG_CRASH_DUMP + unsigned long long crash_base, crash_size; + phys_addr_t low, high; + int rc; - init_mm.start_code = PAGE_OFFSET; - init_mm.end_code = (unsigned long) &_etext; - init_mm.end_data = (unsigned long) &_edata; - init_mm.brk = (unsigned long) &_end; + rc = parse_crashkernel(boot_command_line, memory_end, &crash_size, + &crash_base); - parse_cmdline_early(cmdline_p); + crash_base = ALIGN(crash_base, KEXEC_CRASH_MEM_ALIGN); + crash_size = ALIGN(crash_size, KEXEC_CRASH_MEM_ALIGN); + if (rc || crash_size == 0) + return; - setup_memory(); - setup_resources(); - setup_lowcore(); + if (memblock.memory.regions[0].size < crash_size) { + pr_info("crashkernel reservation failed: %s\n", + "first memory chunk must be at least crashkernel size"); + return; + } - cpu_init(); - __cpu_logical_map[0] = S390_lowcore.cpu_data.cpu_addr; + low = crash_base ?: OLDMEM_BASE; + high = low + crash_size; + if (low >= OLDMEM_BASE && high <= OLDMEM_BASE + OLDMEM_SIZE) { + /* The crashkernel fits into OLDMEM, reuse OLDMEM */ + crash_base = low; + } else { + /* Find suitable area in free memory */ + low = max_t(unsigned long, crash_size, sclp_get_hsa_size()); + high = crash_base ? crash_base + crash_size : ULONG_MAX; - /* - * Create kernel page tables and switch to virtual addressing. - */ - paging_init(); + if (crash_base && crash_base < low) { + pr_info("crashkernel reservation failed: %s\n", + "crash_base too low"); + return; + } + low = crash_base ?: low; + crash_base = memblock_find_in_range(low, high, crash_size, + KEXEC_CRASH_MEM_ALIGN); + } - /* Setup default console */ - conmode_default(); + if (!crash_base) { + pr_info("crashkernel reservation failed: %s\n", + "no suitable area found"); + return; + } + + if (register_memory_notifier(&kdump_mem_nb)) + return; + + if (!OLDMEM_BASE && MACHINE_IS_VM) + diag10_range(PFN_DOWN(crash_base), PFN_DOWN(crash_size)); + crashk_res.start = crash_base; + crashk_res.end = crash_base + crash_size - 1; + insert_resource(&iomem_resource, &crashk_res); + memblock_remove(crash_base, crash_size); + pr_info("Reserving %lluMB of memory at %lluMB " + "for crashkernel (System RAM: %luMB)\n", + crash_size >> 20, crash_base >> 20, + (unsigned long)memblock.memory.total_size >> 20); + os_info_crashkernel_add(crash_base, crash_size); +#endif } -void print_cpu_info(struct cpuinfo_S390 *cpuinfo) +/* + * Reserve the initrd from being used by memblock + */ +static void __init reserve_initrd(void) { - printk("cpu %d " -#ifdef CONFIG_SMP - "phys_idx=%d " -#endif - "vers=%02X ident=%06X machine=%04X unused=%04X\n", - cpuinfo->cpu_nr, -#ifdef CONFIG_SMP - cpuinfo->cpu_addr, +#ifdef CONFIG_BLK_DEV_INITRD + initrd_start = INITRD_START; + initrd_end = initrd_start + INITRD_SIZE; + memblock_reserve(INITRD_START, INITRD_SIZE); #endif - cpuinfo->cpu_id.version, - cpuinfo->cpu_id.ident, - cpuinfo->cpu_id.machine, - cpuinfo->cpu_id.unused); } /* - * show_cpuinfo - Get information on one CPU for use by procfs. 
+ * Check for initrd being in usable memory */ - -static int show_cpuinfo(struct seq_file *m, void *v) +static void __init check_initrd(void) { - struct cpuinfo_S390 *cpuinfo; - unsigned long n = (unsigned long) v - 1; - - preempt_disable(); - if (!n) { - seq_printf(m, "vendor_id : IBM/S390\n" - "# processors : %i\n" - "bogomips per cpu: %lu.%02lu\n", - num_online_cpus(), loops_per_jiffy/(500000/HZ), - (loops_per_jiffy/(5000/HZ))%100); +#ifdef CONFIG_BLK_DEV_INITRD + if (INITRD_START && INITRD_SIZE && + !memblock_is_region_memory(INITRD_START, INITRD_SIZE)) { + pr_err("initrd does not fit memory.\n"); + memblock_free(INITRD_START, INITRD_SIZE); + initrd_start = initrd_end = 0; } - if (cpu_online(n)) { -#ifdef CONFIG_SMP - if (smp_processor_id() == n) - cpuinfo = &S390_lowcore.cpu_data; - else - cpuinfo = &lowcore_ptr[n]->cpu_data; -#else - cpuinfo = &S390_lowcore.cpu_data; #endif - seq_printf(m, "processor %li: " - "version = %02X, " - "identification = %06X, " - "machine = %04X\n", - n, cpuinfo->cpu_id.version, - cpuinfo->cpu_id.ident, - cpuinfo->cpu_id.machine); - } - preempt_enable(); - return 0; } -static void *c_start(struct seq_file *m, loff_t *pos) +/* + * Reserve all kernel text + */ +static void __init reserve_kernel(void) { - return *pos < NR_CPUS ? (void *)((unsigned long) *pos + 1) : NULL; + unsigned long start_pfn; + start_pfn = PFN_UP(__pa(&_end)); + + /* + * Reserve memory used for lowcore/command line/kernel image. + */ + memblock_reserve(0, (unsigned long)_ehead); + memblock_reserve((unsigned long)_stext, PFN_PHYS(start_pfn) + - (unsigned long)_stext); } -static void *c_next(struct seq_file *m, void *v, loff_t *pos) + +static void __init reserve_elfcorehdr(void) { - ++*pos; - return c_start(m, pos); +#ifdef CONFIG_CRASH_DUMP + if (is_kdump_kernel()) + memblock_reserve(elfcorehdr_addr - OLDMEM_BASE, + PAGE_ALIGN(elfcorehdr_size)); +#endif } -static void c_stop(struct seq_file *m, void *v) + +static void __init setup_memory(void) { -} -struct seq_operations cpuinfo_op = { - .start = c_start, - .next = c_next, - .stop = c_stop, - .show = show_cpuinfo, -}; + struct memblock_region *reg; -#define DEFINE_IPL_ATTR(_name, _format, _value) \ -static ssize_t ipl_##_name##_show(struct subsystem *subsys, \ - char *page) \ -{ \ - return sprintf(page, _format, _value); \ -} \ -static struct subsys_attribute ipl_##_name##_attr = \ - __ATTR(_name, S_IRUGO, ipl_##_name##_show, NULL); - -DEFINE_IPL_ATTR(wwpn, "0x%016llx\n", (unsigned long long) - IPL_PARMBLOCK_START->fcp.wwpn); -DEFINE_IPL_ATTR(lun, "0x%016llx\n", (unsigned long long) - IPL_PARMBLOCK_START->fcp.lun); -DEFINE_IPL_ATTR(bootprog, "%lld\n", (unsigned long long) - IPL_PARMBLOCK_START->fcp.bootprog); -DEFINE_IPL_ATTR(br_lba, "%lld\n", (unsigned long long) - IPL_PARMBLOCK_START->fcp.br_lba); - -enum ipl_type_type { - ipl_type_unknown, - ipl_type_ccw, - ipl_type_fcp, -}; + /* + * Init storage key for present memory + */ + for_each_memblock(memory, reg) { + storage_key_init_range(reg->base, reg->base + reg->size); + } + psw_set_key(PAGE_DEFAULT_KEY); -static enum ipl_type_type -get_ipl_type(void) -{ - struct ipl_parameter_block *ipl = IPL_PARMBLOCK_START; - - if (!IPL_DEVNO_VALID) - return ipl_type_unknown; - if (!IPL_PARMBLOCK_VALID) - return ipl_type_ccw; - if (ipl->hdr.header.version > IPL_MAX_SUPPORTED_VERSION) - return ipl_type_unknown; - if (ipl->fcp.pbt != IPL_TYPE_FCP) - return ipl_type_unknown; - return ipl_type_fcp; + /* Only cosmetics */ + memblock_enforce_memory_limit(memblock_end_of_DRAM()); } -static ssize_t 
-ipl_type_show(struct subsystem *subsys, char *page) +/* + * Setup hardware capabilities. + */ +static void __init setup_hwcaps(void) { - switch (get_ipl_type()) { - case ipl_type_ccw: - return sprintf(page, "ccw\n"); - case ipl_type_fcp: - return sprintf(page, "fcp\n"); - default: - return sprintf(page, "unknown\n"); - } -} + static const int stfl_bits[6] = { 0, 2, 7, 17, 19, 21 }; + struct cpuid cpu_id; + int i; -static struct subsys_attribute ipl_type_attr = __ATTR_RO(ipl_type); + /* + * The store facility list bits numbers as found in the principles + * of operation are numbered with bit 1UL<<31 as number 0 to + * bit 1UL<<0 as number 31. + * Bit 0: instructions named N3, "backported" to esa-mode + * Bit 2: z/Architecture mode is active + * Bit 7: the store-facility-list-extended facility is installed + * Bit 17: the message-security assist is installed + * Bit 19: the long-displacement facility is installed + * Bit 21: the extended-immediate facility is installed + * Bit 22: extended-translation facility 3 is installed + * Bit 30: extended-translation facility 3 enhancement facility + * These get translated to: + * HWCAP_S390_ESAN3 bit 0, HWCAP_S390_ZARCH bit 1, + * HWCAP_S390_STFLE bit 2, HWCAP_S390_MSA bit 3, + * HWCAP_S390_LDISP bit 4, HWCAP_S390_EIMM bit 5 and + * HWCAP_S390_ETF3EH bit 8 (22 && 30). + */ + for (i = 0; i < 6; i++) + if (test_facility(stfl_bits[i])) + elf_hwcap |= 1UL << i; -static ssize_t -ipl_device_show(struct subsystem *subsys, char *page) -{ - struct ipl_parameter_block *ipl = IPL_PARMBLOCK_START; - - switch (get_ipl_type()) { - case ipl_type_ccw: - return sprintf(page, "0.0.%04x\n", ipl_devno); - case ipl_type_fcp: - return sprintf(page, "0.0.%04x\n", ipl->fcp.devno); - default: - return 0; + if (test_facility(22) && test_facility(30)) + elf_hwcap |= HWCAP_S390_ETF3EH; + + /* + * Check for additional facilities with store-facility-list-extended. + * stfle stores doublewords (8 byte) with bit 1ULL<<63 as bit 0 + * and 1ULL<<0 as bit 63. Bits 0-31 contain the same information + * as stored by stfl, bits 32-xxx contain additional facilities. + * How many facility words are stored depends on the number of + * doublewords passed to the instruction. The additional facilities + * are: + * Bit 42: decimal floating point facility is installed + * Bit 44: perform floating point operation facility is installed + * translated to: + * HWCAP_S390_DFP bit 6 (42 && 44). + */ + if ((elf_hwcap & (1UL << 2)) && test_facility(42) && test_facility(44)) + elf_hwcap |= HWCAP_S390_DFP; + + /* + * Huge page support HWCAP_S390_HPAGE is bit 7. + */ + if (MACHINE_HAS_HPAGE) + elf_hwcap |= HWCAP_S390_HPAGE; + +#if defined(CONFIG_64BIT) + /* + * 64-bit register support for 31-bit processes + * HWCAP_S390_HIGH_GPRS is bit 9. + */ + elf_hwcap |= HWCAP_S390_HIGH_GPRS; + + /* + * Transactional execution support HWCAP_S390_TE is bit 10. + */ + if (test_facility(50) && test_facility(73)) + elf_hwcap |= HWCAP_S390_TE; +#endif + + get_cpu_id(&cpu_id); + switch (cpu_id.machine) { + case 0x9672: +#if !defined(CONFIG_64BIT) + default: /* Use "g5" as default for 31 bit kernels. */ +#endif + strcpy(elf_platform, "g5"); + break; + case 0x2064: + case 0x2066: +#if defined(CONFIG_64BIT) + default: /* Use "z900" as default for 64 bit kernels. 
*/ +#endif + strcpy(elf_platform, "z900"); + break; + case 0x2084: + case 0x2086: + strcpy(elf_platform, "z990"); + break; + case 0x2094: + case 0x2096: + strcpy(elf_platform, "z9-109"); + break; + case 0x2097: + case 0x2098: + strcpy(elf_platform, "z10"); + break; + case 0x2817: + case 0x2818: + strcpy(elf_platform, "z196"); + break; + case 0x2827: + case 0x2828: + strcpy(elf_platform, "zEC12"); + break; } } -static struct subsys_attribute ipl_device_attr = - __ATTR(device, S_IRUGO, ipl_device_show, NULL); - -static struct attribute *ipl_fcp_attrs[] = { - &ipl_type_attr.attr, - &ipl_device_attr.attr, - &ipl_wwpn_attr.attr, - &ipl_lun_attr.attr, - &ipl_bootprog_attr.attr, - &ipl_br_lba_attr.attr, - NULL, -}; +/* + * Setup function called from init/main.c just after the banner + * was printed. + */ -static struct attribute_group ipl_fcp_attr_group = { - .attrs = ipl_fcp_attrs, -}; +void __init setup_arch(char **cmdline_p) +{ + /* + * print what head.S has found out about the machine + */ +#ifndef CONFIG_64BIT + if (MACHINE_IS_VM) + pr_info("Linux is running as a z/VM " + "guest operating system in 31-bit mode\n"); + else if (MACHINE_IS_LPAR) + pr_info("Linux is running natively in 31-bit mode\n"); + if (MACHINE_HAS_IEEE) + pr_info("The hardware system has IEEE compatible " + "floating point units\n"); + else + pr_info("The hardware system has no IEEE compatible " + "floating point units\n"); +#else /* CONFIG_64BIT */ + if (MACHINE_IS_VM) + pr_info("Linux is running as a z/VM " + "guest operating system in 64-bit mode\n"); + else if (MACHINE_IS_KVM) + pr_info("Linux is running under KVM in 64-bit mode\n"); + else if (MACHINE_IS_LPAR) + pr_info("Linux is running natively in 64-bit mode\n"); +#endif /* CONFIG_64BIT */ + + /* Have one command line that is parsed and saved in /proc/cmdline */ + /* boot_command_line has been already set up in early.c */ + *cmdline_p = boot_command_line; -static struct attribute *ipl_ccw_attrs[] = { - &ipl_type_attr.attr, - &ipl_device_attr.attr, - NULL, -}; + ROOT_DEV = Root_RAM0; -static struct attribute_group ipl_ccw_attr_group = { - .attrs = ipl_ccw_attrs, -}; + /* Is init_mm really needed? */ + init_mm.start_code = PAGE_OFFSET; + init_mm.end_code = (unsigned long) &_etext; + init_mm.end_data = (unsigned long) &_edata; + init_mm.brk = (unsigned long) &_end; -static struct attribute *ipl_unknown_attrs[] = { - &ipl_type_attr.attr, - NULL, -}; + parse_early_param(); + os_info_init(); + setup_ipl(); -static struct attribute_group ipl_unknown_attr_group = { - .attrs = ipl_unknown_attrs, -}; + /* Do some memory reservations *before* memory is added to memblock */ + reserve_memory_end(); + reserve_oldmem(); + reserve_kernel(); + reserve_initrd(); + reserve_elfcorehdr(); + memblock_allow_resize(); -static ssize_t -ipl_parameter_read(struct kobject *kobj, char *buf, loff_t off, size_t count) -{ - unsigned int size = IPL_PARMBLOCK_SIZE; + /* Get information about *all* installed memory */ + detect_memory_memblock(); - if (off > size) - return 0; - if (off + count > size) - count = size - off; + remove_oldmem(); - memcpy(buf, (void *) IPL_PARMBLOCK_START + off, count); - return count; -} + /* + * Make sure all chunks are MAX_ORDER aligned so we don't need the + * extra checks that HOLES_IN_ZONE would require. + * + * Is this still required? 
+ */ + memblock_trim_memory(1UL << (MAX_ORDER - 1 + PAGE_SHIFT)); -static struct bin_attribute ipl_parameter_attr = { - .attr = { - .name = "binary_parameter", - .mode = S_IRUGO, - .owner = THIS_MODULE, - }, - .size = PAGE_SIZE, - .read = &ipl_parameter_read, -}; + setup_memory_end(); + setup_memory(); -static ssize_t -ipl_scp_data_read(struct kobject *kobj, char *buf, loff_t off, size_t count) -{ - unsigned int size = IPL_PARMBLOCK_START->fcp.scp_data_len; - void *scp_data = &IPL_PARMBLOCK_START->fcp.scp_data; + check_initrd(); + reserve_crashkernel(); - if (off > size) - return 0; - if (off + count > size) - count = size - off; + setup_resources(); + setup_vmcoreinfo(); + setup_lowcore(); + smp_fill_possible_mask(); + cpu_init(); + s390_init_cpu_topology(); - memcpy(buf, scp_data + off, count); - return count; -} + /* + * Setup capabilities (ELF_HWCAP & ELF_PLATFORM). + */ + setup_hwcaps(); -static struct bin_attribute ipl_scp_data_attr = { - .attr = { - .name = "scp_data", - .mode = S_IRUGO, - .owner = THIS_MODULE, - }, - .size = PAGE_SIZE, - .read = &ipl_scp_data_read, -}; + /* + * Create kernel page tables and switch to virtual addressing. + */ + paging_init(); -static decl_subsys(ipl, NULL, NULL); + /* Setup default console */ + conmode_default(); + set_preferred_console(); -static int __init -ipl_device_sysfs_register(void) { - int rc; + /* Setup zfcpdump support */ + setup_zfcpdump(); +} - rc = firmware_register(&ipl_subsys); - if (rc) - return rc; +#ifdef CONFIG_32BIT +static int no_removal_warning __initdata; - switch (get_ipl_type()) { - case ipl_type_ccw: - sysfs_create_group(&ipl_subsys.kset.kobj, &ipl_ccw_attr_group); - break; - case ipl_type_fcp: - sysfs_create_group(&ipl_subsys.kset.kobj, &ipl_fcp_attr_group); - sysfs_create_bin_file(&ipl_subsys.kset.kobj, - &ipl_parameter_attr); - sysfs_create_bin_file(&ipl_subsys.kset.kobj, - &ipl_scp_data_attr); - break; - default: - sysfs_create_group(&ipl_subsys.kset.kobj, - &ipl_unknown_attr_group); - break; - } +static int __init parse_no_removal_warning(char *str) +{ + no_removal_warning = 1; return 0; } +__setup("no_removal_warning", parse_no_removal_warning); -__initcall(ipl_device_sysfs_register); +static int __init removal_warning(void) +{ + if (no_removal_warning) + return 0; + printk(KERN_ALERT "\n\n"); + printk(KERN_CONT "Warning - you are using a 31 bit kernel!\n\n"); + printk(KERN_CONT "We plan to remove 31 bit kernel support from the kernel sources in March 2015.\n"); + printk(KERN_CONT "Currently we assume that nobody is using the 31 bit kernel on old 31 bit\n"); + printk(KERN_CONT "hardware anymore. If you think that the code should not be removed and also\n"); + printk(KERN_CONT "future versions of the Linux kernel should be able to run in 31 bit mode\n"); + printk(KERN_CONT "please let us know. Please write to:\n"); + printk(KERN_CONT "linux390@de.ibm.com (mail address) and/or\n"); + printk(KERN_CONT "linux-s390@vger.kernel.org (mailing list).\n\n"); + printk(KERN_CONT "Thank you!\n\n"); + printk(KERN_CONT "If this kernel runs on a 64 bit machine you may consider using a 64 bit kernel.\n"); + printk(KERN_CONT "This message can be disabled with the \"no_removal_warning\" kernel parameter.\n"); + schedule_timeout_uninterruptible(300 * HZ); + return 0; +} +early_initcall(removal_warning); +#endif |
