author    Michael Holzheu <holzheu@linux.vnet.ibm.com>    2011-10-30 15:16:40 +0100
committer Martin Schwidefsky <schwidefsky@de.ibm.com>     2011-10-30 15:16:42 +0100
commit    60a0c68df2632feaa4a986af084650d1165d89c5 (patch)
tree      f55907defeab43de02a5a3127c8d5a694a21b3a2 /arch/s390/kernel/setup.c
parent    7f0bf656c66e4292e965c95fd9de55c72b6578bb (diff)
[S390] kdump backend code
This patch provides the architecture-specific part of the s390 kdump support.

Signed-off-by: Michael Holzheu <holzheu@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Diffstat (limited to 'arch/s390/kernel/setup.c')
-rw-r--r--    arch/s390/kernel/setup.c | 208
1 file changed, 208 insertions(+), 0 deletions(-)
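
As a quick, standalone illustration of the placement rules that the new find_crash_base()/verify_crash_base() code below enforces (a minimal sketch only, not part of the patch; the mem_chunk array and CHUNK_* types are replaced by local stand-ins, and the memory layout values are hypothetical):

#include <stdio.h>

struct chunk { unsigned long addr, size; int rw; };

static int fits(const struct chunk *c, int n, unsigned long base, unsigned long size)
{
	int i;

	/* need at least 'size' bytes free below 'base' (the swap-to-zero rule) */
	if (size > base)
		return 0;
	for (i = 0; i < n; i++) {
		if (!c[i].size || base < c[i].addr ||
		    base >= c[i].addr + c[i].size)
			continue;
		/* found the chunk containing 'base': it must be read-write
		 * and large enough to hold the whole reservation */
		return c[i].rw && base + size <= c[i].addr + c[i].size;
	}
	return 0;	/* 'base' is not inside any chunk */
}

int main(void)
{
	/* hypothetical layout: two read-write chunks, 0-256MB and 256-512MB */
	const struct chunk mem[] = {
		{ 0,           256UL << 20, 1 },
		{ 256UL << 20, 256UL << 20, 1 },
	};

	printf("128MB at 384MB: %s\n",
	       fits(mem, 2, 384UL << 20, 128UL << 20) ? "ok" : "rejected");
	printf("128MB at 64MB:  %s\n",
	       fits(mem, 2, 64UL << 20, 128UL << 20) ? "ok" : "rejected");
	return 0;
}

The second case is rejected because the reservation must leave at least its own size free below it: that low region is swapped with the crashkernel area when the kdump kernel is started.
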
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 7b371c37061..b5a30412b2e 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -42,6 +42,9 @@
#include <linux/reboot.h>
#include <linux/topology.h>
#include <linux/ftrace.h>
+#include <linux/kexec.h>
+#include <linux/crash_dump.h>
+#include <linux/memory.h>
#include <asm/ipl.h>
#include <asm/uaccess.h>
@@ -57,6 +60,7 @@
#include <asm/ebcdic.h>
#include <asm/compat.h>
#include <asm/kvm_virtio.h>
+#include <asm/diag.h>
long psw_kernel_bits = (PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_PRIMARY |
PSW_MASK_MCHECK | PSW_DEFAULT_KEY);
@@ -435,6 +439,9 @@ static void __init setup_resources(void)
for (i = 0; i < MEMORY_CHUNKS; i++) {
if (!memory_chunk[i].size)
continue;
+ if (memory_chunk[i].type == CHUNK_OLDMEM ||
+ memory_chunk[i].type == CHUNK_CRASHK)
+ continue;
res = alloc_bootmem_low(sizeof(*res));
res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
switch (memory_chunk[i].type) {
@@ -479,6 +486,7 @@ static void __init setup_memory_end(void)
unsigned long max_mem;
int i;
+
#ifdef CONFIG_ZFCPDUMP
if (ipl_info.type == IPL_TYPE_FCP_DUMP) {
memory_end = ZFCPDUMP_HSA_SIZE;
@@ -550,6 +558,187 @@ static void __init setup_restart_psw(void)
copy_to_absolute_zero(&S390_lowcore.restart_psw, &psw, sizeof(psw));
}
+#ifdef CONFIG_CRASH_DUMP
+
+/*
+ * Find suitable location for crashkernel memory
+ */
+static unsigned long __init find_crash_base(unsigned long crash_size,
+ char **msg)
+{
+ unsigned long crash_base;
+ struct mem_chunk *chunk;
+ int i;
+
+ if (memory_chunk[0].size < crash_size) {
+ *msg = "first memory chunk must be at least crashkernel size";
+ return 0;
+ }
+ if (is_kdump_kernel() && (crash_size == OLDMEM_SIZE))
+ return OLDMEM_BASE;
+
+ for (i = MEMORY_CHUNKS - 1; i >= 0; i--) {
+ chunk = &memory_chunk[i];
+ if (chunk->size == 0)
+ continue;
+ if (chunk->type != CHUNK_READ_WRITE)
+ continue;
+ if (chunk->size < crash_size)
+ continue;
+ crash_base = (chunk->addr + chunk->size) - crash_size;
+ if (crash_base < crash_size)
+ continue;
+ if (crash_base < ZFCPDUMP_HSA_SIZE_MAX)
+ continue;
+ if (crash_base < (unsigned long) INITRD_START + INITRD_SIZE)
+ continue;
+ return crash_base;
+ }
+ *msg = "no suitable area found";
+ return 0;
+}
+
+/*
+ * Check if crash_base and crash_size is valid
+ */
+static int __init verify_crash_base(unsigned long crash_base,
+ unsigned long crash_size,
+ char **msg)
+{
+ struct mem_chunk *chunk;
+ int i;
+
+ /*
+ * Because we do the swap to zero, we must have at least 'crash_size'
+ * bytes free space before crash_base
+ */
+ if (crash_size > crash_base) {
+ *msg = "crashkernel offset must be greater than size";
+ return -EINVAL;
+ }
+
+ /* First memory chunk must be at least crash_size */
+ if (memory_chunk[0].size < crash_size) {
+ *msg = "first memory chunk must be at least crashkernel size";
+ return -EINVAL;
+ }
+ /* Check if we fit into the respective memory chunk */
+ for (i = 0; i < MEMORY_CHUNKS; i++) {
+ chunk = &memory_chunk[i];
+ if (chunk->size == 0)
+ continue;
+ if (crash_base < chunk->addr)
+ continue;
+ if (crash_base >= chunk->addr + chunk->size)
+ continue;
+ /* we have found the memory chunk */
+ if (crash_base + crash_size > chunk->addr + chunk->size) {
+ *msg = "selected memory chunk is too small for "
+ "crashkernel memory";
+ return -EINVAL;
+ }
+ return 0;
+ }
+ *msg = "invalid memory range specified";
+ return -EINVAL;
+}
+
+/*
+ * Reserve kdump memory by creating a memory hole in the mem_chunk array
+ */
+static void __init reserve_kdump_bootmem(unsigned long addr, unsigned long size,
+ int type)
+{
+
+ create_mem_hole(memory_chunk, addr, size, type);
+}
+
+/*
+ * When kdump is enabled, we have to ensure that no memory from
+ * the area [0 - crashkernel memory size] and
+ * [crashk_res.start - crashk_res.end] is set offline.
+ */
+static int kdump_mem_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct memory_notify *arg = data;
+
+ if (arg->start_pfn < PFN_DOWN(resource_size(&crashk_res)))
+ return NOTIFY_BAD;
+ if (arg->start_pfn > PFN_DOWN(crashk_res.end))
+ return NOTIFY_OK;
+ if (arg->start_pfn + arg->nr_pages - 1 < PFN_DOWN(crashk_res.start))
+ return NOTIFY_OK;
+ return NOTIFY_BAD;
+}
+
+static struct notifier_block kdump_mem_nb = {
+ .notifier_call = kdump_mem_notifier,
+};
+
+#endif
+
+/*
+ * Make sure that oldmem, where the dump is stored, is protected
+ */
+static void reserve_oldmem(void)
+{
+#ifdef CONFIG_CRASH_DUMP
+ if (!OLDMEM_BASE)
+ return;
+
+ reserve_kdump_bootmem(OLDMEM_BASE, OLDMEM_SIZE, CHUNK_OLDMEM);
+ reserve_kdump_bootmem(OLDMEM_SIZE, memory_end - OLDMEM_SIZE,
+ CHUNK_OLDMEM);
+ if (OLDMEM_BASE + OLDMEM_SIZE == real_memory_size)
+ saved_max_pfn = PFN_DOWN(OLDMEM_BASE) - 1;
+ else
+ saved_max_pfn = PFN_DOWN(real_memory_size) - 1;
+#endif
+}
+
+/*
+ * Reserve memory for kdump kernel to be loaded with kexec
+ */
+static void __init reserve_crashkernel(void)
+{
+#ifdef CONFIG_CRASH_DUMP
+ unsigned long long crash_base, crash_size;
+ char *msg;
+ int rc;
+
+ rc = parse_crashkernel(boot_command_line, memory_end, &crash_size,
+ &crash_base);
+ if (rc || crash_size == 0)
+ return;
+ crash_base = PAGE_ALIGN(crash_base);
+ crash_size = PAGE_ALIGN(crash_size);
+ if (register_memory_notifier(&kdump_mem_nb))
+ return;
+ if (!crash_base)
+ crash_base = find_crash_base(crash_size, &msg);
+ if (!crash_base) {
+ pr_info("crashkernel reservation failed: %s\n", msg);
+ unregister_memory_notifier(&kdump_mem_nb);
+ return;
+ }
+ if (verify_crash_base(crash_base, crash_size, &msg)) {
+ pr_info("crashkernel reservation failed: %s\n", msg);
+ unregister_memory_notifier(&kdump_mem_nb);
+ return;
+ }
+ if (!OLDMEM_BASE && MACHINE_IS_VM)
+ diag10_range(PFN_DOWN(crash_base), PFN_DOWN(crash_size));
+ crashk_res.start = crash_base;
+ crashk_res.end = crash_base + crash_size - 1;
+ insert_resource(&iomem_resource, &crashk_res);
+ reserve_kdump_bootmem(crash_base, crash_size, CHUNK_READ_WRITE);
+ pr_info("Reserving %lluMB of memory at %lluMB "
+ "for crashkernel (System RAM: %luMB)\n",
+ crash_size >> 20, crash_base >> 20, memory_end >> 20);
+#endif
+}
+
static void __init
setup_memory(void)
{
@@ -580,6 +769,14 @@ setup_memory(void)
if (PFN_PHYS(start_pfn) + bmap_size > INITRD_START) {
start = PFN_PHYS(start_pfn) + bmap_size + PAGE_SIZE;
+#ifdef CONFIG_CRASH_DUMP
+ if (OLDMEM_BASE) {
+ /* Move initrd behind kdump oldmem */
+ if (start + INITRD_SIZE > OLDMEM_BASE &&
+ start < OLDMEM_BASE + OLDMEM_SIZE)
+ start = OLDMEM_BASE + OLDMEM_SIZE;
+ }
+#endif
if (start + INITRD_SIZE > memory_end) {
pr_err("initrd extends beyond end of "
"memory (0x%08lx > 0x%08lx) "
@@ -644,6 +841,15 @@ setup_memory(void)
reserve_bootmem(start_pfn << PAGE_SHIFT, bootmap_size,
BOOTMEM_DEFAULT);
+#ifdef CONFIG_CRASH_DUMP
+ if (crashk_res.start)
+ reserve_bootmem(crashk_res.start,
+ crashk_res.end - crashk_res.start + 1,
+ BOOTMEM_DEFAULT);
+ if (is_kdump_kernel())
+ reserve_bootmem(elfcorehdr_addr - OLDMEM_BASE,
+ PAGE_ALIGN(elfcorehdr_size), BOOTMEM_DEFAULT);
+#endif
#ifdef CONFIG_BLK_DEV_INITRD
if (INITRD_START && INITRD_SIZE) {
if (INITRD_START + INITRD_SIZE <= memory_end) {
@@ -812,6 +1018,8 @@ setup_arch(char **cmdline_p)
setup_ipl();
setup_memory_end();
setup_addressing_mode();
+ reserve_oldmem();
+ reserve_crashkernel();
setup_memory();
setup_resources();
setup_restart_psw();
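
A worked example of the range check performed by kdump_mem_notifier() above: memory blocks that overlap either the low [0, crash_size) area (swapped with the crashkernel region when the kdump kernel starts) or the reservation itself must not be set offline. The following standalone sketch uses hypothetical values; NOTIFY_OK/NOTIFY_BAD are modelled as 1/0 and PFN_DOWN assumes 4KB pages:

#include <stdio.h>

#define PAGE_SHIFT 12				/* 4KB pages, as on s390 */
#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)

/* returns 1 if the block [start_pfn, start_pfn + nr_pages) may be set
 * offline, 0 if it must stay online (mirrors kdump_mem_notifier) */
static int may_offline(unsigned long start_pfn, unsigned long nr_pages,
		       unsigned long crash_start, unsigned long crash_end)
{
	unsigned long crash_size = crash_end - crash_start + 1;

	/* [0, crash_size) is swapped with the crashkernel area on kdump entry */
	if (start_pfn < PFN_DOWN(crash_size))
		return 0;
	/* entirely above or entirely below the reservation is fine */
	if (start_pfn > PFN_DOWN(crash_end))
		return 1;
	if (start_pfn + nr_pages - 1 < PFN_DOWN(crash_start))
		return 1;
	return 0;
}

int main(void)
{
	/* hypothetical 128MB reservation at 384MB */
	unsigned long start = 384UL << 20;
	unsigned long end = start + (128UL << 20) - 1;
	unsigned long blk = 32768;		/* 128MB block, in 4KB pages */

	printf("block at 256MB: %s\n",
	       may_offline(PFN_DOWN(256UL << 20), blk, start, end) ?
	       "may offline" : "keep online");
	printf("block at 400MB: %s\n",
	       may_offline(PFN_DOWN(400UL << 20), blk, start, end) ?
	       "may offline" : "keep online");
	return 0;
}

With a 128MB reservation at 384MB, a 128MB block starting at 256MB ends just below the reservation and may go offline, while one starting at 400MB overlaps it and is refused.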