Diffstat (limited to 'arch/s390/mm/init.c')
-rw-r--r--  arch/s390/mm/init.c  267
1 file changed, 136 insertions(+), 131 deletions(-)
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 8053245fe25..0c1073ed1e8 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -1,8 +1,6 @@
/*
- * arch/s390/mm/init.c
- *
* S390 version
- * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Copyright IBM Corp. 1999
* Author(s): Hartmut Penner (hp@de.ibm.com)
*
* Derived from "arch/i386/mm/init.c"
@@ -23,11 +21,13 @@
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
+#include <linux/memory.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/initrd.h>
+#include <linux/export.h>
+#include <linux/gfp.h>
#include <asm/processor.h>
-#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
@@ -36,68 +36,60 @@
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
-
-DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
+#include <asm/ctl_reg.h>
+#include <asm/sclp.h>
pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((__aligned__(PAGE_SIZE)));
-char empty_zero_page[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
-void show_mem(void)
+unsigned long empty_zero_page, zero_page_mask;
+EXPORT_SYMBOL(empty_zero_page);
+
+static void __init setup_zero_pages(void)
{
- int i, total = 0, reserved = 0;
- int shared = 0, cached = 0;
+ struct cpuid cpu_id;
+ unsigned int order;
struct page *page;
+ int i;
- printk("Mem-info:\n");
- show_free_areas();
- printk("Free swap: %6ldkB\n", nr_swap_pages << (PAGE_SHIFT - 10));
- i = max_mapnr;
- while (i-- > 0) {
- if (!pfn_valid(i))
- continue;
- page = pfn_to_page(i);
- total++;
- if (PageReserved(page))
- reserved++;
- else if (PageSwapCache(page))
- cached++;
- else if (page_count(page))
- shared += page_count(page) - 1;
+ get_cpu_id(&cpu_id);
+ switch (cpu_id.machine) {
+ case 0x9672: /* g5 */
+ case 0x2064: /* z900 */
+ case 0x2066: /* z900 */
+ case 0x2084: /* z990 */
+ case 0x2086: /* z990 */
+ case 0x2094: /* z9-109 */
+ case 0x2096: /* z9-109 */
+ order = 0;
+ break;
+ case 0x2097: /* z10 */
+ case 0x2098: /* z10 */
+ case 0x2817: /* z196 */
+ case 0x2818: /* z196 */
+ order = 2;
+ break;
+ case 0x2827: /* zEC12 */
+ case 0x2828: /* zEC12 */
+ default:
+ order = 5;
+ break;
}
- printk("%d pages of RAM\n", total);
- printk("%d reserved pages\n", reserved);
- printk("%d pages shared\n", shared);
- printk("%d pages swap cached\n", cached);
-
- printk("%lu pages dirty\n", global_page_state(NR_FILE_DIRTY));
- printk("%lu pages writeback\n", global_page_state(NR_WRITEBACK));
- printk("%lu pages mapped\n", global_page_state(NR_FILE_MAPPED));
- printk("%lu pages slab\n",
- global_page_state(NR_SLAB_RECLAIMABLE) +
- global_page_state(NR_SLAB_UNRECLAIMABLE));
- printk("%lu pages pagetables\n", global_page_state(NR_PAGETABLE));
-}
-
-static void __init setup_ro_region(void)
-{
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
- pte_t *pte;
- pte_t new_pte;
- unsigned long address, end;
-
- address = ((unsigned long)&_stext) & PAGE_MASK;
- end = PFN_ALIGN((unsigned long)&_eshared);
-
- for (; address < end; address += PAGE_SIZE) {
- pgd = pgd_offset_k(address);
- pud = pud_offset(pgd, address);
- pmd = pmd_offset(pud, address);
- pte = pte_offset_kernel(pmd, address);
- new_pte = mk_pte_phys(address, __pgprot(_PAGE_RO));
- *pte = new_pte;
+ /* Limit number of empty zero pages for small memory sizes */
+ if (order > 2 && totalram_pages <= 16384)
+ order = 2;
+
+ empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
+ if (!empty_zero_page)
+ panic("Out of memory in setup_zero_pages");
+
+ page = virt_to_page((void *) empty_zero_page);
+ split_page(page, order);
+ for (i = 1 << order; i > 0; i--) {
+ mark_page_reserved(page);
+ page++;
}
+
+ zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
}
/*
@@ -105,122 +97,135 @@ static void __init setup_ro_region(void)
*/
void __init paging_init(void)
{
- static const int ssm_mask = 0x04000000L;
unsigned long max_zone_pfns[MAX_NR_ZONES];
- unsigned long pgd_type;
+ unsigned long pgd_type, asce_bits;
init_mm.pgd = swapper_pg_dir;
- S390_lowcore.kernel_asce = __pa(init_mm.pgd) & PAGE_MASK;
#ifdef CONFIG_64BIT
- /* A three level page table (4TB) is enough for the kernel space. */
- S390_lowcore.kernel_asce |= _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
- pgd_type = _REGION3_ENTRY_EMPTY;
+ if (VMALLOC_END > (1UL << 42)) {
+ asce_bits = _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
+ pgd_type = _REGION2_ENTRY_EMPTY;
+ } else {
+ asce_bits = _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
+ pgd_type = _REGION3_ENTRY_EMPTY;
+ }
#else
- S390_lowcore.kernel_asce |= _ASCE_TABLE_LENGTH;
+ asce_bits = _ASCE_TABLE_LENGTH;
pgd_type = _SEGMENT_ENTRY_EMPTY;
#endif
+ S390_lowcore.kernel_asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits;
clear_table((unsigned long *) init_mm.pgd, pgd_type,
sizeof(unsigned long)*2048);
vmem_map_init();
- setup_ro_region();
/* enable virtual mapping in kernel mode */
__ctl_load(S390_lowcore.kernel_asce, 1, 1);
__ctl_load(S390_lowcore.kernel_asce, 7, 7);
__ctl_load(S390_lowcore.kernel_asce, 13, 13);
- __raw_local_irq_ssm(ssm_mask);
+ arch_local_irq_restore(4UL << (BITS_PER_LONG - 8));
+ sparse_memory_present_with_active_regions(MAX_NUMNODES);
+ sparse_init();
memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
-#ifdef CONFIG_ZONE_DMA
max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS);
-#endif
max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
free_area_init_nodes(max_zone_pfns);
}
void __init mem_init(void)
{
- unsigned long codesize, reservedpages, datasize, initsize;
+ if (MACHINE_HAS_TLB_LC)
+ cpumask_set_cpu(0, &init_mm.context.cpu_attach_mask);
+ cpumask_set_cpu(0, mm_cpumask(&init_mm));
+ atomic_set(&init_mm.context.attach_count, 1);
- max_mapnr = num_physpages = max_low_pfn;
+ max_mapnr = max_low_pfn;
high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
- /* clear the zero-page */
- memset(empty_zero_page, 0, PAGE_SIZE);
+ /* Setup guest page hinting */
+ cmma_init();
/* this will put all low memory onto the freelists */
- totalram_pages += free_all_bootmem();
-
- reservedpages = 0;
-
- codesize = (unsigned long) &_etext - (unsigned long) &_text;
- datasize = (unsigned long) &_edata - (unsigned long) &_etext;
- initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
- printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, %ldk data, %ldk init)\n",
- (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
- max_mapnr << (PAGE_SHIFT-10),
- codesize >> 10,
- reservedpages << (PAGE_SHIFT-10),
- datasize >>10,
- initsize >> 10);
+ free_all_bootmem();
+ setup_zero_pages(); /* Setup zeroed pages. */
+
+ mem_init_print_info(NULL);
printk("Write protected kernel read-only data: %#lx - %#lx\n",
(unsigned long)&_stext,
PFN_ALIGN((unsigned long)&_eshared) - 1);
}
-#ifdef CONFIG_DEBUG_PAGEALLOC
-void kernel_map_pages(struct page *page, int numpages, int enable)
+void free_initmem(void)
+{
+ free_initmem_default(POISON_FREE_INITMEM);
+}
+
+#ifdef CONFIG_BLK_DEV_INITRD
+void __init free_initrd_mem(unsigned long start, unsigned long end)
{
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
- pte_t *pte;
- unsigned long address;
- int i;
+ free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM,
+ "initrd");
+}
+#endif
- for (i = 0; i < numpages; i++) {
- address = page_to_phys(page + i);
- pgd = pgd_offset_k(address);
- pud = pud_offset(pgd, address);
- pmd = pmd_offset(pud, address);
- pte = pte_offset_kernel(pmd, address);
- if (!enable) {
- ptep_invalidate(&init_mm, address, pte);
- continue;
+#ifdef CONFIG_MEMORY_HOTPLUG
+int arch_add_memory(int nid, u64 start, u64 size)
+{
+ unsigned long zone_start_pfn, zone_end_pfn, nr_pages;
+ unsigned long start_pfn = PFN_DOWN(start);
+ unsigned long size_pages = PFN_DOWN(size);
+ struct zone *zone;
+ int rc;
+
+ rc = vmem_add_mapping(start, size);
+ if (rc)
+ return rc;
+ for_each_zone(zone) {
+ if (zone_idx(zone) != ZONE_MOVABLE) {
+ /* Add range within existing zone limits */
+ zone_start_pfn = zone->zone_start_pfn;
+ zone_end_pfn = zone->zone_start_pfn +
+ zone->spanned_pages;
+ } else {
+ /* Add remaining range to ZONE_MOVABLE */
+ zone_start_pfn = start_pfn;
+ zone_end_pfn = start_pfn + size_pages;
}
- *pte = mk_pte_phys(address, __pgprot(_PAGE_TYPE_RW));
- /* Flush cpu write queue. */
- mb();
+ if (start_pfn < zone_start_pfn || start_pfn >= zone_end_pfn)
+ continue;
+ nr_pages = (start_pfn + size_pages > zone_end_pfn) ?
+ zone_end_pfn - start_pfn : size_pages;
+ rc = __add_pages(nid, zone, start_pfn, nr_pages);
+ if (rc)
+ break;
+ start_pfn += nr_pages;
+ size_pages -= nr_pages;
+ if (!size_pages)
+ break;
}
+ if (rc)
+ vmem_remove_mapping(start, size);
+ return rc;
}
-#endif
-void free_initmem(void)
+unsigned long memory_block_size_bytes(void)
{
- unsigned long addr;
-
- addr = (unsigned long)(&__init_begin);
- for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
- ClearPageReserved(virt_to_page(addr));
- init_page_count(virt_to_page(addr));
- memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
- free_page(addr);
- totalram_pages++;
- }
- printk ("Freeing unused kernel memory: %ldk freed\n",
- ((unsigned long)&__init_end - (unsigned long)&__init_begin) >> 10);
+ /*
+ * Make sure the memory block size is always greater than
+ * or equal to the memory increment size.
+ */
+ return max_t(unsigned long, MIN_MEMORY_BLOCK_SIZE, sclp_get_rzm());
}
-#ifdef CONFIG_BLK_DEV_INITRD
-void free_initrd_mem(unsigned long start, unsigned long end)
+#ifdef CONFIG_MEMORY_HOTREMOVE
+int arch_remove_memory(u64 start, u64 size)
{
- if (start < end)
- printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
- for (; start < end; start += PAGE_SIZE) {
- ClearPageReserved(virt_to_page(start));
- init_page_count(virt_to_page(start));
- free_page(start);
- totalram_pages++;
- }
+ /*
+ * There is no hardware or firmware interface which could trigger a
+ * hot memory remove on s390. So there is nothing that needs to be
+ * implemented.
+ */
+ return -EBUSY;
}
#endif
+#endif /* CONFIG_MEMORY_HOTPLUG */
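
Note on the zero-page rework above: setup_zero_pages() now allocates
2^order physically contiguous zeroed pages instead of the single static
empty_zero_page array, and zero_page_mask lets callers pick one of them
by virtual address, spreading heavy zero-page readers across cache
colors. With order = 5 and 4 KB pages,
zero_page_mask = ((4096 << 5) - 1) & PAGE_MASK = 0x1f000, so address
bits 12..16 select one of 32 zero pages. The companion ZERO_PAGE()
definition lives in arch/s390/include/asm/pgtable.h and is not part of
this diff; a sketch of how the mask is consumed:

/* Sketch only, not part of this patch: how ZERO_PAGE() uses the mask. */
#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + \
	 (((unsigned long)(vaddr)) & zero_page_mask))))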