113 files changed, 4408 insertions, 1632 deletions
diff --git a/Documentation/ABI/testing/sysfs-kernel-mm-ksm b/Documentation/ABI/testing/sysfs-kernel-mm-ksm
new file mode 100644
index 00000000000..73e653ee248
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-kernel-mm-ksm
@@ -0,0 +1,52 @@
+What:		/sys/kernel/mm/ksm
+Date:		September 2009
+KernelVersion:	2.6.32
+Contact:	Linux memory management mailing list <linux-mm@kvack.org>
+Description:	Interface for Kernel Samepage Merging (KSM)
+
+What:		/sys/kernel/mm/ksm/full_scans
+What:		/sys/kernel/mm/ksm/pages_shared
+What:		/sys/kernel/mm/ksm/pages_sharing
+What:		/sys/kernel/mm/ksm/pages_to_scan
+What:		/sys/kernel/mm/ksm/pages_unshared
+What:		/sys/kernel/mm/ksm/pages_volatile
+What:		/sys/kernel/mm/ksm/run
+What:		/sys/kernel/mm/ksm/sleep_millisecs
+Date:		September 2009
+Contact:	Linux memory management mailing list <linux-mm@kvack.org>
+Description:	Kernel Samepage Merging daemon sysfs interface
+
+		full_scans: how many times all mergeable areas have been
+		scanned.
+
+		pages_shared: how many shared pages are being used.
+
+		pages_sharing: how many more sites are sharing them i.e. how
+		much saved.
+
+		pages_to_scan: how many present pages to scan before ksmd goes
+		to sleep.
+
+		pages_unshared: how many pages unique but repeatedly checked
+		for merging.
+
+		pages_volatile: how many pages changing too fast to be placed
+		in a tree.
+
+		run: write 0 to disable ksm, read 0 while ksm is disabled.
+		write 1 to run ksm, read 1 while ksm is running.
+		write 2 to disable ksm and unmerge all its pages.
+
+		sleep_millisecs: how many milliseconds ksm should sleep between
+		scans.
+
+		See Documentation/vm/ksm.txt for more information.
+
+What:		/sys/kernel/mm/ksm/merge_across_nodes
+Date:		January 2013
+KernelVersion:	3.9
+Contact:	Linux memory management mailing list <linux-mm@kvack.org>
+Description:	Control merging pages across different NUMA nodes.
+
+		When it is set to 0 only pages from the same node are merged,
+		otherwise pages from all nodes can be merged together (default).
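As an aside, here is a minimal userspace sketch of how the read-only counters in this new ABI file might be sampled. The sysfs paths come from the entries above; the read_ksm_value() helper and the program itself are illustrative only and are not part of this patch.

/* Illustrative only: sample a few KSM counters from the sysfs ABI above. */
#include <stdio.h>

static long read_ksm_value(const char *name)
{
	char path[256];
	long value = -1;
	FILE *f;

	/* All files live under /sys/kernel/mm/ksm/ per the ABI entries. */
	snprintf(path, sizeof(path), "/sys/kernel/mm/ksm/%s", name);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (fscanf(f, "%ld", &value) != 1)
		value = -1;
	fclose(f);
	return value;
}

int main(void)
{
	printf("pages_shared:  %ld\n", read_ksm_value("pages_shared"));
	printf("pages_sharing: %ld\n", read_ksm_value("pages_sharing"));
	printf("full_scans:    %ld\n", read_ksm_value("full_scans"));
	return 0;
}

A high pages_sharing to pages_shared ratio indicates effective sharing.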
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 9aa8ff3e54d..766087781ec 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1640,6 +1640,42 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			that the amount of memory usable for all allocations
 			is not too small.
 
+	movablemem_map=acpi
+			[KNL,X86,IA-64,PPC] This parameter is similar to
+			memmap except it specifies the memory map of
+			ZONE_MOVABLE.
+			This option informs the kernel to use the Hot Pluggable
+			bit in the SRAT flags from the ACPI BIOS to determine
+			which memory devices could be hotplugged. The
+			corresponding memory ranges will be set as ZONE_MOVABLE.
+			NOTE: Whatever node the kernel resides in will always
+			be un-hotpluggable.
+
+	movablemem_map=nn[KMG]@ss[KMG]
+			[KNL,X86,IA-64,PPC] This parameter is similar to
+			memmap except it specifies the memory map of
+			ZONE_MOVABLE.
+			If the user specifies memory ranges, the info in SRAT
+			will be ignored. It works as follows:
+			- If several ranges are all within one node, then from
+			  the lowest ss to the end of the node will be ZONE_MOVABLE.
+			- If a range is within a node, then from ss to the end
+			  of the node will be ZONE_MOVABLE.
+			- If a range covers two or more nodes, then from ss to
+			  the end of the 1st node will be ZONE_MOVABLE, and all
+			  the remaining nodes will only have ZONE_MOVABLE.
+			If memmap is specified at the same time, the
+			movablemem_map will be limited within the memmap
+			areas. If kernelcore or movablecore is also specified,
+			movablemem_map will have higher priority to be
+			satisfied. So the administrator should be careful that
+			the amount of movablemem_map areas is not too large.
+			Otherwise the kernel won't have enough memory to start.
+			NOTE: We don't stop users specifying the node the
+			kernel resides in as hotpluggable, so that this
+			option can be used as a workaround for firmware
+			bugs.
+
 	MTD_Partition=	[MTD] Format: <name>,<region-number>,<size>,<offset>
diff --git a/Documentation/vm/ksm.txt b/Documentation/vm/ksm.txt
index b392e496f81..f34a8ee6f86 100644
--- a/Documentation/vm/ksm.txt
+++ b/Documentation/vm/ksm.txt
@@ -58,6 +58,21 @@ sleep_millisecs - how many milliseconds ksmd should sleep before next scan
                    e.g. "echo 20 > /sys/kernel/mm/ksm/sleep_millisecs"
                    Default: 20 (chosen for demonstration purposes)
 
+merge_across_nodes - specifies if pages from different NUMA nodes can be merged.
+                   When set to 0, ksm merges only pages which physically
+                   reside in the memory area of the same NUMA node. That brings
+                   lower latency to access of shared pages. Systems with more
+                   nodes, at significant NUMA distances, are likely to benefit
+                   from the lower latency of setting 0. Smaller systems, which
+                   need to minimize memory usage, are likely to benefit from
+                   the greater sharing of setting 1 (default). You may wish to
+                   compare how your system performs under each setting, before
+                   deciding on which to use. The merge_across_nodes setting can
+                   be changed only when there are no ksm shared pages in the
+                   system: set run to 2 to unmerge pages first, then to 1 after
+                   changing merge_across_nodes, to remerge according to the new
+                   setting. Default: 1 (merging across nodes as in earlier releases)
+
 run              - set 0 to stop ksmd from running but keep merged pages,
                    set 1 to run ksmd e.g. "echo 1 > /sys/kernel/mm/ksm/run",
                    set 2 to stop ksmd and unmerge all pages currently merged,
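Since the ksm.txt text above spells out a specific ordering for changing merge_across_nodes (unmerge first, change the knob, then restart ksmd), here is a small sketch of that sequence. The write_ksm_value() helper is hypothetical, the program must run as root, and none of this code is part of the patch.

/* Illustrative only: follow the documented merge_across_nodes procedure. */
#include <stdio.h>

static int write_ksm_value(const char *name, int value)
{
	char path[256];
	FILE *f;

	snprintf(path, sizeof(path), "/sys/kernel/mm/ksm/%s", name);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fprintf(f, "%d\n", value);
	return fclose(f);
}

int main(void)
{
	/* Step 1: "run" = 2 stops ksmd and unmerges all shared pages. */
	if (write_ksm_value("run", 2))
		return 1;
	/* Step 2: change the policy while no shared pages exist. */
	if (write_ksm_value("merge_across_nodes", 0))
		return 1;
	/* Step 3: "run" = 1 restarts ksmd, remerging under the new setting. */
	return write_ksm_value("run", 1) ? 1 : 0;
}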
"echo 1 > /sys/kernel/mm/ksm/run", set 2 to stop ksmd and unmerge all pages currently merged, diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index f4dd585898c..224b44ab534 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -434,4 +434,7 @@ int __meminit vmemmap_populate(struct page *start_page, return 0; } #endif /* CONFIG_ARM64_64K_PAGES */ +void vmemmap_free(struct page *memmap, unsigned long nr_pages) +{ +} #endif /* CONFIG_SPARSEMEM_VMEMMAP */ diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c index 1516d1dc11f..80dab509dfb 100644 --- a/arch/ia64/mm/contig.c +++ b/arch/ia64/mm/contig.c @@ -93,7 +93,7 @@ void show_mem(unsigned int filter) printk(KERN_INFO "%d pages swap cached\n", total_cached); printk(KERN_INFO "Total of %ld pages in page table cache\n", quicklist_total_size()); - printk(KERN_INFO "%d free buffer pages\n", nr_free_buffer_pages()); + printk(KERN_INFO "%ld free buffer pages\n", nr_free_buffer_pages()); } diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c index c641333cd99..c2e955ee79a 100644 --- a/arch/ia64/mm/discontig.c +++ b/arch/ia64/mm/discontig.c @@ -666,7 +666,7 @@ void show_mem(unsigned int filter) printk(KERN_INFO "%d pages swap cached\n", total_cached); printk(KERN_INFO "Total of %ld pages in page table cache\n", quicklist_total_size()); - printk(KERN_INFO "%d free buffer pages\n", nr_free_buffer_pages()); + printk(KERN_INFO "%ld free buffer pages\n", nr_free_buffer_pages()); } /** @@ -822,4 +822,8 @@ int __meminit vmemmap_populate(struct page *start_page, { return vmemmap_populate_basepages(start_page, size, node); } + +void vmemmap_free(struct page *memmap, unsigned long nr_pages) +{ +} #endif diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c index b755ea92aea..20bc967c720 100644 --- a/arch/ia64/mm/init.c +++ b/arch/ia64/mm/init.c @@ -688,6 +688,24 @@ int arch_add_memory(int nid, u64 start, u64 size) return ret; } + +#ifdef CONFIG_MEMORY_HOTREMOVE +int arch_remove_memory(u64 start, u64 size) +{ + unsigned long start_pfn = start >> PAGE_SHIFT; + unsigned long nr_pages = size >> PAGE_SHIFT; + struct zone *zone; + int ret; + + zone = page_zone(pfn_to_page(start_pfn)); + ret = __remove_pages(zone, start_pfn, nr_pages); + if (ret) + pr_warn("%s: Problem encountered in __remove_pages() as" + " ret=%d\n", __func__, ret); + + return ret; +} +#endif #endif /* diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c index 95a45293e5a..7e2246fb2f3 100644 --- a/arch/powerpc/mm/init_64.c +++ b/arch/powerpc/mm/init_64.c @@ -297,5 +297,10 @@ int __meminit vmemmap_populate(struct page *start_page, return 0; } + +void vmemmap_free(struct page *memmap, unsigned long nr_pages) +{< |