Diffstat (limited to 'arch/sparc/mm/init_64.c')
-rw-r--r--  arch/sparc/mm/init_64.c | 453
1 file changed, 340 insertions(+), 113 deletions(-)
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 7a9b788c6ce..16b58ff11e6 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -47,6 +47,7 @@
#include <asm/prom.h>
#include <asm/mdesc.h>
#include <asm/cpudata.h>
+#include <asm/setup.h>
#include <asm/irq.h>
#include "init_64.h"
@@ -87,8 +88,8 @@ static unsigned long cpu_pgsz_mask;
#define MAX_BANKS 32
-static struct linux_prom64_registers pavail[MAX_BANKS] __devinitdata;
-static int pavail_ents __devinitdata;
+static struct linux_prom64_registers pavail[MAX_BANKS];
+static int pavail_ents;
static int cmp_p64(const void *a, const void *b)
{
@@ -276,7 +277,6 @@ static inline void tsb_insert(struct tsb *ent, unsigned long tag, unsigned long
}
unsigned long _PAGE_ALL_SZ_BITS __read_mostly;
-unsigned long _PAGE_SZBITS __read_mostly;
static void flush_dcache(unsigned long pfn)
{
@@ -307,12 +307,39 @@ static void flush_dcache(unsigned long pfn)
}
}
+/* mm->context.lock must be held */
+static void __update_mmu_tsb_insert(struct mm_struct *mm, unsigned long tsb_index,
+ unsigned long tsb_hash_shift, unsigned long address,
+ unsigned long tte)
+{
+ struct tsb *tsb = mm->context.tsb_block[tsb_index].tsb;
+ unsigned long tag;
+
+ if (unlikely(!tsb))
+ return;
+
+ tsb += ((address >> tsb_hash_shift) &
+ (mm->context.tsb_block[tsb_index].tsb_nentries - 1UL));
+ tag = (address >> 22UL);
+ tsb_insert(tsb, tag, tte);
+}
+
+#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
+static inline bool is_hugetlb_pte(pte_t pte)
+{
+ if ((tlb_type == hypervisor &&
+ (pte_val(pte) & _PAGE_SZALL_4V) == _PAGE_SZHUGE_4V) ||
+ (tlb_type != hypervisor &&
+ (pte_val(pte) & _PAGE_SZALL_4U) == _PAGE_SZHUGE_4U))
+ return true;
+ return false;
+}
+#endif
+
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
struct mm_struct *mm;
- struct tsb *tsb;
- unsigned long tag, flags;
- unsigned long tsb_index, tsb_hash_shift;
+ unsigned long flags;
pte_t pte = *ptep;
if (tlb_type != hypervisor) {
@@ -324,28 +351,16 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *
mm = vma->vm_mm;
- tsb_index = MM_TSB_BASE;
- tsb_hash_shift = PAGE_SHIFT;
-
spin_lock_irqsave(&mm->context.lock, flags);
-#ifdef CONFIG_HUGETLB_PAGE
- if (mm->context.tsb_block[MM_TSB_HUGE].tsb != NULL) {
- if ((tlb_type == hypervisor &&
- (pte_val(pte) & _PAGE_SZALL_4V) == _PAGE_SZHUGE_4V) ||
- (tlb_type != hypervisor &&
- (pte_val(pte) & _PAGE_SZALL_4U) == _PAGE_SZHUGE_4U)) {
- tsb_index = MM_TSB_HUGE;
- tsb_hash_shift = HPAGE_SHIFT;
- }
- }
+#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
+ if (mm->context.huge_pte_count && is_hugetlb_pte(pte))
+ __update_mmu_tsb_insert(mm, MM_TSB_HUGE, REAL_HPAGE_SHIFT,
+ address, pte_val(pte));
+ else
#endif
-
- tsb = mm->context.tsb_block[tsb_index].tsb;
- tsb += ((address >> tsb_hash_shift) &
- (mm->context.tsb_block[tsb_index].tsb_nentries - 1UL));
- tag = (address >> 22UL);
- tsb_insert(tsb, tag, pte_val(pte));
+ __update_mmu_tsb_insert(mm, MM_TSB_BASE, PAGE_SHIFT,
+ address, pte_val(pte));
spin_unlock_irqrestore(&mm->context.lock, flags);
}
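The hunk above factors the slot and tag computation out of update_mmu_cache() into __update_mmu_tsb_insert(), so the base-page and huge-page paths share one helper. As a rough standalone sketch of that arithmetic (illustrative entry count and the sparc64 8K base-page shift assumed here, not the kernel's actual tsb_block state):

#include <stdio.h>

/* Illustrative values only: a base-page TSB with 512 entries and an
 * 8K base-page shift; the real sizes live in mm->context. */
#define DEMO_TSB_NENTRIES	512UL
#define DEMO_PAGE_SHIFT		13UL

int main(void)
{
	unsigned long address = 0x0000700012345000UL;

	/* Slot: hash the address by the page shift, then mask by the
	 * power-of-two entry count, as __update_mmu_tsb_insert() does. */
	unsigned long slot = (address >> DEMO_PAGE_SHIFT) &
			     (DEMO_TSB_NENTRIES - 1UL);

	/* Tag: the high virtual address bits, address >> 22. */
	unsigned long tag = address >> 22UL;

	printf("slot=%lu tag=%#lx\n", slot, tag);
	return 0;
}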
@@ -574,7 +589,7 @@ static void __init remap_kernel(void)
int i, tlb_ent = sparc64_highest_locked_tlbent();
tte_vaddr = (unsigned long) KERNBASE;
- phys_page = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
+ phys_page = (prom_boot_mapping_phys_low >> ILOG2_4MB) << ILOG2_4MB;
tte_data = kern_large_tte(phys_page);
kern_locked_tte_data = tte_data;
@@ -616,7 +631,7 @@ static void __init inherit_prom_mappings(void)
void prom_world(int enter)
{
if (!enter)
- set_fs((mm_segment_t) { get_thread_current_ds() });
+ set_fs(get_fs());
__asm__ __volatile__("flushw");
}
@@ -667,10 +682,9 @@ void get_new_mmu_context(struct mm_struct *mm)
{
unsigned long ctx, new_ctx;
unsigned long orig_pgsz_bits;
- unsigned long flags;
int new_version;
- spin_lock_irqsave(&ctx_alloc_lock, flags);
+ spin_lock(&ctx_alloc_lock);
orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK);
ctx = (tlb_context_cache + 1) & CTX_NR_MASK;
new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx);
@@ -706,7 +720,7 @@ void get_new_mmu_context(struct mm_struct *mm)
out:
tlb_context_cache = new_ctx;
mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits;
- spin_unlock_irqrestore(&ctx_alloc_lock, flags);
+ spin_unlock(&ctx_alloc_lock);
if (unlikely(new_version))
smp_new_mmu_context_version();
@@ -781,11 +795,11 @@ struct node_mem_mask {
static struct node_mem_mask node_masks[MAX_NUMNODES];
static int num_node_masks;
+#ifdef CONFIG_NEED_MULTIPLE_NODES
+
int numa_cpu_lookup_table[NR_CPUS];
cpumask_t numa_cpumask_lookup_table[MAX_NUMNODES];
-#ifdef CONFIG_NEED_MULTIPLE_NODES
-
struct mdesc_mblock {
u64 base;
u64 size;
@@ -874,17 +888,21 @@ static void __init allocate_node_data(int nid)
static void init_node_masks_nonnuma(void)
{
+#ifdef CONFIG_NEED_MULTIPLE_NODES
int i;
+#endif
numadbg("Initializing tables for non-numa.\n");
node_masks[0].mask = node_masks[0].val = 0;
num_node_masks = 1;
+#ifdef CONFIG_NEED_MULTIPLE_NODES
for (i = 0; i < NR_CPUS; i++)
numa_cpu_lookup_table[i] = 0;
cpumask_setall(&numa_cpumask_lookup_table[0]);
+#endif
}
#ifdef CONFIG_NEED_MULTIPLE_NODES
@@ -1008,7 +1026,8 @@ static void __init add_node_ranges(void)
"start[%lx] end[%lx]\n",
nid, start, this_end);
- memblock_set_node(start, this_end - start, nid);
+ memblock_set_node(start, this_end - start,
+ &memblock.memory, nid);
start = this_end;
}
}
@@ -1085,7 +1104,14 @@ static int __init grab_mblocks(struct mdesc_handle *md)
m->size = *val;
val = mdesc_get_property(md, node,
"address-congruence-offset", NULL);
- m->offset = *val;
+
+ /* The address-congruence-offset property is optional.
+ * Explicitly zero it to identify this case.
+ */
+ if (val)
+ m->offset = *val;
+ else
+ m->offset = 0UL;
numadbg("MBLOCK[%d]: base[%llx] size[%llx] offset[%llx]\n",
count - 1, m->base, m->size, m->offset);
@@ -1305,7 +1331,7 @@ static void __init bootmem_init_nonnuma(void)
(top_of_ram - total_ram) >> 20);
init_node_masks_nonnuma();
- memblock_set_node(0, (phys_addr_t)ULLONG_MAX, 0);
+ memblock_set_node(0, (phys_addr_t)ULLONG_MAX, &memblock.memory, 0);
allocate_node_data(0);
node_set_online(0);
}
@@ -1537,6 +1563,96 @@ unsigned long __init find_ecache_flush_span(unsigned long size)
return ~0UL;
}
+unsigned long PAGE_OFFSET;
+EXPORT_SYMBOL(PAGE_OFFSET);
+
+static void __init page_offset_shift_patch_one(unsigned int *insn, unsigned long phys_bits)
+{
+ unsigned long final_shift;
+ unsigned int val = *insn;
+ unsigned int cnt;
+
+ /* We are patching in ilog2(max_supported_phys_address), and
+ * we are doing so in a manner similar to a relocation addend.
+ * That is, we are adding the shift value to whatever value
+ * is in the shift instruction count field already.
+ */
+ cnt = (val & 0x3f);
+ val &= ~0x3f;
+
+ /* If we are trying to shift >= 64 bits, clear the destination
+ * register. This can happen when phys_bits ends up being equal
+ * to MAX_PHYS_ADDRESS_BITS.
+ */
+ final_shift = (cnt + (64 - phys_bits));
+ if (final_shift >= 64) {
+ unsigned int rd = (val >> 25) & 0x1f;
+
+ val = 0x80100000 | (rd << 25);
+ } else {
+ val |= final_shift;
+ }
+ *insn = val;
+
+ __asm__ __volatile__("flush %0"
+ : /* no outputs */
+ : "r" (insn));
+}
+
+static void __init page_offset_shift_patch(unsigned long phys_bits)
+{
+ extern unsigned int __page_offset_shift_patch;
+ extern unsigned int __page_offset_shift_patch_end;
+ unsigned int *p;
+
+ p = &__page_offset_shift_patch;
+ while (p < &__page_offset_shift_patch_end) {
+ unsigned int *insn = (unsigned int *)(unsigned long)*p;
+
+ page_offset_shift_patch_one(insn, phys_bits);
+
+ p++;
+ }
+}
+
+static void __init setup_page_offset(void)
+{
+ unsigned long max_phys_bits = 40;
+
+ if (tlb_type == cheetah || tlb_type == cheetah_plus) {
+ max_phys_bits = 42;
+ } else if (tlb_type == hypervisor) {
+ switch (sun4v_chip_type) {
+ case SUN4V_CHIP_NIAGARA1:
+ case SUN4V_CHIP_NIAGARA2:
+ max_phys_bits = 39;
+ break;
+ case SUN4V_CHIP_NIAGARA3:
+ max_phys_bits = 43;
+ break;
+ case SUN4V_CHIP_NIAGARA4:
+ case SUN4V_CHIP_NIAGARA5:
+ case SUN4V_CHIP_SPARC64X:
+ default:
+ max_phys_bits = 47;
+ break;
+ }
+ }
+
+ if (max_phys_bits > MAX_PHYS_ADDRESS_BITS) {
+ prom_printf("MAX_PHYS_ADDRESS_BITS is too small, need %lu\n",
+ max_phys_bits);
+ prom_halt();
+ }
+
+ PAGE_OFFSET = PAGE_OFFSET_BY_BITS(max_phys_bits);
+
+ pr_info("PAGE_OFFSET is 0x%016lx (max_phys_bits == %lu)\n",
+ PAGE_OFFSET, max_phys_bits);
+
+ page_offset_shift_patch(max_phys_bits);
+}
+
static void __init tsb_phys_patch(void)
{
struct tsb_ldquad_phys_patch_entry *pquad;
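The new setup_page_offset()/page_offset_shift_patch() pair above derives PAGE_OFFSET from the CPU's supported physical address width and then folds a per-instruction addend into pre-tagged shift instructions. A minimal sketch of the underlying arithmetic, assuming PAGE_OFFSET_BY_BITS() expands to -(1UL << max_phys_bits) (a companion header change that is not part of this diff):

#include <stdio.h>

/* Assumed companion definition: the linear map begins at
 * -(1UL << max_phys_bits), i.e. all ones above the physical width. */
static unsigned long page_offset_by_bits(unsigned long max_phys_bits)
{
	return -(1UL << max_phys_bits);
}

int main(void)
{
	/* 47 physical address bits, the Niagara-4/5 case in the switch above. */
	unsigned long max_phys_bits = 47;

	unsigned long page_offset = page_offset_by_bits(max_phys_bits);

	/* The addend folded into each patched shift instruction:
	 * cnt += (64 - max_phys_bits), with the destination register
	 * cleared outright if the result reaches 64. */
	unsigned long addend = 64 - max_phys_bits;

	printf("PAGE_OFFSET=%#lx shift addend=%lu\n", page_offset, addend);
	return 0;
}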
@@ -1674,7 +1790,7 @@ static void __init sun4v_ktsb_init(void)
#endif
}
-void __cpuinit sun4v_ktsb_register(void)
+void sun4v_ktsb_register(void)
{
unsigned long pa, ret;
@@ -1702,7 +1818,7 @@ static void __init sun4v_linear_pte_xor_finalize(void)
#ifndef CONFIG_DEBUG_PAGEALLOC
if (cpu_pgsz_mask & HV_PGSZ_MASK_256MB) {
kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZ256MB_4V) ^
- 0xfffff80000000000UL;
+ PAGE_OFFSET;
kern_linear_pte_xor[1] |= (_PAGE_CP_4V | _PAGE_CV_4V |
_PAGE_P_4V | _PAGE_W_4V);
} else {
@@ -1711,7 +1827,7 @@ static void __init sun4v_linear_pte_xor_finalize(void)
if (cpu_pgsz_mask & HV_PGSZ_MASK_2GB) {
kern_linear_pte_xor[2] = (_PAGE_VALID | _PAGE_SZ2GB_4V) ^
- 0xfffff80000000000UL;
+ PAGE_OFFSET;
kern_linear_pte_xor[2] |= (_PAGE_CP_4V | _PAGE_CV_4V |
_PAGE_P_4V | _PAGE_W_4V);
} else {
@@ -1720,7 +1836,7 @@ static void __init sun4v_linear_pte_xor_finalize(void)
if (cpu_pgsz_mask & HV_PGSZ_MASK_16GB) {
kern_linear_pte_xor[3] = (_PAGE_VALID | _PAGE_SZ16GB_4V) ^
- 0xfffff80000000000UL;
+ PAGE_OFFSET;
kern_linear_pte_xor[3] |= (_PAGE_CP_4V | _PAGE_CV_4V |
_PAGE_P_4V | _PAGE_W_4V);
} else {
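The three hunks above replace the hard-coded 43-bit linear-map base 0xfffff80000000000 with the runtime PAGE_OFFSET in the kern_linear_pte_xor[] constants. A standalone sketch of why the single XOR in the TLB miss path produces a complete TTE (47 physical address bits and made-up attribute bit values assumed; not the real _PAGE_* definitions):

#include <stdio.h>

/* Illustrative values only. */
#define DEMO_PAGE_OFFSET	(-(1UL << 47))
#define DEMO_PAGE_VALID		(1UL << 63)
#define DEMO_ATTRS		0x0000000000000076UL

int main(void)
{
	/* The precomputed constant, as in sun4v_linear_pte_xor_finalize(). */
	unsigned long pte_xor = (DEMO_PAGE_VALID | DEMO_ATTRS) ^ DEMO_PAGE_OFFSET;

	/* A linear-map virtual address for physical 0x80000000, already
	 * masked down to its large-page boundary by the miss handler. */
	unsigned long paddr = 0x80000000UL;
	unsigned long vaddr = DEMO_PAGE_OFFSET + paddr;

	/* One XOR strips the PAGE_OFFSET high bits back off and sets the
	 * valid and attribute bits, yielding the final TTE. */
	unsigned long tte = vaddr ^ pte_xor;

	printf("tte=%#lx (paddr %#lx plus attribute bits)\n", tte, paddr);
	return 0;
}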
@@ -1732,7 +1848,7 @@ static void __init sun4v_linear_pte_xor_finalize(void)
/* paging_init() sets up the page tables */
static unsigned long last_valid_pfn;
-pgd_t swapper_pg_dir[2048];
+pgd_t swapper_pg_dir[PTRS_PER_PGD];
static void sun4u_pgprot_init(void);
static void sun4v_pgprot_init(void);
@@ -1743,6 +1859,8 @@ void __init paging_init(void)
unsigned long real_end, i;
int node;
+ setup_page_offset();
+
/* These build time checks make sure that the dcache_dirty_cpu()
* page->flags usage will work.
*
@@ -1768,7 +1886,7 @@ void __init paging_init(void)
BUILD_BUG_ON(NR_CPUS > 4096);
- kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
+ kern_base = (prom_boot_mapping_phys_low >> ILOG2_4MB) << ILOG2_4MB;
kern_size = (unsigned long)&_end - (unsigned long)KERNBASE;
/* Invalidate both kernel TSBs. */
@@ -1824,7 +1942,7 @@ void __init paging_init(void)
shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE);
real_end = (unsigned long)_end;
- num_kernel_image_mappings = DIV_ROUND_UP(real_end - KERNBASE, 1 << 22);
+ num_kernel_image_mappings = DIV_ROUND_UP(real_end - KERNBASE, 1 << ILOG2_4MB);
printk("Kernel: Using %d locked TLB entries for main kernel image.\n",
num_kernel_image_mappings);
@@ -1923,7 +2041,7 @@ void __init paging_init(void)
printk("Booting Linux...\n");
}
-int __devinit page_in_phys_avail(unsigned long paddr)
+int page_in_phys_avail(unsigned long paddr)
{
int i;
@@ -1981,7 +2099,7 @@ static void __init setup_valid_addr_bitmap_from_pavail(unsigned long *bitmap)
if (new_start <= old_start &&
new_end >= (old_start + PAGE_SIZE)) {
- set_bit(old_start >> 22, bitmap);
+ set_bit(old_start >> ILOG2_4MB, bitmap);
goto do_next_page;
}
}
@@ -2013,15 +2131,24 @@ static void __init patch_tlb_miss_handler_bitmap(void)
flushi(&valid_addr_bitmap_insn[0]);
}
+static void __init register_page_bootmem_info(void)
+{
+#ifdef CONFIG_NEED_MULTIPLE_NODES
+ int i;
+
+ for_each_online_node(i)
+ if (NODE_DATA(i)->node_spanned_pages)
+ register_page_bootmem_info_node(NODE_DATA(i));
+#endif
+}
void __init mem_init(void)
{
- unsigned long codepages, datapages, initpages;
unsigned long addr, last;
addr = PAGE_OFFSET + kern_base;
last = PAGE_ALIGN(kern_size) + addr;
while (addr < last) {
- set_bit(__pa(addr) >> 22, sparc64_valid_addr_bitmap);
+ set_bit(__pa(addr) >> ILOG2_4MB, sparc64_valid_addr_bitmap);
addr += PAGE_SIZE;
}
@@ -2030,26 +2157,8 @@ void __init mem_init(void)
high_memory = __va(last_valid_pfn << PAGE_SHIFT);
-#ifdef CONFIG_NEED_MULTIPLE_NODES
- {
- int i;
- for_each_online_node(i) {
- if (NODE_DATA(i)->node_spanned_pages != 0) {
- totalram_pages +=
- free_all_bootmem_node(NODE_DATA(i));
- }
- }
- totalram_pages += free_low_memory_core_early(MAX_NUMNODES);
- }
-#else
- totalram_pages = free_all_bootmem();
-#endif
-
- /* We subtract one to account for the mem_map_zero page
- * allocated below.
- */
- totalram_pages -= 1;
- num_physpages = totalram_pages;
+ register_page_bootmem_info();
+ free_all_bootmem();
/*
* Set up the zero page, mark it reserved, so that page count
@@ -2060,21 +2169,9 @@ void __init mem_init(void)
prom_printf("paging_init: Cannot alloc zero page.\n");
prom_halt();
}
- SetPageReserved(mem_map_zero);
-
- codepages = (((unsigned long) _etext) - ((unsigned long) _start));
- codepages = PAGE_ALIGN(codepages) >> PAGE_SHIFT;
- datapages = (((unsigned long) _edata) - ((unsigned long) _etext));
- datapages = PAGE_ALIGN(datapages) >> PAGE_SHIFT;
- initpages = (((unsigned long) __init_end) - ((unsigned long) __init_begin));
- initpages = PAGE_ALIGN(initpages) >> PAGE_SHIFT;
+ mark_page_reserved(mem_map_zero);
- printk("Memory: %luk available (%ldk kernel code, %ldk data, %ldk init) [%016lx,%016lx]\n",
- nr_free_pages() << (PAGE_SHIFT-10),
- codepages << (PAGE_SHIFT-10),
- datapages << (PAGE_SHIFT-10),
- initpages << (PAGE_SHIFT-10),
- PAGE_OFFSET, (last_valid_pfn << PAGE_SHIFT));
+ mem_init_print_info(NULL);
if (tlb_type == cheetah || tlb_type == cheetah_plus)
cheetah_ecache_flush_init();
@@ -2100,39 +2197,22 @@ void free_initmem(void)
initend = (unsigned long)(__init_end) & PAGE_MASK;
for (; addr < initend; addr += PAGE_SIZE) {
unsigned long page;
- struct page *p;
page = (addr +
((unsigned long) __va(kern_base)) -
((unsigned long) KERNBASE));
memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
- if (do_free) {
- p = virt_to_page(page);
-
- ClearPageReserved(p);
- init_page_count(p);
- __free_page(p);
- num_physpages++;
- totalram_pages++;
- }
+ if (do_free)
+ free_reserved_page(virt_to_page(page));
}
}
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
- if (start < end)
- printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
- for (; start < end; start += PAGE_SIZE) {
- struct page *p = virt_to_page(start);
-
- ClearPageReserved(p);
- init_page_count(p);
- __free_page(p);
- num_physpages++;
- totalram_pages++;
- }
+ free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM,
+ "initrd");
}
#endif
@@ -2169,10 +2249,9 @@ unsigned long vmemmap_table[VMEMMAP_SIZE];
static long __meminitdata addr_start, addr_end;
static int __meminitdata node_start;
-int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
+int __meminit vmemmap_populate(unsigned long vstart, unsigned long vend,
+ int node)
{
- unsigned long vstart = (unsigned long) start;
- unsigned long vend = (unsigned long) (start + nr);
unsigned long phys_start = (vstart - VMEMMAP_BASE);
unsigned long phys_end = (vend - VMEMMAP_BASE);
unsigned long addr = phys_start & VMEMMAP_CHUNK_MASK;
@@ -2193,7 +2272,7 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
void *block;
if (!(*vmem_pp & _PAGE_VALID)) {
- block = vmemmap_alloc_block(1UL << 22, node);
+ block = vmemmap_alloc_block(1UL << ILOG2_4MB, node);
if (!block)
return -ENOMEM;
@@ -2223,6 +2302,11 @@ void __meminit vmemmap_populate_print_last(void)
node_start = 0;
}
}
+
+void vmemmap_free(unsigned long start, unsigned long end)
+{
+}
+
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
static void prot_init_common(unsigned long page_none,
@@ -2275,11 +2359,10 @@ static void __init sun4u_pgprot_init(void)
__ACCESS_BITS_4U | _PAGE_E_4U);
#ifdef CONFIG_DEBUG_PAGEALLOC
- kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZBITS_4U) ^
- 0xfffff80000000000UL;
+ kern_linear_pte_xor[0] = _PAGE_VALID ^ PAGE_OFFSET;
#else
kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4U) ^
- 0xfffff80000000000UL;
+ PAGE_OFFSET;
#endif
kern_linear_pte_xor[0] |= (_PAGE_CP_4U | _PAGE_CV_4U |
_PAGE_P_4U | _PAGE_W_4U);
@@ -2287,7 +2370,6 @@ static void __init sun4u_pgprot_init(void)
for (i = 1; i < 4; i++)
kern_linear_pte_xor[i] = kern_linear_pte_xor[0];
- _PAGE_SZBITS = _PAGE_SZBITS_4U;
_PAGE_ALL_SZ_BITS = (_PAGE_SZ4MB_4U | _PAGE_SZ512K_4U |
_PAGE_SZ64K_4U | _PAGE_SZ8K_4U |
_PAGE_SZ32MB_4U | _PAGE_SZ256MB_4U);
@@ -2324,11 +2406,10 @@ static void __init sun4v_pgprot_init(void)
_PAGE_CACHE = _PAGE_CACHE_4V;
#ifdef CONFIG_DEBUG_PAGEALLOC
- kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZBITS_4V) ^
- 0xfffff80000000000UL;
+ kern_linear_pte_xor[0] = _PAGE_VALID ^ PAGE_OFFSET;
#else
kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4V) ^
- 0xfffff80000000000UL;
+ PAGE_OFFSET;
#endif
kern_linear_pte_xor[0] |= (_PAGE_CP_4V | _PAGE_CV_4V |
_PAGE_P_4V | _PAGE_W_4V);
@@ -2339,7 +2420,6 @@ static void __init sun4v_pgprot_init(void)
pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4V | __DIRTY_BITS_4V |
__ACCESS_BITS_4V | _PAGE_E_4V);
- _PAGE_SZBITS = _PAGE_SZBITS_4V;
_PAGE_ALL_SZ_BITS = (_PAGE_SZ16GB_4V | _PAGE_SZ2GB_4V |
_PAGE_SZ256MB_4V | _PAGE_SZ32MB_4V |
_PAGE_SZ4MB_4V | _PAGE_SZ512K_4V |
@@ -2472,3 +2552,150 @@ void __flush_tlb_all(void)
__asm__ __volatile__("wrpr %0, 0, %%pstate"
: : "r" (pstate));
}
+
+pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
+ unsigned long address)
+{
+ struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK |
+ __GFP_REPEAT | __GFP_ZERO);
+ pte_t *pte = NULL;
+
+ if (page)
+ pte = (pte_t *) page_address(page);
+
+ return pte;
+}
+
+pgtable_t pte_alloc_one(struct mm_struct *mm,
+ unsigned long address)
+{
+ struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK |
+ __GFP_REPEAT | __GFP_ZERO);
+ if (!page)
+ return NULL;
+ if (!pgtable_page_ctor(page)) {
+ free_hot_cold_page(page, 0);
+ return NULL;
+ }
+ return (pte_t *) page_address(page);
+}
+
+void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
+{
+ free_page((unsigned long)pte);
+}
+
+static void __pte_free(pgtable_t pte)
+{
+ struct page *page = virt_to_page(pte);
+
+ pgtable_page_dtor(page);
+ __free_page(page);
+}
+
+void pte_free(struct mm_struct *mm, pgtable_t pte)
+{
+ __pte_free(pte);
+}
+
+void pgtable_free(void *table, bool is_page)
+{
+ if (is_page)
+ __pte_free(table);
+ else
+ kmem_cache_free(pgtable_cache, table);
+}
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
+ pmd_t *pmd)
+{
+ unsigned long pte, flags;
+ struct mm_struct *mm;
+ pmd_t entry = *pmd;
+
+ if (!pmd_large(entry) || !pmd_young(entry))
+ return;
+
+ pte = pmd_val(entry);
+
+ /* We are fabricating 8MB pages using 4MB real hw pages. */
+ pte |= (addr & (1UL << REAL_HPAGE_SHIFT));
+
+ mm = vma->vm_mm;
+
+ spin_lock_irqsave(&mm->context.lock, flags);
+
+ if (mm->context.tsb_block[MM_TSB_HUGE].tsb != NULL)
+ __update_mmu_tsb_insert(mm, MM_TSB_HUGE, REAL_HPAGE_SHIFT,
+ addr, pte);
+
+ spin_unlock_irqrestore(&mm->context.lock, flags);
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
+static void context_reload(void *__data)
+{
+ struct mm_struct *mm = __data;
+
+ if (mm == current->mm)
+ load_secondary_context(mm);
+}
+
+void hugetlb_setup(struct pt_regs *regs)
+{
+ struct mm_struct *mm = current->mm;
+ struct tsb_config *tp;
+
+ if (in_atomic() || !mm) {
+ const struct exception_table_entry *entry;
+
+ entry = search_exception_tables(regs->tpc);
+ if (entry) {
+ regs->tpc = entry->fixup;
+ regs->tnpc = regs->tpc + 4;
+ return;
+ }
+ pr_alert("Unexpected HugeTLB setup in atomic context.\n");
+ die_if_kernel("HugeTSB in atomic", regs);
+ }
+
+ tp = &mm->context.tsb_block[MM_TSB_HUGE];
+ if (likely(tp->tsb == NULL))
+ tsb_grow(mm, MM_TSB_HUGE, 0);
+
+ tsb_context_switch(mm);
+ smp_tsb_sync(mm);
+
+ /* On UltraSPARC-III+ and later, configure the second half of
+ * the Data-TLB for huge pages.
+ */
+ if (tlb_type == cheetah_plus) {
+ unsigned long ctx;
+
+ spin_lock(&ctx_alloc_lock);
+ ctx = mm->context.sparc64_ctx_val;
+ ctx &= ~CTX_PGSZ_MASK;
+ ctx |= CTX_PGSZ_BASE << CTX_PGSZ0_SHIFT;
+ ctx |= CTX_PGSZ_HUGE << CTX_PGSZ1_SHIFT;
+
+ if (ctx != mm->context.sparc64_ctx_val) {
+ /* When changing the page size fields, we
+ * must perform a context flush so that no
+ * stale entries match. This flush must
+ * occur with the original context register
+ * settings.
+ */
+ do_flush_tlb_mm(mm);
+
+ /* Reload the context register of all processors
+ * also executing in this address space.
+ */
+ mm->context.sparc64_ctx_val = ctx;
+ on_each_cpu(context_reload, mm, 0);
+ }
+ spin_unlock(&ctx_alloc_lock);
+ }
+}
+#endif
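The update_mmu_cache_pmd() path added in the final hunk builds an 8MB software huge page out of two 4MB hardware TTEs by folding bit REAL_HPAGE_SHIFT of the faulting address into the TTE before inserting it into the huge TSB. A standalone sketch of that bit manipulation (REAL_HPAGE_SHIFT assumed to be 22, the 4MB page shift; illustrative physical address bits only):

#include <stdio.h>

#define DEMO_REAL_HPAGE_SHIFT	22UL	/* 4MB hardware page; assumed value */

int main(void)
{
	/* A huge-page TTE whose physical address points at the first 4MB
	 * half of an 8MB software huge page. */
	unsigned long tte  = 0x0000000080000000UL;

	/* A fault landing in the upper 4MB half of that 8MB region. */
	unsigned long addr = 0x0000700000400000UL;

	/* Fold bit REAL_HPAGE_SHIFT of the address into the TTE so the
	 * correct 4MB half is loaded, as update_mmu_cache_pmd() does. */
	tte |= (addr & (1UL << DEMO_REAL_HPAGE_SHIFT));

	printf("tte=%#lx\n", tte);
	return 0;
}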