Diffstat (limited to 'arch/arm64/mm')
 arch/arm64/mm/cache.S       |  2
 arch/arm64/mm/dma-mapping.c | 36
 arch/arm64/mm/init.c        |  3
 arch/arm64/mm/mmu.c         | 12
 arch/arm64/mm/pgd.c         | 11
 arch/arm64/mm/proc-macros.S |  3
 arch/arm64/mm/proc.S        | 71
 7 files changed, 121 insertions(+), 17 deletions(-)
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index 48a386094fa..1ea9f26d1b7 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -146,7 +146,7 @@ ENDPROC(flush_icache_range)
ENDPROC(__flush_cache_user_range)
/*
- * __flush_kern_dcache_page(kaddr)
+ * __flush_dcache_area(kaddr, size)
*
 * Ensure that the data in the area starting at kaddr and of length size
 * is written back to memory.
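
Note on the rename above: __flush_dcache_area() takes an explicit length
instead of assuming a single page. A minimal usage sketch, assuming the
prototype from asm/cacheflush.h; the caller and buffer names here are
illustrative only, not from the patch:

	#include <asm/cacheflush.h>

	static void example_publish(void *buf, size_t len)
	{
		/*
		 * Clean (and invalidate) the buffer to the point of
		 * coherency so a non-cache-coherent observer sees the data.
		 */
		__flush_dcache_area(buf, len);
	}
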
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 4bd7579ec9e..fbd76785c5d 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -21,6 +21,7 @@
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
+#include <linux/dma-contiguous.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>
@@ -33,17 +34,48 @@ static void *arm64_swiotlb_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flags,
struct dma_attrs *attrs)
{
+ if (dev == NULL) {
+ WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
+ return NULL;
+ }
+
if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
dev->coherent_dma_mask <= DMA_BIT_MASK(32))
flags |= GFP_DMA32;
- return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
+ if (IS_ENABLED(CONFIG_DMA_CMA)) {
+ struct page *page;
+
+ size = PAGE_ALIGN(size);
+ page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
+ get_order(size));
+ if (!page)
+ return NULL;
+
+ *dma_handle = phys_to_dma(dev, page_to_phys(page));
+ return page_address(page);
+ } else {
+ return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
+ }
}
static void arm64_swiotlb_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle,
struct dma_attrs *attrs)
{
- swiotlb_free_coherent(dev, size, vaddr, dma_handle);
+ if (dev == NULL) {
+ WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
+ return;
+ }
+
+ if (IS_ENABLED(CONFIG_DMA_CMA)) {
+ phys_addr_t paddr = dma_to_phys(dev, dma_handle);
+
+ dma_release_from_contiguous(dev,
+ phys_to_page(paddr),
+ size >> PAGE_SHIFT);
+ } else {
+ swiotlb_free_coherent(dev, size, vaddr, dma_handle);
+ }
}
static struct dma_map_ops arm64_swiotlb_dma_ops = {
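
The CMA branch above only changes where the coherent buffer comes from;
drivers still go through the generic DMA API, which dispatches via
dma_map_ops to the two functions in this file. A minimal usage sketch
(device, ring size and function name are illustrative, not from the
patch):

	#include <linux/dma-mapping.h>
	#include <linux/sizes.h>

	static int example_alloc_ring(struct device *dev)
	{
		dma_addr_t ring_dma;
		void *ring;

		ring = dma_alloc_coherent(dev, SZ_64K, &ring_dma, GFP_KERNEL);
		if (!ring)
			return -ENOMEM;

		/* ... program ring_dma into the device ... */

		dma_free_coherent(dev, SZ_64K, ring, ring_dma);
		return 0;
	}
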
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 0cb8742de4f..d0b4c2efda9 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -30,6 +30,7 @@
#include <linux/memblock.h>
#include <linux/sort.h>
#include <linux/of_fdt.h>
+#include <linux/dma-contiguous.h>
#include <asm/sections.h>
#include <asm/setup.h>
@@ -159,6 +160,8 @@ void __init arm64_memblock_init(void)
memblock_reserve(base, size);
}
+ dma_contiguous_reserve(0);
+
memblock_allow_resize();
memblock_dump_all();
}
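
dma_contiguous_reserve() takes an upper physical-address bound for the
default CMA region; the 0 passed above means "no limit". As a
hypothetical illustration only, a platform that had to keep the region
below 4GB could pass an explicit bound instead:

	/* hypothetical: constrain the default CMA area below 4GB */
	dma_contiguous_reserve(DMA_BIT_MASK(32) + 1);
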
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index f557ebbe701..f8dc7e8fce6 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -203,10 +203,18 @@ static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
do {
next = pmd_addr_end(addr, end);
/* try section mapping first */
- if (((addr | next | phys) & ~SECTION_MASK) == 0)
+ if (((addr | next | phys) & ~SECTION_MASK) == 0) {
+ pmd_t old_pmd = *pmd;
set_pmd(pmd, __pmd(phys | prot_sect_kernel));
- else
+ /*
+ * Check for previous table entries created during
+ * boot (__create_page_tables) and flush them.
+ */
+ if (!pmd_none(old_pmd))
+ flush_tlb_all();
+ } else {
alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys));
+ }
phys += next - addr;
} while (pmd++, addr = next, addr != end);
}
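
The section-mapping test above succeeds only when the start address, the
end of the range and the physical base are all SECTION_SIZE aligned, so
a single PMD block entry can cover the span. The same predicate as a
standalone helper, for illustration only:

	static bool can_use_section_mapping(unsigned long addr,
					    unsigned long next,
					    phys_addr_t phys)
	{
		/* all three must sit on a section boundary */
		return ((addr | next | phys) & ~SECTION_MASK) == 0;
	}
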
diff --git a/arch/arm64/mm/pgd.c b/arch/arm64/mm/pgd.c
index 7083cdada65..62c6101df26 100644
--- a/arch/arm64/mm/pgd.c
+++ b/arch/arm64/mm/pgd.c
@@ -32,17 +32,10 @@
pgd_t *pgd_alloc(struct mm_struct *mm)
{
- pgd_t *new_pgd;
-
if (PGD_SIZE == PAGE_SIZE)
- new_pgd = (pgd_t *)get_zeroed_page(GFP_KERNEL);
+ return (pgd_t *)get_zeroed_page(GFP_KERNEL);
else
- new_pgd = kzalloc(PGD_SIZE, GFP_KERNEL);
-
- if (!new_pgd)
- return NULL;
-
- return new_pgd;
+ return kzalloc(PGD_SIZE, GFP_KERNEL);
}
void pgd_free(struct mm_struct *mm, pgd_t *pgd)
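
The simplified pgd_alloc() stays symmetric with the free path whose
signature appears in the trailing context above; a sketch of its body
(assuming this patch leaves it unchanged) shows the release primitive
matching each allocator:

	void pgd_free(struct mm_struct *mm, pgd_t *pgd)
	{
		if (PGD_SIZE == PAGE_SIZE)
			free_page((unsigned long)pgd);	/* page allocator */
		else
			kfree(pgd);			/* slab allocator */
	}
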
diff --git a/arch/arm64/mm/proc-macros.S b/arch/arm64/mm/proc-macros.S
index 8957b822010..005d29e2977 100644
--- a/arch/arm64/mm/proc-macros.S
+++ b/arch/arm64/mm/proc-macros.S
@@ -38,8 +38,7 @@
*/
.macro dcache_line_size, reg, tmp
mrs \tmp, ctr_el0 // read CTR
- lsr \tmp, \tmp, #16
- and \tmp, \tmp, #0xf // cache line size encoding
+ ubfm \tmp, \tmp, #16, #19 // cache line size encoding
mov \reg, #4 // bytes per word
lsl \reg, \reg, \tmp // actual cache line size
.endm
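
ubfm \tmp, \tmp, #16, #19 extracts CTR_EL0 bits [19:16] (the DminLine
field) in a single instruction, replacing the lsr/and pair. In C terms
the macro computes the following (illustrative helper, not from the
patch):

	#include <stdint.h>

	static inline unsigned int dcache_line_size_bytes(uint64_t ctr_el0)
	{
		/* CTR_EL0.DminLine = log2(words in smallest D-cache line) */
		unsigned int dminline = (ctr_el0 >> 16) & 0xf;

		return 4u << dminline;	/* 4 bytes per word */
	}
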
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 0f7fec52c7f..1333e6f9a8e 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -80,8 +80,77 @@ ENTRY(cpu_do_idle)
ret
ENDPROC(cpu_do_idle)
+#ifdef CONFIG_ARM64_CPU_SUSPEND
+/**
+ * cpu_do_suspend - save CPU registers context
+ *
+ * x0: virtual address of context pointer
+ */
+ENTRY(cpu_do_suspend)
+ mrs x2, tpidr_el0
+ mrs x3, tpidrro_el0
+ mrs x4, contextidr_el1
+ mrs x5, mair_el1
+ mrs x6, cpacr_el1
+ mrs x7, ttbr1_el1
+ mrs x8, tcr_el1
+ mrs x9, vbar_el1
+ mrs x10, mdscr_el1
+ mrs x11, oslsr_el1
+ mrs x12, sctlr_el1
+ stp x2, x3, [x0]
+ stp x4, x5, [x0, #16]
+ stp x6, x7, [x0, #32]
+ stp x8, x9, [x0, #48]
+ stp x10, x11, [x0, #64]
+ str x12, [x0, #80]
+ ret
+ENDPROC(cpu_do_suspend)
+
+/**
+ * cpu_do_resume - restore CPU register context
+ *
+ * x0: Physical address of context pointer
+ * x1: ttbr0_el1 to be restored
+ *
+ * Returns:
+ * sctlr_el1 value in x0
+ */
+ENTRY(cpu_do_resume)
+ /*
+ * Invalidate local tlb entries before turning on MMU
+ */
+ tlbi vmalle1
+ ldp x2, x3, [x0]
+ ldp x4, x5, [x0, #16]
+ ldp x6, x7, [x0, #32]
+ ldp x8, x9, [x0, #48]
+ ldp x10, x11, [x0, #64]
+ ldr x12, [x0, #80]
+ msr tpidr_el0, x2
+ msr tpidrro_el0, x3
+ msr contextidr_el1, x4
+ msr mair_el1, x5
+ msr cpacr_el1, x6
+ msr ttbr0_el1, x1
+ msr ttbr1_el1, x7
+ msr tcr_el1, x8
+ msr vbar_el1, x9
+ msr mdscr_el1, x10
+ /*
+ * Restore oslsr_el1 by writing oslar_el1
+ */
+ ubfx x11, x11, #1, #1
+ msr oslar_el1, x11
+ mov x0, x12
+ dsb nsh // Make sure local tlb invalidation completed
+ isb
+ ret
+ENDPROC(cpu_do_resume)
+#endif
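
The stp/ldp offsets above define an 88-byte save area (11 system
registers, 8 bytes each). A hypothetical C view of that layout, written
out purely to document the offsets (the real context type is defined
elsewhere in the suspend series):

	#include <stdint.h>

	struct cpu_suspend_regs {		/* byte offset */
		uint64_t tpidr_el0;		/*  0 */
		uint64_t tpidrro_el0;		/*  8 */
		uint64_t contextidr_el1;	/* 16 */
		uint64_t mair_el1;		/* 24 */
		uint64_t cpacr_el1;		/* 32 */
		uint64_t ttbr1_el1;		/* 40 */
		uint64_t tcr_el1;		/* 48 */
		uint64_t vbar_el1;		/* 56 */
		uint64_t mdscr_el1;		/* 64 */
		uint64_t oslsr_el1;		/* 72 */
		uint64_t sctlr_el1;		/* 80 */
	};
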
+
/*
- * cpu_switch_mm(pgd_phys, tsk)
+ * cpu_do_switch_mm(pgd_phys, tsk)
*
* Set the translation table base pointer to be pgd_phys.
*