Diffstat (limited to 'arch/microblaze/include/asm/pgtable.h')
-rw-r--r--	arch/microblaze/include/asm/pgtable.h | 98
1 file changed, 29 insertions(+), 69 deletions(-)
diff --git a/arch/microblaze/include/asm/pgtable.h b/arch/microblaze/include/asm/pgtable.h
index dd2bb60651c..95cef0b5f83 100644
--- a/arch/microblaze/include/asm/pgtable.h
+++ b/arch/microblaze/include/asm/pgtable.h
@@ -13,9 +13,6 @@
 #include <asm/setup.h>
 
-#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)	\
-		remap_pfn_range(vma, vaddr, pfn, size, prot)
-
 #ifndef __ASSEMBLY__
 extern int mem_init_done;
 #endif
 
@@ -57,6 +54,13 @@ static inline int pte_file(pte_t pte) { return 0; }
 
 #define pgprot_noncached_wc(prot)	prot
 
+/*
+ * All 32bit addresses are effectively valid for vmalloc...
+ * Sort of meaningless for non-VM targets.
+ */
+#define VMALLOC_START	0
+#define VMALLOC_END	0xffffffff
+
 #else /* CONFIG_MMU */
 
 #include <asm-generic/4level-fixup.h>
@@ -87,8 +91,7 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
 /* Start and end of the vmalloc area. */
 /* Make sure to map the vmalloc area above the pinned kernel memory area
    of 32Mb. */
-#define VMALLOC_START	(CONFIG_KERNEL_START + \
-				max(32 * 1024 * 1024UL, memory_size))
+#define VMALLOC_START	(CONFIG_KERNEL_START + CONFIG_LOWMEM_SIZE)
 #define VMALLOC_END	ioremap_bot
 
 #endif /* __ASSEMBLY__ */
@@ -228,12 +231,6 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
 #ifndef _PAGE_SHARED
 #define _PAGE_SHARED	0
 #endif
-#ifndef _PAGE_HWWRITE
-#define _PAGE_HWWRITE	0
-#endif
-#ifndef _PAGE_HWEXEC
-#define _PAGE_HWEXEC	0
-#endif
 #ifndef _PAGE_EXEC
 #define _PAGE_EXEC	0
 #endif
@@ -404,20 +401,19 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 static inline unsigned long pte_update(pte_t *p, unsigned long clr,
 				unsigned long set)
 {
-	unsigned long old, tmp, msr;
-
-	__asm__ __volatile__("\
-			msrclr	%2, 0x2\n\
-			nop\n\
-			lw	%0, %4, r0\n\
-			andn	%1, %0, %5\n\
-			or	%1, %1, %6\n\
-			sw	%1, %4, r0\n\
-			mts	rmsr, %2\n\
-			nop"
-	: "=&r" (old), "=&r" (tmp), "=&r" (msr), "=m" (*p)
-	: "r" ((unsigned long)(p + 1) - 4), "r" (clr), "r" (set), "m" (*p)
-	: "cc");
+	unsigned long flags, old, tmp;
+
+	raw_local_irq_save(flags);
+
+	__asm__ __volatile__(	"lw	%0, %2, r0	\n"
+				"andn	%1, %0, %3	\n"
+				"or	%1, %1, %4	\n"
+				"sw	%1, %2, r0	\n"
+			: "=&r" (old), "=&r" (tmp)
+			: "r" ((unsigned long)(p + 1) - 4), "r" (clr), "r" (set)
+			: "cc");
+
+	raw_local_irq_restore(flags);
 
 	return old;
 }
@@ -437,8 +433,9 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 	*ptep = pte;
 }
 
-static inline int ptep_test_and_clear_young(struct mm_struct *mm,
-		unsigned long addr, pte_t *ptep)
+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
+static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
+		unsigned long address, pte_t *ptep)
 {
 	return (pte_update(ptep, _PAGE_ACCESSED, 0) & _PAGE_ACCESSED) != 0;
 }
@@ -450,6 +447,7 @@ static inline int ptep_test_and_clear_dirty(struct mm_struct *mm,
 				(_PAGE_DIRTY | _PAGE_HWWRITE), 0) & _PAGE_DIRTY) != 0;
 }
 
+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
 static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
 		unsigned long addr, pte_t *ptep)
 {
@@ -497,12 +495,9 @@ static inline pmd_t *pmd_offset(pgd_t *dir, unsigned long address)
 #define pte_offset_kernel(dir, addr)	\
 		((pte_t *) pmd_page_kernel(*(dir)) + pte_index(addr))
 #define pte_offset_map(dir, addr)	\
-		((pte_t *) kmap_atomic(pmd_page(*(dir)), KM_PTE0) + pte_index(addr))
-#define pte_offset_map_nested(dir, addr)	\
-		((pte_t *) kmap_atomic(pmd_page(*(dir)), KM_PTE1) + pte_index(addr))
+		((pte_t *) kmap_atomic(pmd_page(*(dir))) + pte_index(addr))
 
-#define pte_unmap(pte)		kunmap_atomic(pte, KM_PTE0)
-#define pte_unmap_nested(pte)	kunmap_atomic(pte, KM_PTE1)
+#define pte_unmap(pte)		kunmap_atomic(pte)
 
 /* Encode and decode a nonlinear file mapping entry */
 #define PTE_FILE_MAX_BITS	29
@@ -512,15 +507,6 @@ static inline pmd_t *pmd_offset(pgd_t *dir, unsigned long address)
 extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
 
 /*
- * When flushing the tlb entry for a page, we also need to flush the hash
- * table entry.  flush_hash_page is assembler (for speed) in hashtable.S.
- */
-extern int flush_hash_page(unsigned context, unsigned long va, pte_t *ptep);
-
-/* Add an HPTE to the hash table */
-extern void add_hash_page(unsigned context, unsigned long va, pte_t *ptep);
-
-/*
  * Encode and decode a swap entry.
  * Note that the bits we use in a PTE for representing a swap entry
  * must not include the _PAGE_PRESENT bit, or the _PAGE_HASHPTE bit
@@ -533,15 +519,7 @@ extern void add_hash_page(unsigned context, unsigned long va, pte_t *ptep);
 #define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) >> 2 })
 #define __swp_entry_to_pte(x)	((pte_t) { (x).val << 2 })
 
-
-/* CONFIG_APUS */
-/* For virtual address to physical address conversion */
-extern void cache_clear(__u32 addr, int length);
-extern void cache_push(__u32 addr, int length);
-extern int mm_end_of_chunk(unsigned long addr, int len);
 extern unsigned long iopa(unsigned long addr);
-/* extern unsigned long mm_ptov(unsigned long addr) \
-	__attribute__ ((const)); TBD */
 
 /* Values for nocacheflag and cmode */
 /* These are not used by the APUS kernel_map, but prevents
@@ -552,23 +530,9 @@ extern unsigned long iopa(unsigned long addr);
 #define IOMAP_NOCACHE_NONSER	2
 #define IOMAP_NO_COPYBACK	3
 
-/*
- * Map some physical address range into the kernel address space.
- */
-extern unsigned long kernel_map(unsigned long paddr, unsigned long size,
-				int nocacheflag, unsigned long *memavailp);
-
-/*
- * Set cache mode of (kernel space) address range.
- */
-extern void kernel_set_cachemode(unsigned long address, unsigned long size,
-				unsigned int cmode);
-
 /* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
 #define kern_addr_valid(addr)	(1)
 
-#define io_remap_page_range	remap_page_range
-
 /*
  * No page table caches to initialise
  */
@@ -577,10 +541,6 @@ extern void kernel_set_cachemode(unsigned long address, unsigned long size,
 void do_page_fault(struct pt_regs *regs, unsigned long address,
 		unsigned long error_code);
 
-void __init io_block_mapping(unsigned long virt, phys_addr_t phys,
-		unsigned int size, int flags);
-
-void __init adjust_total_lowmem(void);
 void mapin_ram(void);
 int map_page(unsigned long va, phys_addr_t pa, int flags);
 
@@ -600,8 +560,8 @@ void __init *early_get_page(void);
 
 extern unsigned long ioremap_bot, ioremap_base;
 
-void *consistent_alloc(int gfp, size_t size, dma_addr_t *dma_handle);
-void consistent_free(void *vaddr);
+void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *dma_handle);
+void consistent_free(size_t size, void *vaddr);
 void consistent_sync(void *vaddr, size_t size, int direction);
 void consistent_sync_page(struct page *page, unsigned long offset,
 			size_t size, int direction);
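
A note on the pte_update() hunk: the removed version masked interrupts by hand, clearing the IE bit in rmsr with msrclr (an optional MicroBlaze instruction) and restoring it afterwards, while the replacement delegates to raw_local_irq_save()/raw_local_irq_restore(), which pick the right mechanism for the configured core. A minimal sketch of the resulting pattern, not the kernel's exact code; rmw_word() is a hypothetical stand-in operating on a plain word rather than a pte_t:

	#include <linux/irqflags.h>

	/*
	 * Interrupt-safe read-modify-write, as in the new pte_update():
	 * with local interrupts masked, no exception handler on this CPU
	 * can observe or clobber a half-updated word.
	 */
	static inline unsigned long rmw_word(unsigned long *p,
					     unsigned long clr, unsigned long set)
	{
		unsigned long flags, old;

		raw_local_irq_save(flags);	/* mask interrupts on this CPU */
		old = *p;			/* lw            */
		*p = (old & ~clr) | set;	/* andn + or + sw */
		raw_local_irq_restore(flags);	/* restore previous IRQ state */

		return old;	/* callers test e.g. old & _PAGE_ACCESSED */
	}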
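
The pte_offset_map()/pte_unmap() hunk follows the kernel-wide switch to stack-based kmap_atomic(), which dropped the KM_PTE0/KM_PTE1 slot arguments and made the separate *_nested variants unnecessary. A hedged usage sketch of the new calling convention; read_pte(), pmd and addr are illustrative names, not part of this header:

	#include <linux/highmem.h>
	#include <linux/mm.h>

	/* Copy a PTE out through the slot-less kmap_atomic() API. */
	static pte_t read_pte(pmd_t *pmd, unsigned long addr)
	{
		pte_t *ptep, pte;

		ptep = (pte_t *) kmap_atomic(pmd_page(*pmd)) + pte_index(addr);
		pte = *ptep;		/* use the mapping while it is live */
		kunmap_atomic(ptep);	/* unmap by address; no KM_* slot   */

		return pte;
	}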
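
The final hunk tightens the DMA helper prototypes: the gfp argument becomes gfp_t, so sparse can flag flag/integer mixups, and consistent_free() now receives the buffer size, presumably so the implementation can release exactly the pages it mapped. A hypothetical caller under the new signatures; the function and variable names here are illustrative only:

	#include <linux/gfp.h>
	#include <linux/types.h>
	#include <asm/pgtable.h>	/* consistent_alloc()/consistent_free() */

	static void *alloc_dma_buffer(dma_addr_t *handle)
	{
		/* gfp argument is now gfp_t rather than int */
		return consistent_alloc(GFP_KERNEL, PAGE_SIZE, handle);
	}

	static void free_dma_buffer(void *buf)
	{
		/* consistent_free() now needs the size it was allocated with */
		consistent_free(PAGE_SIZE, buf);
	}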
