Diffstat (limited to 'arch/i386/mm')
-rw-r--r--   arch/i386/mm/fault.c    |  4
-rw-r--r--   arch/i386/mm/highmem.c  | 18
-rw-r--r--   arch/i386/mm/init.c     |  1
-rw-r--r--   arch/i386/mm/ioremap.c  | 84
4 files changed, 16 insertions(+), 91 deletions(-)
diff --git a/arch/i386/mm/fault.c b/arch/i386/mm/fault.c
index 5e17a3f43b4..2581575786c 100644
--- a/arch/i386/mm/fault.c
+++ b/arch/i386/mm/fault.c
@@ -440,7 +440,7 @@ good_area:
 		case 1:		/* read, present */
 			goto bad_area;
 		case 0:		/* read, not present */
-			if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
+			if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
 				goto bad_area;
 	}
 
@@ -589,7 +589,7 @@ no_context:
  */
 out_of_memory:
 	up_read(&mm->mmap_sem);
-	if (tsk->pid == 1) {
+	if (is_init(tsk)) {
 		yield();
 		down_read(&mm->mmap_sem);
 		goto survive;
diff --git a/arch/i386/mm/highmem.c b/arch/i386/mm/highmem.c
index ba44000b906..f9f647cdbc7 100644
--- a/arch/i386/mm/highmem.c
+++ b/arch/i386/mm/highmem.c
@@ -38,22 +38,19 @@ void *kmap_atomic(struct page *page, enum km_type type)
 
 	idx = type + KM_TYPE_NR*smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-#ifdef CONFIG_DEBUG_HIGHMEM
 	if (!pte_none(*(kmap_pte-idx)))
 		BUG();
-#endif
 	set_pte(kmap_pte-idx, mk_pte(page, kmap_prot));
-	__flush_tlb_one(vaddr);
 
 	return (void*) vaddr;
 }
 
 void kunmap_atomic(void *kvaddr, enum km_type type)
 {
-#ifdef CONFIG_DEBUG_HIGHMEM
 	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
 	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
 
+#ifdef CONFIG_DEBUG_HIGHMEM
 	if (vaddr >= PAGE_OFFSET && vaddr < (unsigned long)high_memory) {
 		dec_preempt_count();
 		preempt_check_resched();
@@ -62,14 +59,14 @@ void kunmap_atomic(void *kvaddr, enum km_type type)
 
 	if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx))
 		BUG();
-
+#endif
 	/*
-	 * force other mappings to Oops if they'll try to access
-	 * this pte without first remap it
+	 * Force other mappings to Oops if they'll try to access this pte
+	 * without first remap it.  Keeping stale mappings around is a bad idea
+	 * also, in case the page changes cacheability attributes or becomes
+	 * a protected page in a hypervisor.
 	 */
-	pte_clear(&init_mm, vaddr, kmap_pte-idx);
-	__flush_tlb_one(vaddr);
-#endif
+	kpte_clear_flush(kmap_pte-idx, vaddr);
 
 	dec_preempt_count();
 	preempt_check_resched();
@@ -88,7 +85,6 @@ void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
 	idx = type + KM_TYPE_NR*smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 	set_pte(kmap_pte-idx, pfn_pte(pfn, kmap_prot));
-	__flush_tlb_one(vaddr);
 
 	return (void*) vaddr;
 }
diff --git a/arch/i386/mm/init.c b/arch/i386/mm/init.c
index 4a5a914b343..90089c14c23 100644
--- a/arch/i386/mm/init.c
+++ b/arch/i386/mm/init.c
@@ -493,6 +493,7 @@ int __init set_kernel_exec(unsigned long vaddr, int enable)
 		pte->pte_high &= ~(1 << (_PAGE_BIT_NX - 32));
 	else
 		pte->pte_high |= 1 << (_PAGE_BIT_NX - 32);
+	pte_update_defer(&init_mm, vaddr, pte);
 	__flush_tlb_all();
 out:
 	return ret;
diff --git a/arch/i386/mm/ioremap.c b/arch/i386/mm/ioremap.c
index 247fde76aae..fff08ae7b5e 100644
--- a/arch/i386/mm/ioremap.c
+++ b/arch/i386/mm/ioremap.c
@@ -12,7 +12,7 @@
 #include <linux/init.h>
 #include <linux/slab.h>
 #include <linux/module.h>
-#include <asm/io.h>
+#include <linux/io.h>
 #include <asm/fixmap.h>
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
@@ -21,82 +21,6 @@
 #define ISA_START_ADDRESS	0xa0000
 #define ISA_END_ADDRESS		0x100000
 
-static int ioremap_pte_range(pmd_t *pmd, unsigned long addr,
-		unsigned long end, unsigned long phys_addr, unsigned long flags)
-{
-	pte_t *pte;
-	unsigned long pfn;
-
-	pfn = phys_addr >> PAGE_SHIFT;
-	pte = pte_alloc_kernel(pmd, addr);
-	if (!pte)
-		return -ENOMEM;
-	do {
-		BUG_ON(!pte_none(*pte));
-		set_pte(pte, pfn_pte(pfn, __pgprot(_PAGE_PRESENT | _PAGE_RW |
-					_PAGE_DIRTY | _PAGE_ACCESSED | flags)));
-		pfn++;
-	} while (pte++, addr += PAGE_SIZE, addr != end);
-	return 0;
-}
-
-static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
-		unsigned long end, unsigned long phys_addr, unsigned long flags)
-{
-	pmd_t *pmd;
-	unsigned long next;
-
-	phys_addr -= addr;
-	pmd = pmd_alloc(&init_mm, pud, addr);
-	if (!pmd)
-		return -ENOMEM;
-	do {
-		next = pmd_addr_end(addr, end);
-		if (ioremap_pte_range(pmd, addr, next, phys_addr + addr, flags))
-			return -ENOMEM;
-	} while (pmd++, addr = next, addr != end);
-	return 0;
-}
-
-static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
-		unsigned long end, unsigned long phys_addr, unsigned long flags)
-{
-	pud_t *pud;
-	unsigned long next;
-
-	phys_addr -= addr;
-	pud = pud_alloc(&init_mm, pgd, addr);
-	if (!pud)
-		return -ENOMEM;
-	do {
-		next = pud_addr_end(addr, end);
-		if (ioremap_pmd_range(pud, addr, next, phys_addr + addr, flags))
-			return -ENOMEM;
-	} while (pud++, addr = next, addr != end);
-	return 0;
-}
-
-static int ioremap_page_range(unsigned long addr,
-		unsigned long end, unsigned long phys_addr, unsigned long flags)
-{
-	pgd_t *pgd;
-	unsigned long next;
-	int err;
-
-	BUG_ON(addr >= end);
-	flush_cache_all();
-	phys_addr -= addr;
-	pgd = pgd_offset_k(addr);
-	do {
-		next = pgd_addr_end(addr, end);
-		err = ioremap_pud_range(pgd, addr, next, phys_addr+addr, flags);
-		if (err)
-			break;
-	} while (pgd++, addr = next, addr != end);
-	flush_tlb_all();
-	return err;
-}
-
 /*
  * Generic mapping function (not visible outside):
  */
@@ -115,6 +39,7 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned l
 	void __iomem * addr;
 	struct vm_struct * area;
 	unsigned long offset, last_addr;
+	pgprot_t prot;
 
 	/* Don't allow wraparound or zero size */
 	last_addr = phys_addr + size - 1;
@@ -142,6 +67,9 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned l
 			return NULL;
 	}
 
+	prot = __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY
+			| _PAGE_ACCESSED | flags);
+
 	/*
 	 * Mappings have to be page-aligned
 	 */
@@ -158,7 +86,7 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned l
 	area->phys_addr = phys_addr;
 	addr = (void __iomem *) area->addr;
 	if (ioremap_page_range((unsigned long) addr,
-			(unsigned long) addr + size, phys_addr, flags)) {
+			(unsigned long) addr + size, phys_addr, prot)) {
 		vunmap((void __force *) addr);
 		return NULL;
 	}
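
Notes on the changes above (editorial sketches, not part of the commit):

In fault.c, the out-of-memory retry path stops hard-coding the init check as
tsk->pid == 1 and goes through is_init() instead. Assuming the helper of this
kernel generation (declared in <linux/sched.h>), it is a thin wrapper that
keeps call sites honest while the definition of "init" evolves, e.g. for pid
namespaces:

	/*
	 * Sketch of the is_init() helper this diff starts using; at this
	 * point in the series it presumably still reduces to the pid-1
	 * test, but callers no longer need to know that.
	 */
	static inline int is_init(struct task_struct *tsk)
	{
		return tsk->pid == 1;
	}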
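In highmem.c, the pte_clear() + __flush_tlb_one() pair that kunmap_atomic()
used to do by hand (and only under CONFIG_DEBUG_HIGHMEM) is now done
unconditionally through kpte_clear_flush(), per the new comment about stale
mappings and hypervisors. Reconstructed from the removed lines, a native
definition would look roughly like:

	/*
	 * Sketch reconstructed from the lines this diff removes; the real
	 * definition lives in the i386 pgtable headers.  A paravirtualized
	 * kernel can override this to batch the clear and the flush into a
	 * single hypervisor operation.
	 */
	#define kpte_clear_flush(ptep, vaddr)			\
	do {							\
		pte_clear(&init_mm, (vaddr), (ptep));		\
		__flush_tlb_one(vaddr);				\
	} while (0)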
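The single added line in init.c notifies the paravirt layer that
set_kernel_exec() rewrote pte_high in place, before the global TLB flush. On
native kernels the hook should cost nothing; a plausible shape for the
definitions (an assumption, not taken from this diff) is:

	/*
	 * Assumed definitions, for illustration only.  Native builds
	 * compile the hook away; paravirt builds forward it so the
	 * hypervisor sees the in-place PTE modification before the
	 * subsequent __flush_tlb_all().
	 */
	#ifdef CONFIG_PARAVIRT
	#define pte_update_defer(mm, addr, ptep)		\
		paravirt_ops.pte_update_defer((mm), (addr), (ptep))
	#else
	#define pte_update_defer(mm, addr, ptep) do { } while (0)
	#endif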
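Finally, the 76 lines removed from ioremap.c were an i386-private copy of the
four-level page-table walk that by now exists once in generic code; switching
the include from <asm/io.h> to <linux/io.h> picks up the shared declaration.
The generic routine takes a ready-made pgprot_t instead of raw flag bits,
which is why __ioremap now assembles prot once and passes it down. Assuming
the lib/ioremap.c version of this era, the prototype is:

	/* Assumed prototype of the generic helper declared via <linux/io.h>. */
	int ioremap_page_range(unsigned long addr, unsigned long end,
			       unsigned long phys_addr, pgprot_t prot);

The old local copy OR'ed _PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY |
_PAGE_ACCESSED into the flags at the pte level; the diff preserves that
behavior by folding the same bits into prot before the call.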