Diffstat (limited to 'arch/x86/mm')
 arch/x86/mm/dump_pagetables.c | 32
 arch/x86/mm/fault.c           |  4
 arch/x86/mm/highmem_32.c      |  4
 arch/x86/mm/init_64.c         |  2
 arch/x86/mm/ioremap.c         | 14
 arch/x86/mm/kmmio.c           | 16
 arch/x86/mm/numa.c            |  6
 arch/x86/mm/pat.c             |  4
 arch/x86/mm/pat_rbtree.c      | 35
 arch/x86/mm/pf_in.c           | 30
 arch/x86/mm/testmmiotrace.c   | 22
 arch/x86/mm/tlb.c             |  4
 12 files changed, 100 insertions(+), 73 deletions(-)
diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c
index a725b7f760a..0002a3a3308 100644
--- a/arch/x86/mm/dump_pagetables.c
+++ b/arch/x86/mm/dump_pagetables.c
@@ -37,6 +37,28 @@ struct addr_marker {
         const char *name;
 };
 
+/* indices for address_markers; keep sync'd w/ address_markers below */
+enum address_markers_idx {
+        USER_SPACE_NR = 0,
+#ifdef CONFIG_X86_64
+        KERNEL_SPACE_NR,
+        LOW_KERNEL_NR,
+        VMALLOC_START_NR,
+        VMEMMAP_START_NR,
+        HIGH_KERNEL_NR,
+        MODULES_VADDR_NR,
+        MODULES_END_NR,
+#else
+        KERNEL_SPACE_NR,
+        VMALLOC_START_NR,
+        VMALLOC_END_NR,
+# ifdef CONFIG_HIGHMEM
+        PKMAP_BASE_NR,
+# endif
+        FIXADDR_START_NR,
+#endif
+};
+
 /* Address space markers hints */
 static struct addr_marker address_markers[] = {
         { 0, "User Space" },
@@ -331,14 +353,12 @@ static int pt_dump_init(void)
 
 #ifdef CONFIG_X86_32
         /* Not a compile-time constant on x86-32 */
-        address_markers[2].start_address = VMALLOC_START;
-        address_markers[3].start_address = VMALLOC_END;
+        address_markers[VMALLOC_START_NR].start_address = VMALLOC_START;
+        address_markers[VMALLOC_END_NR].start_address = VMALLOC_END;
 # ifdef CONFIG_HIGHMEM
-        address_markers[4].start_address = PKMAP_BASE;
-        address_markers[5].start_address = FIXADDR_START;
-# else
-        address_markers[4].start_address = FIXADDR_START;
+        address_markers[PKMAP_BASE_NR].start_address = PKMAP_BASE;
 # endif
+        address_markers[FIXADDR_START_NR].start_address = FIXADDR_START;
 #endif
 
         pe = debugfs_create_file("kernel_page_tables", 0600, NULL, NULL,
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index f62777940df..4c4508e8a20 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -802,8 +802,10 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
         up_read(&mm->mmap_sem);
 
         /* Kernel mode? Handle exceptions or die: */
-        if (!(error_code & PF_USER))
+        if (!(error_code & PF_USER)) {
                 no_context(regs, error_code, address);
+                return;
+        }
 
         /* User-space => ok to do another page fault: */
         if (is_prefetch(regs, error_code, address))
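The fault.c hunk above is a pure control-flow fix: without the braces and the early return, do_sigbus() called no_context() for a kernel-mode fault and then fell through into the user-space SIGBUS path anyway. A minimal user-space sketch of that bug shape (a standalone toy, not kernel code; the PF_USER value and helper bodies are stand-ins):

#include <stdio.h>

#define PF_USER 0x4     /* stand-in for the kernel's user-mode fault flag */

static void no_context(void)  { puts("kernel fault: fixup or oops"); }
static void send_sigbus(void) { puts("user fault: deliver SIGBUS"); }

static void do_sigbus_toy(unsigned long error_code)
{
        if (!(error_code & PF_USER)) {
                no_context();
                return;         /* the fix: do not fall through */
        }
        send_sigbus();
}

int main(void)
{
        do_sigbus_toy(0);        /* kernel-mode fault */
        do_sigbus_toy(PF_USER);  /* user-mode fault */
        return 0;
}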
diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
index 63a6ba66cbe..5e8fa12ef86 100644
--- a/arch/x86/mm/highmem_32.c
+++ b/arch/x86/mm/highmem_32.c
@@ -53,7 +53,7 @@ void *kmap_atomic(struct page *page, enum km_type type)
         return kmap_atomic_prot(page, type, kmap_prot);
 }
 
-void kunmap_atomic(void *kvaddr, enum km_type type)
+void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
 {
         unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
         enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
@@ -102,7 +102,7 @@ struct page *kmap_atomic_to_page(void *ptr)
 EXPORT_SYMBOL(kmap);
 EXPORT_SYMBOL(kunmap);
 EXPORT_SYMBOL(kmap_atomic);
-EXPORT_SYMBOL(kunmap_atomic);
+EXPORT_SYMBOL(kunmap_atomic_notypecheck);
 EXPORT_SYMBOL(kmap_atomic_prot);
 EXPORT_SYMBOL(kmap_atomic_to_page);
 
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index ee41bba315d..9a6674689a2 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -2,7 +2,7 @@
  *  linux/arch/x86_64/mm/init.c
  *
  *  Copyright (C) 1995  Linus Torvalds
- *  Copyright (C) 2000  Pavel Machek <pavel@suse.cz>
+ *  Copyright (C) 2000  Pavel Machek <pavel@ucw.cz>
  *  Copyright (C) 2002,2003 Andi Kleen <ak@suse.de>
  */
 
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 12e4d2d3c11..3ba6e0608c5 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -62,8 +62,8 @@ int ioremap_change_attr(unsigned long vaddr, unsigned long size,
 static void __iomem *__ioremap_caller(resource_size_t phys_addr,
                 unsigned long size, unsigned long prot_val, void *caller)
 {
-        unsigned long pfn, offset, vaddr;
-        resource_size_t last_addr;
+        unsigned long offset, vaddr;
+        resource_size_t pfn, last_pfn, last_addr;
         const resource_size_t unaligned_phys_addr = phys_addr;
         const unsigned long unaligned_size = size;
         struct vm_struct *area;
@@ -100,10 +100,8 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
         /*
          * Don't allow anybody to remap normal RAM that we're using..
          */
-        for (pfn = phys_addr >> PAGE_SHIFT;
-                                (pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK);
-                                pfn++) {
-
+        last_pfn = last_addr >> PAGE_SHIFT;
+        for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
                 int is_ram = page_is_ram(pfn);
 
                 if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
@@ -115,7 +113,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
          * Mappings have to be page-aligned
          */
         offset = phys_addr & ~PAGE_MASK;
-        phys_addr &= PAGE_MASK;
+        phys_addr &= PHYSICAL_PAGE_MASK;
         size = PAGE_ALIGN(last_addr+1) - phys_addr;
 
         retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
@@ -613,7 +611,7 @@ void __init early_iounmap(void __iomem *addr, unsigned long size)
                 return;
         }
         offset = virt_addr & ~PAGE_MASK;
-        nrpages = PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT;
+        nrpages = PAGE_ALIGN(offset + size) >> PAGE_SHIFT;
 
         idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
         while (nrpages > 0) {
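The __ioremap_caller() hunks widen pfn/last_pfn to resource_size_t, turn the RAM check into a plain pfn comparison, and mask with PHYSICAL_PAGE_MASK. The type changes suggest the motivation: on 32-bit PAE a physical address can exceed 32 bits, and shifting a 32-bit pfn back into an address, as the old loop bound did, silently truncates. A standalone demonstration of that truncation (nothing kernel-specific is assumed):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12

int main(void)
{
        uint64_t phys_addr = 0x100000000ULL;    /* 4 GiB: reachable with PAE */
        uint32_t pfn = (uint32_t)(phys_addr >> PAGE_SHIFT);

        /* Old-style bound: shift the pfn back into an address. In a
         * 32-bit type the shift wraps to 0, so the loop compared
         * against the wrong limit for high physical addresses. */
        printf("pfn                 = %#lx\n", (unsigned long)pfn);
        printf("32-bit pfn << shift = %#lx (wrapped)\n",
               (unsigned long)(uint32_t)(pfn << PAGE_SHIFT));
        /* New-style bound: compare pfns directly in a wide type. */
        printf("64-bit address      = %#llx\n",
               (unsigned long long)pfn << PAGE_SHIFT);
        return 0;
}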
diff --git a/arch/x86/mm/kmmio.c b/arch/x86/mm/kmmio.c
index 5d0e67fff1a..e5d5e2ce9f7 100644
--- a/arch/x86/mm/kmmio.c
+++ b/arch/x86/mm/kmmio.c
@@ -45,6 +45,8 @@ struct kmmio_fault_page {
          * Protected by kmmio_lock, when linked into kmmio_page_table.
          */
         int count;
+
+        bool scheduled_for_release;
 };
 
 struct kmmio_delayed_release {
@@ -398,8 +400,11 @@ static void release_kmmio_fault_page(unsigned long page,
         BUG_ON(f->count < 0);
         if (!f->count) {
                 disarm_kmmio_fault_page(f);
-                f->release_next = *release_list;
-                *release_list = f;
+                if (!f->scheduled_for_release) {
+                        f->release_next = *release_list;
+                        *release_list = f;
+                        f->scheduled_for_release = true;
+                }
         }
 }
 
@@ -471,8 +476,10 @@ static void remove_kmmio_fault_pages(struct rcu_head *head)
                         prevp = &f->release_next;
                 } else {
                         *prevp = f->release_next;
+                        f->release_next = NULL;
+                        f->scheduled_for_release = false;
                 }
-                f = f->release_next;
+                f = *prevp;
         }
         spin_unlock_irqrestore(&kmmio_lock, flags);
 
@@ -510,6 +517,9 @@ void unregister_kmmio_probe(struct kmmio_probe *p)
         kmmio_count--;
         spin_unlock_irqrestore(&kmmio_lock, flags);
 
+        if (!release_list)
+                return;
+
         drelease = kmalloc(sizeof(*drelease), GFP_ATOMIC);
         if (!drelease) {
                 pr_crit("leaking kmmio_fault_page objects.\n");
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index 550df481acc..787c52ca49c 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -3,12 +3,6 @@
 #include <linux/module.h>
 #include <linux/bootmem.h>
 
-#ifdef CONFIG_DEBUG_PER_CPU_MAPS
-# define DBG(x...) printk(KERN_DEBUG x)
-#else
-# define DBG(x...)
-#endif
-
 /*
  * Which logical CPUs are on which nodes
  */
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index acc15b23b74..f6ff57b7efa 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -158,7 +158,7 @@ static unsigned long pat_x_mtrr_type(u64 start, u64 end, unsigned long req_type)
         return req_type;
 }
 
-static int pat_pagerange_is_ram(unsigned long start, unsigned long end)
+static int pat_pagerange_is_ram(resource_size_t start, resource_size_t end)
 {
         int ram_page = 0, not_rampage = 0;
         unsigned long page_nr;
@@ -302,7 +302,7 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
                 return -EINVAL;
         }
 
-        new = kmalloc(sizeof(struct memtype), GFP_KERNEL);
+        new = kzalloc(sizeof(struct memtype), GFP_KERNEL);
         if (!new)
                 return -ENOMEM;
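The kmmio.c hunks make scheduling a fault page for release idempotent: release_kmmio_fault_page() pushes onto a singly linked release list, and pushing the same node twice would link it to itself and corrupt the list walk in remove_kmmio_fault_pages(). A standalone sketch of the guarded push (struct and function are simplified stand-ins for the kernel's kmmio_fault_page handling):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct fault_page {
        struct fault_page *release_next;
        bool scheduled_for_release;
};

static void schedule_release(struct fault_page *f,
                             struct fault_page **release_list)
{
        /* The flag makes a repeated push a no-op instead of a self-link. */
        if (!f->scheduled_for_release) {
                f->release_next = *release_list;
                *release_list = f;
                f->scheduled_for_release = true;
        }
}

int main(void)
{
        struct fault_page page = { NULL, false };
        struct fault_page *release_list = NULL;

        schedule_release(&page, &release_list);
        schedule_release(&page, &release_list); /* second call is a no-op */

        /* Without the guard, the second push would set
         * page.release_next = &page, turning the list into a cycle. */
        printf("self-linked? %s\n",
               page.release_next == &page ? "yes (corrupt)" : "no (ok)");
        return 0;
}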
diff --git a/arch/x86/mm/pat_rbtree.c b/arch/x86/mm/pat_rbtree.c
index f537087bb74..8acaddd0fb2 100644
--- a/arch/x86/mm/pat_rbtree.c
+++ b/arch/x86/mm/pat_rbtree.c
@@ -34,8 +34,7 @@
  * memtype_lock protects the rbtree.
  */
 
-static void memtype_rb_augment_cb(struct rb_node *node);
-static struct rb_root memtype_rbroot = RB_AUGMENT_ROOT(&memtype_rb_augment_cb);
+static struct rb_root memtype_rbroot = RB_ROOT;
 
 static int is_node_overlap(struct memtype *node, u64 start, u64 end)
 {
@@ -56,7 +55,7 @@ static u64 get_subtree_max_end(struct rb_node *node)
 }
 
 /* Update 'subtree_max_end' for a node, based on node and its children */
-static void update_node_max_end(struct rb_node *node)
+static void memtype_rb_augment_cb(struct rb_node *node, void *__unused)
 {
         struct memtype *data;
         u64 max_end, child_max_end;
@@ -78,25 +77,6 @@ static void update_node_max_end(struct rb_node *node)
         data->subtree_max_end = max_end;
 }
 
-/* Update 'subtree_max_end' for a node and all its ancestors */
-static void update_path_max_end(struct rb_node *node)
-{
-        u64 old_max_end, new_max_end;
-
-        while (node) {
-                struct memtype *data = container_of(node, struct memtype, rb);
-
-                old_max_end = data->subtree_max_end;
-                update_node_max_end(node);
-                new_max_end = data->subtree_max_end;
-
-                if (new_max_end == old_max_end)
-                        break;
-
-                node = rb_parent(node);
-        }
-}
-
 /* Find the first (lowest start addr) overlapping range from rb tree */
 static struct memtype *memtype_rb_lowest_match(struct rb_root *root,
                                 u64 start, u64 end)
@@ -190,12 +170,6 @@ failure:
         return -EBUSY;
 }
 
-static void memtype_rb_augment_cb(struct rb_node *node)
-{
-        if (node)
-                update_path_max_end(node);
-}
-
 static void memtype_rb_insert(struct rb_root *root, struct memtype *newdata)
 {
         struct rb_node **node = &(root->rb_node);
@@ -213,6 +187,7 @@ static void memtype_rb_insert(struct rb_root *root, struct memtype *newdata)
 
         rb_link_node(&newdata->rb, parent, node);
         rb_insert_color(&newdata->rb, root);
+        rb_augment_insert(&newdata->rb, memtype_rb_augment_cb, NULL);
 }
 
 int rbt_memtype_check_insert(struct memtype *new, unsigned long *ret_type)
@@ -226,6 +201,7 @@ int rbt_memtype_check_insert(struct memtype *new, unsigned long *ret_type)
                 if (ret_type)
                         new->type = *ret_type;
 
+                new->subtree_max_end = new->end;
                 memtype_rb_insert(&memtype_rbroot, new);
         }
         return err;
@@ -233,13 +209,16 @@ int rbt_memtype_check_insert(struct memtype *new, unsigned long *ret_type)
 
 struct memtype *rbt_memtype_erase(u64 start, u64 end)
 {
+        struct rb_node *deepest;
         struct memtype *data;
 
         data = memtype_rb_exact_match(&memtype_rbroot, start, end);
         if (!data)
                 goto out;
 
+        deepest = rb_augment_erase_begin(&data->rb);
         rb_erase(&data->rb, &memtype_rbroot);
+        rb_augment_erase_end(deepest, memtype_rb_augment_cb, NULL);
 out:
         return data;
 }
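pat_rbtree.c moves from the old RB_AUGMENT_ROOT callback to the rb_augment_insert()/rb_augment_erase_begin()/rb_augment_erase_end() interface, but the invariant it maintains is unchanged: each node caches the largest range end in its subtree, so an overlap search can skip any subtree whose cached maximum precedes the query start. A standalone sketch of that interval-tree invariant on a plain binary tree (all names and types here are hypothetical; the kernel keeps subtree_max_end current through the callbacks above):

#include <stdio.h>

struct range_node {
        unsigned long long start, end, subtree_max_end;
        struct range_node *left, *right;
};

static unsigned long long subtree_max(struct range_node *n)
{
        return n ? n->subtree_max_end : 0;
}

/* Recompute the cached value from a node and its children, mirroring
 * what memtype_rb_augment_cb() does for one rb_node. */
static void update_node(struct range_node *n)
{
        unsigned long long m = n->end;

        if (subtree_max(n->left) > m)
                m = subtree_max(n->left);
        if (subtree_max(n->right) > m)
                m = subtree_max(n->right);
        n->subtree_max_end = m;
}

/* Find a range overlapping [start, end) in a tree sorted by start.
 * If the left subtree's cached max end exceeds the query start, any
 * existing overlap is reachable by descending left; otherwise only
 * the right subtree can hold one. */
static struct range_node *find_overlap(struct range_node *n,
                                       unsigned long long start,
                                       unsigned long long end)
{
        while (n) {
                if (n->start < end && start < n->end)
                        return n;
                if (subtree_max(n->left) > start)
                        n = n->left;
                else
                        n = n->right;
        }
        return NULL;
}

int main(void)
{
        struct range_node a = { 0x1000, 0x2000, 0, NULL, NULL };
        struct range_node b = { 0x3000, 0x8000, 0, NULL, NULL };
        struct range_node root = { 0x2800, 0x3000, 0, &a, &b };
        struct range_node *hit;

        update_node(&a);
        update_node(&b);
        update_node(&root);     /* children first, as on erase */

        hit = find_overlap(&root, 0x7000, 0x7800);
        printf("overlap: [%#llx, %#llx)\n", hit->start, hit->end);
        return 0;
}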
diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
index 308e32570d8..38e6d174c49 100644
--- a/arch/x86/mm/pf_in.c
+++ b/arch/x86/mm/pf_in.c
@@ -40,16 +40,16 @@ static unsigned char prefix_codes[] = {
 static unsigned int reg_rop[] = {
         0x8A, 0x8B, 0xB60F, 0xB70F, 0xBE0F, 0xBF0F
 };
-static unsigned int reg_wop[] = { 0x88, 0x89 };
+static unsigned int reg_wop[] = { 0x88, 0x89, 0xAA, 0xAB };
 static unsigned int imm_wop[] = { 0xC6, 0xC7 };
 /* IA32 Manual 3, 3-432*/
-static unsigned int rw8[] = { 0x88, 0x8A, 0xC6 };
+static unsigned int rw8[] = { 0x88, 0x8A, 0xC6, 0xAA };
 static unsigned int rw32[] = {
-        0x89, 0x8B, 0xC7, 0xB60F, 0xB70F, 0xBE0F, 0xBF0F
+        0x89, 0x8B, 0xC7, 0xB60F, 0xB70F, 0xBE0F, 0xBF0F, 0xAB
 };
-static unsigned int mw8[] = { 0x88, 0x8A, 0xC6, 0xB60F, 0xBE0F };
+static unsigned int mw8[] = { 0x88, 0x8A, 0xC6, 0xB60F, 0xBE0F, 0xAA };
 static unsigned int mw16[] = { 0xB70F, 0xBF0F };
-static unsigned int mw32[] = { 0x89, 0x8B, 0xC7 };
+static unsigned int mw32[] = { 0x89, 0x8B, 0xC7, 0xAB };
 static unsigned int mw64[] = {};
 #else /* not __i386__ */
 static unsigned char prefix_codes[] = {
@@ -63,20 +63,20 @@ static unsigned char prefix_codes[] = {
 static unsigned int reg_rop[] = {
         0x8A, 0x8B, 0xB60F, 0xB70F, 0xBE0F, 0xBF0F
 };
-static unsigned int reg_wop[] = { 0x88, 0x89 };
+static unsigned int reg_wop[] = { 0x88, 0x89, 0xAA, 0xAB };
 static unsigned int imm_wop[] = { 0xC6, 0xC7 };
-static unsigned int rw8[] = { 0xC6, 0x88, 0x8A };
+static unsigned int rw8[] = { 0xC6, 0x88, 0x8A, 0xAA };
 static unsigned int rw32[] = {
-        0xC7, 0x89, 0x8B, 0xB60F, 0xB70F, 0xBE0F, 0xBF0F
+        0xC7, 0x89, 0x8B, 0xB60F, 0xB70F, 0xBE0F, 0xBF0F, 0xAB
 };
 /* 8 bit only */
-static unsigned int mw8[] = { 0xC6, 0x88, 0x8A, 0xB60F, 0xBE0F };
+static unsigned int mw8[] = { 0xC6, 0x88, 0x8A, 0xB60F, 0xBE0F, 0xAA };
 /* 16 bit only */
 static unsigned int mw16[] = { 0xB70F, 0xBF0F };
 /* 16 or 32 bit */
 static unsigned int mw32[] = { 0xC7 };
 /* 16, 32 or 64 bit */
-static unsigned int mw64[] = { 0x89, 0x8B };
+static unsigned int mw64[] = { 0x89, 0x8B, 0xAB };
 #endif /* not __i386__ */
 
 struct prefix_bits {
@@ -410,7 +410,6 @@ static unsigned long *get_reg_w32(int no, struct pt_regs *regs)
 unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
 {
         unsigned int opcode;
-        unsigned char mod_rm;
         int reg;
         unsigned char *p;
         struct prefix_bits prf;
@@ -437,8 +436,13 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
                 goto err;
 
 do_work:
-        mod_rm = *p;
-        reg = ((mod_rm >> 3) & 0x7) | (prf.rexr << 3);
+        /* for STOS, source register is fixed */
+        if (opcode == 0xAA || opcode == 0xAB) {
+                reg = arg_AX;
+        } else {
+                unsigned char mod_rm = *p;
+                reg = ((mod_rm >> 3) & 0x7) | (prf.rexr << 3);
+        }
         switch (get_ins_reg_width(ins_addr)) {
         case 1:
                 return *get_reg_w8(reg, prf.rex, regs);
diff --git a/arch/x86/mm/testmmiotrace.c b/arch/x86/mm/testmmiotrace.c
index 8565d944f7c..38868adf07e 100644
--- a/arch/x86/mm/testmmiotrace.c
+++ b/arch/x86/mm/testmmiotrace.c
@@ -90,6 +90,27 @@ static void do_test(unsigned long size)
         iounmap(p);
 }
 
+/*
+ * Tests how mmiotrace behaves in face of multiple ioremap / iounmaps in
+ * a short time. We had a bug in deferred freeing procedure which tried
+ * to free this region multiple times (ioremap can reuse the same address
+ * for many mappings).
+ */
+static void do_test_bulk_ioremapping(void)
+{
+        void __iomem *p;
+        int i;
+
+        for (i = 0; i < 10; ++i) {
+                p = ioremap_nocache(mmio_address, PAGE_SIZE);
+                if (p)
+                        iounmap(p);
+        }
+
+        /* Force freeing. If it will crash we will know why. */
+        synchronize_rcu();
+}
+
 static int __init init(void)
 {
         unsigned long size = (read_far) ? (8 << 20) : (16 << 10);
@@ -104,6 +125,7 @@ static int __init init(void)
                    "and writing 16 kB of rubbish in there.\n",
                    size >> 10, mmio_address);
         do_test(size);
+        do_test_bulk_ioremapping();
         pr_info("All done.\n");
         return 0;
 }
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 426f3a1a64d..c03f14ab666 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -278,11 +278,9 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
 
 static void do_flush_tlb_all(void *info)
 {
-        unsigned long cpu = smp_processor_id();
-
         __flush_tlb_all();
         if (percpu_read(cpu_tlbstate.state) == TLBSTATE_LAZY)
-                leave_mm(cpu);
+                leave_mm(smp_processor_id());
 }
 
 void flush_tlb_all(void)
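The pf_in.c changes in this series teach mmiotrace's instruction decoder about STOS (0xAA stosb, 0xAB stosw/stosl/stosq). Unlike the MOV forms already in the opcode tables, STOS carries no ModRM byte: the stored value always comes from AL/AX/EAX/RAX, which is why get_ins_reg_val() now short-circuits instead of decoding a register field. A standalone sketch of that register selection (REG_AX is a hypothetical stand-in for the kernel's arg_AX index):

#include <stdio.h>

#define REG_AX 0        /* stand-in for the kernel's arg_AX */

static int decode_source_reg(unsigned char opcode, unsigned char mod_rm,
                             int rex_r)
{
        /* STOS has no ModRM; its data register is implicit. */
        if (opcode == 0xAA || opcode == 0xAB)
                return REG_AX;
        /* Otherwise the register lives in ModRM bits 5:3, extended
         * by the REX.R prefix bit on x86-64. */
        return ((mod_rm >> 3) & 0x7) | (rex_r << 3);
}

int main(void)
{
        /* mov %ecx,(%rdi) = 0x89 0x0F: ModRM reg field selects ECX (1) */
        printf("mov:  reg %d\n", decode_source_reg(0x89, 0x0F, 0));
        /* rep stosl = 0xF3 0xAB: no ModRM, implicitly EAX */
        printf("stos: reg %d\n", decode_source_reg(0xAB, 0 /* unused */, 0));
        return 0;
}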