| author | Linus Torvalds <torvalds@linux-foundation.org> | 2010-10-26 17:15:20 -0700 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2010-10-26 17:15:20 -0700 |
| commit | 31453a9764f7e2a72a6e2c502ace586e2663a68c (patch) | |
| tree | 5d4db63de5b4b85d1ffdab4e95a75175a784a10a /arch | |
| parent | f9ba5375a8aae4aeea6be15df77e24707a429812 (diff) | |
| parent | 93ed0e2d07b25aff4db1d61bfbcd1e82074c0ad5 (diff) | |
Merge branch 'akpm-incoming-1'
* akpm-incoming-1: (176 commits)
scripts/checkpatch.pl: add check for declaration of pci_device_id
scripts/checkpatch.pl: add warnings for static char that could be static const char
checkpatch: version 0.31
checkpatch: statement/block context analyser should look at sanitised lines
checkpatch: handle EXPORT_SYMBOL for DEVICE_ATTR and similar
checkpatch: clean up structure definition macro handline
checkpatch: update copyright dates
checkpatch: Add additional attribute #defines
checkpatch: check for incorrect permissions
checkpatch: ensure kconfig help checks only apply when we are adding help
checkpatch: simplify and consolidate "missing space after" checks
checkpatch: add check for space after struct, union, and enum
checkpatch: returning errno typically should be negative
checkpatch: handle casts better fixing false categorisation of : as binary
checkpatch: ensure we do not collapse bracketed sections into constants
checkpatch: suggest cleanpatch and cleanfile when appropriate
checkpatch: types may sit on a line on their own
checkpatch: fix regressions in "fix handling of leading spaces"
div64_u64(): improve precision on 32bit platforms
lib/parser: cleanup match_number()
...
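Most entries above are checkpatch tweaks, but the div64_u64() item is a real numerical fix that is easy to get wrong. The lib/ diff itself is outside this arch-limited view, so the sketch below is an assumption drawn from the subject line only: the usual trick is to reduce a 64/64 divide to a 64/32 divide by scaling both operands, then correcting the result, which the shift can make off by at most one.

```c
#include <assert.h>
#include <stdint.h>

/* Hedged sketch of the scaling idea; not the kernel's code. */
static uint64_t div64_u64_sketch(uint64_t dividend, uint64_t divisor)
{
	uint32_t high = divisor >> 32;
	uint64_t quot;

	if (high == 0) {
		quot = dividend / (uint32_t)divisor;	/* plain 64/32 divide */
	} else {
		/* Shift until the divisor fits in 32 bits. */
		int n = 1 + (32 - __builtin_clz(high));	/* i.e. 1 + fls(high) */

		quot = (dividend >> n) / (uint32_t)(divisor >> n);
		/* The scaled quotient can be one too large; fix up. */
		if (quot != 0)
			quot--;
		if (dividend - quot * divisor >= divisor)
			quot++;
	}
	return quot;
}

int main(void)
{
	/* A case a truncating 32-bit fallback gets wrong: divisor just
	 * above 2^32. */
	assert(div64_u64_sketch(0x300000002ULL, 0x100000001ULL) == 2);
	return 0;
}
```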
Diffstat (limited to 'arch')
72 files changed, 454 insertions, 588 deletions
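Two mm-tree series dominate the arch diff that follows: removal of the pte_offset_map_nested()/pte_unmap_nested() pair, and the switch of kmap_atomic() from caller-chosen km_type slots to a small per-CPU index stack (kmap_atomic_idx_push()/kmap_atomic_idx_pop(), visible in every highmem.c below). A caller-side before/after sketch for orientation; the two functions belong to different kernel trees and would not compile together, and the exact wrapper names are from the generic highmem layer, not from this arch diff:

```c
#include <linux/highmem.h>

/* Before: each use site dedicated a km_type slot. */
static u32 read_word_old(struct page *page)
{
	u32 *p = kmap_atomic(page, KM_USER0);
	u32 v = *p;
	kunmap_atomic(p, KM_USER0);
	return v;
}

/* After this merge: the slot index comes from a per-CPU stack, so no
 * slot is named, and mappings nest freely as long as they are
 * released in LIFO order. */
static u32 read_word_new(struct page *page)
{
	u32 *p = kmap_atomic(page);
	u32 v = *p;
	kunmap_atomic(p);
	return v;
}
```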
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index d04ccd73af4..28f93a6c0fd 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -55,6 +55,9 @@ config ZONE_DMA
 	bool
 	default y
 
+config ARCH_DMA_ADDR_T_64BIT
+	def_bool y
+
 config NEED_DMA_MAP_STATE
 	def_bool y
 
diff --git a/arch/alpha/include/asm/core_mcpcia.h b/arch/alpha/include/asm/core_mcpcia.h
index 21ac53383b3..9f67a056b46 100644
--- a/arch/alpha/include/asm/core_mcpcia.h
+++ b/arch/alpha/include/asm/core_mcpcia.h
@@ -247,7 +247,7 @@ struct el_MCPCIA_uncorrected_frame_mcheck {
 #define vip	volatile int __force *
 #define vuip	volatile unsigned int __force *
 
-#ifdef MCPCIA_ONE_HAE_WINDOW
+#ifndef MCPCIA_ONE_HAE_WINDOW
 #define MCPCIA_FROB_MMIO						\
 	if (__mcpcia_is_mmio(hose)) {					\
 		set_hae(hose & 0xffffffff);				\
diff --git a/arch/alpha/include/asm/core_t2.h b/arch/alpha/include/asm/core_t2.h
index 471c07292e0..91b46801b29 100644
--- a/arch/alpha/include/asm/core_t2.h
+++ b/arch/alpha/include/asm/core_t2.h
@@ -1,6 +1,9 @@
 #ifndef __ALPHA_T2__H__
 #define __ALPHA_T2__H__
 
+/* Fit everything into one 128MB HAE window. */
+#define T2_ONE_HAE_WINDOW 1
+
 #include <linux/types.h>
 #include <linux/spinlock.h>
 #include <asm/compiler.h>
@@ -19,7 +22,7 @@
  *
  */
 
-#define T2_MEM_R1_MASK 0x07ffffff /* Mem sparse region 1 mask is 26 bits */
+#define T2_MEM_R1_MASK 0x07ffffff /* Mem sparse region 1 mask is 27 bits */
 
 /* GAMMA-SABLE is a SABLE with EV5-based CPUs */
 /* All LYNX machines, EV4 or EV5, use the GAMMA bias also */
@@ -85,7 +88,9 @@
 #define T2_DIR			(IDENT_ADDR + GAMMA_BIAS + 0x38e0004a0UL)
 #define T2_ICE			(IDENT_ADDR + GAMMA_BIAS + 0x38e0004c0UL)
 
+#ifndef T2_ONE_HAE_WINDOW
 #define T2_HAE_ADDRESS		T2_HAE_1
+#endif
 
 /* T2 CSRs are in the non-cachable primary IO space from 3.8000.0000 to
  3.8fff.ffff
@@ -429,13 +434,15 @@ extern inline void t2_outl(u32 b, unsigned long addr)
  *
  */
 
+#ifdef T2_ONE_HAE_WINDOW
+#define t2_set_hae
+#else
 #define t2_set_hae { \
-	msb = addr >> 27; \
+	unsigned long msb = addr >> 27; \
 	addr &= T2_MEM_R1_MASK; \
 	set_hae(msb); \
 }
-
-extern raw_spinlock_t t2_hae_lock;
+#endif
 
 /*
  * NOTE: take T2_DENSE_MEM off in each readX/writeX routine, since
@@ -446,28 +453,22 @@ extern raw_spinlock_t t2_hae_lock;
 __EXTERN_INLINE u8 t2_readb(const volatile void __iomem *xaddr)
 {
 	unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM;
-	unsigned long result, msb;
-	unsigned long flags;
-	raw_spin_lock_irqsave(&t2_hae_lock, flags);
+	unsigned long result;
 
 	t2_set_hae;
 
 	result = *(vip) ((addr << 5) + T2_SPARSE_MEM + 0x00);
-	raw_spin_unlock_irqrestore(&t2_hae_lock, flags);
 	return __kernel_extbl(result, addr & 3);
 }
 
 __EXTERN_INLINE u16 t2_readw(const volatile void __iomem *xaddr)
 {
 	unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM;
-	unsigned long result, msb;
-	unsigned long flags;
-	raw_spin_lock_irqsave(&t2_hae_lock, flags);
+	unsigned long result;
 
 	t2_set_hae;
 
 	result = *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x08);
-	raw_spin_unlock_irqrestore(&t2_hae_lock, flags);
 	return __kernel_extwl(result, addr & 3);
 }
 
@@ -478,59 +479,47 @@ __EXTERN_INLINE u16 t2_readw(const volatile void __iomem *xaddr)
 __EXTERN_INLINE u32 t2_readl(const volatile void __iomem *xaddr)
 {
 	unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM;
-	unsigned long result, msb;
-	unsigned long flags;
-	raw_spin_lock_irqsave(&t2_hae_lock, flags);
+	unsigned long result;
 
 	t2_set_hae;
 
 	result = *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x18);
-	raw_spin_unlock_irqrestore(&t2_hae_lock, flags);
 	return result & 0xffffffffUL;
 }
 
 __EXTERN_INLINE u64 t2_readq(const volatile void __iomem *xaddr)
 {
 	unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM;
-	unsigned long r0, r1, work, msb;
-	unsigned long flags;
-	raw_spin_lock_irqsave(&t2_hae_lock, flags);
+	unsigned long r0, r1, work;
 
 	t2_set_hae;
 
 	work = (addr << 5) + T2_SPARSE_MEM + 0x18;
 	r0 = *(vuip)(work);
 	r1 = *(vuip)(work + (4 << 5));
-	raw_spin_unlock_irqrestore(&t2_hae_lock, flags);
 	return r1 << 32 | r0;
 }
 
 __EXTERN_INLINE void t2_writeb(u8 b, volatile void __iomem *xaddr)
 {
 	unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM;
-	unsigned long msb, w;
-	unsigned long flags;
-	raw_spin_lock_irqsave(&t2_hae_lock, flags);
+	unsigned long w;
 
 	t2_set_hae;
 
 	w = __kernel_insbl(b, addr & 3);
 	*(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x00) = w;
-	raw_spin_unlock_irqrestore(&t2_hae_lock, flags);
 }
 
 __EXTERN_INLINE void t2_writew(u16 b, volatile void __iomem *xaddr)
 {
 	unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM;
-	unsigned long msb, w;
-	unsigned long flags;
-	raw_spin_lock_irqsave(&t2_hae_lock, flags);
+	unsigned long w;
 
 	t2_set_hae;
 
 	w = __kernel_inswl(b, addr & 3);
 	*(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x08) = w;
-	raw_spin_unlock_irqrestore(&t2_hae_lock, flags);
 }
 
 /*
@@ -540,29 +529,22 @@ __EXTERN_INLINE void t2_writew(u16 b, volatile void __iomem *xaddr)
 __EXTERN_INLINE void t2_writel(u32 b, volatile void __iomem *xaddr)
 {
 	unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM;
-	unsigned long msb;
-	unsigned long flags;
-	raw_spin_lock_irqsave(&t2_hae_lock, flags);
 
 	t2_set_hae;
 
 	*(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x18) = b;
-	raw_spin_unlock_irqrestore(&t2_hae_lock, flags);
 }
 
 __EXTERN_INLINE void t2_writeq(u64 b, volatile void __iomem *xaddr)
 {
 	unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM;
-	unsigned long msb, work;
-	unsigned long flags;
-	raw_spin_lock_irqsave(&t2_hae_lock, flags);
+	unsigned long work;
 
 	t2_set_hae;
 
 	work = (addr << 5) + T2_SPARSE_MEM + 0x18;
 	*(vuip)work = b;
 	*(vuip)(work + (4 << 5)) = b >> 32;
-	raw_spin_unlock_irqrestore(&t2_hae_lock, flags);
 }
 
 __EXTERN_INLINE void __iomem *t2_ioportmap(unsigned long addr)
diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
index 71a24329414..de98a732683 100644
--- a/arch/alpha/include/asm/pgtable.h
+++ b/arch/alpha/include/asm/pgtable.h
@@ -318,9 +318,7 @@ extern inline pte_t * pte_offset_kernel(pmd_t * dir, unsigned long address)
 }
 
 #define pte_offset_map(dir,addr)	pte_offset_kernel((dir),(addr))
-#define pte_offset_map_nested(dir,addr)	pte_offset_kernel((dir),(addr))
 #define pte_unmap(pte)			do { } while (0)
-#define pte_unmap_nested(pte)		do { } while (0)
 
 extern pgd_t swapper_pg_dir[1024];
diff --git a/arch/alpha/kernel/core_t2.c b/arch/alpha/kernel/core_t2.c
index e6d90568b65..2f770e99428 100644
--- a/arch/alpha/kernel/core_t2.c
+++ b/arch/alpha/kernel/core_t2.c
@@ -74,8 +74,6 @@
 # define DBG(args)
 #endif
 
-DEFINE_RAW_SPINLOCK(t2_hae_lock);
-
 static volatile unsigned int t2_mcheck_any_expected;
 static volatile unsigned int t2_mcheck_last_taken;
 
@@ -406,6 +404,7 @@ void __init
 t2_init_arch(void)
 {
 	struct pci_controller *hose;
+	struct resource *hae_mem;
 	unsigned long temp;
 	unsigned int i;
 
@@ -433,7 +432,13 @@ t2_init_arch(void)
 	 */
 	pci_isa_hose = hose = alloc_pci_controller();
 	hose->io_space = &ioport_resource;
-	hose->mem_space = &iomem_resource;
+	hae_mem = alloc_resource();
+	hae_mem->start = 0;
+	hae_mem->end = T2_MEM_R1_MASK;
+	hae_mem->name = pci_hae0_name;
+	if (request_resource(&iomem_resource, hae_mem) < 0)
+		printk(KERN_ERR "Failed to request HAE_MEM\n");
+	hose->mem_space = hae_mem;
 	hose->index = 0;
 
 	hose->sparse_mem_base = T2_SPARSE_MEM - IDENT_ADDR;
diff --git a/arch/alpha/kernel/machvec_impl.h b/arch/alpha/kernel/machvec_impl.h
index 512685f7809..7fa62488bd1 100644
--- a/arch/alpha/kernel/machvec_impl.h
+++ b/arch/alpha/kernel/machvec_impl.h
@@ -25,6 +25,9 @@
 #ifdef MCPCIA_ONE_HAE_WINDOW
 #define MCPCIA_HAE_ADDRESS	(&alpha_mv.hae_cache)
 #endif
+#ifdef T2_ONE_HAE_WINDOW
+#define T2_HAE_ADDRESS		(&alpha_mv.hae_cache)
+#endif
 
 /* Only a few systems don't define IACK_SC, handling all interrupts through
  the SRM console.  But splitting out that one case from IO() below
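A quick sanity check on the alpha numbers above: T2_MEM_R1_MASK is 0x07ffffff, which is 2^27 - 1, so the corrected comment (27 bits, not 26) and the new "one 128MB HAE window" remark agree, and with a single window the per-access set_hae() plus t2_hae_lock dance can be deleted, as the hunks above do. Illustrative check, not kernel code:

```c
#include <assert.h>

int main(void)
{
	unsigned long mask = 0x07ffffff;	/* T2_MEM_R1_MASK */

	/* 2^27 - 1: a 27-bit offset mask... */
	assert(mask == (1UL << 27) - 1);
	/* ...covering exactly 128MB of sparse-space offsets. */
	assert(mask + 1 == 128UL * 1024 * 1024);
	return 0;
}
```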
diff --git a/arch/arm/include/asm/highmem.h b/arch/arm/include/asm/highmem.h
index 5aff5812660..1fc684e70ab 100644
--- a/arch/arm/include/asm/highmem.h
+++ b/arch/arm/include/asm/highmem.h
@@ -35,9 +35,9 @@ extern void kunmap_high_l1_vipt(struct page *page, pte_t saved_pte);
 #ifdef CONFIG_HIGHMEM
 extern void *kmap(struct page *page);
 extern void kunmap(struct page *page);
-extern void *kmap_atomic(struct page *page, enum km_type type);
-extern void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type);
-extern void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
+extern void *__kmap_atomic(struct page *page);
+extern void __kunmap_atomic(void *kvaddr);
+extern void *kmap_atomic_pfn(unsigned long pfn);
 extern struct page *kmap_atomic_to_page(const void *ptr);
 #endif
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index a9672e8406a..b155414192d 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -263,17 +263,15 @@ extern struct page *empty_zero_page;
 #define pte_page(pte)		(pfn_to_page(pte_pfn(pte)))
 #define pte_offset_kernel(dir,addr)	(pmd_page_vaddr(*(dir)) + __pte_index(addr))
 
-#define pte_offset_map(dir,addr)	(__pte_map(dir, KM_PTE0) + __pte_index(addr))
-#define pte_offset_map_nested(dir,addr)	(__pte_map(dir, KM_PTE1) + __pte_index(addr))
-#define pte_unmap(pte)			__pte_unmap(pte, KM_PTE0)
-#define pte_unmap_nested(pte)		__pte_unmap(pte, KM_PTE1)
+#define pte_offset_map(dir,addr)	(__pte_map(dir) + __pte_index(addr))
+#define pte_unmap(pte)			__pte_unmap(pte)
 
 #ifndef CONFIG_HIGHPTE
-#define __pte_map(dir,km)	pmd_page_vaddr(*(dir))
-#define __pte_unmap(pte,km)	do { } while (0)
+#define __pte_map(dir)		pmd_page_vaddr(*(dir))
+#define __pte_unmap(pte)	do { } while (0)
 #else
-#define __pte_map(dir,km)	((pte_t *)kmap_atomic(pmd_page(*(dir)), km) + PTRS_PER_PTE)
-#define __pte_unmap(pte,km)	kunmap_atomic((pte - PTRS_PER_PTE), km)
+#define __pte_map(dir)		((pte_t *)kmap_atomic(pmd_page(*(dir))) + PTRS_PER_PTE)
+#define __pte_unmap(pte)	kunmap_atomic((pte - PTRS_PER_PTE))
 #endif
 
 #define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,pte,ext)
diff --git a/arch/arm/mach-ep93xx/clock.c b/arch/arm/mach-ep93xx/clock.c
index 4566bd1c866..ef06c66a6f1 100644
--- a/arch/arm/mach-ep93xx/clock.c
+++ b/arch/arm/mach-ep93xx/clock.c
@@ -358,8 +358,7 @@ static int calc_clk_div(struct clk *clk, unsigned long rate,
 	int i, found = 0, __div = 0, __pdiv = 0;
 
 	/* Don't exceed the maximum rate */
-	max_rate = max(max(clk_pll1.rate / 4, clk_pll2.rate / 4),
-		       clk_xtali.rate / 4);
+	max_rate = max3(clk_pll1.rate / 4, clk_pll2.rate / 4, clk_xtali.rate / 4);
 	rate = min(rate, max_rate);
 
 	/*
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
index 8440d952ba6..c493d7244d3 100644
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
@@ -89,13 +89,13 @@ static int adjust_pte(struct vm_area_struct *vma, unsigned long address,
 	 * open-code the spin-locking.
 	 */
 	ptl = pte_lockptr(vma->vm_mm, pmd);
-	pte = pte_offset_map_nested(pmd, address);
+	pte = pte_offset_map(pmd, address);
 	spin_lock(ptl);
 
 	ret = do_adjust_pte(vma, address, pfn, pte);
 
 	spin_unlock(ptl);
-	pte_unmap_nested(pte);
+	pte_unmap(pte);
 
 	return ret;
 }
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
index 1fbdb55bfd1..c00f119babb 100644
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -36,18 +36,17 @@ void kunmap(struct page *page)
 }
 EXPORT_SYMBOL(kunmap);
 
-void *kmap_atomic(struct page *page, enum km_type type)
+void *__kmap_atomic(struct page *page)
 {
 	unsigned int idx;
 	unsigned long vaddr;
 	void *kmap;
+	int type;
 
 	pagefault_disable();
 	if (!PageHighMem(page))
 		return page_address(page);
 
-	debug_kmap_atomic(type);
-
 #ifdef CONFIG_DEBUG_HIGHMEM
 	/*
 	 * There is no cache coherency issue when non VIVT, so force the
@@ -61,6 +60,8 @@ void *kmap_atomic(struct page *page, enum km_type type)
 	if (kmap)
 		return kmap;
 
+	type = kmap_atomic_idx_push();
+
 	idx = type + KM_TYPE_NR * smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 #ifdef CONFIG_DEBUG_HIGHMEM
@@ -80,14 +81,17 @@ void *kmap_atomic(struct page *page, enum km_type type)
 	return (void *)vaddr;
 }
-EXPORT_SYMBOL(kmap_atomic);
+EXPORT_SYMBOL(__kmap_atomic);
 
-void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
+void __kunmap_atomic(void *kvaddr)
 {
 	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
-	unsigned int idx = type + KM_TYPE_NR * smp_processor_id();
+	int idx, type;
 
 	if (kvaddr >= (void *)FIXADDR_START) {
+		type = kmap_atomic_idx_pop();
+		idx = type + KM_TYPE_NR * smp_processor_id();
+
 		if (cache_is_vivt())
 			__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
 #ifdef CONFIG_DEBUG_HIGHMEM
@@ -103,15 +107,16 @@ void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
 	}
 	pagefault_enable();
 }
-EXPORT_SYMBOL(kunmap_atomic_notypecheck);
+EXPORT_SYMBOL(__kunmap_atomic);
 
-void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
+void *kmap_atomic_pfn(unsigned long pfn)
 {
-	unsigned int idx;
 	unsigned long vaddr;
+	int idx, type;
 
 	pagefault_disable();
 
+	type = kmap_atomic_idx_push();
 	idx = type + KM_TYPE_NR * smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 #ifdef CONFIG_DEBUG_HIGHMEM
diff --git a/arch/arm/mm/pgd.c b/arch/arm/mm/pgd.c
index be5f58e153b..69bbfc6645a 100644
--- a/arch/arm/mm/pgd.c
+++ b/arch/arm/mm/pgd.c
@@ -57,9 +57,9 @@ pgd_t *get_pgd_slow(struct mm_struct *mm)
 			goto no_pte;
 
 		init_pmd = pmd_offset(init_pgd, 0);
-		init_pte = pte_offset_map_nested(init_pmd, 0);
+		init_pte = pte_offset_map(init_pmd, 0);
 		set_pte_ext(new_pte, *init_pte, 0);
-		pte_unmap_nested(init_pte);
+		pte_unmap(init_pte);
 		pte_unmap(new_pte);
 	}
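The fault-armv.c and pgd.c hunks work because, with stack-based kmap indices, a second concurrent pte mapping no longer needs its own KM_PTE1 slot; pte_offset_map() can simply be used twice. A schematic of the pattern (kernel-flavoured sketch, not a file from this merge; real code would use set_pte_at() and hold the appropriate page-table locks):

```c
#include <linux/mm.h>

static void copy_one_pte_sketch(pmd_t *dst_pmd, pmd_t *src_pmd,
				unsigned long addr)
{
	pte_t *dst = pte_offset_map(dst_pmd, addr);
	pte_t *src = pte_offset_map(src_pmd, addr); /* was pte_offset_map_nested() */

	*dst = *src;		/* schematic; real code uses set_pte_at() */

	pte_unmap(src);		/* LIFO: release the inner mapping first */
	pte_unmap(dst);
}
```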
diff --git a/arch/avr32/include/asm/pgtable.h b/arch/avr32/include/asm/pgtable.h
index a9ae30c41e7..6fbfea61f7b 100644
--- a/arch/avr32/include/asm/pgtable.h
+++ b/arch/avr32/include/asm/pgtable.h
@@ -319,9 +319,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 #define pte_offset_kernel(dir, address)					\
 	((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(address))
 #define pte_offset_map(dir, address) pte_offset_kernel(dir, address)
-#define pte_offset_map_nested(dir, address) pte_offset_kernel(dir, address)
 #define pte_unmap(pte)		do { } while (0)
-#define pte_unmap_nested(pte)	do { } while (0)
 
 struct vm_area_struct;
 extern void update_mmu_cache(struct vm_area_struct * vma,
diff --git a/arch/blackfin/include/asm/entry.h b/arch/blackfin/include/asm/entry.h
index a6886f6e481..4104d5783e2 100644
--- a/arch/blackfin/include/asm/entry.h
+++ b/arch/blackfin/include/asm/entry.h
@@ -15,14 +15,6 @@
 #define	LFLUSH_I_AND_D	0x00000808
 #define	LSIGTRAP	5
 
-/* process bits for task_struct.flags */
-#define	PF_TRACESYS_OFF	3
-#define	PF_TRACESYS_BIT	5
-#define	PF_PTRACED_OFF	3
-#define	PF_PTRACED_BIT	4
-#define	PF_DTRACE_OFF	1
-#define	PF_DTRACE_BIT	5
-
 /*
  * NOTE!  The single-stepping code assumes that all interrupt handlers
  * start by saving SYSCFG on the stack with their first instruction.
diff --git a/arch/cris/include/asm/pgtable.h b/arch/cris/include/asm/pgtable.h
index f63d6fccbc6..9eaae217b21 100644
--- a/arch/cris/include/asm/pgtable.h
+++ b/arch/cris/include/asm/pgtable.h
@@ -248,10 +248,8 @@ static inline pgd_t * pgd_offset(const struct mm_struct *mm, unsigned long address)
 	((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address))
 #define pte_offset_map(dir, address)					\
 	((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
-#define pte_offset_map_nested(dir, address) pte_offset_map(dir, address)
 
 #define pte_unmap(pte) do { } while (0)
-#define pte_unmap_nested(pte) do { } while (0)
 #define pte_pfn(x)		((unsigned long)(__va((x).pte)) >> PAGE_SHIFT)
 #define pfn_pte(pfn, prot)	__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
diff --git a/arch/frv/include/asm/highmem.h b/arch/frv/include/asm/highmem.h
index cb4c317eaec..a8d6565d415 100644
--- a/arch/frv/include/asm/highmem.h
+++ b/arch/frv/include/asm/highmem.h
@@ -112,12 +112,11 @@ extern struct page *kmap_atomic_to_page(void *ptr);
 	(void *) damlr;							  \
 })
 
-static inline void *kmap_atomic(struct page *page, enum km_type type)
+static inline void *kmap_atomic_primary(struct page *page, enum km_type type)
 {
 	unsigned long paddr;
 
 	pagefault_disable();
-	debug_kmap_atomic(type);
 	paddr = page_to_phys(page);
 
 	switch (type) {
@@ -125,14 +124,6 @@ static inline void *kmap_atomic(struct page *page, enum km_type type)
 	case 1:		return __kmap_atomic_primary(1, paddr, 3);
 	case 2:		return __kmap_atomic_primary(2, paddr, 4);
 	case 3:		return __kmap_atomic_primary(3, paddr, 5);
-	case 4:		return __kmap_atomic_primary(4, paddr, 6);
-	case 5:		return __kmap_atomic_primary(5, paddr, 7);
-	case 6:		return __kmap_atomic_primary(6, paddr, 8);
-	case 7:		return __kmap_atomic_primary(7, paddr, 9);
-	case 8:		return __kmap_atomic_primary(8, paddr, 10);
-
-	case 9 ... 9 + NR_TLB_LINES - 1:
-		return __kmap_atomic_secondary(type - 9, paddr);
 
 	default:
 		BUG();
@@ -152,22 +143,13 @@ do {									\
 	asm volatile("tlbpr %0,gr0,#4,#1" : : "r"(vaddr) : "memory");	\
 } while(0)
 
-static inline void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
+static inline void kunmap_atomic_primary(void *kvaddr, enum km_type type)
 {
 	switch (type) {
 	case 0:		__kunmap_atomic_primary(0, 2);	break;
 	case 1:		__kunmap_atomic_primary(1, 3);	break;
 	case 2:		__kunmap_atomic_primary(2, 4);	break;
 	case 3:		__kunmap_atomic_primary(3, 5);	break;
-	case 4:		__kunmap_atomic_primary(4, 6);	break;
-	case 5:		__kunmap_atomic_primary(5, 7);	break;
-	case 6:		__kunmap_atomic_primary(6, 8);	break;
-	case 7:		__kunmap_atomic_primary(7, 9);	break;
-	case 8:		__kunmap_atomic_primary(8, 10);	break;
-
-	case 9 ... 9 + NR_TLB_LINES - 1:
-		__kunmap_atomic_secondary(type - 9, kvaddr);
-		break;
 
 	default:
 		BUG();
@@ -175,6 +157,9 @@ static inline void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
 	pagefault_enable();
 }
 
+void *__kmap_atomic(struct page *page);
+void __kunmap_atomic(void *kvaddr);
+
 #endif /* !__ASSEMBLY__ */
 
 #endif /* __KERNEL__ */
diff --git a/arch/frv/include/asm/pgtable.h b/arch/frv/include/asm/pgtable.h
index c18b0d32e63..6bc241e4b4f 100644
--- a/arch/frv/include/asm/pgtable.h
+++ b/arch/frv/include/asm/pgtable.h
@@ -451,17 +451,12 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 
 #if defined(CONFIG_HIGHPTE)
 #define pte_offset_map(dir, address) \
-	((pte_t *)kmap_atomic(pmd_page(*(dir)),KM_PTE0) + pte_index(address))
-#define pte_offset_map_nested(dir, address) \
-	((pte_t *)kmap_atomic(pmd_page(*(dir)),KM_PTE1) + pte_index(address))
-#define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0)
-#define pte_unmap_nested(pte) kunmap_atomic((pte), KM_PTE1)
+	((pte_t *)kmap_atomic(pmd_page(*(dir))) + pte_index(address))
+#define pte_unmap(pte) kunmap_atomic(pte)
 #else
 #define pte_offset_map(dir, address) \
 	((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
-#define pte_offset_map_nested(dir, address) pte_offset_map((dir), (address))
 #define pte_unmap(pte) do { } while (0)
-#define pte_unmap_nested(pte) do { } while (0)
 #endif
 
 /*
diff --git a/arch/frv/mb93090-mb00/pci-dma.c b/arch/frv/mb93090-mb00/pci-dma.c
index 85d110b71cf..41098a3803a 100644
--- a/arch/frv/mb93090-mb00/pci-dma.c
+++ b/arch/frv/mb93090-mb00/pci-dma.c
@@ -61,14 +61,14 @@ int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 	dampr2 = __get_DAMPR(2);
 
 	for (i = 0; i < nents; i++) {
-		vaddr = kmap_atomic(sg_page(&sg[i]), __KM_CACHE);
+		vaddr = kmap_atomic_primary(sg_page(&sg[i]), __KM_CACHE);
 
 		frv_dcache_writeback((unsigned long) vaddr,
 				     (unsigned long) vaddr + PAGE_SIZE);
 
 	}
 
-	kunmap_atomic(vaddr, __KM_CACHE);
+	kunmap_atomic_primary(vaddr, __KM_CACHE);
 	if (dampr2) {
 		__set_DAMPR(2, dampr2);
 		__set_IAMPR(2, dampr2);
diff --git a/arch/frv/mm/cache-page.c b/arch/frv/mm/cache-page.c
index 0261cbe153b..b24ade27a0f 100644
--- a/arch/frv/mm/cache-page.c
+++ b/arch/frv/mm/cache-page.c
@@ -26,11 +26,11 @@ void flush_dcache_page(struct page *page)
 
 	dampr2 = __get_DAMPR(2);
 
-	vaddr = kmap_atomic(page, __KM_CACHE);
+	vaddr = kmap_atomic_primary(page, __KM_CACHE);
 
 	frv_dcache_writeback((unsigned long) vaddr, (unsigned long) vaddr + PAGE_SIZE);
 
-	kunmap_atomic(vaddr, __KM_CACHE);
+	kunmap_atomic_primary(vaddr, __KM_CACHE);
 
 	if (dampr2) {
 		__set_DAMPR(2, dampr2);
@@ -54,12 +54,12 @@ void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
 
 	dampr2 = __get_DAMPR(2);
 
-	vaddr = kmap_atomic(page, __KM_CACHE);
+	vaddr = kmap_atomic_primary(page, __KM_CACHE);
 	start = (start & ~PAGE_MASK) | (unsigned long) vaddr;
 	frv_cache_wback_inv(start, start + len);
-	kunmap_atomic(vaddr, __KM_CACHE);
+	kunmap_atomic_primary(vaddr, __KM_CACHE);
 
 	if (dampr2) {
 		__set_DAMPR(2, dampr2);
diff --git a/arch/frv/mm/highmem.c b/arch/frv/mm/highmem.c
index eadd0765807..61088dcc159 100644
--- a/arch/frv/mm/highmem.c
+++ b/arch/frv/mm/highmem.c
@@ -36,3 +36,53 @@ struct page *kmap_atomic_to_page(void *ptr)
 {
 	return virt_to_page(ptr);
 }
+
+void *__kmap_atomic(struct page *page)
+{
+	unsigned long paddr;
+	int type;
+
+	pagefault_disable();
+	type = kmap_atomic_idx_push();
+	paddr = page_to_phys(page);
+
+	switch (type) {
+	/*
+	 * The first 4 primary maps are reserved for architecture code
+	 */
+	case 0:		return __kmap_atomic_primary(4, paddr, 6);
+	case 1:		return __kmap_atomic_primary(5, paddr, 7);
+	case 2:		return __kmap_atomic_primary(6, paddr, 8);
+	case 3:		return __kmap_atomic_primary(7, paddr, 9);
+	case 4:		return __kmap_atomic_primary(8, paddr, 10);
+
+	case 5 ... 5 + NR_TLB_LINES - 1:
+		return __kmap_atomic_secondary(type - 5, paddr);
+
+	default:
+		BUG();
+		return NULL;
+	}
+}
+EXPORT_SYMBOL(__kmap_atomic);
+
+void __kunmap_atomic(void *kvaddr)
+{
+	int type = kmap_atomic_idx_pop();
+	switch (type) {
+	case 0:		__kunmap_atomic_primary(4, 6);	break;
+	case 1:		__kunmap_atomic_primary(5, 7);	break;
+	case 2:		__kunmap_atomic_primary(6, 8);	break;
+	case 3:		__kunmap_atomic_primary(7, 9);	break;
+	case 4:		__kunmap_atomic_primary(8, 10);	break;
+
+	case 5 ... 5 + NR_TLB_LINES - 1:
+		__kunmap_atomic_secondary(type - 5, kvaddr);
+		break;
+
+	default:
+		BUG();
+	}
+	pagefault_enable();
+}
+EXPORT_SYMBOL(__kunmap_atomic);
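In the frv split above, kmap_atomic_primary() keeps the first four slots for arch-internal users (for example __KM_CACHE in pci-dma.c and cache-page.c), while the new stack-based __kmap_atomic() takes slots 4-8 before spilling into the secondary TLB lines. The pairing in its switch follows one offset rule; a tiny check of that reading (illustrative only; the third argument is the AMPR register number consumed by the __kmap_atomic_primary() macro, as far as this diff shows):

```c
#include <assert.h>

/* Closed form of the primary cases in the new frv __kmap_atomic(). */
static void frv_slot(int type, int *slot, int *ampr)
{
	*slot = 4 + type;	/* stack index 0..4 -> slots 4..8 */
	*ampr = *slot + 2;	/* matching the old 0..8 table */
}

int main(void)
{
	int slot, ampr;

	frv_slot(0, &slot, &ampr);	/* case 0: __kmap_atomic_primary(4, paddr, 6) */
	assert(slot == 4 && ampr == 6);
	frv_slot(4, &slot, &ampr);	/* case 4: __kmap_atomic_primary(8, paddr, 10) */
	assert(slot == 8 && ampr == 10);
	return 0;
}
```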
diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
index c3286f42e50..1a97af31ef1 100644
--- a/arch/ia64/include/asm/pgtable.h
+++ b/arch/ia64/include/asm/pgtable.h
@@ -406,9 +406,7 @@ pgd_offset (const struct mm_struct *mm, unsigned long address)
 #define pte_index(addr)		(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
 #define pte_offset_kernel(dir,addr)	((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(addr))
 #define pte_offset_map(dir,addr)	pte_offset_kernel(dir, addr)
-#define pte_offset_map_nested(dir,addr)	pte_offset_map(dir, addr)
 #define pte_unmap(pte)			do { } while (0)
-#define pte_unmap_nested(pte)		do { } while (0)
 
 /* atomic versions of the some PTE manipulations: */
diff --git a/arch/m32r/include/asm/pgtable.h b/arch/m32r/include/asm/pgtable.h
index e6359c566b5..8a28cfea272 100644
--- a/arch/m32r/include/asm/pgtable.h
+++ b/arch/m32r/include/asm/pgtable.h
@@ -332,9 +332,7 @@ static inline void pmd_set(pmd_t * pmdp, pte_t * ptep)
 	((pte_t *)pmd_page_vaddr(*(dir)) + pte_index(address))
 #define pte_offset_map(dir, address)	\
 	((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
-#define pte_offset_map_nested(dir, address)	pte_offset_map(dir, address)
 #define pte_unmap(pte)		do { } while (0)
-#define pte_unmap_nested(pte)	do { } while (0)
 
 /* Encode and de-code a swap entry */
 #define __swp_type(x)			(((x).val >> 2) & 0x1f)
diff --git a/arch/m68k/include/asm/entry_mm.h b/arch/m68k/include/asm/entry_mm.h
index e41fea399bf..73b8c8fbed9 100644
--- a/arch/m68k/include/asm/entry_mm.h
+++ b/arch/m68k/include/asm/entry_mm.h
@@ -50,14 +50,6 @@
 
 LFLUSH_I_AND_D = 0x00000808
 
-/* process bits for task_struct.ptrace */
-PT_TRACESYS_OFF = 3
-PT_TRACESYS_BIT = 1
-PT_PTRACED_OFF = 3
-PT_PTRACED_BIT = 0
-PT_DTRACE_OFF = 3
-PT_DTRACE_BIT = 2
-
 #define SAVE_ALL_INT save_all_int
 #define SAVE_ALL_SYS save_all_sys
 #define RESTORE_ALL restore_all
diff --git a/arch/m68k/include/asm/entry_no.h b/arch/m68k/include/asm/entry_no.h
index 80e41492aa2..26be277394f 100644
--- a/arch/m68k/include/asm/entry_no.h
+++ b/arch/m68k/include/asm/entry_no.h
@@ -32,16 +32,6 @@
 
 #ifdef __ASSEMBLY__
 
-/* process bits for task_struct.flags */
-PF_TRACESYS_OFF = 3
-PF_TRACESYS_BIT = 5
-PF_PTRACED_OFF = 3
-PF_PTRACED_BIT = 4
-PF_DTRACE_OFF = 1
-PF_DTRACE_BIT = 5
-
-LENOSYS = 38
-
 #define SWITCH_STACK_SIZE (6*4+4)	/* Includes return address */
 
 /*
diff --git a/arch/m68k/include/asm/motorola_pgtable.h b/arch/m68k/include/asm/motorola_pgtable.h
index 8e9a8a754dd..45bd3f589bf 100644
--- a/arch/m68k/include/asm/motorola_pgtable.h
+++ b/arch/m68k/include/asm/motorola_pgtable.h
@@ -221,9 +221,7 @@ static inline pte_t *pte_offset_kernel(pmd_t *pmdp, unsigned long address)
 }
 
 #define pte_offset_map(pmdp,address) ((pte_t *)__pmd_page(*pmdp) + (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))
-#define pte_offset_map_nested(pmdp, address) pte_offset_map(pmdp, address)
 #define pte_unmap(pte)		((void)0)
-#define pte_unmap_nested(pte)	((void)0)
 
 /*
  * Allocate and free page tables. The xxx_kernel() versions are
diff --git a/arch/m68k/include/asm/sun3_pgtable.h b/arch/m68k/include/asm/sun3_pgtable.h
index f847ec732d6..cf5fad9b525 100644
--- a/arch/m68k/include/asm/sun3_pgtable.h
+++ b/arch/m68k/include/asm/sun3_pgtable.h
@@ -219,9 +219,7 @@ static inline pte_t pgoff_to_pte(unsigned off)
 #define pte_offset_kernel(pmd, address) ((pte_t *) __pmd_page(*pmd) + pte_index(address))
 /* FIXME: should we bother with kmap() here? */
 #define pte_offset_map(pmd, address) ((pte_t *)kmap(pmd_page(*pmd)) + pte_index(address))
-#define pte_offset_map_nested(pmd, address) pte_offset_map(pmd, address)
 #define pte_unmap(pte) kunmap(pte)
-#define pte_unmap_nested(pte) kunmap(pte)
 
 /* Macros to (de)construct the fake PTEs representing swap pages. */
 #define __swp_type(x)		((x).val & 0x7F)
diff --git a/arch/microblaze/include/asm/pgtable.h b/arch/microblaze/include/asm/pgtable.h
index d4f421672d3..cae268c22ba 100644
--- a/arch/microblaze/include/asm/pgtable.h
+++ b/arch/microblaze/include/asm/pgtable.h
@@ -504,12 +504,9 @@ static inline pmd_t *pmd_offset(pgd_t *dir, unsigned long address)
 #define pte_offset_kernel(dir, addr)	\
 	((pte_t *) pmd_page_kernel(*(dir)) + pte_index(addr))
 #define pte_offset_map(dir, addr)		\
-	((pte_t *) kmap_atomic(pmd_page(*(dir)), KM_PTE0) + pte_index(addr))
-#define pte_offset_map_nested(dir, addr)	\
-	((pte_t *) kmap_atomic(pmd_page(*(dir)), KM_PTE1) + pte_index(addr))
+	((pte_t *) kmap_atomic(pmd_page(*(dir))) + pte_index(addr))
 
-#define pte_unmap(pte)		kunmap_atomic(pte, KM_PTE0)
-#define pte_unmap_nested(pte)	kunmap_atomic(pte, KM_PTE1)
+#define pte_unmap(pte)		kunmap_atomic(pte)
 
 /* Encode and decode a nonlinear file mapping entry */
 #define PTE_FILE_MAX_BITS	29
diff --git a/arch/mips/include/asm/highmem.h b/arch/mips/include/asm/highmem.h
index 75753ca73bf..77e644082a3 100644
--- a/arch/mips/include/asm/highmem.h
+++ b/arch/mips/include/asm/highmem.h
@@ -45,18 +45,12 @@ extern pte_t *pkmap_page_table;
 extern void * kmap_high(struct page *page);
 extern void kunmap_high(struct page *page);
 
-extern void *__kmap(struct page *page);
-extern void __kunmap(struct page *page);
-extern void *__kmap_atomic(struct page *page, enum km_type type);
-extern void __kunmap_atomic_notypecheck(void *kvaddr, enum km_type type);
-extern void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
-extern struct page *__kmap_atomic_to_page(void *ptr);
-
-#define kmap			__kmap
-#define kunmap			__kunmap
-#define kmap_atomic		__kmap_atomic
-#define kunmap_atomic_notypecheck	__kunmap_atomic_notypecheck
-#define kmap_atomic_to_page	__kmap_atomic_to_page
+extern void *kmap(struct page *page);
+extern void kunmap(struct page *page);
+extern void *__kmap_atomic(struct page *page);
+extern void __kunmap_atomic(void *kvaddr);
+extern void *kmap_atomic_pfn(unsigned long pfn);
+extern struct page *kmap_atomic_to_page(void *ptr);
 
 #define flush_cache_kmaps()	flush_cache_all()
diff --git a/arch/mips/include/asm/pgtable-32.h b/arch/mips/include/asm/pgtable-32.h
index ae90412556d..8a153d2fa62 100644
--- a/arch/mips/include/asm/pgtable-32.h
+++ b/arch/mips/include/asm/pgtable-32.h
@@ -154,10 +154,7 @@ pfn_pte(unsigned long pfn, pgprot_t prot)
 
 #define pte_offset_map(dir, address)					\
 	((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
-#define pte_offset_map_nested(dir, address)				\
-	((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
 #define pte_unmap(pte) ((void)(pte))
-#define pte_unmap_nested(pte) ((void)(pte))
 
 #if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
diff --git a/arch/mips/include/asm/pgtable-64.h b/arch/mips/include/asm/pgtable-64.h
index 1be4b0fa30d..f00896087dd 100644
--- a/arch/mips/include/asm/pgtable-64.h
+++ b/arch/mips/include/asm/pgtable-64.h
@@ -257,10 +257,7 @@ static inline pmd_t *pmd_offset(pud_t * pud, unsigned long address)
 	((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address))
 #define pte_offset_map(dir, address)					\
 	((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
-#define pte_offset_map_nested(dir, address)				\
-	((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
 #define pte_unmap(pte) ((void)(pte))
-#define pte_unmap_nested(pte) ((void)(pte))
 
 /*
  * Initialize a new pgd / pmd table with invalid pointers.
diff --git a/arch/mips/mm/highmem.c b/arch/mips/mm/highmem.c
index 6a2b1bf9ef1..1e69b1fb4b8 100644
--- a/arch/mips/mm/highmem.c
+++ b/arch/mips/mm/highmem.c
@@ -9,7 +9,7 @@ static pte_t *kmap_pte;
 
 unsigned long highstart_pfn, highend_pfn;
 
-void *__kmap(struct page *page)
+void *kmap(struct page *page)
 {
 	void *addr;
 
@@ -21,16 +21,16 @@ void *__kmap(struct page *page)
 
 	return addr;
 }
-EXPORT_SYMBOL(__kmap);
+EXPORT_SYMBOL(kmap);
 
-void __kunmap(struct page *page)
+void kunmap(struct page *page)
 {
 	BUG_ON(in_interrupt());
 	if (!PageHighMem(page))
 		return;
 	kunmap_high(page);
 }
-EXPORT_SYMBOL(__kunmap);
+EXPORT_SYMBOL(kunmap);
 
 /*
  * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
@@ -41,17 +41,17 @@ EXPORT_SYMBOL(__kunmap);
  * kmaps are appropriate for short, tight code paths only.
  */
 
-void *__kmap_atomic(struct page *page, enum km_type type)
+void *__kmap_atomic(struct page *page)
 {
-	enum fixed_addresses idx;
 	unsigned long vaddr;
+	int idx, type;
 
 	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
 	pagefault_disable();
 	if (!PageHighMem(page))
 		return page_address(page);
 
-	debug_kmap_atomic(type);
+	type = kmap_atomic_idx_push();
 	idx = type + KM_TYPE_NR*smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 #ifdef CONFIG_DEBUG_HIGHMEM
@@ -64,43 +64,47 @@ void *__kmap_atomic(struct page *page, enum km_type type)
 }
 EXPORT_SYMBOL(__kmap_atomic);
 
-void __kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
+void __kunmap_atomic(void *kvaddr)
 {
-#ifdef CONFIG_DEBUG_HIGHMEM
 	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
-	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
+	int type;
 
 	if (vaddr < FIXADDR_START) { // FIXME
 		pagefault_enable();
 		return;
 	}
 
-	BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
+	type = kmap_atomic_idx_pop();
+#ifdef CONFIG_DEBUG_HIGHMEM
+	{
+		int idx = type + KM_TYPE_NR * smp_processor_id();
 
-	/*
-	 * force other mappings to Oops if they'll try to access
-	 * this pte without first remap it
-	 */
-	pte_clear(&init_mm, vaddr, kmap_pte-idx);
-	local_flush_tlb_one(vaddr);
-#endif
+		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
+		/*
+		 * force other mappings to Oops if they'll try to access
+		 * this pte without first remap it
+		 */
+		pte_clear(&init_mm, vaddr, kmap_pte-idx);
+		local_flush_tlb_one(vaddr);
+	}
+#endif
 	pagefault_enable();
 }
-EXPORT_SYMBOL(__kunmap_atomic_notypecheck);
+EXPORT_SYMBOL(__kunmap_atomic);
 
 /*
  * This is the same as kmap_atomic() but can map memory that doesn't
  * have a struct page associated with it.
 */
-void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
+void *kmap_atomic_pfn(unsigned long pfn)
 {
-	enum fixed_addresses idx;
 	unsigned long vaddr;
+	int idx, type;
 
 	pagefault_disable();
 
-	debug_kmap_atomic(type);
+	type = kmap_atomic_idx_push();
 	idx = type + KM_TYPE_NR*smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 	set_pte(kmap_pte-idx, pfn_pte(pfn, PAGE_KERNEL));
@@ -109,7 +113,7 @@ void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
 	return (void*) vaddr;
 }
 
-struct page *__kmap_atomic_to_page(void *ptr)
+struct page *kmap_atomic_to_page(void *ptr)
 {
 	unsigned long idx, vaddr = (unsigned long)ptr;
 	pte_t *pte;
diff --git a/arch/mn10300/include/asm/highmem.h b/arch/mn10300/include/asm/highmem.h
index b0b187a29b8..f577ba2268c 100644
--- a/arch/mn10300/include/asm/highmem.h
+++ b/arch/mn10300/include/asm/highmem.h
@@ -70,15 +70,16 @@ static inline void kunmap(struct page *page)
 * be used in IRQ contexts, so in some (very limited) cases we need
 * it.
 */
-static inline unsigned long kmap_atomic(struct page *page, enum km_type type)
+static inline unsigned long __kmap_atomic(struct page *page)
 {
-	enum fixed_addresses idx;
 	unsigned long vaddr;
+	int idx, type;
 
+	pagefault_disable();
 	if (page < highmem_start_page)
 		return page_address(page);
 
-	debug_kmap_atomic(type);
+	type = kmap_atomic_idx_push();
 	idx = type + KM_TYPE_NR * smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 #if HIGHMEM_DEBUG
@@ -91,26 +92,35 @@ static inline unsigned long kmap_atomic(struct page *page, enum km_type type)
 	return vaddr;
 }
 
-static inline void kunmap_atomic_notypecheck(unsigned long vaddr, enum km_type type)
+static inline void __kunmap_atomic(unsigned long vaddr)
 {
-#if HIGHMEM_DEBUG
-	enum fixed_addresses idx = type + KM_TYPE_NR * smp_processor_id();
+	int type;
 
-	if (vaddr < FIXADDR_START) /* FIXME */
+	if (vaddr < FIXADDR_START) { /* FIXME */
+		pagefault_enable();
 		return;
+	}
 
-	if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx))
-		BUG();
+	type = kmap_atomic_idx_pop();
 
-	/*
-	 * force other mappings to Oops if they'll try to access
-	 * this pte without first remap it
-	 */
-	pte_clear(kmap_pte - idx);
-	__flush_tlb_one(vaddr);
+#if HIGHMEM_DEBUG
+	{
+		unsigned int idx;
+		idx = type + KM_TYPE_NR * smp_processor_id();
+
+		if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx))
+			BUG();
+
+		/*
+		 * force other mappings to Oops if they'll try to access
+		 * this pte without first remap it
+		 */
+		pte_clear(kmap_pte - idx);
+		__flush_tlb_one(vaddr);
+	}
 #endif
+	pagefault_enable();
 }
-
 #endif /* __KERNEL__ */
 
 #endif /* _ASM_HIGHMEM_H */
diff --git a/arch/mn10300/include/asm/pgtable.h b/arch/mn10300/include/asm/pgtable.h
index 16d88577f3e..b049a8bd157 100644
--- a/arch/mn10300/include/asm/pgtable.h
+++ b/arch/mn10300/include/asm/pgtable.h
@@ -457,9 +457,7 @@ static inline int set_kernel_exec(unsigned long vaddr, int enable)
 
 #define pte_offset_map(dir, address) \
 	((pte_t *) page_address(pmd_page(*(dir))) + pte_index(address))
-#define pte_offset_map_nested(dir, address) pte_offset_map(dir, address)
 #define pte_unmap(pte)		do {} while (0)
-#define pte_unmap_nested(pte)	do {} while (0)
 
 /*
  * The MN10300 has external MMU info in the form of a TLB: this is adapted from
diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
index 01c15035e78..865f37a8a88 100644
--- a/arch/parisc/include/asm/pgtable.h
+++ b/arch/parisc/include/asm/pgtable.h
@@ -397,9 +397,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 #define pte_offset_kernel(pmd, address) \
 	((pte_t *) pmd_page_vaddr(*(pmd)) + pte_index(address))
 #define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
-#define pte_offset_map_nested(pmd, address) pte_offset_kernel(pmd, address)
 #define pte_unmap(pte) do { } while (0)
-#define pte_unmap_nested(pte) do { } while (0)
 
 #define pte_unmap(pte)			do { } while (0)
 #define pte_unmap_nested(pte)		do { } while (0)
diff --git a/arch/powerpc/include/asm/highmem.h b/arch/powerpc/include/asm/highmem.h
index d10d64a4be3..dbc264010d0 100644
--- a/arch/powerpc/include/asm/highmem.h
+++ b/arch/powerpc/include/asm/highmem.h
@@ -60,9 +60,8 @@ extern pte_t *pkmap_page_table;
 
 extern void *kmap_high(struct page *page);
 extern void kunmap_high(struct page *page);
-extern void *kmap_atomic_prot(struct page *page, enum km_type type,
-			      pgprot_t prot);
-extern void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type);
+extern void *kmap_atomic_prot(struct page *page, pgprot_t prot);
+extern void __kunmap_atomic(void *kvaddr);
 
 static inline void *kmap(struct page *page)
 {
@@ -80,9 +79,9 @@ static inline void kunmap(struct page *page)
 	kunmap_high(page);
 }
 
-static inline void *kmap_atomic(struct page *page, enum km_type type)
+static inline void *__kmap_atomic(struct page *page)
 {
-	return kmap_atomic_prot(page, type, kmap_prot);
+	return kmap_atomic_prot(page, kmap_prot);
 }
 
 static inline struct page *kmap_atomic_to_page(void *ptr)
diff --git a/arch/powerpc/include/asm/pgtable-ppc32.h b/arch/powerpc/include/asm/pgtable-ppc32.h
index a7db96f2b5c..47edde8c355 100644
--- a/arch/powerpc/include/asm/pgtable-ppc32.h
+++ b/arch/powerpc/include/asm/pgtable-ppc32.h
@@ -308,12 +308,8 @@ static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry)
 #define pte_offset_kernel(dir, addr)	\
 	((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(addr))
 #define pte_offset_map(dir, addr)		\
-	((pte_t *) kmap_atomic(pmd_page(*(dir)), KM_PTE0) + pte_index(addr))
-#define pte_offset_map_nested(dir, addr)	\
-	((pte_t *) kmap_atomic(pmd_page(*(dir)), KM_PTE1) + pte_index(addr))
-
-#define pte_unmap(pte)		kunmap_atomic(pte, KM_PTE0)
-#define pte_unmap_nested(pte)	kunmap_atomic(pte, KM_PTE1)
+	((pte_t *) kmap_atomic(pmd_page(*(dir))) + pte_index(addr))
+#define pte_unmap(pte)		kunmap_atomic(pte)
 
 /*
  * Encode and decode a swap entry.
diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h
index 49865045d56..2b09cd522d3 100644
--- a/arch/powerpc/include/asm/pgtable-ppc64.h
+++ b/arch/powerpc/include/asm/pgtable-ppc64.h
@@ -193,9 +193,7 @@
   (((pte_t *) pmd_page_vaddr(*(dir))) + (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))
 
 #define pte_offset_map(dir,addr)	pte_offset_kernel((dir), (addr))
-#define pte_offset_map_nested(dir,addr)	pte_offset_kernel((dir), (addr))
 #define pte_unmap(pte)			do { } while(0)
-#define pte_unmap_nested(pte)		do { } while(0)
 
 /* to find an entry in a kernel page-table-directory */
 /* This now only contains the vmalloc pages */
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
index d692989a431..441d2a722f0 100644
--- a/arch/powerpc/kernel/vio.c
+++ b/arch/powerpc/kernel/vio.c
@@ -238,9 +238,7 @@ static inline void vio_cmo_dealloc(struct vio_dev *viodev, size_t size)
 	 * memory in this pool does not change.
 	 */
 	if (spare_needed && reserve_freed) {
-		tmp = min(spare_needed, min(reserve_freed,
-		                            (viodev->cmo.entitled -
-		                             VIO_CMO_MIN_ENT)));
+		tmp = min3(spare_needed, reserve_freed, (viodev->cmo.entitled - VIO_CMO_MIN_ENT));
 
 		vio_cmo.spare += tmp;
 		viodev->cmo.entitled -= tmp;
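Both the ep93xx calc_clk_div() hunk earlier and the vio_cmo_dealloc() hunk above replace nested min()/max() calls with the min3()/max3() helpers added by the same -mm series. Plain-C stand-ins to illustrate the semantics (the kernel versions in <linux/kernel.h> are type-checking macros, so these are illustrative only):

```c
#include <assert.h>

/* Plain-C stand-ins for the kernel's min3()/max3() macros. */
#define min3(a, b, c)	((a) < (b) ? ((a) < (c) ? (a) : (c)) \
				   : ((b) < (c) ? (b) : (c)))
#define max3(a, b, c)	((a) > (b) ? ((a) > (c) ? (a) : (c)) \
				   : ((b) > (c) ? (b) : (c)))

int main(void)
{
	assert(max3(10, 30, 20) == 30);	/* e.g. picking the highest clock rate */
	assert(min3(10, 30, 20) == 10);	/* e.g. the tightest entitlement */
	return 0;
}
```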
diff --git a/arch/powerpc/mm/highmem.c b/arch/powerpc/mm/highmem.c
index 857d4173f9c..b0848b462bb 100644
--- a/arch/powerpc/mm/highmem.c
+++ b/arch/powerpc/mm/highmem.c
@@ -29,17 +29,17 @@
 * be used in IRQ contexts, so in some (very limited) cases we need
 * it.
 */
-void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
+void *kmap_atomic_prot(struct page *page, pgprot_t prot)
 {
-	unsigned int idx;
 	unsigned long vaddr;
+	int idx, type;
 
 	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
 	pagefault_disable();
 	if (!PageHighMem(page))
 		return page_address(page);
 
-	debug_kmap_atomic(type);
+	type = kmap_atomic_idx_push();
 	idx = type + KM_TYPE_NR*smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 #ifdef CONFIG_DEBUG_HIGHMEM
@@ -52,26 +52,33 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
 }
 EXPORT_SYMBOL(kmap_atomic_prot);
 
-void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
+void __kunmap_atomic(void *kvaddr)
 {
-#ifdef CONFIG_DEBUG_HIGHMEM
 	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
-	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
+	int type;
 
 	if (vaddr < __fix_to_virt(FIX_KMAP_END)) {
 		pagefault_enable();
 		return;
 	}
 
-	BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
+	type = kmap_atomic_idx_pop();
 
-	/*
-	 * force other mappings to Oops if they'll try to access
-	 * this pte without first remap it
-	 */
-	pte_clear(&init_mm, vaddr, kmap_pte-idx);
-	local_flush_tlb_page(NULL, vaddr);
+#ifdef CONFIG_DEBUG_HIGHMEM
+	{
+		unsigned int idx;
+
+		idx = type + KM_TYPE_NR * smp_processor_id();
+		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
+
+		/*
+		 * force other mappings to Oops if they'll try to access
+		 * this pte without first remap it
+		 */
+		pte_clear(&init_mm, vaddr, kmap_pte-idx);
+		local_flush_tlb_page(NULL, vaddr);
+	}
 #endif
 	pagefault_enable();
 }
-EXPORT_SYMBOL(kunmap_atomic_notypecheck);
+EXPORT_SYMBOL(__kunmap_atomic);
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 986dc9476c2..02ace3491c5 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -1094,9 +1094,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
 #define pte_offset(pmd, addr) ((pte_t *) pmd_deref(*(pmd)) + pte_index(addr))
 #define pte_offset_kernel(pmd, address) pte_offset(pmd,address)
 #define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
-#define pte_offset_map_nested(pmd, address) pte_offset_kernel(pmd, address)
 #define pte_unmap(pte) do { } while (0)
-#define pte_unmap_nested(pte) do { } while (0)
 
 /*
  * 31 bit swap entry format:
diff --git a/arch/score/include/asm/pgtable.h b/arch/score/include/asm/pgtable.h
index ccf38f06c57..2fd46980768 100644
--- a/arch/score/include/asm/pgtable.h
+++ b/arch/score/include/asm/pgtable.h
@@ -88,10 +88,7 @@ static inline void pmd_clear(pmd_t *pmdp)
 
 #define pte_offset_map(dir, address)	\
 	((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
-#define pte_offset_map_nested(dir, address)	\
-	((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
 #define pte_unmap(pte) ((void)(pte))
-#define pte_unmap_nested(pte) ((void)(pte))
 
 /*
  * Bits 9(_PAGE_PRESENT) and 10(_PAGE_FILE)are taken,
diff --git a/arch/sh/include/asm/pgtable_32.h b/arch/sh/include/asm/pgtable_32.h
index e172d696e52..69fdfbf14ea 100644
--- a/arch/sh/include/asm/pgtable_32.h
+++ b/arch/sh/include/asm/pgtable_32.h
@@ -429,10 +429,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 #define pte_offset_kernel(dir, address) \
 	((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(address))
 #define pte_offset_map(dir, address)		pte_offset_kernel(dir, address)
-#define pte_offset_map_nested(dir, address)	pte_offset_kernel(dir, address)
-
 #define pte_unmap(pte)		do { } while (0)
-#define pte_unmap_nested(pte)	do { } while (0)
 
 #ifdef CONFIG_X2TLB
 #define pte_ERROR(e) \
diff --git a/arch/sh/include/asm/pgtable_64.h b/arch/sh/include/asm/pgtable_64.h
index 0ee46776dad..10a48111226 100644
--- a/arch/sh/include/asm/pgtable_64.h
+++ b/arch/sh/include/asm/pgtable_64.h
@@ -84,9 +84,7 @@ static __inline__ void set_pte(pte_t *pteptr, pte_t pteval)
 	((pte_t *) ((pmd_val(*(dir))) & PAGE_MASK) + pte_index((addr)))
 #define pte_offset_map(dir,addr)	pte_offset_kernel(dir, addr)
-#define pte_offset_map_nested(dir,addr)	pte_offset_kernel(dir, addr)
 #define pte_unmap(pte)		do { } while (0)
-#define pte_unmap_nested(pte)	do { } while (0)
 
 #ifndef __ASSEMBLY__
 #define IOBASE_VADDR	0xff000000
diff --git a/arch/sparc/include/asm/highmem.h b/arch/sparc/include/asm/highmem.h
index ec23b0a87b9..3d7afbb7f4b 100644
--- a/arch/sparc/include/asm/highmem.h
+++ b/arch/sparc/include/asm/highmem.h
@@ -70,8 +70,8 @@ static inline void kunmap(struct page *page)
 	kunmap_high(page);
 }
 
-extern void *kmap_atomic(struct page *page, enum km_type type);
-extern void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type);
+extern void *__kmap_atomic(struct page *page);
+extern void __kunmap_atomic(void *kvaddr);
 extern struct page *kmap_atomic_to_page(void *vaddr);
 
 #define flush_cache_kmaps()	flush_cache_all()
diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
index 0ece77f4775..303bd4dc829 100644
--- a/arch/sparc/include/asm/pgtable_32.h
+++ b/arch/sparc/include/asm/pgtable_32.h
@@ -304,10 +304,7 @@ BTFIXUPDEF_CALL(pte_t *, pte_offset_kernel, pmd_t *, unsigned long)
  * and sun4c is guaranteed to have no highmem anyway.
  */
 #define pte_offset_map(d, a)		pte_offset_kernel(d,a)
-#define pte_offset_map_nested(d, a)	pte_offset_kernel(d,a)
-
 #define pte_unmap(pte)		do{}while(0)
-#define pte_unmap_nested(pte)	do{}while(0)
 
 /* Certain architectures need to do special things when pte's
  * within a page table are directly modified.  Thus, the following
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index f5b5fa76c02..f8dddb7045b 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -652,9 +652,7 @@ static inline int pte_special(pte_t pte)
 	((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))
 #define pte_offset_kernel		pte_index
 #define pte_offset_map			pte_index
-#define pte_offset_map_nested		pte_index
 #define pte_unmap(pte)			do { } while (0)
-#define pte_unmap_nested(pte)		do { } while (0)
 
 /* Actual page table PTE updates. */
 extern void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr, pte_t *ptep, pte_t orig);
diff --git a/arch/sparc/mm/highmem.c b/arch/sparc/mm/highmem.c
index e139e9cbf5f..5e50c09b7dc 100644
--- a/arch/sparc/mm/highmem.c
+++ b/arch/sparc/mm/highmem.c
@@ -29,17 +29,17 @@
 #include <asm/tlbflush.h>
 #include <asm/fixmap.h>
 
-void *kmap_atomic(struct page *page, enum km_type type)
+void *__kmap_atomic(struct page *page)
 {
-	unsigned long idx;
 	unsigned long vaddr;
+	long idx, type;
 
 	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
 	pagefault_disable();
 	if (!PageHighMem(page))
 		return page_address(page);
 
-	debug_kmap_atomic(type);
+	type = kmap_atomic_idx_push();
 	idx = type + KM_TYPE_NR*smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 
@@ -63,44 +63,50 @@ void *kmap_atomic(struct page *page, enum km_type type)
 	return (void*) vaddr;
 }
-EXPORT_SYMBOL(kmap_atomic);
+EXPORT_SYMBOL(__kmap_atomic);
 
-void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
+void __kunmap_atomic(void *kvaddr)
 {
-#ifdef CONFIG_DEBUG_HIGHMEM
 	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
-	unsigned long idx = type + KM_TYPE_NR*smp_processor_id();
+	int type;
 
 	if (vaddr < FIXADDR_START) { // FIXME
 		pagefault_enable();
 		return;
 	}
 
-	BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx));
+	type = kmap_atomic_idx_pop();
 
-/* XXX Fix - Anton */
+#ifdef CONFIG_DEBUG_HIGHMEM
+	{
+		unsigned long idx;
+
+		idx = type + KM_TYPE_NR * smp_processor_id();
+		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx));
+
+		/* XXX Fix - Anton */
 #if 0
-	__flush_cache_one(vaddr);
+		__flush_cache_one(vaddr);
 #else
-	flush_cache_all();
+		flush_cache_all();
 #endif
 
-	/*
-	 * force other mappings to Oops if they'll try to access
-	 * this pte without first remap it
-	 */
-	pte_clear(&init_mm, vaddr, kmap_pte-idx);
-/* XXX Fix - Anton */
+		/*
+		 * force other mappings to Oops if they'll try to access
+		 * this pte without first remap it
+		 */
+		pte_clear(&init_mm, vaddr, kmap_pte-idx);
+		/* XXX Fix - Anton */
#if 0
-	__flush_tlb_one(vaddr);
+		__flush_tlb_one(vaddr);
 #else
-	flush_tlb_all();
+		flush_tlb_all();
 #endif
+	}
 #endif
-
 	pagefault_enable();
 }
-EXPORT_SYMBOL(kunmap_atomic_notypecheck);
+EXPORT_SYMBOL(__kunmap_atomic);
 
 /* We may be fed a pagetable here by ptep_to_xxx and others. */
 struct page *kmap_atomic_to_page(void *ptr)
diff --git a/arch/tile/include/asm/highmem.h b/arch/tile/include/asm/highmem.h
index d155db6fa9b..e0f7ee18672 100644
--- a/arch/tile/include/asm/highmem.h
+++ b/arch/tile/include/asm/highmem.h
@@ -60,12 +60,12 @@ void *kmap_fix_kpte(struct page *page, int finished);
 /* This macro is used only in map_new_virtual() to map "page". */
 #define kmap_prot page_to_kpgprot(page)
 
-void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type);
-void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
-void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot);
+void *__kmap_atomic(struct page *page);
+void __kunmap_atomic(void *kvaddr);
+void *kmap_atomic_pfn(unsigned long pfn);
+void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot);
 struct page *kmap_atomic_to_page(void *ptr);
-void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot);
-void *kmap_atomic(struct page *page, enum km_type type);
+void *kmap_atomic_prot(struct page *page, pgprot_t prot);
 void kmap_atomic_fix_kpte(struct page *page, int finished);
 
 #define flush_cache_kmaps()	do { } while (0)
diff --git a/arch/tile/include/asm/pgtable.h b/arch/tile/include/asm/pgtable.h
index b3367379d53..dc4ccdd855b 100644
--- a/arch/tile/include/asm/pgtable.h
+++ b/arch/tile/include/asm/pgtable.h
@@ -347,15 +347,10 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 extern pte_t *_pte_offset_map(pmd_t *, unsigned long address, enum km_type);
 #define pte_offset_map(dir, address) \
 	_pte_offset_map(dir, address, KM_PTE0)
-#define pte_offset_map_nested(dir, address) \
-	_pte_offset_map(dir, address, KM_PTE1)
 #define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0)
-#define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1)
 #else
 #define pte_offset_map(dir, address) pte_offset_kernel(dir, address)
-#define pte_offset_map_nested(dir, address) pte_offset_map(dir, address)
 #define pte_unmap(pte) do { } while (0)
-#define pte_unmap_nested(pte) do { } while (0)
 #endif
 
 /* Clear a non-executable kernel PTE and flush it from the TLB. */
diff --git a/arch/tile/mm/highmem.c b/arch/tile/mm/highmem.c
index 12ab137e7d4..8ef6595e162 100644
--- a/arch/tile/mm/highmem.c
+++ b/arch/tile/mm/highmem.c
@@ -56,50 +56,6 @@ void kunmap(struct page *page)
 }
 EXPORT_SYMBOL(kunmap);
 
-static void debug_kmap_atomic_prot(enum km_type type)
-{
-#ifdef CONFIG_DEBUG_HIGHMEM
-	static unsigned warn_count = 10;
-
-	if (unlikely(warn_count == 0))
-		return;
-
-	if (unlikely(in_interrupt())) {
-		if (in_irq()) {
-			if (type != KM_IRQ0 && type != KM_IRQ1 &&
-			    type != KM_BIO_SRC_IRQ &&
-			    /* type != KM_BIO_DST_IRQ && */
-			    type != KM_BOUNCE_READ) {
-				WARN_ON(1);
-				warn_count--;
-			}
-		} else if (!irqs_disabled()) {	/* softirq */
-			if (type != KM_IRQ0 && type != KM_IRQ1 &&
-			    type != KM_SOFTIRQ0 && type != KM_SOFTIRQ1 &&
-			    type != KM_SKB_SUNRPC_DATA &&
-			    type != KM_SKB_DATA_SOFTIRQ &&
-			    type != KM_BOUNCE_READ) {
-				WARN_ON(1);
-				warn_count--;
-			}
-		}
-	}
-
-	if (type == KM_IRQ0 || type == KM_IRQ1 || type == KM_BOUNCE_READ ||
-	    type == KM_BIO_SRC_IRQ /* || type == KM_BIO_DST_IRQ */) {
-		if (!irqs_disabled()) {
-			WARN_ON(1);
-			warn_count--;
-		}
-	} else if (type == KM_SOFTIRQ0 || type == KM_SOFTIRQ1) {
-		if (irq_count() == 0 && !irqs_disabled()) {
-			WARN_ON(1);
-			warn_count--;
-		}
-	}
-#endif
-}
-
 /*
  * Describe a single atomic mapping of a page on a given cpu at a
  * given address, and allow it to be linked into a list.
@@ -240,10 +196,10 @@ void kmap_atomic_fix_kpte(struct page *page, int finished)
 * When holding an atomic kmap is is not legal to sleep, so atomic
 * kmaps are appropriate for short, tight code paths only.
 */
-void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
+void *kmap_atomic_prot(struct page *page, pgprot_t prot)
 {
-	enum fixed_addresses idx;
 	unsigned long vaddr;
+	int idx, type;
 	pte_t *pte;
 
 	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
@@ -255,8 +211,7 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
 	if (!PageHighMem(page))
 		return page_address(page);
 
-	debug_kmap_atomic_prot(type);
-
+	type = kmap_atomic_idx_push();
 	idx = type + KM_TYPE_NR*smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 	pte = kmap_get_pte(vaddr);
@@ -269,25 +224,31 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
 }
 EXPORT_SYMBOL(kmap_atomic_prot);
 
-void *kmap_atomic(struct page *page, enum km_type type)
+void *__kmap_atomic(struct page *page)
 {
 	/* PAGE_NONE is a magic value that tells us to check immutability. */
 	return kmap_atomic_prot(page, type, PAGE_NONE);
 }
-EXPORT_SYMBOL(kmap_atomic);
+EXPORT_SYMBOL(__kmap_atomic);
 
-void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
+void __kunmap_atomic(void *kvaddr)
 {
 	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
-	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
 
-	/*
-	 * Force other mappings to Oops if they try to access this pte without
-	 * first remapping it.  Keeping stale mappings around is a bad idea.
-	 */
-	if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx)) {
+	if (vaddr >= __fix_to_virt(FIX_KMAP_END) &&
+	    vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
 		pte_t *pte = kmap_get_pte(vaddr);
 		pte_t pteval = *pte;
+		int idx, type;
+
+		type = kmap_atomic_idx_pop();
+		idx = type + KM_TYPE_NR*smp_processor_id();
+
+		/*
+		 * Force other mappings to Oops if they try to access this pte
+		 * without first remapping it.  Keeping stale mappings around
+		 * is a bad idea.
+		 */
 		BUG_ON(!pte_present(pteval) && !pte_migrating(pteval));
 		kmap_atomic_unregister(pte_page(pteval), vaddr);
 		kpte_clear_flush(pte, vaddr);
@@ -300,19 +261,19 @@ void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
 	arch_flush_lazy_mmu_mode();
 	pagefault_enable();
 }
-EXPORT_SYMBOL(kunmap_atomic_notypecheck);
+EXPORT_SYMBOL(__kunmap_atomic);
 
 /*
  * This API is supposed to allow us to map memory without a "struct page".
 * Currently we don't support this, though this may change in the future.
 */
-void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
+void *kmap_atomic_pfn(unsigned long pfn)
 {
-	return kmap_atomic(pfn_to_page(pfn), type);
+	return kmap_atomic(pfn_to_page(pfn));
 }
 
-void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
+void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
 {
-	return kmap_atomic_prot(pfn_to_page(pfn), type, prot);
+	return kmap_atomic_prot(pfn_to_page(pfn), prot);
 }
 
 struct page *kmap_atomic_to_page(void *ptr)
+ +config NO_DMA + def_bool y diff --git a/arch/um/defconfig b/arch/um/defconfig index 6bd456f96f9..564f3de65b4 100644 --- a/arch/um/defconfig +++ b/arch/um/defconfig @@ -566,7 +566,6 @@ CONFIG_CRC32=m # CONFIG_CRC7 is not set # CONFIG_LIBCRC32C is not set CONFIG_PLIST=y -CONFIG_HAS_DMA=y # # SCSI device support diff --git a/arch/um/include/asm/dma-mapping.h b/arch/um/include/asm/dma-mapping.h deleted file mode 100644 index 1f469e80fdd..00000000000 --- a/arch/um/include/asm/dma-mapping.h +++ /dev/null @@ -1,112 +0,0 @@ -#ifndef _ASM_DMA_MAPPING_H -#define _ASM_DMA_MAPPING_H - -#include <asm/scatterlist.h> - -static inline int -dma_supported(struct device *dev, u64 mask) -{ - BUG(); - return(0); -} - -static inline int -dma_set_mask(struct device *dev, u64 dma_mask) -{ - BUG(); - return(0); -} - -static inline void * -dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, - gfp_t flag) -{ - BUG(); - return((void *) 0); -} - -static inline void -dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, - dma_addr_t dma_handle) -{ - BUG(); -} - -static inline dma_addr_t -dma_map_single(struct device *dev, void *cpu_addr, size_t size, - enum dma_data_direction direction) -{ - BUG(); - return(0); -} - -static inline void -dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size, - enum dma_data_direction direction) -{ - BUG(); -} - -static inline dma_addr_t -dma_map_page(struct device *dev, struct page *page, - unsigned long offset, size_t size, - enum dma_data_direction direction) -{ - BUG(); - return(0); -} - -static inline void -dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size, - enum dma_data_direction direction) -{ - BUG(); -} - -static inline int -dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, - enum dma_data_direction direction) -{ - BUG(); - return(0); -} - -static inline void -dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries, - enum dma_data_direction direction) -{ - BUG(); -} - -static inline void -dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size, - enum dma_data_direction direction) -{ - BUG(); -} - -static inline void -dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems, - enum dma_data_direction direction) -{ - BUG(); -} - -#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) -#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) - -static inline void -dma_cache_sync(struct device *dev, void *vaddr, size_t size, - enum dma_data_direction direction) -{ - BUG(); -} - -static inline int -dma_mapping_error(struct device *dev, dma_addr_t dma_handle) -{ - BUG(); - return 0; -} - -#endif diff --git a/arch/um/include/asm/pgtable.h b/arch/um/include/asm/pgtable.h index a9f7251b4a8..41474fb5eee 100644 --- a/arch/um/include/asm/pgtable.h +++ b/arch/um/include/asm/pgtable.h @@ -338,9 +338,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) ((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(address)) #define pte_offset_map(dir, address) \ ((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address)) -#define pte_offset_map_nested(dir, address) pte_offset_map(dir, address) #define pte_unmap(pte) do { } while (0) -#define pte_unmap_nested(pte) do { } while (0) struct mm_struct; extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr); diff --git a/arch/um/include/asm/system.h b/arch/um/include/asm/system.h index 93af1cf0907..68a90ecd145 100644 --- a/arch/um/include/asm/system.h +++ 
diff --git a/arch/um/include/asm/system.h b/arch/um/include/asm/system.h
index 93af1cf0907..68a90ecd145 100644
--- a/arch/um/include/asm/system.h
+++ b/arch/um/include/asm/system.h
@@ -8,23 +8,38 @@ extern int set_signals(int enable);
 extern void block_signals(void);
 extern void unblock_signals(void);
 
-#define local_save_flags(flags) do { typecheck(unsigned long, flags); \
-				     (flags) = get_signals(); } while(0)
-#define local_irq_restore(flags) do { typecheck(unsigned long, flags); \
-				      set_signals(flags); } while(0)
-
-#define local_irq_save(flags) do { local_save_flags(flags); \
-				   local_irq_disable(); } while(0)
-
-#define local_irq_enable() unblock_signals()
-#define local_irq_disable() block_signals()
-
-#define irqs_disabled()                 \
-({                                      \
-	unsigned long flags;            \
-	local_save_flags(flags);        \
-	(flags == 0);                   \
-})
+static inline unsigned long arch_local_save_flags(void)
+{
+	return get_signals();
+}
+
+static inline void arch_local_irq_restore(unsigned long flags)
+{
+	set_signals(flags);
+}
+
+static inline void arch_local_irq_enable(void)
+{
+	unblock_signals();
+}
+
+static inline void arch_local_irq_disable(void)
+{
+	block_signals();
+}
+
+static inline unsigned long arch_local_irq_save(void)
+{
+	unsigned long flags;
+	flags = arch_local_save_flags();
+	arch_local_irq_disable();
+	return flags;
+}
+
+static inline bool arch_irqs_disabled(void)
+{
+	return arch_local_save_flags() == 0;
+}
 
 extern void *_switch_to(void *prev, void *next, void *last);
 #define switch_to(prev, next, last) prev = _switch_to(prev, next, last)
diff --git a/arch/um/kernel/dyn.lds.S b/arch/um/kernel/dyn.lds.S
index 69268014dd8..a3cab6d3ae0 100644
--- a/arch/um/kernel/dyn.lds.S
+++ b/arch/um/kernel/dyn.lds.S
@@ -50,8 +50,18 @@ SECTIONS
   .rela.got       : { *(.rela.got) }
   .rel.bss        : { *(.rel.bss .rel.bss.* .rel.gnu.linkonce.b.*) }
   .rela.bss       : { *(.rela.bss .rela.bss.* .rela.gnu.linkonce.b.*) }
-  .rel.plt        : { *(.rel.plt) }
-  .rela.plt       : { *(.rela.plt) }
+  .rel.plt : {
+	*(.rel.plt)
+	PROVIDE_HIDDEN(__rel_iplt_start = .);
+	*(.rel.iplt)
+	PROVIDE_HIDDEN(__rel_iplt_end = .);
+  }
+  .rela.plt : {
+	*(.rela.plt)
+	PROVIDE_HIDDEN(__rela_iplt_start = .);
+	*(.rela.iplt)
+	PROVIDE_HIDDEN(__rela_iplt_end = .);
+  }
   .init           : {
     KEEP (*(.init))
   } =0x90909090
diff --git a/arch/um/kernel/irq.c b/arch/um/kernel/irq.c
index a746e3037a5..3f0ac9e0c96 100644
--- a/arch/um/kernel/irq.c
+++ b/arch/um/kernel/irq.c
@@ -334,7 +334,7 @@ unsigned int do_IRQ(int irq, struct uml_pt_regs *regs)
 {
 	struct pt_regs *old_regs = set_irq_regs((struct pt_regs *)regs);
 	irq_enter();
-	__do_IRQ(irq);
+	generic_handle_irq(irq);
 	irq_exit();
 	set_irq_regs(old_regs);
 	return 1;
@@ -391,17 +391,10 @@ void __init init_IRQ(void)
 {
 	int i;
 
-	irq_desc[TIMER_IRQ].status = IRQ_DISABLED;
-	irq_desc[TIMER_IRQ].action = NULL;
-	irq_desc[TIMER_IRQ].depth = 1;
-	irq_desc[TIMER_IRQ].chip = &SIGVTALRM_irq_type;
-	enable_irq(TIMER_IRQ);
+	set_irq_chip_and_handler(TIMER_IRQ, &SIGVTALRM_irq_type, handle_edge_irq);
+
 	for (i = 1; i < NR_IRQS; i++) {
-		irq_desc[i].status = IRQ_DISABLED;
-		irq_desc[i].action = NULL;
-		irq_desc[i].depth = 1;
-		irq_desc[i].chip = &normal_irq_type;
-		enable_irq(i);
+		set_irq_chip_and_handler(i, &normal_irq_type, handle_edge_irq);
 	}
 }
 
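With __do_IRQ() gone, every IRQ must be wired to a chip and a flow handler up front, and do_IRQ() simply funnels into generic_handle_irq(). A sketch of that registration pattern (the chip and its callbacks are illustrative placeholders; UML's real SIGVTALRM_irq_type and normal_irq_type are defined elsewhere in irq.c):

/* Sketch: genirq-style setup mirroring the init_IRQ() change above. */
static void example_mask(unsigned int irq)
{
	/* mask the interrupt at its source */
}

static void example_unmask(unsigned int irq)
{
	/* unmask the interrupt at its source */
}

static struct irq_chip example_irq_type = {
	.name	= "EXAMPLE",
	.mask	= example_mask,
	.unmask	= example_unmask,
};

static void __init example_setup_irq(unsigned int irq)
{
	/* replaces the old hand-rolled irq_desc[] field writes */
	set_irq_chip_and_handler(irq, &example_irq_type, handle_edge_irq);
}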
diff --git a/arch/um/kernel/uml.lds.S b/arch/um/kernel/uml.lds.S
index ec637855067..fbd99402d4d 100644
--- a/arch/um/kernel/uml.lds.S
+++ b/arch/um/kernel/uml.lds.S
@@ -22,7 +22,7 @@ SECTIONS
   _text = .;
   _stext = .;
   __init_begin = .;
-  INIT_TEXT_SECTION(PAGE_SIZE)
+  INIT_TEXT_SECTION(0)
   . = ALIGN(PAGE_SIZE);
 
   .text      :
@@ -43,6 +43,23 @@ SECTIONS
 	__syscall_stub_end = .;
   }
 
+  /*
+   * These are needed even in a static link, even if they wind up being empty.
+   * Newer glibc needs these __rel{,a}_iplt_{start,end} symbols.
+   */
+  .rel.plt : {
+	*(.rel.plt)
+	PROVIDE_HIDDEN(__rel_iplt_start = .);
+	*(.rel.iplt)
+	PROVIDE_HIDDEN(__rel_iplt_end = .);
+  }
+  .rela.plt : {
+	*(.rela.plt)
+	PROVIDE_HIDDEN(__rela_iplt_start = .);
+	*(.rela.iplt)
+	PROVIDE_HIDDEN(__rela_iplt_end = .);
+  }
+
   #include "asm/common.lds.S"
 
   init.data : { INIT_DATA }
diff --git a/arch/um/os-Linux/time.c b/arch/um/os-Linux/time.c
index dec5678fc17..6e3359d6a83 100644
--- a/arch/um/os-Linux/time.c
+++ b/arch/um/os-Linux/time.c
@@ -60,7 +60,7 @@ static inline long long timeval_to_ns(const struct timeval *tv)
 long long disable_timer(void)
 {
 	struct itimerval time = ((struct itimerval) { { 0, 0 }, { 0, 0 } });
-	int remain, max = UM_NSEC_PER_SEC / UM_HZ;
+	long long remain, max = UM_NSEC_PER_SEC / UM_HZ;
 
 	if (setitimer(ITIMER_VIRTUAL, &time, &time) < 0)
 		printk(UM_KERN_ERR "disable_timer - setitimer failed, "
diff --git a/arch/x86/include/asm/highmem.h b/arch/x86/include/asm/highmem.h
index 8caac76ac32..3bd04022fd0 100644
--- a/arch/x86/include/asm/highmem.h
+++ b/arch/x86/include/asm/highmem.h
@@ -59,11 +59,12 @@ extern void kunmap_high(struct page *page);
 
 void *kmap(struct page *page);
 void kunmap(struct page *page);
-void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot);
-void *kmap_atomic(struct page *page, enum km_type type);
-void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type);
-void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
-void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot);
+
+void *kmap_atomic_prot(struct page *page, pgprot_t prot);
+void *__kmap_atomic(struct page *page);
+void __kunmap_atomic(void *kvaddr);
+void *kmap_atomic_pfn(unsigned long pfn);
+void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot);
 struct page *kmap_atomic_to_page(void *ptr);
 
 #define flush_cache_kmaps()	do { } while (0)
diff --git a/arch/x86/include/asm/iomap.h b/arch/x86/include/asm/iomap.h
index c4191b3b705..363e33eb6ec 100644
--- a/arch/x86/include/asm/iomap.h
+++ b/arch/x86/include/asm/iomap.h
@@ -27,10 +27,10 @@
 #include <asm/tlbflush.h>
 
 void __iomem *
-iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot);
+iomap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot);
 
 void
-iounmap_atomic(void __iomem *kvaddr, enum km_type type);
+iounmap_atomic(void __iomem *kvaddr);
 
 int
 iomap_create_wc(resource_size_t base, unsigned long size, pgprot_t *prot);
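The disable_timer() hunk above is a truncation fix rather than a cleanup: timeval_to_ns() returns long long, and anything over INT_MAX nanoseconds (about 2.1 seconds) wraps when stored into an int. A standalone illustration of the wraparound (ordinary userspace C, not kernel code; assumes the usual 32-bit int):

#include <stdio.h>

int main(void)
{
	long long five_sec_ns = 5LL * 1000000000LL;	/* 5e9 ns */
	int truncated = (int)five_sec_ns;	/* wraps: 5e9 > INT_MAX */

	/* prints: 5000000000 -> 705032704 */
	printf("%lld -> %d\n", five_sec_ns, truncated);
	return 0;
}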
diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
index 8abde9ec90b..0c92113c4cb 100644
--- a/arch/x86/include/asm/pgtable_32.h
+++ b/arch/x86/include/asm/pgtable_32.h
@@ -49,24 +49,14 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
 #endif
 
 #if defined(CONFIG_HIGHPTE)
-#define __KM_PTE			\
-	(in_nmi() ? KM_NMI_PTE :	\
-	 in_irq() ? KM_IRQ_PTE :	\
-	 KM_PTE0)
 #define pte_offset_map(dir, address)					\
-	((pte_t *)kmap_atomic(pmd_page(*(dir)), __KM_PTE) +		\
+	((pte_t *)kmap_atomic(pmd_page(*(dir))) +			\
	 pte_index((address)))
-#define pte_offset_map_nested(dir, address)				\
-	((pte_t *)kmap_atomic(pmd_page(*(dir)), KM_PTE1) +		\
-	 pte_index((address)))
-#define pte_unmap(pte) kunmap_atomic((pte), __KM_PTE)
-#define pte_unmap_nested(pte) kunmap_atomic((pte), KM_PTE1)
+#define pte_unmap(pte) kunmap_atomic((pte))
 #else
 #define pte_offset_map(dir, address)					\
 	((pte_t *)page_address(pmd_page(*(dir))) + pte_index((address)))
-#define pte_offset_map_nested(dir, address) pte_offset_map((dir), (address))
 #define pte_unmap(pte) do { } while (0)
-#define pte_unmap_nested(pte) do { } while (0)
 #endif
 
 /* Clear a kernel PTE and flush it from the TLB */
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
index f96ac9bedf7..f86da20347f 100644
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
@@ -127,9 +127,7 @@ static inline int pgd_large(pgd_t pgd) { return 0; }
 
 /* x86-64 always has all page tables mapped. */
 #define pte_offset_map(dir, address) pte_offset_kernel((dir), (address))
-#define pte_offset_map_nested(dir, address) pte_offset_kernel((dir), (address))
 #define pte_unmap(pte) ((void)(pte))/* NOP */
-#define pte_unmap_nested(pte) ((void)(pte)) /* NOP */
 
 #define update_mmu_cache(vma, address, ptep) do { } while (0)
 
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 12cd823c8d0..17ad0336621 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -327,6 +327,6 @@ static void __cpuinit amd_calc_l3_indices(struct amd_l3_cache *l3)
 	l3->subcaches[3] = sc3 = !(val & BIT(12)) + !(val & BIT(13));
 
-	l3->indices = (max(max(max(sc0, sc1), sc2), sc3) << 10) - 1;
+	l3->indices = (max(max3(sc0, sc1, sc2), sc3) << 10) - 1;
 }
 
 static struct amd_l3_cache * __cpuinit amd_init_l3_cache(int node)
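max3(), added to include/linux/kernel.h earlier in this series, is just the nested form spelled flat: max3(a, b, c) is max(a, max(b, c)). A standalone check that the old and new expressions from the hunk above agree (simplified macros without the kernel's type checking):

#include <stdio.h>

#define max(a, b)	((a) > (b) ? (a) : (b))	/* simplified */
#define max3(a, b, c)	max((a), max((b), (c)))

int main(void)
{
	unsigned int sc0 = 3, sc1 = 7, sc2 = 5, sc3 = 6;

	/* both print 7167: the same index mask either way */
	printf("%u\n", (max(max(max(sc0, sc1), sc2), sc3) << 10) - 1);
	printf("%u\n", (max(max3(sc0, sc1, sc2), sc3) << 10) - 1);
	return 0;
}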
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index fe73c1844a9..c1e8c7a5116 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -49,7 +49,6 @@ static unsigned long
 copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
 {
 	unsigned long offset, addr = (unsigned long)from;
-	int type = in_nmi() ? KM_NMI : KM_IRQ0;
 	unsigned long size, len = 0;
 	struct page *page;
 	void *map;
@@ -63,9 +62,9 @@ copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
 	offset = addr & (PAGE_SIZE - 1);
 	size = min(PAGE_SIZE - offset, n - len);
 
-	map = kmap_atomic(page, type);
+	map = kmap_atomic(page);
 	memcpy(to, map+offset, size);
-	kunmap_atomic(map, type);
+	kunmap_atomic(map);
 	put_page(page);
 
 	len += size;
diff --git a/arch/x86/kernel/crash_dump_32.c b/arch/x86/kernel/crash_dump_32.c
index 67414550c3c..d5cd13945d5 100644
--- a/arch/x86/kernel/crash_dump_32.c
+++ b/arch/x86/kernel/crash_dump_32.c
@@ -61,7 +61,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
 	if (!is_crashed_pfn_valid(pfn))
 		return -EFAULT;
 
-	vaddr = kmap_atomic_pfn(pfn, KM_PTE0);
+	vaddr = kmap_atomic_pfn(pfn);
 
 	if (!userbuf) {
 		memcpy(buf, (vaddr + offset), csize);
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index aff0b3c2750..ae03cab4352 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -713,7 +713,7 @@ static int hpet_cpuhp_notify(struct notifier_block *n,
 
 	switch (action & 0xf) {
 	case CPU_ONLINE:
-		INIT_DELAYED_WORK_ON_STACK(&work.work, hpet_work);
+		INIT_DELAYED_WORK_ONSTACK(&work.work, hpet_work);
 		init_completion(&work.complete);
 		/* FIXME: add schedule_work_on() */
 		schedule_delayed_work_on(cpu, &work.work, 0);
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 6af118511b4..6c7faecd9e4 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -747,7 +747,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
 		.done	= COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
 	};
 
-	INIT_WORK_ON_STACK(&c_idle.work, do_fork_idle);
+	INIT_WORK_ONSTACK(&c_idle.work, do_fork_idle);
 
 	alternatives_smp_switch(1);
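copy_from_user_nmi() keeps its page-at-a-time shape; only the km_type bookkeeping disappears. The chunking arithmetic is worth seeing in isolation: each pass copies at most up to the next page boundary. A standalone model (userspace C; the kmap_atomic()/memcpy()/kunmap_atomic() triple is reduced to a printf):

#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
	unsigned long addr = 4090, n = 9000, len = 0;

	while (len < n) {
		unsigned long offset = addr & (PAGE_SIZE - 1);
		unsigned long size = PAGE_SIZE - offset;

		if (size > n - len)
			size = n - len;
		/* in the kernel: map the page, memcpy, unmap, put_page */
		printf("copy %lu bytes at page offset %lu\n", size, offset);
		len += size;
		addr += size;
	}
	return 0;	/* chunks: 6, 4096, 4096, 802 */
}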
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 852b319edbd..7d90ceb882a 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -919,9 +919,9 @@ spurious_fault(unsigned long error_code, unsigned long address)
 int show_unhandled_signals = 1;
 
 static inline int
-access_error(unsigned long error_code, int write, struct vm_area_struct *vma)
+access_error(unsigned long error_code, struct vm_area_struct *vma)
 {
-	if (write) {
+	if (error_code & PF_WRITE) {
 		/* write, present and write, not present: */
 		if (unlikely(!(vma->vm_flags & VM_WRITE)))
 			return 1;
@@ -956,8 +956,10 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
 	struct task_struct *tsk;
 	unsigned long address;
 	struct mm_struct *mm;
-	int write;
 	int fault;
+	int write = error_code & PF_WRITE;
+	unsigned int flags = FAULT_FLAG_ALLOW_RETRY |
+				(write ? FAULT_FLAG_WRITE : 0);
 
 	tsk = current;
 	mm = tsk->mm;
@@ -1068,6 +1070,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
 			bad_area_nosemaphore(regs, error_code, address);
 			return;
 		}
+retry:
 		down_read(&mm->mmap_sem);
 	} else {
 		/*
@@ -1111,9 +1114,7 @@
 	 * we can handle it..
 	 */
 good_area:
-	write = error_code & PF_WRITE;
-
-	if (unlikely(access_error(error_code, write, vma))) {
+	if (unlikely(access_error(error_code, vma))) {
 		bad_area_access_error(regs, error_code, address);
 		return;
 	}
@@ -1123,21 +1124,34 @@ good_area:
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault:
 	 */
-	fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
+	fault = handle_mm_fault(mm, vma, address, flags);
 
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		mm_fault_error(regs, error_code, address, fault);
 		return;
 	}
 
-	if (fault & VM_FAULT_MAJOR) {
-		tsk->maj_flt++;
-		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
-				     regs, address);
-	} else {
-		tsk->min_flt++;
-		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
-				     regs, address);
+	/*
+	 * Major/minor page fault accounting is only done on the
+	 * initial attempt. If we go through a retry, it is extremely
+	 * likely that the page will be found in page cache at that point.
+	 */
+	if (flags & FAULT_FLAG_ALLOW_RETRY) {
+		if (fault & VM_FAULT_MAJOR) {
+			tsk->maj_flt++;
+			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
+				      regs, address);
+		} else {
+			tsk->min_flt++;
+			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
+				      regs, address);
+		}
+		if (fault & VM_FAULT_RETRY) {
+			/* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
+			 * of starvation. */
+			flags &= ~FAULT_FLAG_ALLOW_RETRY;
+			goto retry;
+		}
 	}
 
 	check_v8086_mode(regs, address, tsk);
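The do_page_fault() rework amounts to a retry-once policy: the first attempt carries FAULT_FLAG_ALLOW_RETRY, and if the mm layer dropped mmap_sem to wait on I/O and returned VM_FAULT_RETRY, the flag is cleared before the goto, so a second retry is impossible and fault accounting stays on the first pass. A standalone toy model of that bounded loop (illustrative flag values, not the kernel's):

#include <stdio.h>

#define FLAG_ALLOW_RETRY	0x1
#define FAULT_RETRY		0x2

/* Pretend the page was on disk: only a retry-capable attempt bails out. */
static int handle_fault(unsigned int flags, int *attempts)
{
	++*attempts;
	return (*attempts == 1 && (flags & FLAG_ALLOW_RETRY)) ? FAULT_RETRY : 0;
}

int main(void)
{
	unsigned int flags = FLAG_ALLOW_RETRY;
	int attempts = 0, fault;

retry:
	fault = handle_fault(flags, &attempts);
	if ((flags & FLAG_ALLOW_RETRY) && (fault & FAULT_RETRY)) {
		flags &= ~FLAG_ALLOW_RETRY;	/* bounded: one retry at most */
		goto retry;
	}
	printf("attempts = %d\n", attempts);	/* prints: attempts = 2 */
	return 0;
}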
diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
index 5e8fa12ef86..d723e369003 100644
--- a/arch/x86/mm/highmem_32.c
+++ b/arch/x86/mm/highmem_32.c
@@ -9,6 +9,7 @@ void *kmap(struct page *page)
 		return page_address(page);
 	return kmap_high(page);
 }
+EXPORT_SYMBOL(kmap);
 
 void kunmap(struct page *page)
 {
@@ -18,6 +19,7 @@ void kunmap(struct page *page)
 		return;
 	kunmap_high(page);
 }
+EXPORT_SYMBOL(kunmap);
 
 /*
  * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
@@ -27,10 +29,10 @@ void kunmap(struct page *page)
  * However when holding an atomic kmap it is not legal to sleep, so atomic
  * kmaps are appropriate for short, tight code paths only.
  */
-void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
+void *kmap_atomic_prot(struct page *page, pgprot_t prot)
 {
-	enum fixed_addresses idx;
 	unsigned long vaddr;
+	int idx, type;
 
 	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
 	pagefault_disable();
@@ -38,8 +40,7 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
 	if (!PageHighMem(page))
 		return page_address(page);
 
-	debug_kmap_atomic(type);
-
+	type = kmap_atomic_idx_push();
 	idx = type + KM_TYPE_NR*smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 	BUG_ON(!pte_none(*(kmap_pte-idx)));
@@ -47,44 +48,56 @@
 
 	return (void *)vaddr;
 }
+EXPORT_SYMBOL(kmap_atomic_prot);
+
+void *__kmap_atomic(struct page *page)
+{
+	return kmap_atomic_prot(page, kmap_prot);
+}
+EXPORT_SYMBOL(__kmap_atomic);
 
-void *kmap_atomic(struct page *page, enum km_type type)
+/*
+ * This is the same as kmap_atomic() but can map memory that doesn't
+ * have a struct page associated with it.
+ */
+void *kmap_atomic_pfn(unsigned long pfn)
 {
-	return kmap_atomic_prot(page, type, kmap_prot);
+	return kmap_atomic_prot_pfn(pfn, kmap_prot);
 }
+EXPORT_SYMBOL_GPL(kmap_atomic_pfn);
 
-void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
+void __kunmap_atomic(void *kvaddr)
 {
 	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
-	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
-
-	/*
-	 * Force other mappings to Oops if they'll try to access this pte
-	 * without first remap it. Keeping stale mappings around is a bad idea
-	 * also, in case the page changes cacheability attributes or becomes
-	 * a protected page in a hypervisor.
-	 */
-	if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx))
+
+	if (vaddr >= __fix_to_virt(FIX_KMAP_END) &&
+	    vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
+		int idx, type;
+
+		type = kmap_atomic_idx_pop();
+		idx = type + KM_TYPE_NR * smp_processor_id();
+
+#ifdef CONFIG_DEBUG_HIGHMEM
+		WARN_ON_ONCE(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
+#endif
+		/*
+		 * Force other mappings to Oops if they'll try to access this
+		 * pte without first remap it. Keeping stale mappings around
+		 * is a bad idea also, in case the page changes cacheability
+		 * attributes or becomes a protected page in a hypervisor.
+		 */
 		kpte_clear_flush(kmap_pte-idx, vaddr);
-	else {
+	}
 #ifdef CONFIG_DEBUG_HIGHMEM
+	else {
 		BUG_ON(vaddr < PAGE_OFFSET);
 		BUG_ON(vaddr >= (unsigned long)high_memory);
-#endif
 	}
+#endif
 
 	pagefault_enable();
 }
-
-/*
- * This is the same as kmap_atomic() but can map memory that doesn't
- * have a struct page associated with it.
- */
-void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
-{
-	return kmap_atomic_prot_pfn(pfn, type, kmap_prot);
-}
-EXPORT_SYMBOL_GPL(kmap_atomic_pfn);	/* temporarily in use by i915 GEM until vmap */
+EXPORT_SYMBOL(__kunmap_atomic);
 
 struct page *kmap_atomic_to_page(void *ptr)
 {
@@ -98,12 +111,6 @@ struct page *kmap_atomic_to_page(void *ptr)
 	pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
 	return pte_page(*pte);
 }
-
-EXPORT_SYMBOL(kmap);
-EXPORT_SYMBOL(kunmap);
-EXPORT_SYMBOL(kmap_atomic);
-EXPORT_SYMBOL(kunmap_atomic_notypecheck);
-EXPORT_SYMBOL(kmap_atomic_prot);
 EXPORT_SYMBOL(kmap_atomic_to_page);
 
 void __init set_highmem_pages_init(void)
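Because kmap_atomic_idx_push() and kmap_atomic_idx_pop() treat the per-CPU slots as a stack, nested atomic mappings must now be released innermost-first. A sketch of that discipline (hypothetical helper, modeled on copy_highpage()-style users of this API):

/* Sketch: nested atomic kmaps must unwind in LIFO order. */
static void copy_page_example(struct page *dst, struct page *src)
{
	void *d = kmap_atomic(dst);	/* pushes slot 0 on this CPU */
	void *s = kmap_atomic(src);	/* pushes slot 1 */

	memcpy(d, s, PAGE_SIZE);

	kunmap_atomic(s);		/* pop slot 1 first... */
	kunmap_atomic(d);		/* ...then slot 0 */
}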
diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
index 72fc70cf618..75a3d7f24a2 100644
--- a/arch/x86/mm/iomap_32.c
+++ b/arch/x86/mm/iomap_32.c
@@ -48,21 +48,20 @@ int iomap_create_wc(resource_size_t base, unsigned long size, pgprot_t *prot)
 }
 EXPORT_SYMBOL_GPL(iomap_create_wc);
 
-void
-iomap_free(resource_size_t base, unsigned long size)
+void iomap_free(resource_size_t base, unsigned long size)
 {
 	io_free_memtype(base, base + size);
 }
 EXPORT_SYMBOL_GPL(iomap_free);
 
-void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
+void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
 {
-	enum fixed_addresses idx;
 	unsigned long vaddr;
+	int idx, type;
 
 	pagefault_disable();
 
-	debug_kmap_atomic(type);
+	type = kmap_atomic_idx_push();
 	idx = type + KM_TYPE_NR * smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 	set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
@@ -72,10 +71,10 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
 }
 
 /*
- * Map 'pfn' using fixed map 'type' and protections 'prot'
+ * Map 'pfn' using protections 'prot'
 */
 void __iomem *
-iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
+iomap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
 {
 	/*
	 * For non-PAT systems, promote PAGE_KERNEL_WC to PAGE_KERNEL_UC_MINUS.
@@ -86,24 +85,33 @@ iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
 	 */
 	if (!pat_enabled && pgprot_val(prot) == pgprot_val(PAGE_KERNEL_WC))
 		prot = PAGE_KERNEL_UC_MINUS;
 
-	return (void __force __iomem *) kmap_atomic_prot_pfn(pfn, type, prot);
+	return (void __force __iomem *) kmap_atomic_prot_pfn(pfn, prot);
 }
 EXPORT_SYMBOL_GPL(iomap_atomic_prot_pfn);
 
 void
-iounmap_atomic(void __iomem *kvaddr, enum km_type type)
+iounmap_atomic(void __iomem *kvaddr)
 {
 	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
-	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
 
-	/*
-	 * Force other mappings to Oops if they'll try to access this pte
-	 * without first remap it. Keeping stale mappings around is a bad idea
-	 * also, in case the page changes cacheability attributes or becomes
-	 * a protected page in a hypervisor.
-	 */
-	if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx))
+	if (vaddr >= __fix_to_virt(FIX_KMAP_END) &&
+	    vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
+		int idx, type;
+
+		type = kmap_atomic_idx_pop();
+		idx = type + KM_TYPE_NR * smp_processor_id();
+
+#ifdef CONFIG_DEBUG_HIGHMEM
+		WARN_ON_ONCE(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
+#endif
+		/*
+		 * Force other mappings to Oops if they'll try to access this
+		 * pte without first remap it. Keeping stale mappings around
+		 * is a bad idea also, in case the page changes cacheability
+		 * attributes or becomes a protected page in a hypervisor.
+		 */
 		kpte_clear_flush(kmap_pte-idx, vaddr);
+	}
 
 	pagefault_enable();
 }
diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h
index 76bf3555411..b03c043ce75 100644
--- a/arch/xtensa/include/asm/pgtable.h
+++ b/arch/xtensa/include/asm/pgtable.h
@@ -324,10 +324,7 @@ ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 #define pte_offset_kernel(dir,addr) \
 	((pte_t*) pmd_page_vaddr(*(dir)) + pte_index(addr))
 #define pte_offset_map(dir,addr)	pte_offset_kernel((dir),(addr))
-#define pte_offset_map_nested(dir,addr)	pte_offset_kernel((dir),(addr))
-
 #define pte_unmap(pte)		do { } while (0)
-#define pte_unmap_nested(pte)	do { } while (0)
 
 /*
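On the caller side the iomap change is symmetrical: iounmap_atomic() now recognises fixmap addresses by range, so no type token has to round-trip through the caller. A hedged sketch of a user (hypothetical function, loosely modeled on how GPU drivers of this era used these helpers):

/* Sketch: one register write through a short-lived WC mapping. */
static void poke_wc_word(unsigned long pfn, unsigned int offset, u32 val)
{
	void __iomem *io = iomap_atomic_prot_pfn(pfn, PAGE_KERNEL_WC);

	iowrite32(val, io + offset);
	iounmap_atomic(io);	/* the old API needed the km_type here too */
}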