author    Jeff Garzik <jgarzik@pobox.com>  2005-10-30 01:56:31 -0500
committer Jeff Garzik <jgarzik@pobox.com>  2005-10-30 01:56:31 -0500
commit    81cfb8864c73230eb1c37753aba517db15cf4d8f (patch)
tree      649ff25543834cf9983ea41b93126bea97d75475 /include
parent    0169e284f6b6b263cc7c2ed25986b96cd6fda610 (diff)
parent    9f75e1eff3edb2bb07349b94c28f4f2a6c66ca43 (diff)
Merge branch 'master'
Diffstat (limited to 'include')
-rw-r--r--  include/asm-alpha/barrier.h          2
-rw-r--r--  include/asm-alpha/rwsem.h            5
-rw-r--r--  include/asm-arm/tlb.h               23
-rw-r--r--  include/asm-arm26/tlb.h             47
-rw-r--r--  include/asm-generic/4level-fixup.h  11
-rw-r--r--  include/asm-generic/pgtable.h        2
-rw-r--r--  include/asm-generic/tlb.h           23
-rw-r--r--  include/asm-i386/mmzone.h            6
-rw-r--r--  include/asm-i386/pgtable.h           3
-rw-r--r--  include/asm-i386/rwsem.h             5
-rw-r--r--  include/asm-ia64/rwsem.h             5
-rw-r--r--  include/asm-ia64/tlb.h              19
-rw-r--r--  include/asm-m32r/mmzone.h            6
-rw-r--r--  include/asm-parisc/cacheflush.h     35
-rw-r--r--  include/asm-parisc/mmzone.h          6
-rw-r--r--  include/asm-parisc/tlbflush.h        3
-rw-r--r--  include/asm-ppc/rwsem.h              5
-rw-r--r--  include/asm-ppc64/mmzone.h           3
-rw-r--r--  include/asm-ppc64/pgtable.h          4
-rw-r--r--  include/asm-ppc64/rwsem.h            5
-rw-r--r--  include/asm-s390/rwsem.h             5
-rw-r--r--  include/asm-sh/rwsem.h               5
-rw-r--r--  include/asm-sparc64/rwsem.h          5
-rw-r--r--  include/asm-sparc64/tlb.h           29
-rw-r--r--  include/asm-um/pgtable.h             2
-rw-r--r--  include/asm-x86_64/rwsem.h           5
-rw-r--r--  include/linux/buffer_head.h          6
-rw-r--r--  include/linux/hugetlb.h              2
-rw-r--r--  include/linux/memory.h              94
-rw-r--r--  include/linux/memory_hotplug.h     104
-rw-r--r--  include/linux/mempolicy.h            7
-rw-r--r--  include/linux/mm.h                 150
-rw-r--r--  include/linux/mmzone.h              28
-rw-r--r--  include/linux/rmap.h                 4
-rw-r--r--  include/linux/rwsem-spinlock.h       5
-rw-r--r--  include/linux/scatterlist.h         17
-rw-r--r--  include/linux/sched.h               65
-rw-r--r--  include/linux/vmalloc.h              8
38 files changed, 521 insertions, 238 deletions
diff --git a/include/asm-alpha/barrier.h b/include/asm-alpha/barrier.h
index 229c83fe77c..681ff581afa 100644
--- a/include/asm-alpha/barrier.h
+++ b/include/asm-alpha/barrier.h
@@ -1,6 +1,8 @@
#ifndef __BARRIER_H
#define __BARRIER_H
+#include <asm/compiler.h>
+
#define mb() \
__asm__ __volatile__("mb": : :"memory")
diff --git a/include/asm-alpha/rwsem.h b/include/asm-alpha/rwsem.h
index 8e058a67c9a..fafdd4f7010 100644
--- a/include/asm-alpha/rwsem.h
+++ b/include/asm-alpha/rwsem.h
@@ -262,5 +262,10 @@ static inline long rwsem_atomic_update(long val, struct rw_semaphore *sem)
#endif
}
+static inline int rwsem_is_locked(struct rw_semaphore *sem)
+{
+ return (sem->count != 0);
+}
+
#endif /* __KERNEL__ */
#endif /* _ALPHA_RWSEM_H */
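The same rwsem_is_locked() helper is added to each rwsem.h touched by this merge (alpha, i386, ia64, ppc, ppc64, s390, sh, sparc64, x86_64). It is a lockless snapshot of the count word, where nonzero means the semaphore is currently held, so it suits debug assertions rather than locking decisions. A minimal sketch of a hypothetical caller:

    /* Hedged sketch, hypothetical caller: rwsem_is_locked() merely
     * snapshots sem->count, so use it as an assertion aid, never as
     * a substitute for actually taking the semaphore.
     */
    static void assert_mmap_sem_held(struct mm_struct *mm)
    {
            BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
    }
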
diff --git a/include/asm-arm/tlb.h b/include/asm-arm/tlb.h
index 9bb325c5464..f49bfb78c22 100644
--- a/include/asm-arm/tlb.h
+++ b/include/asm-arm/tlb.h
@@ -27,11 +27,7 @@
*/
struct mmu_gather {
struct mm_struct *mm;
- unsigned int freed;
unsigned int fullmm;
-
- unsigned int flushes;
- unsigned int avoided_flushes;
};
DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
@@ -39,11 +35,9 @@ DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
static inline struct mmu_gather *
tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
{
- int cpu = smp_processor_id();
- struct mmu_gather *tlb = &per_cpu(mmu_gathers, cpu);
+ struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);
tlb->mm = mm;
- tlb->freed = 0;
tlb->fullmm = full_mm_flush;
return tlb;
@@ -52,24 +46,13 @@ tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
static inline void
tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
- struct mm_struct *mm = tlb->mm;
- unsigned long freed = tlb->freed;
- int rss = get_mm_counter(mm, rss);
-
- if (rss < freed)
- freed = rss;
- add_mm_counter(mm, rss, -freed);
-
if (tlb->fullmm)
- flush_tlb_mm(mm);
+ flush_tlb_mm(tlb->mm);
/* keep the page table cache within bounds */
check_pgt_cache();
-}
-static inline unsigned int tlb_is_full_mm(struct mmu_gather *tlb)
-{
- return tlb->fullmm;
+ put_cpu_var(mmu_gathers);
}
#define tlb_remove_tlb_entry(tlb,ptep,address) do { } while (0)
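The tlb_gather_mmu()/tlb_finish_mmu() conversion seen here repeats in every tlb.h below: get_cpu_var() replaces the open-coded per_cpu(..., smp_processor_id()) lookup, and a matching put_cpu_var() closes the finish path. As the comments deleted from asm-generic/tlb.h further down explain, the page table lock used to pin the task for the whole shootdown; now the per-CPU reference must disable preemption itself. Roughly the <linux/percpu.h> definitions of the era:

    /* get_cpu_var() disables preemption before returning the per-CPU
     * slot; put_cpu_var() re-enables it.  Between the two calls the
     * gather cannot migrate to another CPU.
     */
    #define get_cpu_var(var) (*({ preempt_disable(); &__get_cpu_var(var); }))
    #define put_cpu_var(var) preempt_enable()
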
diff --git a/include/asm-arm26/tlb.h b/include/asm-arm26/tlb.h
index 1316352a58f..08ddd85b8d3 100644
--- a/include/asm-arm26/tlb.h
+++ b/include/asm-arm26/tlb.h
@@ -10,24 +10,20 @@
*/
struct mmu_gather {
struct mm_struct *mm;
- unsigned int freed;
- unsigned int fullmm;
-
- unsigned int flushes;
- unsigned int avoided_flushes;
+ unsigned int need_flush;
+ unsigned int fullmm;
};
-extern struct mmu_gather mmu_gathers[NR_CPUS];
+DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
static inline struct mmu_gather *
tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
{
- int cpu = smp_processor_id();
- struct mmu_gather *tlb = &mmu_gathers[cpu];
+ struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);
tlb->mm = mm;
- tlb->freed = 0;
- tlb->fullmm = full_mm_flush;
+ tlb->need_flush = 0;
+ tlb->fullmm = full_mm_flush;
return tlb;
}
@@ -35,30 +31,13 @@ tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
static inline void
tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
- struct mm_struct *mm = tlb->mm;
- unsigned long freed = tlb->freed;
- int rss = get_mm_counter(mm, rss);
-
- if (rss < freed)
- freed = rss;
- add_mm_counter(mm, rss, -freed);
-
- if (freed) {
- flush_tlb_mm(mm);
- tlb->flushes++;
- } else {
- tlb->avoided_flushes++;
- }
+ if (tlb->need_flush)
+ flush_tlb_mm(tlb->mm);
/* keep the page table cache within bounds */
check_pgt_cache();
-}
-
-static inline unsigned int
-tlb_is_full_mm(struct mmu_gather *tlb)
-{
- return tlb->fullmm;
+ put_cpu_var(mmu_gathers);
}
#define tlb_remove_tlb_entry(tlb,ptep,address) do { } while (0)
@@ -71,7 +50,13 @@ tlb_is_full_mm(struct mmu_gather *tlb)
} while (0)
#define tlb_end_vma(tlb,vma) do { } while (0)
-#define tlb_remove_page(tlb,page) free_page_and_swap_cache(page)
+static inline void
+tlb_remove_page(struct mmu_gather *tlb, struct page *page)
+{
+ tlb->need_flush = 1;
+ free_page_and_swap_cache(page);
+}
+
#define pte_free_tlb(tlb,ptep) pte_free(ptep)
#define pmd_free_tlb(tlb,pmdp) pmd_free(pmdp)
diff --git a/include/asm-generic/4level-fixup.h b/include/asm-generic/4level-fixup.h
index c20ec257ecc..68c6fea994d 100644
--- a/include/asm-generic/4level-fixup.h
+++ b/include/asm-generic/4level-fixup.h
@@ -10,14 +10,9 @@
#define pud_t pgd_t
-#define pmd_alloc(mm, pud, address) \
-({ pmd_t *ret; \
- if (pgd_none(*pud)) \
- ret = __pmd_alloc(mm, pud, address); \
- else \
- ret = pmd_offset(pud, address); \
- ret; \
-})
+#define pmd_alloc(mm, pud, address) \
+ ((unlikely(pgd_none(*(pud))) && __pmd_alloc(mm, pud, address))? \
+ NULL: pmd_offset(pud, address))
#define pud_alloc(mm, pgd, address) (pgd)
#define pud_offset(pgd, start) (pgd)
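The rewritten pmd_alloc() is easier to read as a function. A sketch of the equivalent inline, on the assumption (implied by the macro's short-circuit) that __pmd_alloc() now returns nonzero on failure and installs the fresh pmd into the pud itself:

    /* Hedged sketch: inline equivalent of the new pmd_alloc() macro.
     * Assumes __pmd_alloc() returns nonzero on failure and, on
     * success, hooks the new pmd into *pud so pmd_offset() finds it.
     * (In this fixup header, pud_t is #defined to pgd_t, hence the
     * pgd_none() test on *pud.)
     */
    static inline pmd_t *pmd_alloc_sketch(struct mm_struct *mm, pud_t *pud,
                                          unsigned long address)
    {
            if (unlikely(pgd_none(*pud)) && __pmd_alloc(mm, pud, address))
                    return NULL;                    /* allocation failed   */
            return pmd_offset(pud, address);        /* existing or new pmd */
    }
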
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index ff28c8b31f5..7dca30a26c5 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -8,7 +8,7 @@
* - update the page tables
* - inform the TLB about the new one
*
- * We hold the mm semaphore for reading and vma->vm_mm->page_table_lock.
+ * We hold the mm semaphore for reading, and the pte lock.
*
* Note: the old pte is known to not be writable, so we don't need to
* worry about dirty bits etc getting lost.
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index 7d0298347ee..cdd4145243c 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -35,16 +35,13 @@
#endif
/* struct mmu_gather is an opaque type used by the mm code for passing around
- * any data needed by arch specific code for tlb_remove_page. This structure
- * can be per-CPU or per-MM as the page table lock is held for the duration of
- * TLB shootdown.
+ * any data needed by arch specific code for tlb_remove_page.
*/
struct mmu_gather {
struct mm_struct *mm;
unsigned int nr; /* set to ~0U means fast mode */
unsigned int need_flush;/* Really unmapped some ptes? */
unsigned int fullmm; /* non-zero means full mm flush */
- unsigned long freed;
struct page * pages[FREE_PTE_NR];
};
@@ -57,7 +54,7 @@ DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
static inline struct mmu_gather *
tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
{
- struct mmu_gather *tlb = &per_cpu(mmu_gathers, smp_processor_id());
+ struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);
tlb->mm = mm;
@@ -65,7 +62,6 @@ tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
tlb->nr = num_online_cpus() > 1 ? 0U : ~0U;
tlb->fullmm = full_mm_flush;
- tlb->freed = 0;
return tlb;
}
@@ -85,28 +81,17 @@ tlb_flush_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
/* tlb_finish_mmu
* Called at the end of the shootdown operation to free up any resources
- * that were required. The page table lock is still held at this point.
+ * that were required.
*/
static inline void
tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
- int freed = tlb->freed;
- struct mm_struct *mm = tlb->mm;
- int rss = get_mm_counter(mm, rss);
-
- if (rss < freed)
- freed = rss;
- add_mm_counter(mm, rss, -freed);
tlb_flush_mmu(tlb, start, end);
/* keep the page table cache within bounds */
check_pgt_cache();
-}
-static inline unsigned int
-tlb_is_full_mm(struct mmu_gather *tlb)
-{
- return tlb->fullmm;
+ put_cpu_var(mmu_gathers);
}
/* tlb_remove_page
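Every tlb_finish_mmu() in this merge sheds the same block: clamp tlb->freed against the mm's rss counter and subtract it. A sketch of the assumption behind the removal — the generic zap path can charge rss per pte as it goes, instead of reconciling once at the end (names here are illustrative, not taken from the patch):

    /* Hedged sketch (assumption about the replacement scheme): account
     * each present pte as it is zapped, using the existing mm counter
     * helpers, so no per-gather "freed" total is needed.
     */
    static inline void account_zapped_pte(struct mm_struct *mm, pte_t pte)
    {
            if (pte_present(pte))
                    dec_mm_counter(mm, rss);        /* replaces tlb->freed++ */
    }
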
diff --git a/include/asm-i386/mmzone.h b/include/asm-i386/mmzone.h
index 348fe3a4879..620a90641ea 100644
--- a/include/asm-i386/mmzone.h
+++ b/include/asm-i386/mmzone.h
@@ -88,12 +88,6 @@ static inline int pfn_to_nid(unsigned long pfn)
__pgdat->node_start_pfn + __pgdat->node_spanned_pages; \
})
-#define local_mapnr(kvaddr) \
-({ \
- unsigned long __pfn = __pa(kvaddr) >> PAGE_SHIFT; \
- (__pfn - node_start_pfn(pfn_to_nid(__pfn))); \
-})
-
/* XXX: FIXME -- wli */
#define kern_addr_valid(kaddr) (0)
diff --git a/include/asm-i386/pgtable.h b/include/asm-i386/pgtable.h
index d101ac414f0..0e3ec809352 100644
--- a/include/asm-i386/pgtable.h
+++ b/include/asm-i386/pgtable.h
@@ -203,7 +203,8 @@ extern unsigned long pg0[];
#define pte_present(x) ((x).pte_low & (_PAGE_PRESENT | _PAGE_PROTNONE))
#define pte_clear(mm,addr,xp) do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)
-#define pmd_none(x) (!pmd_val(x))
+/* To avoid harmful races, pmd_none(x) should check only the lower when PAE */
+#define pmd_none(x) (!(unsigned long)pmd_val(x))
#define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT)
#define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0)
#define pmd_bad(x) ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
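The new comment is terse, so to spell the race out: under PAE a pmd entry is 64 bits wide and its two 32-bit halves are not written atomically, while _PAGE_PRESENT sits in the low word. Casting pmd_val(x) to unsigned long makes pmd_none() judge the low word alone, so an entry caught mid-update on another CPU cannot be misread as populated. The same cast appears in asm-um/pgtable.h below. An illustrative sketch with hypothetical names:

    /* Hedged illustration, hypothetical names: test only the low
     * 32 bits of a 64-bit PAE pmd; the present bit lives there, and
     * the high word may be mid-update on another CPU.
     */
    static inline int pae_pmd_none(unsigned long long pmdval)
    {
            return !(unsigned long)pmdval;          /* low word only */
    }
    /* The unguarded form, !pmdval, would also read a possibly
     * half-written high word and could report "populated" too early. */
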
diff --git a/include/asm-i386/rwsem.h b/include/asm-i386/rwsem.h
index 7625a675852..be4ab859238 100644
--- a/include/asm-i386/rwsem.h
+++ b/include/asm-i386/rwsem.h
@@ -284,5 +284,10 @@ LOCK_PREFIX "xadd %0,(%2)"
return tmp+delta;
}
+static inline int rwsem_is_locked(struct rw_semaphore *sem)
+{
+ return (sem->count != 0);
+}
+
#endif /* __KERNEL__ */
#endif /* _I386_RWSEM_H */
diff --git a/include/asm-ia64/rwsem.h b/include/asm-ia64/rwsem.h
index e18b5ab0cb7..1327c91ea39 100644
--- a/include/asm-ia64/rwsem.h
+++ b/include/asm-ia64/rwsem.h
@@ -186,4 +186,9 @@ __downgrade_write (struct rw_semaphore *sem)
#define rwsem_atomic_add(delta, sem) atomic64_add(delta, (atomic64_t *)(&(sem)->count))
#define rwsem_atomic_update(delta, sem) atomic64_add_return(delta, (atomic64_t *)(&(sem)->count))
+static inline int rwsem_is_locked(struct rw_semaphore *sem)
+{
+ return (sem->count != 0);
+}
+
#endif /* _ASM_IA64_RWSEM_H */
diff --git a/include/asm-ia64/tlb.h b/include/asm-ia64/tlb.h
index 3a9a6d1be75..834370b9dea 100644
--- a/include/asm-ia64/tlb.h
+++ b/include/asm-ia64/tlb.h
@@ -60,7 +60,6 @@ struct mmu_gather {
unsigned int nr; /* == ~0U => fast mode */
unsigned char fullmm; /* non-zero means full mm flush */
unsigned char need_flush; /* really unmapped some PTEs? */
- unsigned long freed; /* number of pages freed */
unsigned long start_addr;
unsigned long end_addr;
struct page *pages[FREE_PTE_NR];
@@ -129,7 +128,7 @@ ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long e
static inline struct mmu_gather *
tlb_gather_mmu (struct mm_struct *mm, unsigned int full_mm_flush)
{
- struct mmu_gather *tlb = &__get_cpu_var(mmu_gathers);
+ struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);
tlb->mm = mm;
/*
@@ -147,25 +146,17 @@ tlb_gather_mmu (struct mm_struct *mm, unsigned int full_mm_flush)
*/
tlb->nr = (num_online_cpus() == 1) ? ~0U : 0;
tlb->fullmm = full_mm_flush;
- tlb->freed = 0;
tlb->start_addr = ~0UL;
return tlb;
}
/*
* Called at the end of the shootdown operation to free up any resources that were
- * collected. The page table lock is still held at this point.
+ * collected.
*/
static inline void
tlb_finish_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
- unsigned long freed = tlb->freed;
- struct mm_struct *mm = tlb->mm;
- unsigned long rss = get_mm_counter(mm, rss);
-
- if (rss < freed)
- freed = rss;
- add_mm_counter(mm, rss, -freed);
/*
* Note: tlb->nr may be 0 at this point, so we can't rely on tlb->start_addr and
* tlb->end_addr.
@@ -174,12 +165,8 @@ tlb_finish_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end)
/* keep the page table cache within bounds */
check_pgt_cache();
-}
-static inline unsigned int
-tlb_is_full_mm(struct mmu_gather *tlb)
-{
- return tlb->fullmm;
+ put_cpu_var(mmu_gathers);
}
/*
diff --git a/include/asm-m32r/mmzone.h b/include/asm-m32r/mmzone.h
index d58878ec899..adc7970a77e 100644
--- a/include/asm-m32r/mmzone.h
+++ b/include/asm-m32r/mmzone.h
@@ -21,12 +21,6 @@ extern struct pglist_data *node_data[];
__pgdat->node_start_pfn + __pgdat->node_spanned_pages - 1; \
})
-#define local_mapnr(kvaddr) \
-({ \
- unsigned long __pfn = __pa(kvaddr) >> PAGE_SHIFT; \
- (__pfn - node_start_pfn(pfn_to_nid(__pfn))); \
-})
-
#define pfn_to_page(pfn) \
({ \
unsigned long __pfn = pfn; \
diff --git a/include/asm-parisc/cacheflush.h b/include/asm-parisc/cacheflush.h
index aa592d8c0e3..1bc3c83ee74 100644
--- a/include/asm-parisc/cacheflush.h
+++ b/include/asm-parisc/cacheflush.h
@@ -100,30 +100,34 @@ static inline void flush_cache_range(struct vm_area_struct *vma,
/* Simple function to work out if we have an existing address translation
* for a user space vma. */
-static inline pte_t *__translation_exists(struct mm_struct *mm,
- unsigned long addr)
+static inline int translation_exists(struct vm_area_struct *vma,
+ unsigned long addr, unsigned long pfn)
{
- pgd_t *pgd = pgd_offset(mm, addr);
+ pgd_t *pgd = pgd_offset(vma->vm_mm, addr);
pmd_t *pmd;
- pte_t *pte;
+ pte_t pte;
if(pgd_none(*pgd))
- return NULL;
+ return 0;
pmd = pmd_offset(pgd, addr);
if(pmd_none(*pmd) || pmd_bad(*pmd))
- return NULL;
+ return 0;
- pte = pte_offset_map(pmd, addr);
+ /* We cannot take the pte lock here: flush_cache_page is usually
+ * called with pte lock already held. Whereas flush_dcache_page
+ * takes flush_dcache_mmap_lock, which is lower in the hierarchy:
+ * the vma itself is secure, but the pte might come or go racily.
+ */
+ pte = *pte_offset_map(pmd, addr);
+ /* But pte_unmap() does nothing on this architecture */
- /* The PA flush mappings show up as pte_none, but they're
- * valid none the less */
- if(pte_none(*pte) && ((pte_val(*pte) & _PAGE_FLUSH) == 0))
- return NULL;
- return pte;
-}
-#define translation_exists(vma, addr) __translation_exists((vma)->vm_mm, addr)
+ /* Filter out coincidental file entries and swap entries */
+ if (!(pte_val(pte) & (_PAGE_FLUSH|_PAGE_PRESENT)))
+ return 0;
+ return pte_pfn(pte) == pfn;
+}
/* Private function to flush a page from the cache of a non-current
* process. cr25 contains the Page Directory of the current user
@@ -175,9 +179,8 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long
{
BUG_ON(!vma->vm_mm->context);
- if(likely(translation_exists(vma, vmaddr)))
+ if (likely(translation_exists(vma, vmaddr, pfn)))
__flush_cache_page(vma, vmaddr);
}
#endif
-
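The shape of this rewrite is the interesting part: the old helper returned a live pte_t * that could change or vanish after return, since no pte lock is held on this path. The new one copies the entry by value, validates the copy, and further checks that it still maps the pfn the caller wants flushed. The idiom, distilled (a sketch restating the code above):

    /* Hedged sketch of the lockless-read idiom: snapshot the pte once,
     * then reason only about the snapshot, never the live entry.
     */
    static inline int pte_snapshot_maps_pfn(pmd_t *pmd, unsigned long addr,
                                            unsigned long pfn)
    {
            pte_t snap = *pte_offset_map(pmd, addr); /* single racy read */
            if (!(pte_val(snap) & (_PAGE_FLUSH | _PAGE_PRESENT)))
                    return 0;               /* empty, swap or file entry */
            return pte_pfn(snap) == pfn;    /* still the expected page?  */
    }
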
diff --git a/include/asm-parisc/mmzone.h b/include/asm-parisc/mmzone.h
index 595d3dce120..ae039f4fd71 100644
--- a/include/asm-parisc/mmzone.h
+++ b/include/asm-parisc/mmzone.h
@@ -27,12 +27,6 @@ extern struct node_map_data node_data[];
})
#define node_localnr(pfn, nid) ((pfn) - node_start_pfn(nid))
-#define local_mapnr(kvaddr) \
-({ \
- unsigned long __pfn = __pa(kvaddr) >> PAGE_SHIFT; \
- (__pfn - node_start_pfn(pfn_to_nid(__pfn))); \
-})
-
#define pfn_to_page(pfn) \
({ \
unsigned long __pfn = (pfn); \
diff --git a/include/asm-parisc/tlbflush.h b/include/asm-parisc/tlbflush.h
index 84af4ab1fe5..e97aa8d1eff 100644
--- a/include/asm-parisc/tlbflush.h
+++ b/include/asm-parisc/tlbflush.h
@@ -88,7 +88,7 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
if (npages >= 512) /* 2MB of space: arbitrary, should be tuned */
flush_tlb_all();
else {
-
+ preempt_disable();
mtsp(vma->vm_mm->context,1);
purge_tlb_start();
if (split_tlb) {
@@ -102,6 +102,7 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
pdtlb(start);
start += PAGE_SIZE;
}
+ preempt_enable();
}
purge_tlb_end();
}
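The rule this hunk enforces: mtsp() programs a space register on the current CPU, and the pdtlb()/pitlb() purges that follow depend on it, so the task must not migrate in between. Distilled into a sketch (hypothetical helper):

    /* Hedged sketch, hypothetical helper: keep the mtsp() and the
     * purge that relies on it pinned to one CPU.
     */
    static inline void purge_one_user_page(struct vm_area_struct *vma,
                                           unsigned long addr)
    {
            preempt_disable();              /* pin to this CPU      */
            mtsp(vma->vm_mm->context, 1);   /* program per-CPU %sr1 */
            pdtlb(addr);                    /* purge via %sr1 above */
            preempt_enable();
    }
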
diff --git a/include/asm-ppc/rwsem.h b/include/asm-ppc/rwsem.h
index 3e738f483c1..3501ea72f88 100644
--- a/include/asm-ppc/rwsem.h
+++ b/include/asm-ppc/rwsem.h
@@ -168,5 +168,10 @@ static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
return atomic_add_return(delta, (atomic_t *)(&sem->count));
}
+static inline int rwsem_is_locked(struct rw_semaphore *sem)
+{
+ return (sem->count != 0);
+}
+
#endif /* __KERNEL__ */
#endif /* _PPC_RWSEM_XADD_H */
diff --git a/include/asm-ppc64/mmzone.h b/include/asm-ppc64/mmzone.h
index ed473f4b015..80a708e7093 100644
--- a/include/asm-ppc64/mmzone.h
+++ b/include/asm-ppc64/mmzone.h
@@ -67,9 +67,6 @@ static inline int pa_to_nid(unsigned long pa)
#define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn)
#define node_end_pfn(nid) (NODE_DATA(nid)->node_end_pfn)
-#define local_mapnr(kvaddr) \
- ( (__pa(kvaddr) >> PAGE_SHIFT) - node_start_pfn(kvaddr_to_nid(kvaddr))
-
#ifdef CONFIG_DISCONTIGMEM
/*
diff --git a/include/asm-ppc64/pgtable.h b/include/asm-ppc64/pgtable.h
index c83679c9d2b..2eb1778a3a1 100644
--- a/include/asm-ppc64/pgtable.h
+++ b/include/asm-ppc64/pgtable.h
@@ -478,10 +478,12 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long addr,
#define __HAVE_ARCH_PTE_SAME
#define pte_same(A,B) (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HPTEFLAGS) == 0)
+#define pte_ERROR(e) \
+ printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pud_ERROR(e) \
- printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pud_val(e))
+ printk("%s:%d: bad pud %08lx.\n", __FILE__, __LINE__, pud_val(e))
#define pgd_ERROR(e) \
printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
diff --git a/include/asm-ppc64/rwsem.h b/include/asm-ppc64/rwsem.h
index bd5c2f09357..7a647fae376 100644
--- a/include/asm-ppc64/rwsem.h
+++ b/include/asm-ppc64/rwsem.h
@@ -163,5 +163,10 @@ static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
return atomic_add_return(delta, (atomic_t *)(&sem->count));
}
+static inline int rwsem_is_locked(struct rw_semaphore *sem)
+{
+ return (sem->count != 0);
+}
+
#endif /* __KERNEL__ */
#endif /* _PPC_RWSEM_XADD_H */
diff --git a/include/asm-s390/rwsem.h b/include/asm-s390/rwsem.h
index 8c0cebbfc03..0422a085dd5 100644
--- a/include/asm-s390/rwsem.h
+++ b/include/asm-s390/rwsem.h
@@ -351,5 +351,10 @@ static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
return new;
}
+static inline int rwsem_is_locked(struct rw_semaphore *sem)
+{
+ return (sem->count != 0);
+}
+
#endif /* __KERNEL__ */
#endif /* _S390_RWSEM_H */
diff --git a/include/asm-sh/rwsem.h b/include/asm-sh/rwsem.h
index 1be4337f525..0262d3d1e5e 100644
--- a/include/asm-sh/rwsem.h
+++ b/include/asm-sh/rwsem.h
@@ -166,5 +166,10 @@ static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
return atomic_add_return(delta, (atomic_t *)(&sem->count));
}
+static inline int rwsem_is_locked(struct rw_semaphore *sem)
+{
+ return (sem->count != 0);
+}
+
#endif /* __KERNEL__ */
#endif /* _ASM_SH_RWSEM_H */
diff --git a/include/asm-sparc64/rwsem.h b/include/asm-sparc64/rwsem.h
index 4568ee4022d..cef5e827042 100644
--- a/include/asm-sparc64/rwsem.h
+++ b/include/asm-sparc64/rwsem.h
@@ -56,6 +56,11 @@ static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
atomic_add(delta, (atomic_t *)(&sem->count));
}
+static inline int rwsem_is_locked(struct rw_semaphore *sem)
+{
+ return (sem->count != 0);
+}
+
#endif /* __KERNEL__ */
#endif /* _SPARC64_RWSEM_H */
diff --git a/include/asm-sparc64/tlb.h b/include/asm-sparc64/tlb.h
index 9baf57db01d..66138d959df 100644
--- a/include/asm-sparc64/tlb.h
+++ b/include/asm-sparc64/tlb.h
@@ -25,9 +25,8 @@ struct mmu_gather {
struct mm_struct *mm;
unsigned int pages_nr;
unsigned int need_flush;
- unsigned int tlb_frozen;
+ unsigned int fullmm;
unsigned int tlb_nr;
- unsigned long freed;
unsigned long vaddrs[TLB_BATCH_NR];
struct page *pages[FREE_PTE_NR];
};
@@ -44,14 +43,13 @@ extern void flush_tlb_pending(void);
static inline struct mmu_gather *tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
{
- struct mmu_gather *mp = &__get_cpu_var(mmu_gathers);
+ struct mmu_gather *mp = &get_cpu_var(mmu_gathers);
BUG_ON(mp->tlb_nr);
mp->mm = mm;
mp->pages_nr = num_online_cpus() > 1 ? 0U : ~0U;
- mp->tlb_frozen = full_mm_flush;
- mp->freed = 0;
+ mp->fullmm = full_mm_flush;
return mp;
}
@@ -78,30 +76,19 @@ extern void smp_flush_tlb_mm(struct mm_struct *mm);
static inline void tlb_finish_mmu(struct mmu_gather *mp, unsigned long start, unsigned long end)
{
- unsigned long freed = mp->freed;
- struct mm_struct *mm = mp->mm;
- unsigned long rss = get_mm_counter(mm, rss);
-
- if (rss < freed)
- freed = rss;
- add_mm_counter(mm, rss, -freed);
-
tlb_flush_mmu(mp);
- if (mp->tlb_frozen) {
- if (CTX_VALID(mm->context))
- do_flush_tlb_mm(mm);
- mp->tlb_frozen = 0;
+ if (mp->fullmm) {
+ if (CTX_VALID(mp->mm->context))
+ do_flush_tlb_mm(mp->mm);
+ mp->fullmm = 0;
} else
flush_tlb_pending();
/* keep the page table cache within bounds */
check_pgt_cache();
-}
-static inline unsigned int tlb_is_full_mm(struct mmu_gather *mp)
-{
- return mp->tlb_frozen;
+ put_cpu_var(mmu_gathers);
}
static inline void tlb_remove_page(struct mmu_gather *mp, struct page *page)
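Beyond renaming tlb_frozen to fullmm, note the cross-architecture thread: the tlb_is_full_mm() accessor is deleted here and in asm-arm, asm-arm26, asm-generic and asm-ia64 above, the field now being uniformly named so callers can read it directly. An illustrative caller (not from this patch):

    /* Hedged sketch, illustrative caller: with the field named the
     * same everywhere, no per-arch accessor is needed.
     */
    static inline void finish_example(struct mmu_gather *tlb)
    {
            if (tlb->fullmm)                /* was: tlb_is_full_mm(tlb) */
                    flush_tlb_mm(tlb->mm);
    }
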
diff --git a/include/asm-um/pgtable.h b/include/asm-um/pgtable.h
index 616d02b57ea..ac64eb95586 100644
--- a/include/asm-um/pgtable.h
+++ b/include/asm-um/pgtable.h
@@ -138,7 +138,7 @@ extern unsigned long pg0[1024];
#define pte_clear(mm,addr,xp) pte_set_val(*(xp), (phys_t) 0, __pgprot(_PAGE_NEWPAGE))
-#define pmd_none(x) (!(pmd_val(x) & ~_PAGE_NEWPAGE))
+#define pmd_none(x) (!((unsigned long)pmd_val(x) & ~_PAGE_NEWPAGE))
#define pmd_bad(x) ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
#define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT)
#define pmd_clear(xp) do { pmd_val(*(xp)) = _PAGE_NEWPAGE; } while (0)
diff --git a/include/asm-x86_64/rwsem.h b/include/asm-x86_64/rwsem.h
index c002175b6e8..46077e9c191 100644
--- a/include/asm-x86_64/rwsem.h
+++ b/include/asm-x86_64/rwsem.h
@@ -274,5 +274,10 @@ LOCK_PREFIX "xaddl %0,(%2)"
return tmp+delta;
}
+static inline int rwsem_is_locked(struct rw_semaphore *sem)
+{
+ return (sem->count != 0);
+}
+