Diffstat (limited to 'include/asm-generic/tlb.h')
-rw-r--r--  include/asm-generic/tlb.h  38
1 file changed, 20 insertions(+), 18 deletions(-)
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index f96a5b58a97..5672d7ea1fa 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -78,6 +78,14 @@ struct mmu_gather_batch {
#define MAX_GATHER_BATCH \
((PAGE_SIZE - sizeof(struct mmu_gather_batch)) / sizeof(void *))
+/*
+ * Limit the maximum number of mmu_gather batches to reduce the risk of soft
+ * lockups for non-preemptible kernels on huge machines when a lot of memory
+ * is zapped during unmapping.
+ * 10K pages freed at once should be safe even without a preemption point.
+ */
+#define MAX_GATHER_BATCH_COUNT (10000UL/MAX_GATHER_BATCH)
+
/* struct mmu_gather is an opaque type used by the mm code for passing around
* any data needed by arch specific code for tlb_remove_page.
*/
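
The cap above is enforced where a new batch page would otherwise be allocated. That side of the change lives in mm/memory.c and is not part of this diff, but the idea in tlb_next_batch() is roughly the following sketch:

	/* Sketch of the mm/memory.c side; not part of this header. */
	static int tlb_next_batch(struct mmu_gather *tlb)
	{
		struct mmu_gather_batch *batch = tlb->active;

		/* Reuse an already-chained batch if there is one. */
		if (batch->next) {
			tlb->active = batch->next;
			return 1;
		}

		/* Cap the chain length; returning 0 forces a flush. */
		if (tlb->batch_count == MAX_GATHER_BATCH_COUNT)
			return 0;

		batch = (void *)__get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
		if (!batch)
			return 0;

		tlb->batch_count++;
		batch->next = NULL;
		batch->nr = 0;
		batch->max = MAX_GATHER_BATCH;

		tlb->active->next = batch;
		tlb->active = batch;
		return 1;
	}

Once the count reaches MAX_GATHER_BATCH_COUNT, __tlb_remove_page() reports the gather as full and the caller flushes, so on the order of 10K pages are freed per cycle and each flush acts as a natural preemption break.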
@@ -86,34 +94,28 @@ struct mmu_gather {
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
struct mmu_table_batch *batch;
#endif
+ unsigned long start;
+ unsigned long end;
unsigned int need_flush : 1, /* Did free PTEs */
- fast_mode : 1; /* No batching */
-
- unsigned int fullmm;
+ /* we are in the middle of an operation to clear
+ * a full mm and can make some optimizations */
+ fullmm : 1,
+ /* we have performed an operation which
+ * requires a complete flush of the tlb */
+ need_flush_all : 1;
struct mmu_gather_batch *active;
struct mmu_gather_batch local;
struct page *__pages[MMU_GATHER_BUNDLE];
+ unsigned int batch_count;
};
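
With the range recorded in the gather itself, a full-mm teardown no longer needs a separate flag at the call site: callers pass start = 0 and end = -1, and fullmm is derived from the range. The initialization lives in mm/memory.c rather than in this header, but it amounts to something like:

	/* Sketch of the mm/memory.c side; not part of this header. */
	void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
			    unsigned long start, unsigned long end)
	{
		tlb->mm = mm;

		/* start == 0 && end == ~0UL means the whole address space */
		tlb->fullmm = !(start | (end + 1));
		tlb->need_flush_all = 0;
		tlb->start = start;
		tlb->end = end;
		tlb->need_flush = 0;
		tlb->local.next = NULL;
		tlb->local.nr = 0;
		tlb->local.max = ARRAY_SIZE(tlb->__pages);
		tlb->active = &tlb->local;
		tlb->batch_count = 0;
	#ifdef CONFIG_HAVE_RCU_TABLE_FREE
		tlb->batch = NULL;
	#endif
	}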
#define HAVE_GENERIC_MMU_GATHER
-static inline int tlb_fast_mode(struct mmu_gather *tlb)
-{
-#ifdef CONFIG_SMP
- return tlb->fast_mode;
-#else
- /*
- * For UP we don't need to worry about TLB flush
- * and page free order so much..
- */
- return 1;
-#endif
-}
-
-void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm);
+void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
+		    unsigned long start, unsigned long end);
void tlb_flush_mmu(struct mmu_gather *tlb);
-void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end);
+void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start,
+ unsigned long end);
int __tlb_remove_page(struct mmu_gather *tlb, struct page *page);
/* tlb_remove_page
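
For callers, the net effect of the tlb_gather_mmu() signature change is that the unmap range is supplied up front instead of only at tlb_finish_mmu() time. A typical zap path would now look something like the following (unmap_vmas() stands in for whatever actually clears the PTEs; the exact call chain depends on the caller):

	struct mmu_gather tlb;
	unsigned long start = vma->vm_start;
	unsigned long end = vma->vm_end;

	lru_add_drain();
	tlb_gather_mmu(&tlb, mm, start, end);	/* range known at gather time */
	update_hiwater_rss(mm);
	unmap_vmas(&tlb, vma, start, end);
	tlb_finish_mmu(&tlb, start, end);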