Diffstat (limited to 'mm/mmu_notifier.c')
-rw-r--r--   mm/mmu_notifier.c   188
1 file changed, 111 insertions, 77 deletions
diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
index 438951d366f..41cefdf0aad 100644
--- a/mm/mmu_notifier.c
+++ b/mm/mmu_notifier.c
@@ -11,13 +11,17 @@
 #include <linux/rculist.h>
 #include <linux/mmu_notifier.h>
-#include <linux/module.h>
+#include <linux/export.h>
 #include <linux/mm.h>
 #include <linux/err.h>
+#include <linux/srcu.h>
 #include <linux/rcupdate.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
 
+/* global SRCU for all MMs */
+static struct srcu_struct srcu;
+
 /*
  * This function can't run concurrently against mmu_notifier_register
  * because mm->mm_users > 0 during mmu_notifier_register and exit_mmap
@@ -25,14 +29,31 @@
  * in parallel despite there being no task using this mm any more,
  * through the vmas outside of the exit_mmap context, such as with
  * vmtruncate. This serializes against mmu_notifier_unregister with
- * the mmu_notifier_mm->lock in addition to RCU and it serializes
- * against the other mmu notifiers with RCU. struct mmu_notifier_mm
+ * the mmu_notifier_mm->lock in addition to SRCU and it serializes
+ * against the other mmu notifiers with SRCU. struct mmu_notifier_mm
  * can't go away from under us as exit_mmap holds an mm_count pin
  * itself.
  */
 void __mmu_notifier_release(struct mm_struct *mm)
 {
         struct mmu_notifier *mn;
+        int id;
+
+        /*
+         * SRCU here will block mmu_notifier_unregister until
+         * ->release returns.
+         */
+        id = srcu_read_lock(&srcu);
+        hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist)
+                /*
+                 * If ->release runs before mmu_notifier_unregister it must be
+                 * handled, as it's the only way for the driver to flush all
+                 * existing sptes and stop the driver from establishing any more
+                 * sptes before all the pages in the mm are freed.
+                 */
+                if (mn->ops->release)
+                        mn->ops->release(mn, mm);
+        srcu_read_unlock(&srcu, id);
 
         spin_lock(&mm->mmu_notifier_mm->lock);
         while (unlikely(!hlist_empty(&mm->mmu_notifier_mm->list))) {
@@ -41,41 +62,24 @@ void __mmu_notifier_release(struct mm_struct *mm)
                                  hlist);
                 /*
                  * We arrived before mmu_notifier_unregister so
-                 * mmu_notifier_unregister will do nothing other than
-                 * to wait ->release to finish and
-                 * mmu_notifier_unregister to return.
+                 * mmu_notifier_unregister will do nothing other than to wait
+                 * for ->release to finish and for mmu_notifier_unregister to
+                 * return.
                  */
                 hlist_del_init_rcu(&mn->hlist);
-                /*
-                 * RCU here will block mmu_notifier_unregister until
-                 * ->release returns.
-                 */
-                rcu_read_lock();
-                spin_unlock(&mm->mmu_notifier_mm->lock);
-                /*
-                 * if ->release runs before mmu_notifier_unregister it
-                 * must be handled as it's the only way for the driver
-                 * to flush all existing sptes and stop the driver
-                 * from establishing any more sptes before all the
-                 * pages in the mm are freed.
-                 */
-                if (mn->ops->release)
-                        mn->ops->release(mn, mm);
-                rcu_read_unlock();
-                spin_lock(&mm->mmu_notifier_mm->lock);
         }
         spin_unlock(&mm->mmu_notifier_mm->lock);
 
         /*
-         * synchronize_rcu here prevents mmu_notifier_release to
-         * return to exit_mmap (which would proceed freeing all pages
-         * in the mm) until the ->release method returns, if it was
-         * invoked by mmu_notifier_unregister.
+         * synchronize_srcu here prevents mmu_notifier_release from returning to
+         * exit_mmap (which would proceed with freeing all pages in the mm)
+         * until the ->release method returns, if it was invoked by
+         * mmu_notifier_unregister.
          *
-         * The mmu_notifier_mm can't go away from under us because one
-         * mm_count is hold by exit_mmap.
+         * The mmu_notifier_mm can't go away from under us because one mm_count
+         * is held by exit_mmap.
          */
-        synchronize_rcu();
+        synchronize_srcu(&srcu);
 }
 
 /*
@@ -87,15 +91,33 @@ int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
                                         unsigned long address)
 {
         struct mmu_notifier *mn;
-        struct hlist_node *n;
-        int young = 0;
+        int young = 0, id;
 
-        rcu_read_lock();
-        hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
+        id = srcu_read_lock(&srcu);
+        hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
                 if (mn->ops->clear_flush_young)
                         young |= mn->ops->clear_flush_young(mn, mm, address);
         }
-        rcu_read_unlock();
+        srcu_read_unlock(&srcu, id);
+
+        return young;
+}
+
+int __mmu_notifier_test_young(struct mm_struct *mm,
+                              unsigned long address)
+{
+        struct mmu_notifier *mn;
+        int young = 0, id;
+
+        id = srcu_read_lock(&srcu);
+        hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
+                if (mn->ops->test_young) {
+                        young = mn->ops->test_young(mn, mm, address);
+                        if (young)
+                                break;
+                }
+        }
+        srcu_read_unlock(&srcu, id);
 
         return young;
 }
@@ -104,63 +126,59 @@ void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address,
                                pte_t pte)
 {
         struct mmu_notifier *mn;
-        struct hlist_node *n;
+        int id;
 
-        rcu_read_lock();
-        hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
+        id = srcu_read_lock(&srcu);
+        hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
                 if (mn->ops->change_pte)
                         mn->ops->change_pte(mn, mm, address, pte);
-                /*
-                 * Some drivers don't have change_pte,
-                 * so we must call invalidate_page in that case.
-                 */
-                else if (mn->ops->invalidate_page)
-                        mn->ops->invalidate_page(mn, mm, address);
         }
-        rcu_read_unlock();
+        srcu_read_unlock(&srcu, id);
 }
 
 void __mmu_notifier_invalidate_page(struct mm_struct *mm,
                                           unsigned long address)
 {
         struct mmu_notifier *mn;
-        struct hlist_node *n;
+        int id;
 
-        rcu_read_lock();
-        hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
+        id = srcu_read_lock(&srcu);
+        hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
                 if (mn->ops->invalidate_page)
                         mn->ops->invalidate_page(mn, mm, address);
         }
-        rcu_read_unlock();
+        srcu_read_unlock(&srcu, id);
 }
 
 void __mmu_notifier_invalidate_range_start(struct mm_struct *mm,
                                   unsigned long start, unsigned long end)
 {
         struct mmu_notifier *mn;
-        struct hlist_node *n;
+        int id;
 
-        rcu_read_lock();
-        hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
+        id = srcu_read_lock(&srcu);
+        hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
                 if (mn->ops->invalidate_range_start)
                         mn->ops->invalidate_range_start(mn, mm, start, end);
         }
-        rcu_read_unlock();
+        srcu_read_unlock(&srcu, id);
 }
+EXPORT_SYMBOL_GPL(__mmu_notifier_invalidate_range_start);
 
 void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
                                   unsigned long start, unsigned long end)
 {
         struct mmu_notifier *mn;
-        struct hlist_node *n;
+        int id;
 
-        rcu_read_lock();
-        hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
+        id = srcu_read_lock(&srcu);
+        hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
                 if (mn->ops->invalidate_range_end)
                         mn->ops->invalidate_range_end(mn, mm, start, end);
         }
-        rcu_read_unlock();
+        srcu_read_unlock(&srcu, id);
 }
+EXPORT_SYMBOL_GPL(__mmu_notifier_invalidate_range_end);
 
 static int do_mmu_notifier_register(struct mmu_notifier *mn,
                                     struct mm_struct *mm,
@@ -171,6 +189,12 @@ static int do_mmu_notifier_register(struct mmu_notifier *mn,
 
         BUG_ON(atomic_read(&mm->mm_users) <= 0);
 
+        /*
+         * Verify that mmu_notifier_init() already run and the global srcu is
+         * initialized.
+         */
+        BUG_ON(!srcu.per_cpu_ref);
+
         ret = -ENOMEM;
         mmu_notifier_mm = kmalloc(sizeof(struct mmu_notifier_mm), GFP_KERNEL);
         if (unlikely(!mmu_notifier_mm))
@@ -180,11 +204,12 @@ static int do_mmu_notifier_register(struct mmu_notifier *mn,
                 down_write(&mm->mmap_sem);
         ret = mm_take_all_locks(mm);
         if (unlikely(ret))
-                goto out_cleanup;
+                goto out_clean;
 
         if (!mm_has_notifiers(mm)) {
                 INIT_HLIST_HEAD(&mmu_notifier_mm->list);
                 spin_lock_init(&mmu_notifier_mm->lock);
+
                 mm->mmu_notifier_mm = mmu_notifier_mm;
                 mmu_notifier_mm = NULL;
         }
@@ -203,10 +228,9 @@ static int do_mmu_notifier_register(struct mmu_notifier *mn,
         spin_unlock(&mm->mmu_notifier_mm->lock);
 
         mm_drop_all_locks(mm);
-out_cleanup:
+out_clean:
         if (take_mmap_sem)
                 up_write(&mm->mmap_sem);
-        /* kfree() does nothing if mmu_notifier_mm is NULL */
         kfree(mmu_notifier_mm);
 out:
         BUG_ON(atomic_read(&mm->mm_users) <= 0);
@@ -253,8 +277,8 @@ void __mmu_notifier_mm_destroy(struct mm_struct *mm)
 /*
  * This releases the mm_count pin automatically and frees the mm
  * structure if it was the last user of it. It serializes against
- * running mmu notifiers with RCU and against mmu_notifier_unregister
- * with the unregister lock + RCU. All sptes must be dropped before
+ * running mmu notifiers with SRCU and against mmu_notifier_unregister
+ * with the unregister lock + SRCU. All sptes must be dropped before
 * calling mmu_notifier_unregister. ->release or any other notifier
 * method may be invoked concurrently with mmu_notifier_unregister,
 * and only after mmu_notifier_unregister returned we're guaranteed
@@ -264,35 +288,45 @@ void mmu_notifier_unregister(struct mmu_notifier *mn, struct mm_struct *mm)
 {
         BUG_ON(atomic_read(&mm->mm_count) <= 0);
 
-        spin_lock(&mm->mmu_notifier_mm->lock);
         if (!hlist_unhashed(&mn->hlist)) {
-                hlist_del_rcu(&mn->hlist);
-
                 /*
-                 * RCU here will force exit_mmap to wait ->release to finish
-                 * before freeing the pages.
+                 * SRCU here will force exit_mmap to wait for ->release to
+                 * finish before freeing the pages.
                  */
-                rcu_read_lock();
-                spin_unlock(&mm->mmu_notifier_mm->lock);
+                int id;
+
+                id = srcu_read_lock(&srcu);
                 /*
-                 * exit_mmap will block in mmu_notifier_release to
-                 * guarantee ->release is called before freeing the
-                 * pages.
+                 * exit_mmap will block in mmu_notifier_release to guarantee
+                 * that ->release is called before freeing the pages.
                  */
                 if (mn->ops->release)
                         mn->ops->release(mn, mm);
-                rcu_read_unlock();
-        } else
+                srcu_read_unlock(&srcu, id);
+
+                spin_lock(&mm->mmu_notifier_mm->lock);
+                /*
+                 * Can not use list_del_rcu() since __mmu_notifier_release
+                 * can delete it before we hold the lock.
+                 */
+                hlist_del_init_rcu(&mn->hlist);
                 spin_unlock(&mm->mmu_notifier_mm->lock);
+        }
 
         /*
-         * Wait any running method to finish, of course including
-         * ->release if it was run by mmu_notifier_relase instead of us.
+         * Wait for any running method to finish, of course including
+         * ->release if it was run by mmu_notifier_release instead of us.
          */
-        synchronize_rcu();
+        synchronize_srcu(&srcu);
 
         BUG_ON(atomic_read(&mm->mm_count) <= 0);
 
         mmdrop(mm);
 }
 EXPORT_SYMBOL_GPL(mmu_notifier_unregister);
+
+static int __init mmu_notifier_init(void)
+{
+        return init_srcu_struct(&srcu);
+}
+subsys_initcall(mmu_notifier_init);
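
The conversion above is mechanical: each rcu_read_lock()/rcu_read_unlock() pair around the notifier list walk becomes srcu_read_lock()/srcu_read_unlock() against one global srcu_struct, and the waiting sides (unregister, release) call synchronize_srcu() instead of synchronize_rcu(), which is what allows the ->release and invalidation callbacks to sleep. The following is a minimal sketch of that pattern in isolation, not kernel code; the my_* identifiers are invented for illustration and are not symbols from mm/mmu_notifier.c.

/*
 * Sketch: readers walk a notifier list inside an SRCU read-side section,
 * so the callbacks are allowed to sleep; writers unlink an entry under a
 * spinlock and then wait in synchronize_srcu() before it may be freed.
 * The my_* names are illustrative only.
 */
#include <linux/init.h>
#include <linux/rculist.h>
#include <linux/spinlock.h>
#include <linux/srcu.h>

static struct srcu_struct my_srcu;        /* one global SRCU domain */
static HLIST_HEAD(my_notifier_list);      /* registered notifiers */
static DEFINE_SPINLOCK(my_lock);          /* serializes list updates */

struct my_notifier {
        struct hlist_node hlist;
        void (*invalidate)(struct my_notifier *mn, unsigned long addr);
};

static int __init my_notifier_setup(void)
{
        return init_srcu_struct(&my_srcu); /* mirrors mmu_notifier_init() */
}
subsys_initcall(my_notifier_setup);

static void my_register(struct my_notifier *mn)
{
        spin_lock(&my_lock);
        hlist_add_head_rcu(&mn->hlist, &my_notifier_list);
        spin_unlock(&my_lock);
}

/* Read side: callbacks may sleep because this is SRCU, not plain RCU. */
static void my_invalidate(unsigned long addr)
{
        struct my_notifier *mn;
        int id;

        id = srcu_read_lock(&my_srcu);
        hlist_for_each_entry_rcu(mn, &my_notifier_list, hlist)
                if (mn->invalidate)
                        mn->invalidate(mn, addr);
        srcu_read_unlock(&my_srcu, id);
}

/* Update side: unlink, then wait until no reader can still see *mn. */
static void my_unregister(struct my_notifier *mn)
{
        spin_lock(&my_lock);
        hlist_del_init_rcu(&mn->hlist);
        spin_unlock(&my_lock);

        synchronize_srcu(&my_srcu);
}

Using one static srcu_struct for all mms, rather than one per mm_struct, leaves struct mm_struct unchanged and needs only the subsys_initcall() in the patch to initialize it; the trade-off is that synchronize_srcu() waits out SRCU readers of every mm, not just the one being unregistered or torn down.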
