Diffstat (limited to 'mm/mmu_notifier.c')
-rw-r--r--	mm/mmu_notifier.c	98
1 file changed, 47 insertions(+), 51 deletions(-)
diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
index 479a1e751a7..41cefdf0aad 100644
--- a/mm/mmu_notifier.c
+++ b/mm/mmu_notifier.c
@@ -37,7 +37,6 @@ static struct srcu_struct srcu;
void __mmu_notifier_release(struct mm_struct *mm)
{
struct mmu_notifier *mn;
- struct hlist_node *n;
int id;
/*
@@ -45,13 +44,12 @@ void __mmu_notifier_release(struct mm_struct *mm)
* ->release returns.
*/
id = srcu_read_lock(&srcu);
- hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist)
+ hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist)
/*
- * if ->release runs before mmu_notifier_unregister it
- * must be handled as it's the only way for the driver
- * to flush all existing sptes and stop the driver
- * from establishing any more sptes before all the
- * pages in the mm are freed.
+ * If ->release runs before mmu_notifier_unregister it must be
+ * handled, as it's the only way for the driver to flush all
+ * existing sptes and stop the driver from establishing any more
+ * sptes before all the pages in the mm are freed.
*/
if (mn->ops->release)
mn->ops->release(mn, mm);
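The ->release contract spelled out in the comment above is what a driver-side notifier has to honour. A minimal sketch, assuming a hypothetical my_device structure that embeds the notifier and a my_flush_all_sptes() helper (neither name is part of this patch):

static void my_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	struct my_device *dev = container_of(mn, struct my_device, mn);

	/* Last chance: tear down every secondary pte and refuse new ones. */
	my_flush_all_sptes(dev);
}

static const struct mmu_notifier_ops my_ops = {
	.release = my_release,
};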
@@ -64,22 +62,22 @@ void __mmu_notifier_release(struct mm_struct *mm)
hlist);
/*
* We arrived before mmu_notifier_unregister so
- * mmu_notifier_unregister will do nothing other than
- * to wait ->release to finish and
- * mmu_notifier_unregister to return.
+ * mmu_notifier_unregister will do nothing other than
+ * wait for ->release to finish and then return to its
+ * caller.
*/
hlist_del_init_rcu(&mn->hlist);
}
spin_unlock(&mm->mmu_notifier_mm->lock);
/*
- * synchronize_srcu here prevents mmu_notifier_release to
- * return to exit_mmap (which would proceed freeing all pages
- * in the mm) until the ->release method returns, if it was
- * invoked by mmu_notifier_unregister.
+ * synchronize_srcu here prevents mmu_notifier_release from returning to
+ * exit_mmap (which would proceed with freeing all pages in the mm)
+ * until the ->release method returns, if it was invoked by
+ * mmu_notifier_unregister.
*
- * The mmu_notifier_mm can't go away from under us because one
- * mm_count is hold by exit_mmap.
+ * The mmu_notifier_mm can't go away from under us because one mm_count
+ * is held by exit_mmap.
*/
synchronize_srcu(&srcu);
}
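The SRCU pairing used throughout this file: every notifier callback is invoked inside an srcu_read_lock()/srcu_read_unlock() pair on the file-global srcu, and synchronize_srcu() in the teardown paths waits for all such read-side sections to drain. A condensed sketch of the pattern, not a complete function from this file:

/* Read side: the notifier stays valid for the whole locked section. */
int id = srcu_read_lock(&srcu);
if (mn->ops->release)
	mn->ops->release(mn, mm);
srcu_read_unlock(&srcu, id);

/* Write side: returns only once every reader section above has exited. */
synchronize_srcu(&srcu);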
@@ -93,11 +91,10 @@ int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
unsigned long address)
{
struct mmu_notifier *mn;
- struct hlist_node *n;
int young = 0, id;
id = srcu_read_lock(&srcu);
- hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
+ hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
if (mn->ops->clear_flush_young)
young |= mn->ops->clear_flush_young(mn, mm, address);
}
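The recurring one-line conversion in this and the following hunks tracks the hlist_for_each_entry_rcu() API change that dropped the separate struct hlist_node * cursor; the iterator now derives its position from the entry's own hlist member. Before and after, with do_something() as a placeholder body:

/* Old form: a caller-supplied struct hlist_node *n tracked the position. */
struct hlist_node *n;
hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist)
	do_something(mn);

/* New form: the cursor is derived from mn->hlist itself. */
hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist)
	do_something(mn);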
@@ -110,11 +107,10 @@ int __mmu_notifier_test_young(struct mm_struct *mm,
unsigned long address)
{
struct mmu_notifier *mn;
- struct hlist_node *n;
int young = 0, id;
id = srcu_read_lock(&srcu);
- hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
+ hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
if (mn->ops->test_young) {
young = mn->ops->test_young(mn, mm, address);
if (young)
@@ -130,11 +126,10 @@ void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address,
pte_t pte)
{
struct mmu_notifier *mn;
- struct hlist_node *n;
int id;
id = srcu_read_lock(&srcu);
- hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
+ hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
if (mn->ops->change_pte)
mn->ops->change_pte(mn, mm, address, pte);
}
@@ -145,11 +140,10 @@ void __mmu_notifier_invalidate_page(struct mm_struct *mm,
unsigned long address)
{
struct mmu_notifier *mn;
- struct hlist_node *n;
int id;
id = srcu_read_lock(&srcu);
- hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
+ hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
if (mn->ops->invalidate_page)
mn->ops->invalidate_page(mn, mm, address);
}
@@ -160,31 +154,31 @@ void __mmu_notifier_invalidate_range_start(struct mm_struct *mm,
unsigned long start, unsigned long end)
{
struct mmu_notifier *mn;
- struct hlist_node *n;
int id;
id = srcu_read_lock(&srcu);
- hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
+ hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
if (mn->ops->invalidate_range_start)
mn->ops->invalidate_range_start(mn, mm, start, end);
}
srcu_read_unlock(&srcu, id);
}
+EXPORT_SYMBOL_GPL(__mmu_notifier_invalidate_range_start);
void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
unsigned long start, unsigned long end)
{
struct mmu_notifier *mn;
- struct hlist_node *n;
int id;
id = srcu_read_lock(&srcu);
- hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
+ hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
if (mn->ops->invalidate_range_end)
mn->ops->invalidate_range_end(mn, mm, start, end);
}
srcu_read_unlock(&srcu, id);
}
+EXPORT_SYMBOL_GPL(__mmu_notifier_invalidate_range_end);
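The two EXPORT_SYMBOL_GPL additions let modular code reach these hooks through the inline wrappers in include/linux/mmu_notifier.h, which call the __mmu_notifier_* variants only when mm_has_notifiers(mm) is true. A hypothetical modular caller (my_unmap_range is not from this patch):

static void my_unmap_range(struct mm_struct *mm,
			   unsigned long start, unsigned long end)
{
	mmu_notifier_invalidate_range_start(mm, start, end);
	/* ... tear down the primary ptes for [start, end) here ... */
	mmu_notifier_invalidate_range_end(mm, start, end);
}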
static int do_mmu_notifier_register(struct mmu_notifier *mn,
struct mm_struct *mm,
@@ -196,28 +190,28 @@ static int do_mmu_notifier_register(struct mmu_notifier *mn,
BUG_ON(atomic_read(&mm->mm_users) <= 0);
/*
- * Verify that mmu_notifier_init() already run and the global srcu is
- * initialized.
- */
+ * Verify that mmu_notifier_init() has already run and the global
+ * srcu is initialized.
+ */
BUG_ON(!srcu.per_cpu_ref);
+ ret = -ENOMEM;
+ mmu_notifier_mm = kmalloc(sizeof(struct mmu_notifier_mm), GFP_KERNEL);
+ if (unlikely(!mmu_notifier_mm))
+ goto out;
+
if (take_mmap_sem)
down_write(&mm->mmap_sem);
ret = mm_take_all_locks(mm);
if (unlikely(ret))
- goto out;
+ goto out_clean;
if (!mm_has_notifiers(mm)) {
- mmu_notifier_mm = kmalloc(sizeof(struct mmu_notifier_mm),
- GFP_KERNEL);
- if (unlikely(!mmu_notifier_mm)) {
- ret = -ENOMEM;
- goto out_of_mem;
- }
INIT_HLIST_HEAD(&mmu_notifier_mm->list);
spin_lock_init(&mmu_notifier_mm->lock);
mm->mmu_notifier_mm = mmu_notifier_mm;
+ mmu_notifier_mm = NULL;
}
atomic_inc(&mm->mm_count);
@@ -233,12 +227,12 @@ static int do_mmu_notifier_register(struct mmu_notifier *mn,
hlist_add_head(&mn->hlist, &mm->mmu_notifier_mm->list);
spin_unlock(&mm->mmu_notifier_mm->lock);
-out_of_mem:
mm_drop_all_locks(mm);
-out:
+out_clean:
if (take_mmap_sem)
up_write(&mm->mmap_sem);
-
+ kfree(mmu_notifier_mm);
+out:
BUG_ON(atomic_read(&mm->mm_users) <= 0);
return ret;
}
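The reshuffle above moves the GFP_KERNEL allocation out from under mmap_sem and mm_take_all_locks(), where sleeping for memory is undesirable. The idiom: allocate first, hand the buffer over inside the critical section by clearing the local pointer, and rely on an unconditional kfree() on the exit path, which is a no-op for NULL. A generic sketch; thing, lock_everything(), need_buf(), and install() are illustrative names only:

int register_thing(void)
{
	struct thing *buf;

	buf = kmalloc(sizeof(*buf), GFP_KERNEL);	/* may sleep: no locks held */
	if (unlikely(!buf))
		return -ENOMEM;

	lock_everything();
	if (need_buf()) {
		install(buf);
		buf = NULL;			/* ownership handed over */
	}
	unlock_everything();

	kfree(buf);				/* no-op when buf was installed */
	return 0;
}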
@@ -296,29 +290,32 @@ void mmu_notifier_unregister(struct mmu_notifier *mn, struct mm_struct *mm)
if (!hlist_unhashed(&mn->hlist)) {
/*
- * SRCU here will force exit_mmap to wait ->release to finish
- * before freeing the pages.
+ * SRCU here will force exit_mmap to wait for ->release to
+ * finish before freeing the pages.
*/
int id;
id = srcu_read_lock(&srcu);
/*
- * exit_mmap will block in mmu_notifier_release to
- * guarantee ->release is called before freeing the
- * pages.
+ * exit_mmap will block in mmu_notifier_release to guarantee
+ * that ->release is called before freeing the pages.
*/
if (mn->ops->release)
mn->ops->release(mn, mm);
srcu_read_unlock(&srcu, id);
spin_lock(&mm->mmu_notifier_mm->lock);
- hlist_del_rcu(&mn->hlist);
+ /*
+ * Cannot use hlist_del_rcu() since __mmu_notifier_release()
+ * can delete it before we hold the lock.
+ */
+ hlist_del_init_rcu(&mn->hlist);
spin_unlock(&mm->mmu_notifier_mm->lock);
}
/*
- * Wait any running method to finish, of course including
- * ->release if it was run by mmu_notifier_relase instead of us.
+ * Wait for any running method to finish, of course including
+ * ->release if it was run by mmu_notifier_release instead of us.
*/
synchronize_srcu(&srcu);
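The switch to hlist_del_init_rcu() matters because __mmu_notifier_release() and mmu_notifier_unregister() can race to remove the same entry: the _init variant re-marks the node as unhashed, so the hlist_unhashed() test above turns the second removal into a no-op instead of a write through a poisoned pprev, as plain hlist_del_rcu() would allow. Its definition, from include/linux/rculist.h, shown for reference:

static inline void hlist_del_init_rcu(struct hlist_node *n)
{
	if (!hlist_unhashed(n)) {	/* already removed? then do nothing */
		__hlist_del(n);
		n->pprev = NULL;	/* hlist_unhashed() is true again */
	}
}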
@@ -332,5 +329,4 @@ static int __init mmu_notifier_init(void)
{
return init_srcu_struct(&srcu);
}
-
-module_init(mmu_notifier_init);
+subsys_initcall(mmu_notifier_init);
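For built-in code, module_init() expands to device_initcall() (level 6), while subsys_initcall() runs at level 4, so this change initializes the SRCU structure earlier in boot, before driver init code can reach do_mmu_notifier_register() and its BUG_ON(!srcu.per_cpu_ref) check. A sketch of the resulting ordering, where my_driver_init is a hypothetical notifier user:

subsys_initcall(mmu_notifier_init);	/* level 4: srcu ready early */
device_initcall(my_driver_init);	/* level 6: may register notifiers */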