Diffstat (limited to 'mm')
 mm/memory-failure.c |    2 +-
 mm/page_alloc.c     |    2 +-
 mm/page_isolation.c |    8 ++++----
 3 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 55d7c8026ab..d84c5e5331b 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1265,7 +1265,7 @@ void memory_failure_queue(unsigned long pfn, int trapno, int flags)
 	if (kfifo_put(&mf_cpu->fifo, &entry))
 		schedule_work_on(smp_processor_id(), &mf_cpu->work);
 	else
-		pr_err("Memory failure: buffer overflow when queuing memory failure at 0x%#lx\n",
+		pr_err("Memory failure: buffer overflow when queuing memory failure at %#lx\n",
 		       pfn);
 	spin_unlock_irqrestore(&mf_cpu->lock, proc_flags);
 	put_cpu_var(memory_failure_cpu);
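
The dropped "0x" prefix above was redundant: the "#" flag in "%#lx" already makes the conversion print a leading "0x", so the old format string would have produced output like "0x0x12345". A minimal user-space sketch of the same behaviour with plain printf (not the kernel's printk implementation, which may differ in corner cases such as a zero value):

#include <stdio.h>

int main(void)
{
	unsigned long pfn = 0x12345UL;

	/* the "#" alternate form already prepends "0x" to non-zero values */
	printf("%#lx\n", pfn);   /* prints: 0x12345 */

	/* the old format string duplicated the prefix */
	printf("0x%#lx\n", pfn); /* prints: 0x0x12345 */

	return 0;
}
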
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index b100255dedd..c2b59dbda19 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -768,7 +768,7 @@ void __init __free_pages_bootmem(struct page *page, unsigned int order)
 }
 
 #ifdef CONFIG_CMA
-/* Free whole pageblock and set it's migration type to MIGRATE_CMA. */
+/* Free whole pageblock and set its migration type to MIGRATE_CMA. */
 void __init init_cma_reserved_pageblock(struct page *page)
 {
 	unsigned i = pageblock_nr_pages;
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index 383bdbb98b0..0cee10ffb98 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -226,9 +226,9 @@ int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
 	int ret;
 
 	/*
-	 * Note: pageblock_nr_page != MAX_ORDER. Then, chunks of free page
-	 * is not aligned to pageblock_nr_pages.
-	 * Then we just check pagetype fist.
+	 * Note: pageblock_nr_pages != MAX_ORDER. Then, chunks of free pages
+	 * are not aligned to pageblock_nr_pages.
+	 * Then we just check migratetype first.
 	 */
 	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
 		page = __first_valid_page(pfn, pageblock_nr_pages);
@@ -238,7 +238,7 @@ int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
 	page = __first_valid_page(start_pfn, end_pfn - start_pfn);
 	if ((pfn < end_pfn) || !page)
 		return -EBUSY;
-	/* Check all pages are free or Marked as ISOLATED */
+	/* Check all pages are free or marked as ISOLATED */
 	zone = page_zone(page);
 	spin_lock_irqsave(&zone->lock, flags);
 	ret = __test_page_isolated_in_pageblock(start_pfn, end_pfn,