author     Luden <luden@ghostmail.com>    2016-02-13 23:54:06 +0100
committer  Ziyan <jaraidaniel@gmail.com>  2016-05-01 23:35:56 +0200
commit     c3f47a7451568ae3d3e33f75666fc37439794223
tree       3149c6015cd4157361a44d0fa863464d68d9be9d  /mm
parent     ab26843c057773f42f5b46e4e4a519b39707253e
Extra CMA debugging code.
Added extra CMA debug logging to the filesystem (shmem), compaction, page isolation and migration code. This makes it easier to see which parts of the kernel are responsible for the most migration failures.
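The print_cma_page_stats() helper that the patch calls from mm/compaction.c and mm/migrate.c is defined in mm/page_isolation.c further down, but the CONFIG_CMA_DEBUG_VERBOSE Kconfig entry and the declaration visible to the other files are not part of the mm/-only diffstat shown here. A minimal sketch of what that declaration might look like (the header location and the fallback stub are assumptions, not taken from this commit):

	/* Hypothetical declaration, e.g. in include/linux/page-isolation.h;
	 * the real commit's header change is outside the mm/ diffstat below. */
	struct page;

	#ifdef CONFIG_CMA_DEBUG_VERBOSE
	/* Defined in mm/page_isolation.c; dumps refcount, flags and pageblock state. */
	void print_cma_page_stats(struct page *page);
	#else
	/* Optional no-op stub; the patch guards every call site with #ifdef anyway. */
	static inline void print_cma_page_stats(struct page *page) { }
	#endif

Every call site in the diff is wrapped in its own #ifdef CONFIG_CMA_DEBUG_VERBOSE block, so the compiled kernel carries no extra code when the option is disabled.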
Diffstat (limited to 'mm')
-rw-r--r--  mm/compaction.c       7
-rw-r--r--  mm/migrate.c        168
-rw-r--r--  mm/page_isolation.c  17
-rw-r--r--  mm/shmem.c           16
4 files changed, 187 insertions(+), 21 deletions(-)
diff --git a/mm/compaction.c b/mm/compaction.c
index c2020010412..c1f27c0f519 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -328,8 +328,13 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
mode |= ISOLATE_ASYNC_MIGRATE;
/* Try isolate the page */
- if (__isolate_lru_page(page, mode, 0) != 0)
+ if (__isolate_lru_page(page, mode, 0) != 0) {
+#ifdef CONFIG_CMA_DEBUG_VERBOSE
+ pr_err("isolate_migratepages_range: failed to isolate LRU pfn 0x%lx, skipping\n", low_pfn);
+ print_cma_page_stats(page);
+#endif
continue;
+ }
VM_BUG_ON(PageTransCompound(page));
diff --git a/mm/migrate.c b/mm/migrate.c
index 76abb9ad067..e598fae088c 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -300,8 +300,13 @@ static int migrate_page_move_mapping(struct address_space *mapping,
if (!mapping) {
/* Anonymous page without mapping */
- if (page_count(page) != 1)
+ if (page_count(page) != 1) {
+#ifdef CONFIG_CMA_DEBUG_VERBOSE
+ pr_err("migrate_page_move_mapping: page_count != 1\n");
+ print_cma_page_stats(page);
+#endif
return -EAGAIN;
+ }
return 0;
}
@@ -313,11 +318,19 @@ static int migrate_page_move_mapping(struct address_space *mapping,
expected_count = 2 + page_has_private(page);
if (page_count(page) != expected_count ||
radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) {
+#ifdef CONFIG_CMA_DEBUG_VERBOSE
+ pr_err("migrate_page_move_mapping: page_count != expected (%d)\n", expected_count);
+ print_cma_page_stats(page);
+#endif
spin_unlock_irq(&mapping->tree_lock);
return -EAGAIN;
}
if (!page_freeze_refs(page, expected_count)) {
+#ifdef CONFIG_CMA_DEBUG_VERBOSE
+ pr_err("migrate_page_move_mapping: couldn't freeze refs, expected = %d\n", expected_count);
+ print_cma_page_stats(page);
+#endif
spin_unlock_irq(&mapping->tree_lock);
return -EAGAIN;
}
@@ -331,6 +344,10 @@ static int migrate_page_move_mapping(struct address_space *mapping,
*/
if (mode == MIGRATE_ASYNC && head &&
!buffer_migrate_lock_buffers(head, mode)) {
+#ifdef CONFIG_CMA_DEBUG_VERBOSE
+ pr_err("migrate_page_move_mapping: failed to lock buffers in async mode\n");
+ print_cma_page_stats(page);
+#endif
page_unfreeze_refs(page, expected_count);
spin_unlock_irq(&mapping->tree_lock);
return -EAGAIN;
@@ -522,12 +539,26 @@ int buffer_migrate_page(struct address_space *mapping,
struct buffer_head *bh, *head;
int rc;
- if (!page_has_buffers(page))
- return migrate_page(mapping, newpage, page, mode);
+ if (!page_has_buffers(page)) {
+ rc = migrate_page(mapping, newpage, page, mode);
+#ifdef CONFIG_CMA_DEBUG_VERBOSE
+ if (rc) {
+ pr_err("buffer_migrate_page: migrate_page failed, result = %d\n", rc);
+ print_cma_page_stats(page);
+ }
+#endif
+ return rc;
+ }
head = page_buffers(page);
rc = migrate_page_move_mapping(mapping, newpage, page, head, mode);
+#ifdef CONFIG_CMA_DEBUG_VERBOSE
+ if (rc) {
+ pr_err("buffer_migrate_page: migrate_page_move_mapping failed, result = %d\n", rc);
+ print_cma_page_stats(page);
+ }
+#endif
if (rc)
return rc;
@@ -614,14 +645,21 @@ static int writeout(struct address_space *mapping, struct page *page)
/*
* Default handling if a filesystem does not provide a migration function.
*/
-static int fallback_migrate_page(struct address_space *mapping,
+int fallback_migrate_page(struct address_space *mapping,
struct page *newpage, struct page *page, enum migrate_mode mode)
{
+ int rc;
if (PageDirty(page)) {
- /* Only writeback pages in full synchronous migration */
if (mode != MIGRATE_SYNC)
return -EBUSY;
- return writeout(mapping, page);
+ rc = writeout(mapping, page);
+#ifdef CONFIG_CMA_DEBUG_VERBOSE
+ if (rc) {
+ pr_err("fallback_migrate_page: writeout failed with result %d\n", rc);
+ print_cma_page_stats(page);
+ }
+#endif
+ return rc;
}
/*
@@ -629,10 +667,16 @@ static int fallback_migrate_page(struct address_space *mapping,
* We must have no buffers or drop them.
*/
if (page_has_private(page) &&
- !try_to_release_page(page, GFP_KERNEL))
+ !try_to_release_page(page, GFP_KERNEL)) {
+#ifdef CONFIG_CMA_DEBUG_VERBOSE
+ pr_err("fallback_migrate_page: try_to_release_page failed\n");
+ print_cma_page_stats(page);
+#endif
return -EAGAIN;
+ }
- return migrate_page(mapping, newpage, page, mode);
+ rc = migrate_page(mapping, newpage, page, mode);
+ return rc;
}
/*
@@ -667,9 +711,16 @@ static int move_to_new_page(struct page *newpage, struct page *page,
SetPageSwapBacked(newpage);
mapping = page_mapping(page);
- if (!mapping)
+ if (!mapping) {
rc = migrate_page(mapping, newpage, page, mode);
- else if (mapping->a_ops->migratepage)
+#ifdef CONFIG_CMA_DEBUG_VERBOSE
+ if (rc) {
+ pr_err("move_to_new_page failed in 'no mapping' case with error %d\n", rc);
+ print_cma_page_stats(page);
+ }
+#endif
+ }
+ else if (mapping->a_ops->migratepage) {
/*
* Most pages have a mapping and most filesystems provide a
* migratepage callback. Anonymous pages are part of swap
@@ -678,8 +729,22 @@ static int move_to_new_page(struct page *newpage, struct page *page,
*/
rc = mapping->a_ops->migratepage(mapping,
newpage, page, mode);
- else
+#ifdef CONFIG_CMA_DEBUG_VERBOSE
+ if (rc) {
+ pr_err("move_to_new_page failed in 'ops->migratepage' case with error %d\n", rc);
+ print_cma_page_stats(page);
+ }
+#endif
+ }
+ else {
rc = fallback_migrate_page(mapping, newpage, page, mode);
+#ifdef CONFIG_CMA_DEBUG_VERBOSE
+ if (rc) {
+ pr_err("move_to_new_page failed in 'fallback_migrate_page' case with error %d\n", rc);
+ print_cma_page_stats(page);
+ }
+#endif
+ }
if (rc) {
newpage->mapping = NULL;
@@ -703,6 +768,10 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
struct anon_vma *anon_vma = NULL;
if (!trylock_page(page)) {
+#ifdef CONFIG_CMA_DEBUG_VERBOSE
+ pr_err("__unmap_and_move: failed to trylock_page, mode = %d, force = %d\n", mode, force);
+ print_cma_page_stats(page);
+#endif
if (!force || mode == MIGRATE_ASYNC)
goto out;
@@ -719,8 +788,13 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
* avoid the use of lock_page for direct compaction
* altogether.
*/
- if (current->flags & PF_MEMALLOC)
+ if (current->flags & PF_MEMALLOC) {
+#ifdef CONFIG_CMA_DEBUG_VERBOSE
+ pr_err("__unmap_and_move: failed locking due to PF_MEMALLOC, flags = 0x%x\n", current->flags);
+ print_cma_page_stats(page);
+#endif
goto out;
+ }
lock_page(page);
}
@@ -735,6 +809,10 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
* serializes that).
*/
if (PageKsm(page) && !offlining) {
+#ifdef CONFIG_CMA_DEBUG_VERBOSE
+ pr_err("__unmap_and_move: PageKsm in non-offlining mode, mode = %d, force = %d\n", mode, force);
+ print_cma_page_stats(page);
+#endif
rc = -EBUSY;
goto unlock;
}
@@ -742,6 +820,10 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
/* charge against new page */
charge = mem_cgroup_prepare_migration(page, newpage, &mem, GFP_KERNEL);
if (charge == -ENOMEM) {
+#ifdef CONFIG_CMA_DEBUG_VERBOSE
+ pr_err("__unmap_and_move: failed to charge, mode = %d, force = %d\n", mode, force);
+ print_cma_page_stats(page);
+#endif
rc = -ENOMEM;
goto unlock;
}
@@ -755,6 +837,10 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
* the overhead of stalling is too much
*/
if (mode != MIGRATE_SYNC) {
+#ifdef CONFIG_CMA_DEBUG_VERBOSE
+ pr_err("__unmap_and_move: in PageWriteback with ASYNC migration\n");
+ print_cma_page_stats(page);
+#endif
rc = -EBUSY;
goto uncharge;
}
@@ -762,6 +848,7 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
goto uncharge;
wait_on_page_writeback(page);
}
+
/*
* By try_to_unmap(), page->mapcount goes down to 0 here. In this case,
* we cannot notice that anon_vma is freed while we migrates a page.
@@ -795,6 +882,10 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
*/
remap_swapcache = 0;
} else {
+#ifdef CONFIG_CMA_DEBUG_VERBOSE
+ pr_err("__unmap_and_move: in page_anon case, mode = %d, force = %d\n", mode, force);
+ print_cma_page_stats(page);
+#endif
goto uncharge;
}
}
@@ -814,7 +905,12 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
if (!page->mapping) {
VM_BUG_ON(PageAnon(page));
if (page_has_private(page)) {
- try_to_free_buffers(page);
+ if (!try_to_free_buffers(page)) {
+#ifdef CONFIG_CMA_DEBUG_VERBOSE
+ pr_err("__unmap_and_move: in !page_mapping/page_has_private case, mode = %d, force = %d\n", mode, force);
+ print_cma_page_stats(page);
+#endif
+ }
goto uncharge;
}
goto skip_unmap;
@@ -824,8 +920,20 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
try_to_unmap(page, TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
skip_unmap:
- if (!page_mapped(page))
+ if (!page_mapped(page)) {
rc = move_to_new_page(newpage, page, remap_swapcache, mode);
+#ifdef CONFIG_CMA_DEBUG_VERBOSE
+ if (rc) {
+ pr_err("__unmap_and_move: move_to_new_page failed, mode = %d, force = %d, code = %d\n", mode, force, rc);
+ print_cma_page_stats(page);
+ }
+#endif
+ } else if (rc) {
+#ifdef CONFIG_CMA_DEBUG_VERBOSE
+ pr_err("__unmap_and_move: skip_unmap/page_mapped case, mode = %d, force = %d, code = %d\n", mode, force, rc);
+ print_cma_page_stats(page);
+#endif
+ }
if (rc && remap_swapcache)
remove_migration_ptes(page, page);
@@ -855,17 +963,31 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
int *result = NULL;
struct page *newpage = get_new_page(page, private, &result);
- if (!newpage)
+ if (!newpage) {
+#ifdef CONFIG_CMA_DEBUG_VERBOSE
+ pr_err("unmap_and_move: get_new_page failed, mode = %d, force = %d\n", mode, force);
+ print_cma_page_stats(page);
+#endif
return -ENOMEM;
+ }
if (page_count(page) == 1) {
/* page was freed from under us. So we are done. */
goto out;
}
- if (unlikely(PageTransHuge(page)))
- if (unlikely(split_huge_page(page)))
+ if (unlikely(PageTransHuge(page))) {
+#ifdef CONFIG_CMA_DEBUG_VERBOSE
+ pr_err("unmap_and_move: calling split_huge_page\n");
+ print_cma_page_stats(page);
+#endif
+ if (unlikely(split_huge_page(page))) {
+#ifdef CONFIG_CMA_DEBUG_VERBOSE
+ pr_err("unmap_and_move: split_huge_page failed\n");
+#endif
goto out;
+ }
+ }
rc = __unmap_and_move(page, newpage, force, offlining, mode);
out:
@@ -991,7 +1113,7 @@ int migrate_pages(struct list_head *from,
struct page *page;
struct page *page2;
int swapwrite = current->flags & PF_SWAPWRITE;
- int rc;
+ int rc = 0;
if (!swapwrite)
current->flags |= PF_SWAPWRITE;
@@ -1021,6 +1143,11 @@ int migrate_pages(struct list_head *from,
}
}
}
+#ifdef CONFIG_CMA_DEBUG_VERBOSE
+ if (rc != 0) {
+ pr_err("migrate_pages: iterations ended up at %d pass with return code %d\n", pass, rc);
+ }
+#endif
rc = 0;
out:
if (!swapwrite)
@@ -1041,7 +1168,7 @@ int migrate_huge_pages(struct list_head *from,
int pass = 0;
struct page *page;
struct page *page2;
- int rc;
+ int rc = 0;
for (pass = 0; pass < 10 && retry; pass++) {
retry = 0;
@@ -1068,6 +1195,9 @@ int migrate_huge_pages(struct list_head *from,
}
}
}
+ if (rc != 0) {
+ pr_err("migrate_pages: iterations ended up at %d pass with return code %d\n", pass, rc);
+ }
rc = 0;
out:
if (rc)
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index c9f04774f2b..fbf6f75a28e 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -5,8 +5,19 @@
#include <linux/mm.h>
#include <linux/page-isolation.h>
#include <linux/pageblock-flags.h>
+#include <linux/ksm.h>
#include "internal.h"
+#ifdef CONFIG_CMA_DEBUG_VERBOSE
+void print_cma_page_stats(struct page *page)
+{
+ pr_err("page: phys = 0x%x, count = %d, order = %lu, private = %ld\n", page_to_phys(page), page_count(page), page_order(page), page_private(page));
+ pr_err("page: buddy = %d, LRU = %d, KSM = %d, mapping = 0x%p, map count = %d\n", PageBuddy(page), PageLRU(page), PageKsm(page), page->mapping, atomic_read(&page->_mapcount));
+ pr_err("page: index = 0x%lx, anon = %d, writeback = %d, dirty = %d, swap cache = %d\n", page->index, PageAnon(page), PageWriteback(page), PageDirty(page), PageSwapCache(page));
+ pr_err("pageblock: migratetype = 0x%x, flags = 0x%lx\n", get_pageblock_migratetype(page), get_pageblock_flags(page));
+}
+#endif
+
static inline struct page *
__first_valid_page(unsigned long pfn, unsigned long nr_pages)
{
@@ -108,8 +119,12 @@ __test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn)
else
break;
}
- if (pfn < end_pfn)
+ if (pfn < end_pfn) {
+#ifdef CONFIG_CMA_DEBUG_VERBOSE
+ print_cma_page_stats(pfn_to_page(pfn));
+#endif
return 0;
+ }
return 1;
}
diff --git a/mm/shmem.c b/mm/shmem.c
index bcfa97dcc0a..4bc852dccb9 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2692,6 +2692,18 @@ static void destroy_inodecache(void)
kmem_cache_destroy(shmem_inode_cachep);
}
+#ifdef CONFIG_CMA_DEBUG_VERBOSE
+int shmem_migrate_page(struct address_space *mapping,
+ struct page *newpage, struct page *page, enum migrate_mode mode)
+{
+ int rc = migrate_page(mapping, newpage, page, mode);
+ if (rc) {
+ pr_err("shmem_migrate_page: migrate_page failed with error %d\n", rc);
+ }
+ return rc;
+}
+#endif
+
static const struct address_space_operations shmem_aops = {
.writepage = shmem_writepage,
.set_page_dirty = __set_page_dirty_no_writeback,
@@ -2700,7 +2712,11 @@ static const struct address_space_operations shmem_aops = {
.write_begin = shmem_write_begin,
.write_end = shmem_write_end,
#endif
+#ifdef CONFIG_CMA_DEBUG_VERBOSE
+ .migratepage = shmem_migrate_page,
+#else
.migratepage = migrate_page,
+#endif
.error_remove_page = generic_error_remove_page,
};
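
With CONFIG_CMA_DEBUG_VERBOSE enabled, every failed isolation or migration attempt logs a pr_err line whose prefix names the failing function (isolate_migratepages_range, migrate_page_move_mapping, buffer_migrate_page, fallback_migrate_page, move_to_new_page, __unmap_and_move, unmap_and_move, shmem_migrate_page), followed by the page state dumped by print_cma_page_stats(). The dominant source of CMA migration failures can then be identified by filtering the kernel log for those prefixes and counting which ones occur most often.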