author     Luden <luden@ghostmail.com>        2016-02-13 23:54:06 +0100
committer  Ziyan <jaraidaniel@gmail.com>      2016-05-01 23:35:56 +0200
commit     c3f47a7451568ae3d3e33f75666fc37439794223 (patch)
tree       3149c6015cd4157361a44d0fa863464d68d9be9d
parent     ab26843c057773f42f5b46e4e4a519b39707253e (diff)
download   kernel_samsung_tuna-c3f47a7451568ae3d3e33f75666fc37439794223.tar.gz
           kernel_samsung_tuna-c3f47a7451568ae3d3e33f75666fc37439794223.tar.bz2
           kernel_samsung_tuna-c3f47a7451568ae3d3e33f75666fc37439794223.zip
Extra CMA debugging code.
Added extra CMA debug logging to the FS, compaction, page isolation and migration code. This makes it easier to see which parts of the kernel are responsible for the most migration failures.
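For orientation, the per-filesystem hooks added below all follow the same wrapper pattern: delegate to the stock migration helper and log any failure so the offending address_space shows up in the kernel log. A minimal sketch of that pattern is shown here; example_migrate_page and example_aops are illustrative names only, the real callbacks (blkdev_migrate_page, ext4_migrate_page, and so on) appear in the hunks that follow.

    #ifdef CONFIG_CMA_DEBUG_VERBOSE
    #include <linux/migrate.h>

    /* Delegate to the stock helper, then report any failure. */
    static int example_migrate_page(struct address_space *mapping,
    		struct page *newpage, struct page *page, enum migrate_mode mode)
    {
    	int rc = fallback_migrate_page(mapping, newpage, page, mode);
    	if (rc)
    		pr_err("example_migrate_page: fallback_migrate_page failed with error %d\n", rc);
    	return rc;
    }
    #endif

    static const struct address_space_operations example_aops = {
    	/* ... the filesystem's usual callbacks ... */
    #ifdef CONFIG_CMA_DEBUG_VERBOSE
    	.migratepage = example_migrate_page,
    #endif
    };

Where a filesystem already sets a non-default .migratepage (ext4 uses buffer_migrate_page, shmem uses migrate_page), the wrapper calls that helper instead of fallback_migrate_page, which is why fallback_migrate_page is exported in include/linux/migrate.h by this patch.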
-rw-r--r--  drivers/base/Kconfig      |   8
-rw-r--r--  fs/block_dev.c            |  18
-rw-r--r--  fs/ext4/inode.c           |  40
-rw-r--r--  fs/fuse/file.c            |  18
-rw-r--r--  fs/inode.c                |  18
-rw-r--r--  fs/ramfs/file-mmu.c       |  18
-rw-r--r--  fs/sysfs/inode.c          |  18
-rw-r--r--  include/linux/migrate.h   |   3
-rw-r--r--  include/linux/mm.h        |   4
-rw-r--r--  mm/compaction.c           |   7
-rw-r--r--  mm/migrate.c              | 168
-rw-r--r--  mm/page_isolation.c       |  17
-rw-r--r--  mm/shmem.c                |  16
13 files changed, 332 insertions(+), 21 deletions(-)
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index d03311d7a2e..23e3722728a 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -210,6 +210,14 @@ config CMA_DEBUG
processing calls such as dma_alloc_from_contiguous().
This option does not affect warning and error messages.
+config CMA_DEBUG_VERBOSE
+ bool "CMA verbose debug messages (DEVELOPMENT)"
+ depends on DEBUG_KERNEL
+ help
+ Turns on verbose debug messages in CMA. Note: this may produce
+ a lot of output; increasing the kernel log buffer size via
+ CONFIG_LOG_BUF_SHIFT is recommended.
+
comment "Default contiguous memory area size:"
config CMA_SIZE_MBYTES
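To actually get the verbose output, the new option has to be enabled together with its DEBUG_KERNEL dependency; a .config fragment might look like the following sketch. The LOG_BUF_SHIFT value of 18 is only an illustration of the "increase the log buffer" recommendation above, not something the patch mandates.

    CONFIG_CMA=y
    CONFIG_DEBUG_KERNEL=y
    CONFIG_CMA_DEBUG_VERBOSE=y
    CONFIG_LOG_BUF_SHIFT=18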
diff --git a/fs/block_dev.c b/fs/block_dev.c
index dba761bea04..e961d3f9308 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -26,6 +26,9 @@
#include <linux/log2.h>
#include <linux/kmemleak.h>
#include <asm/uaccess.h>
+#ifdef CONFIG_CMA_DEBUG_VERBOSE
+#include <linux/migrate.h>
+#endif
#include "internal.h"
struct bdev_inode {
@@ -1574,6 +1577,18 @@ static int blkdev_releasepage(struct page *page, gfp_t wait)
return try_to_free_buffers(page);
}
+#ifdef CONFIG_CMA_DEBUG_VERBOSE
+int blkdev_migrate_page(struct address_space *mapping,
+ struct page *newpage, struct page *page, enum migrate_mode mode)
+{
+ int rc = fallback_migrate_page(mapping, newpage, page, mode);
+ if (rc) {
+ pr_err("blkdev_migrate_page: fallback_migrate_page failed with error %d\n", rc);
+ }
+ return rc;
+}
+#endif
+
static const struct address_space_operations def_blk_aops = {
.readpage = blkdev_readpage,
.writepage = blkdev_writepage,
@@ -1582,6 +1597,9 @@ static const struct address_space_operations def_blk_aops = {
.writepages = generic_writepages,
.releasepage = blkdev_releasepage,
.direct_IO = blkdev_direct_IO,
+#ifdef CONFIG_CMA_DEBUG_VERBOSE
+ .migratepage = blkdev_migrate_page,
+#endif
};
const struct file_operations def_blk_fops = {
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 4c0193284e0..f16a955619f 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -42,6 +42,9 @@
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
+#ifdef CONFIG_CMA_DEBUG_VERBOSE
+#include <linux/migrate.h>
+#endif
#include "ext4_jbd2.h"
#include "xattr.h"
@@ -3868,6 +3871,28 @@ static int ext4_journalled_set_page_dirty(struct page *page)
return __set_page_dirty_nobuffers(page);
}
+#ifdef CONFIG_CMA_DEBUG_VERBOSE
+int ext4_journalled_migrate_page(struct address_space *mapping,
+ struct page *newpage, struct page *page, enum migrate_mode mode)
+{
+ int rc = fallback_migrate_page(mapping, newpage, page, mode);
+ if (rc) {
+ pr_err("ext4_journalled_migrate_page: fallback_migrate_page failed with error %d\n", rc);
+ }
+ return rc;
+}
+
+int ext4_migrate_page(struct address_space *mapping,
+ struct page *newpage, struct page *page, enum migrate_mode mode)
+{
+ int rc = buffer_migrate_page(mapping, newpage, page, mode);
+ if (rc) {
+ pr_err("ext4_migrate_page: buffer_migrate_page failed with error %d\n", rc);
+ }
+ return rc;
+}
+#endif
+
static const struct address_space_operations ext4_ordered_aops = {
.readpage = ext4_readpage,
.readpages = ext4_readpages,
@@ -3878,7 +3903,11 @@ static const struct address_space_operations ext4_ordered_aops = {
.invalidatepage = ext4_invalidatepage,
.releasepage = ext4_releasepage,
.direct_IO = ext4_direct_IO,
+#ifdef CONFIG_CMA_DEBUG_VERBOSE
+ .migratepage = ext4_migrate_page,
+#else
.migratepage = buffer_migrate_page,
+#endif
.is_partially_uptodate = block_is_partially_uptodate,
.error_remove_page = generic_error_remove_page,
};
@@ -3893,7 +3922,11 @@ static const struct address_space_operations ext4_writeback_aops = {
.invalidatepage = ext4_invalidatepage,
.releasepage = ext4_releasepage,
.direct_IO = ext4_direct_IO,
+#ifdef CONFIG_CMA_DEBUG_VERBOSE
+ .migratepage = ext4_migrate_page,
+#else
.migratepage = buffer_migrate_page,
+#endif
.is_partially_uptodate = block_is_partially_uptodate,
.error_remove_page = generic_error_remove_page,
};
@@ -3908,6 +3941,9 @@ static const struct address_space_operations ext4_journalled_aops = {
.bmap = ext4_bmap,
.invalidatepage = ext4_invalidatepage,
.releasepage = ext4_releasepage,
+#ifdef CONFIG_CMA_DEBUG_VERBOSE
+ .migratepage = ext4_journalled_migrate_page,
+#endif
.is_partially_uptodate = block_is_partially_uptodate,
.error_remove_page = generic_error_remove_page,
};
@@ -3923,7 +3959,11 @@ static const struct address_space_operations ext4_da_aops = {
.invalidatepage = ext4_da_invalidatepage,
.releasepage = ext4_releasepage,
.direct_IO = ext4_direct_IO,
+#ifdef CONFIG_CMA_DEBUG_VERBOSE
+ .migratepage = ext4_migrate_page,
+#else
.migratepage = buffer_migrate_page,
+#endif
.is_partially_uptodate = block_is_partially_uptodate,
.error_remove_page = generic_error_remove_page,
};
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index bcd6fd9cb16..dc8dbe498df 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -18,6 +18,9 @@
/* Needed for lru_cache_add_lru() */
#include <linux/swap.h>
#endif
+#ifdef CONFIG_CMA_DEBUG_VERBOSE
+#include <linux/migrate.h>
+#endif
static const struct file_operations fuse_direct_io_file_operations;
@@ -2227,6 +2230,18 @@ static const struct file_operations fuse_direct_io_file_operations = {
/* no splice_read */
};
+#ifdef CONFIG_CMA_DEBUG_VERBOSE
+int fuse_migrate_page(struct address_space *mapping,
+ struct page *newpage, struct page *page, enum migrate_mode mode)
+{
+ int rc = fallback_migrate_page(mapping, newpage, page, mode);
+ if (rc) {
+ pr_err("fuse_migrate_page: fallback_migrate_page failed with error %d\n", rc);
+ }
+ return rc;
+}
+#endif
+
static const struct address_space_operations fuse_file_aops = {
.readpage = fuse_readpage,
.writepage = fuse_writepage,
@@ -2236,6 +2251,9 @@ static const struct address_space_operations fuse_file_aops = {
.readpages = fuse_readpages,
.set_page_dirty = __set_page_dirty_nobuffers,
.bmap = fuse_bmap,
+#ifdef CONFIG_CMA_DEBUG_VERBOSE
+ .migratepage = fuse_migrate_page,
+#endif
};
void fuse_init_file_inode(struct inode *inode)
diff --git a/fs/inode.c b/fs/inode.c
index 8f021d04f3a..325b890a5d4 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -26,6 +26,9 @@
#include <linux/ima.h>
#include <linux/cred.h>
#include <linux/buffer_head.h> /* for inode_has_buffers */
+#ifdef CONFIG_CMA_DEBUG_VERBOSE
+#include <linux/migrate.h>
+#endif
#include "internal.h"
/*
@@ -81,11 +84,26 @@ __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_wb_list_lock);
*/
static DECLARE_RWSEM(iprune_sem);
+#ifdef CONFIG_CMA_DEBUG_VERBOSE
+int empty_migrate_page(struct address_space *mapping,
+ struct page *newpage, struct page *page, enum migrate_mode mode)
+{
+ int rc = fallback_migrate_page(mapping, newpage, page, mode);
+ if (rc) {
+ pr_err("empty_migrate_page: fallback_migrate_page failed with error %d\n", rc);
+ }
+ return rc;
+}
+#endif
+
/*
* Empty aops. Can be used for the cases where the user does not
* define any of the address_space operations.
*/
const struct address_space_operations empty_aops = {
+#ifdef CONFIG_CMA_DEBUG_VERBOSE
+ .migratepage = empty_migrate_page,
+#endif
};
EXPORT_SYMBOL(empty_aops);
diff --git a/fs/ramfs/file-mmu.c b/fs/ramfs/file-mmu.c
index 4884ac5ae9b..95856d458f1 100644
--- a/fs/ramfs/file-mmu.c
+++ b/fs/ramfs/file-mmu.c
@@ -27,14 +27,32 @@
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/ramfs.h>
+#ifdef CONFIG_CMA_DEBUG_VERBOSE
+#include <linux/migrate.h>
+#endif
#include "internal.h"
+#ifdef CONFIG_CMA_DEBUG_VERBOSE
+int ramfs_migrate_page(struct address_space *mapping,
+ struct page *newpage, struct page *page, enum migrate_mode mode)
+{
+ int rc = fallback_migrate_page(mapping, newpage, page, mode);
+ if (rc) {
+ pr_err("ramfs_migrate_page: fallback_migrate_page failed with error %d\n", rc);
+ }
+ return rc;
+}
+#endif
+
const struct address_space_operations ramfs_aops = {
.readpage = simple_readpage,
.write_begin = simple_write_begin,
.write_end = simple_write_end,
.set_page_dirty = __set_page_dirty_no_writeback,
+#ifdef CONFIG_CMA_DEBUG_VERBOSE
+ .migratepage = ramfs_migrate_page,
+#endif
};
const struct file_operations ramfs_file_operations = {
diff --git a/fs/sysfs/inode.c b/fs/sysfs/inode.c
index a494413e486..09a3409a348 100644
--- a/fs/sysfs/inode.c
+++ b/fs/sysfs/inode.c
@@ -22,14 +22,32 @@
#include <linux/sysfs.h>
#include <linux/xattr.h>
#include <linux/security.h>
+#ifdef CONFIG_CMA_DEBUG_VERBOSE
+#include <linux/migrate.h>
+#endif
#include "sysfs.h"
extern struct super_block * sysfs_sb;
+#ifdef CONFIG_CMA_DEBUG_VERBOSE
+int sysfs_migrate_page(struct address_space *mapping,
+ struct page *newpage, struct page *page, enum migrate_mode mode)
+{
+ int rc = fallback_migrate_page(mapping, newpage, page, mode);
+ if (rc) {
+ pr_err("sysfs_migrate_page: fallback_migrate_page failed with error %d\n", rc);
+ }
+ return rc;
+}
+#endif
+
static const struct address_space_operations sysfs_aops = {
.readpage = simple_readpage,
.write_begin = simple_write_begin,
.write_end = simple_write_end,
+#ifdef CONFIG_CMA_DEBUG_VERBOSE
+ .migratepage = sysfs_migrate_page,
+#endif
};
static struct backing_dev_info sysfs_backing_dev_info = {
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index eaf867412f7..7b5848c4da5 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -25,6 +25,9 @@ enum migrate_mode {
extern void putback_lru_pages(struct list_head *l);
extern int migrate_page(struct address_space *,
struct page *, struct page *, enum migrate_mode);
+extern int fallback_migrate_page(struct address_space *,
+ struct page *, struct page *, enum migrate_mode);
+
extern int migrate_pages(struct list_head *l, new_page_t x,
unsigned long private, bool offlining,
enum migrate_mode mode);
diff --git a/include/linux/mm.h b/include/linux/mm.h
index fa535c0177b..aee0e1186cd 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1669,5 +1669,9 @@ extern void copy_user_huge_page(struct page *dst, struct page *src,
unsigned int pages_per_huge_page);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
+#ifdef CONFIG_CMA_DEBUG_VERBOSE
+void print_cma_page_stats(struct page *page);
+#endif
+
#endif /* __KERNEL__ */
#endif /* _LINUX_MM_H */
diff --git a/mm/compaction.c b/mm/compaction.c
index c2020010412..c1f27c0f519 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -328,8 +328,13 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
mode |= ISOLATE_ASYNC_MIGRATE;
/* Try isolate the page */
- if (__isolate_lru_page(page, mode, 0) != 0)
+ if (__isolate_lru_page(page, mode, 0) != 0) {
+#ifdef CONFIG_CMA_DEBUG_VERBOSE
+ pr_err("isolate_migratepages_range: failed to isolate LRU pfn 0x%lx, skipping\n", low_pfn);
+ print_cma_page_stats(page);
+#endif
continue;
+ }
VM_BUG_ON(PageTransCompound(page));
diff --git a/mm/migrate.c b/mm/migrate.c
index 76abb9ad067..e598fae088c 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -300,8 +300,13 @@ static int migrate_page_move_mapping(struct address_space *mapping,
if (!mapping) {
/* Anonymous page without mapping */
- if (page_count(page) != 1)
+ if (page_count(page) != 1) {
+#ifdef CONFIG_CMA_DEBUG_VERBOSE
+ pr_err("migrate_page_move_mapping: page_count != 1\n");
+ print_cma_page_stats(page);
+#endif
return -EAGAIN;
+ }
return 0;
}
@@ -313,11 +318,19 @@ static int migrate_page_move_mapping(struct address_space *mapping,
expected_count = 2 + page_has_private(page);
if (page_count(page) != expected_count ||
radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) {
+#ifdef CONFIG_CMA_DEBUG_VERBOSE
+ pr_err("migrate_page_move_mapping: page_count != expected (%d)\n", expected_count);
+ print_cma_page_stats(page);
+#endif
spin_unlock_irq(&mapping->tree_lock);
return -EAGAIN;
}
if (!page_freeze_refs(page, expected_count)) {
+#ifdef CONFIG_CMA_DEBUG_VERBOSE
+ pr_err("migrate_page_move_mapping: couldn't freeze refs, expected = %d\n", expected_count);
+ print_cma_page_stats(page);
+#endif
spin_unlock_irq(&mapping->tree_lock);
return -EAGAIN;
}
@@ -331,6 +344,10 @@ static int migrate_page_move_mapping(struct address_space *mapping,
*/
if (mode == MIGRATE_ASYNC && head &&
!buffer_migrate_lock_buffers(head, mode)) {
+#ifdef CONFIG_CMA_DEBUG_VERBOSE
+ pr_err("migrate_page_move_mapping: failed to lock buffers in async mode\n");
+ print_cma_page_stats(page);
+#endif
page_unfreeze_refs(page, expected_count);
spin_unlock_irq(&mapping->tree_lock);
return -EAGAIN;
@@ -522,12 +539,26 @@ int buffer_migrate_page(struct address_space *mapping,
struct buffer_head *bh, *head;
int rc;
- if (!page_has_buffers(page))
- return migrate_page(mapping, newpage, page, mode);
+ if (!page_has_buffers(page)) {
+ rc = migrate_page(mapping, newpage, page, mode);
+#ifdef CONFIG_CMA_DEBUG_VERBOSE
+ if (rc) {
+ pr_err("buffer_migrate_page: migrate_page failed, result = %d\n", rc);
+ print_cma_page_stats(page);
+ }
+#endif
+ return rc;
+ }
head = page_buffers(page);
rc = migrate_page_move_mapping(mapping, newpage, page, head, mode);
+#ifdef CONFIG_CMA_DEBUG_VERBOSE
+ if (rc) {
+ pr_err("buffer_migrate_page: migrate_page_move_mapping failed, result = %d\n", rc);
+ print_cma_page_stats(page);
+ }
+#endif
if (rc)
return rc;
@@ -614,14 +645,21 @@ static int writeout(struct address_space *mapping, struct page *page)
/*
* Default handling if a filesystem does not provide a migration function.
*/
-static int fallback_migrate_page(struct address_space *mapping,
+int fallback_migrate_page(struct address_space *mapping,
struct page *newpage, struct page *page, enum migrate_mode mode)
{
+ int rc;
if (PageDirty(page)) {
- /* Only writeback pages in full synchronous migration */
if (mode != MIGRATE_SYNC)
return -EBUSY;
- return writeout(mapping, page);
+ rc = writeout(mapping, page);
+#ifdef CONFIG_CMA_DEBUG_VERBOSE
+ if (rc) {
+ pr_err("fallback_migrate_page: writeout failed with result %d\n", rc);
+ print_cma_page_stats(page);
+ }
+#endif
+ return rc;
}
/*
@@ -629,10 +667,16 @@ static int fallback_migrate_page(struct address_space *mapping,
* We must have no buffers or drop them.
*/
if (page_has_private(page) &&
- !try_to_release_page(page, GFP_KERNEL))
+ !try_to_release_page(page, GFP_KERNEL)) {
+#ifdef CONFIG_CMA_DEBUG_VERBOSE
+ pr_err("fallback_migrate_page: try_to_release_page failed\n");
+ print_cma_page_stats(page);
+#endif
return -EAGAIN;
+ }
- return migrate_page(mapping, newpage, page, mode);
+ rc = migrate_page(mapping, newpage, page, mode);
+ return rc;
}
/*
@@ -667,9 +711,16 @@ static int move_to_new_page(struct page *newpage, struct page *page,
SetPageSwapBacked(newpage);
mapping = page_mapping(page);
- if (!mapping)
+ if (!mapping) {
rc = migrate_page(mapping, newpage, page, mode);
- else if (mapping->a_ops->migratepage)
+#ifdef CONFIG_CMA_DEBUG_VERBOSE
+ if (rc) {
+ pr_err("move_to_new_page failed in 'no mapping' case with error %d\n", rc);
+ print_cma_page_stats(page);
+ }
+#endif
+ }
+ else if (mapping->a_ops->migratepage) {
/*
* Most pages have a mapping and most filesystems provide a
* migratepage callback. Anonymous pages are part of swap
@@ -678,8 +729,22 @@ static int move_to_new_page(struct page *newpage, struct page *page,
*/
rc = mapping->a_ops->migratepage(mapping,
newpage, page, mode);
- else
+#ifdef CONFIG_CMA_DEBUG_VERBOSE
+ if (rc) {
+ pr_err("move_to_new_page failed in 'ops->migratepage' case with error %d\n", rc);
+ print_cma_page_stats(page);
+ }
+#endif
+ }
+ else {
rc = fallback_migrate_page(mapping, newpage, page, mode);
+#ifdef CONFIG_CMA_DEBUG_VERBOSE
+ if (rc) {
+ pr_err("move_to_new_page failed in 'fallback_migrate_page' case with error %d\n", rc);
+ print_cma_page_stats(page);
+ }
+#endif
+ }
if (rc) {
newpage->mapping = NULL;
@@ -703,6 +768,10 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
struct anon_vma *anon_vma = NULL;
if (!trylock_page(page)) {
+#ifdef CONFIG_CMA_DEBUG_VERBOSE
+ pr_err("__unmap_and_move: failed to trylock_page, mode = %d, force = %d\n", mode, force);
+ print_cma_page_stats(page);
+#endif
if (!force || mode == MIGRATE_ASYNC)
goto out;
@@ -719,8 +788,13 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
* avoid the use of lock_page for direct compaction
* altogether.
*/
- if (current->flags & PF_MEMALLOC)
+ if (current->flags & PF_MEMALLOC) {
+#ifdef CONFIG_CMA_DEBUG_VERBOSE
+ pr_err("__unmap_and_move: failed locking due to PF_MEMALLOC, flags = 0x%x\n", current->flags);
+ print_cma_page_stats(page);
+#endif
goto out;
+ }
lock_page(page);
}
@@ -735,6 +809,10 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
* serializes that).
*/
if (PageKsm(page) && !offlining) {
+#ifdef CONFIG_CMA_DEBUG_VERBOSE
+ pr_err("__unmap_and_move: PageKsm in non-offlining mode, mode = %d, force = %d\n", mode, force);
+ print_cma_page_stats(page);
+#endif
rc = -EBUSY;
goto unlock;
}
@@ -742,6 +820,10 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
/* charge against new page */
charge = mem_cgroup_prepare_migration(page, newpage, &mem, GFP_KERNEL);
if (charge == -ENOMEM) {
+#ifdef CONFIG_CMA_DEBUG_VERBOSE
+ pr_err("__unmap_and_move: failed to charge, mode = %d, force = %d\n", mode, force);
+ print_cma_page_stats(page);
+#endif
rc = -ENOMEM;
goto unlock;
}
@@ -755,6 +837,10 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
* the overhead of stalling is too much
*/
if (mode != MIGRATE_SYNC) {
+#ifdef CONFIG_CMA_DEBUG_VERBOSE
+ pr_err("__unmap_and_move: in PageWriteback with ASYNC migration\n");
+ print_cma_page_stats(page);
+#endif
rc = -EBUSY;
goto uncharge;
}
@@ -762,6 +848,7 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
goto uncharge;
wait_on_page_writeback(page);
}
+
/*
* By try_to_unmap(), page->mapcount goes down to 0 here. In this case,
* we cannot notice that anon_vma is freed while we migrates a page.
@@ -795,6 +882,10 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
*/
remap_swapcache = 0;
} else {
+#ifdef CONFIG_CMA_DEBUG_VERBOSE
+ pr_err("__unmap_and_move: in page_anon case, mode = %d, force = %d\n", mode, force);
+ print_cma_page_stats(page);
+#endif
goto uncharge;
}
}
@@ -814,7 +905,12 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
if (!page->mapping) {
VM_BUG_ON(PageAnon(page));
if (page_has_private(page)) {
- try_to_free_buffers(page);
+ if (!try_to_free_buffers(page)) {
+#ifdef CONFIG_CMA_DEBUG_VERBOSE
+ pr_err("__unmap_and_move: in !page_mapping/page_has_private case, mode = %d, force = %d\n", mode, force);
+ print_cma_page_stats(page);
+#endif
+ }
goto uncharge;
}
goto skip_unmap;
@@ -824,8 +920,20 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
try_to_unmap(page, TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
skip_unmap:
- if (!page_mapped(page))
+ if (!page_mapped(page)) {
rc = move_to_new_page(newpage, page, remap_swapcache, mode);
+#ifdef CONFIG_CMA_DEBUG_VERBOSE
+ if (rc) {
+ pr_err("__unmap_and_move: move_to_new_page failed, mode = %d, force = %d, code = %d\n", mode, force, rc);
+ print_cma_page_stats(page);
+ }
+#endif
+ } else if (rc) {
+#ifdef CONFIG_CMA_DEBUG_VERBOSE
+ pr_err("__unmap_and_move: skip_unmap/page_mapped case, mode = %d, force = %d, code = %d\n", mode, force, rc);
+ print_cma_page_stats(page);
+#endif
+ }
if (rc && remap_swapcache)
remove_migration_ptes(page, page);
@@ -855,17 +963,31 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
int *result = NULL;
struct page *newpage = get_new_page(page, private, &result);
- if (!newpage)
+ if (!newpage) {
+#ifdef CONFIG_CMA_DEBUG_VERBOSE
+ pr_err("unmap_and_move: get_new_page failed, mode = %d, force = %d\n", mode, force);
+ print_cma_page_stats(page);
+#endif
return -ENOMEM;
+ }
if (page_count(page) == 1) {
/* page was freed from under us. So we are done. */
goto out;
}
- if (unlikely(PageTransHuge(page)))
- if (unlikely(split_huge_page(page)))
+ if (unlikely(PageTransHuge(page))) {
+#ifdef CONFIG_CMA_DEBUG_VERBOSE
+ pr_err("unmap_and_move: calling split_huge_page\n");
+ print_cma_page_stats(page);
+#endif
+ if (unlikely(split_huge_page(page))) {
+#ifdef CONFIG_CMA_DEBUG_VERBOSE
+ pr_err("unmap_and_move: split_huge_page failed\n");
+#endif
goto out;
+ }
+ }
rc = __unmap_and_move(page, newpage, force, offlining, mode);
out:
@@ -991,7 +1113,7 @@ int migrate_pages(struct list_head *from,
struct page *page;
struct page *page2;
int swapwrite = current->flags & PF_SWAPWRITE;
- int rc;
+ int rc = 0;
if (!swapwrite)
current->flags |= PF_SWAPWRITE;
@@ -1021,6 +1143,11 @@ int migrate_pages(struct list_head *from,
}
}
}
+#ifdef CONFIG_CMA_DEBUG_VERBOSE
+ if (rc != 0) {
+ pr_err("migrate_pages: iterations ended up at %d pass with return code %d\n", pass, rc);
+ }
+#endif
rc = 0;
out:
if (!swapwrite)
@@ -1041,7 +1168,7 @@ int migrate_huge_pages(struct list_head *from,
int pass = 0;
struct page *page;
struct page *page2;
- int rc;
+ int rc = 0;
for (pass = 0; pass < 10 && retry; pass++) {
retry = 0;
@@ -1068,6 +1195,9 @@ int migrate_huge_pages(struct list_head *from,
}
}
}
+ if (rc != 0) {
+ pr_err("migrate_pages: iterations ended up at %d pass with return code %d\n", pass, rc);
+ }
rc = 0;
out:
if (rc)
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index c9f04774f2b..fbf6f75a28e 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -5,8 +5,19 @@
#include <linux/mm.h>
#include <linux/page-isolation.h>
#include <linux/pageblock-flags.h>
+#include <linux/ksm.h>
#include "internal.h"
+#ifdef CONFIG_CMA_DEBUG_VERBOSE
+void print_cma_page_stats(struct page *page)
+{
+ pr_err("page: phys = 0x%x, count = %d, order = %lu, private = %ld\n", page_to_phys(page), page_count(page), page_order(page), page_private(page));
+ pr_err("page: buddy = %d, LRU = %d, KSM = %d, mapping = 0x%p, map count = %d\n", PageBuddy(page), PageLRU(page), PageKsm(page), page->mapping, atomic_read(&page->_mapcount));
+ pr_err("page: index = 0x%lx, anon = %d, writeback = %d, dirty = %d, swap cache = %d\n", page->index, PageAnon(page), PageWriteback(page), PageDirty(page), PageSwapCache(page));
+ pr_err("pageblock: migratetype = 0x%x, flags = 0x%lx\n", get_pageblock_migratetype(page), get_pageblock_flags(page));
+}
+#endif
+
static inline struct page *
__first_valid_page(unsigned long pfn, unsigned long nr_pages)
{
@@ -108,8 +119,12 @@ __test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn)
else
break;
}
- if (pfn < end_pfn)
+ if (pfn < end_pfn) {
+#ifdef CONFIG_CMA_DEBUG_VERBOSE
+ print_cma_page_stats(pfn_to_page(pfn));
+#endif
return 0;
+ }
return 1;
}
diff --git a/mm/shmem.c b/mm/shmem.c
index bcfa97dcc0a..4bc852dccb9 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2692,6 +2692,18 @@ static void destroy_inodecache(void)
kmem_cache_destroy(shmem_inode_cachep);
}
+#ifdef CONFIG_CMA_DEBUG_VERBOSE
+int shmem_migrate_page(struct address_space *mapping,
+ struct page *newpage, struct page *page, enum migrate_mode mode)
+{
+ int rc = migrate_page(mapping, newpage, page, mode);
+ if (rc) {
+ pr_err("shmem_migrate_page: migrate_page failed with error %d\n", rc);
+ }
+ return rc;
+}
+#endif
+
static const struct address_space_operations shmem_aops = {
.writepage = shmem_writepage,
.set_page_dirty = __set_page_dirty_no_writeback,
@@ -2700,7 +2712,11 @@ static const struct address_space_operations shmem_aops = {
.write_begin = shmem_write_begin,
.write_end = shmem_write_end,
#endif
+#ifdef CONFIG_CMA_DEBUG_VERBOSE
+ .migratepage = shmem_migrate_page,
+#else
.migratepage = migrate_page,
+#endif
.error_remove_page = generic_error_remove_page,
};