author     Andreas Blaesius <skate4life@gmx.de>    2016-06-12 00:10:11 +0200
committer  Simon Shields <keepcalm444@gmail.com>   2016-06-12 21:19:23 +1000
commit     c47282825bbde9f692d61b50dfd9f0f8f51afdf2 (patch)
tree       8fb6c1beca6085c390d2783b751b75246bb4994b /mm
parent     a0e971623856a8ebda21521e32f833e37b8f1462 (diff)
Revert "Add ZRAM_FOR_ANDROID"
Change-Id: I6aff6a484dd94730f2032ceb838e0741ca6fa878
Diffstat (limited to 'mm')
-rw-r--r--  mm/shmem.c     |   9 -
-rw-r--r--  mm/swapfile.c  | 153 -
-rw-r--r--  mm/vmscan.c    |  78 -
3 files changed, 0 insertions(+), 240 deletions(-)
diff --git a/mm/shmem.c b/mm/shmem.c
index a858b67b426..bcfa97dcc0a 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1060,17 +1060,8 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
info = SHMEM_I(inode);
if (info->flags & VM_LOCKED)
goto redirty;
-#ifdef CONFIG_ZRAM_FOR_ANDROID
- /*
- * Modification for compcache
- * shmem_writepage can be reason of kernel panic when using swap.
- * This modification prevent using swap by shmem.
- */
- goto redirty;
-#else
if (!total_swap_pages)
goto redirty;
-#endif
/*
* shmem_backing_dev_info's capabilities prevent regular writeback or
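
The hunk above restores the upstream guard: with CONFIG_ZRAM_FOR_ANDROID set,
the Samsung patch redirtied every shmem page unconditionally (its comment
blames kernel panics when shmem pages reached swap); mainline instead
redirties only when no swap is configured at all, i.e. total_swap_pages == 0.
A rough userspace analogue of that check via sysinfo(2), illustrative only
and not part of this commit:

    #include <stdio.h>
    #include <sys/sysinfo.h>

    /* Probe the condition the restored guard tests: when the kernel's
     * total_swap_pages is zero, shmem_writepage() just redirties the
     * page instead of writing it out to swap. */
    int main(void)
    {
        struct sysinfo si;

        if (sysinfo(&si) != 0) {
            perror("sysinfo");
            return 1;
        }
        if (si.totalswap == 0)
            printf("no swap configured: shmem writeback would redirty\n");
        else
            printf("swap available: %llu bytes\n",
                   (unsigned long long)si.totalswap * si.mem_unit);
        return 0;
    }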
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 3e5a3a71ad7..c8f4338848d 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -2012,159 +2012,6 @@ static int setup_swap_map_and_extents(struct swap_info_struct *p,
return nr_extents;
}
-#ifdef CONFIG_ZRAM_FOR_ANDROID
-int swapon(char *name, int swap_flags)
-{
- struct swap_info_struct *p;
-
- struct file *swap_file = NULL;
- struct address_space *mapping;
- int i;
- int prio;
- int error;
- union swap_header *swap_header;
- int nr_extents;
- sector_t span;
- unsigned long maxpages;
- unsigned char *swap_map = NULL;
- struct page *page = NULL;
- struct inode *inode = NULL;
-
- p = alloc_swap_info();
- if (IS_ERR(p))
- return PTR_ERR(p);
-
- swap_file = filp_open(name, O_RDWR | O_LARGEFILE, 0);
- if (IS_ERR(swap_file)) {
- error = PTR_ERR(swap_file);
- swap_file = NULL;
- printk("zfqin, filp_open failed\n");
- goto bad_swap;
- }
-
- printk("zfqin, filp_open succeeded\n");
- p->swap_file = swap_file;
- mapping = swap_file->f_mapping;
-
- for (i = 0; i < nr_swapfiles; i++) {
- struct swap_info_struct *q = swap_info[i];
-
- if (q == p || !q->swap_file)
- continue;
- if (mapping == q->swap_file->f_mapping) {
- error = -EBUSY;
- goto bad_swap;
- }
- }
-
- inode = mapping->host;
- /* If S_ISREG(inode->i_mode) will do mutex_lock(&inode->i_mutex); */
- error = claim_swapfile(p, inode);
- if (unlikely(error))
- goto bad_swap;
-
- /*
- * Read the swap header.
- */
- if (!mapping->a_ops->readpage) {
- error = -EINVAL;
- goto bad_swap;
- }
- page = read_mapping_page(mapping, 0, swap_file);
- if (IS_ERR(page)) {
- error = PTR_ERR(page);
- goto bad_swap;
- }
- swap_header = kmap(page);
-
- maxpages = read_swap_header(p, swap_header, inode);
- if (unlikely(!maxpages)) {
- error = -EINVAL;
- goto bad_swap;
- }
-
- /* OK, set up the swap map and apply the bad block list */
- swap_map = vzalloc(maxpages);
- if (!swap_map) {
- error = -ENOMEM;
- goto bad_swap;
- }
-
- error = swap_cgroup_swapon(p->type, maxpages);
- if (error)
- goto bad_swap;
-
- nr_extents = setup_swap_map_and_extents(p, swap_header, swap_map,
- maxpages, &span);
- if (unlikely(nr_extents < 0)) {
- error = nr_extents;
- goto bad_swap;
- }
-
- if (p->bdev) {
- if (blk_queue_nonrot(bdev_get_queue(p->bdev))) {
- p->flags |= SWP_SOLIDSTATE;
- p->cluster_next = 1 + (random32() % p->highest_bit);
- }
- if (discard_swap(p) == 0 && (swap_flags & SWAP_FLAG_DISCARD))
- p->flags |= SWP_DISCARDABLE;
- }
-
- mutex_lock(&swapon_mutex);
- prio = -1;
- if (swap_flags & SWAP_FLAG_PREFER)
- prio =
- (swap_flags & SWAP_FLAG_PRIO_MASK) >> SWAP_FLAG_PRIO_SHIFT;
- enable_swap_info(p, prio, swap_map);
-
- printk(KERN_INFO "Adding %uk swap on %s. "
- "Priority:%d extents:%d across:%lluk %s%s\n",
- p->pages << (PAGE_SHIFT - 10), name, p->prio,
- nr_extents, (unsigned long long)span << (PAGE_SHIFT - 10),
- (p->flags & SWP_SOLIDSTATE) ? "SS" : "",
- (p->flags & SWP_DISCARDABLE) ? "D" : "");
-
- mutex_unlock(&swapon_mutex);
- atomic_inc(&proc_poll_event);
- wake_up_interruptible(&proc_poll_wait);
-
- if (S_ISREG(inode->i_mode))
- inode->i_flags |= S_SWAPFILE;
- error = 0;
- goto out;
- bad_swap:
- if (inode && S_ISBLK(inode->i_mode) && p->bdev) {
- set_blocksize(p->bdev, p->old_block_size);
- blkdev_put(p->bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
- }
- destroy_swap_extents(p);
- swap_cgroup_swapoff(p->type);
- spin_lock(&swap_lock);
- p->swap_file = NULL;
- p->flags = 0;
- spin_unlock(&swap_lock);
- vfree(swap_map);
- if (swap_file) {
- if (inode && S_ISREG(inode->i_mode)) {
- mutex_unlock(&inode->i_mutex);
- inode = NULL;
- }
- filp_close(swap_file, NULL);
- }
- out:
- if (page && !IS_ERR(page)) {
- kunmap(page);
- page_cache_release(page);
- }
-
- if (inode && S_ISREG(inode->i_mode))
- mutex_unlock(&inode->i_mutex);
- return error;
-}
-
-EXPORT_SYMBOL(swapon);
-#endif /* CONFIG_ZRAM_FOR_ANDROID */
-
SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
{
struct swap_info_struct *p;
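
The function removed above duplicated most of the swapon(2) path (kept below
as SYSCALL_DEFINE2) and exported it so the zram code could enable a swap
device from inside the kernel; after the revert, swap is enabled only through
the syscall. A minimal userspace caller for comparison; the device path is a
placeholder and the flag macros come from <sys/swap.h>:

    #include <stdio.h>
    #include <sys/swap.h>

    int main(void)
    {
        /* SWAP_FLAG_PREFER requests an explicit priority (here 10),
         * mirroring the prio handling in the removed helper. */
        int flags = SWAP_FLAG_PREFER |
                    ((10 << SWAP_FLAG_PRIO_SHIFT) & SWAP_FLAG_PRIO_MASK);

        /* "/dev/block/zram0" is a hypothetical device node; swapon()
         * needs CAP_SYS_ADMIN and an mkswap-formatted device. */
        if (swapon("/dev/block/zram0", flags) != 0) {
            perror("swapon");
            return 1;
        }
        return 0;
    }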
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 99082fa9559..08f11e206ab 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -770,9 +770,6 @@ static noinline_for_stack void free_page_list(struct list_head *free_pages)
/*
* shrink_page_list() returns the number of reclaimed pages
*/
-#ifndef CONFIG_ZRAM_FOR_ANDROID
-static
-#endif /* CONFIG_ZRAM_FOR_ANDROID */
unsigned long shrink_page_list(struct list_head *page_list,
struct zone *zone,
struct scan_control *sc)
@@ -1279,9 +1276,6 @@ static unsigned long isolate_pages_global(unsigned long nr,
* clear_active_flags() is a helper for shrink_active_list(), clearing
* any active bits from the pages in the list.
*/
-#ifndef CONFIG_ZRAM_FOR_ANDROID
-static
-#endif /* CONFIG_ZRAM_FOR_ANDROID */
unsigned long clear_active_flags(struct list_head *page_list,
unsigned int *count)
{
@@ -1352,40 +1346,6 @@ int isolate_lru_page(struct page *page)
return ret;
}
-#ifdef CONFIG_ZRAM_FOR_ANDROID
-/**
- * isolate_lru_page_compcache - tries to isolate a page for compcache
- * @page: page to isolate from its LRU list
- *
- * Isolates a @page from an LRU list, clears PageLRU,but
- * does not adjusts the vmstat statistic
- * Returns 0 if the page was removed from an LRU list.
- * Returns -EBUSY if the page was not on an LRU list.
- */
-int isolate_lru_page_compcache(struct page *page)
-{
- int ret = -EBUSY;
-
- VM_BUG_ON(!page_count(page));
-
- if (PageLRU(page)) {
- struct zone *zone = page_zone(page);
-
- spin_lock_irq(&zone->lru_lock);
- if (PageLRU(page)) {
- int lru = page_lru(page);
- ret = 0;
- get_page(page);
- ClearPageLRU(page);
- list_del(&page->lru);
- mem_cgroup_del_lru_list(page, lru);
- }
- spin_unlock_irq(&zone->lru_lock);
- }
- return ret;
-}
-#endif
-
/*
* Are there way too many processes in the direct reclaim path already?
*/
@@ -1622,44 +1582,6 @@ shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone,
return nr_reclaimed;
}
-#ifdef CONFIG_ZRAM_FOR_ANDROID
-unsigned long
-zone_id_shrink_pagelist(struct zone *zone, struct list_head *page_list)
-{
- unsigned long nr_reclaimed = 0;
- unsigned long nr_anon;
- unsigned long nr_file;
-
- struct scan_control sc = {
- .gfp_mask = GFP_USER,
- .may_writepage = 1,
- .nr_to_reclaim = SWAP_CLUSTER_MAX,
- .may_unmap = 1,
- .may_swap = 1,
- .swappiness = vm_swappiness,
- .order = 0,
- .mem_cgroup = NULL,
- .nodemask = NULL,
- };
-
- spin_lock_irq(&zone->lru_lock);
-
- update_isolated_counts(zone, &sc, &nr_anon, &nr_file, page_list);
-
- spin_unlock_irq(&zone->lru_lock);
-
- nr_reclaimed = shrink_page_list(page_list, zone, &sc);
-
- __count_zone_vm_events(PGSTEAL, zone, nr_reclaimed);
-
- putback_lru_pages(zone, &sc, nr_anon, nr_file, page_list);
-
- return nr_reclaimed;
-}
-
-EXPORT_SYMBOL(zone_id_shrink_pagelist);
-#endif /* CONFIG_ZRAM_FOR_ANDROID */
-
/*
* This moves pages from the active list to the inactive list.
*
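
The helpers whose #ifndef wrappers are deleted in the first two vmscan.c
hunks were un-static'd for the compcache code this commit removes; note the
revert drops the wrapper (including the static keyword inside it) without
restoring the qualifier, so shrink_page_list() and clear_active_flags()
remain non-static after this change.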