From a9cd410a3d296846a8125aa43d97a573a354c472 Mon Sep 17 00:00:00 2001
From: Arun KS
Date: Tue, 5 Mar 2019 15:42:14 -0800
Subject: mm/page_alloc.c: memory hotplug: free pages as higher order

When the freeing of pages is done at a higher order, the time the buddy
allocator spends coalescing pages can be reduced.  With a section size
of 256MB, the hot-add latency of a single section improves from 50-60 ms
to less than 1 ms, i.e. the hot-add latency improves by about 60 times.

External providers of the online callback are modified to align with
this change.

[arunks@codeaurora.org: v11]
  Link: http://lkml.kernel.org/r/1547792588-18032-1-git-send-email-arunks@codeaurora.org
[akpm@linux-foundation.org: remove unused local, per Arun]
[akpm@linux-foundation.org: avoid return of void-returning __free_pages_core(), per Oscar]
[akpm@linux-foundation.org: fix it for mm-convert-totalram_pages-and-totalhigh_pages-variables-to-atomic.patch]
[arunks@codeaurora.org: v8]
  Link: http://lkml.kernel.org/r/1547032395-24582-1-git-send-email-arunks@codeaurora.org
[arunks@codeaurora.org: v9]
  Link: http://lkml.kernel.org/r/1547098543-26452-1-git-send-email-arunks@codeaurora.org
Link: http://lkml.kernel.org/r/1538727006-5727-1-git-send-email-arunks@codeaurora.org
Signed-off-by: Arun KS
Reviewed-by: Andrew Morton
Acked-by: Michal Hocko
Reviewed-by: Oscar Salvador
Reviewed-by: Alexander Duyck
Cc: K. Y. Srinivasan
Cc: Haiyang Zhang
Cc: Stephen Hemminger
Cc: Boris Ostrovsky
Cc: Juergen Gross
Cc: Dan Williams
Cc: Vlastimil Babka
Cc: Joonsoo Kim
Cc: Greg Kroah-Hartman
Cc: Mathieu Malaterre
Cc: "Kirill A. Shutemov"
Cc: Souptick Joarder
Cc: Mel Gorman
Cc: Aaron Lu
Cc: Srivatsa Vaddagiri
Cc: Vinayak Menon
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/memory_hotplug.c | 37 +++++++++++++++++++++++++------------
 1 file changed, 25 insertions(+), 12 deletions(-)

(limited to 'mm/memory_hotplug.c')

diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 1ad28323fb9f..4f07c8ddfdd7 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -47,7 +47,7 @@
  * and restore_online_page_callback() for generic callback restore.
  */
 
-static void generic_online_page(struct page *page);
+static void generic_online_page(struct page *page, unsigned int order);
 
 static online_page_callback_t online_page_callback = generic_online_page;
 static DEFINE_MUTEX(online_page_callback_lock);
 
@@ -656,26 +656,39 @@ void __online_page_free(struct page *page)
 }
 EXPORT_SYMBOL_GPL(__online_page_free);
 
-static void generic_online_page(struct page *page)
+static void generic_online_page(struct page *page, unsigned int order)
 {
-	__online_page_set_limits(page);
-	__online_page_increment_counters(page);
-	__online_page_free(page);
+	__free_pages_core(page, order);
+	totalram_pages_add(1UL << order);
+#ifdef CONFIG_HIGHMEM
+	if (PageHighMem(page))
+		totalhigh_pages_add(1UL << order);
+#endif
+}
+
+static int online_pages_blocks(unsigned long start, unsigned long nr_pages)
+{
+	unsigned long end = start + nr_pages;
+	int order, onlined_pages = 0;
+
+	while (start < end) {
+		order = min(MAX_ORDER - 1,
+			    get_order(PFN_PHYS(end) - PFN_PHYS(start)));
+		(*online_page_callback)(pfn_to_page(start), order);
+
+		onlined_pages += (1UL << order);
+		start += (1UL << order);
+	}
+	return onlined_pages;
 }
 
 static int online_pages_range(unsigned long start_pfn, unsigned long nr_pages,
 			void *arg)
 {
-	unsigned long i;
 	unsigned long onlined_pages = *(unsigned long *)arg;
-	struct page *page;
 
 	if (PageReserved(pfn_to_page(start_pfn)))
-		for (i = 0; i < nr_pages; i++) {
-			page = pfn_to_page(start_pfn + i);
-			(*online_page_callback)(page);
-			onlined_pages++;
-		}
+		onlined_pages += online_pages_blocks(start_pfn, nr_pages);
 
 	online_mem_sections(start_pfn, start_pfn + nr_pages);
-- cgit v1.2.3
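
To illustrate the chunking that online_pages_blocks() performs, below is
a minimal userspace sketch of the same loop.  It assumes 4 KiB pages and
MAX_ORDER of 11 (common defaults, not taken from this patch) and
reimplements get_order() since it runs outside the kernel; for a 256MB
section it prints 64 order-10 frees instead of 65536 order-0 frees.

#include <stdio.h>

#define PAGE_SHIFT 12		/* assumed 4 KiB pages */
#define MAX_ORDER  11		/* assumed kernel default */

/* Userspace stand-in for the kernel's get_order(): the smallest order
 * whose block covers 'size' bytes. */
static int get_order(unsigned long size)
{
	int order = 0;

	size = (size - 1) >> PAGE_SHIFT;
	while (size) {
		order++;
		size >>= 1;
	}
	return order;
}

static int min_int(int a, int b)
{
	return a < b ? a : b;
}

int main(void)
{
	/* one 256MB section: 65536 pages starting at pfn 0x40000 */
	unsigned long start = 0x40000, end = start + 65536, onlined = 0;

	while (start < end) {
		int order = min_int(MAX_ORDER - 1,
				    get_order((end - start) << PAGE_SHIFT));

		printf("free pfn %#lx at order %d (%lu pages)\n",
		       start, order, 1UL << order);
		onlined += 1UL << order;
		start += 1UL << order;
	}
	printf("onlined %lu pages\n", onlined);
	return 0;
}

Freeing 1024 pages per call is what lets the buddy allocator skip the
per-page coalescing work that dominated the old hot-add path.
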
From 98fa15f34cb379864757670b8e8743b21456a20e Mon Sep 17 00:00:00 2001
From: Anshuman Khandual
Date: Tue, 5 Mar 2019 15:42:58 -0800
Subject: mm: replace all open encodings for NUMA_NO_NODE

Patch series "Replace all open encodings for NUMA_NO_NODE", v3.

All the places for replacement were found by running the following grep
patterns on the entire kernel code.  Please let me know if this might
have missed some instances.  This might also have replaced some false
positives.  I will appreciate suggestions, inputs and review.

1. git grep "nid == -1"
2. git grep "node == -1"
3. git grep "nid = -1"
4. git grep "node = -1"

This patch (of 2):

At present there are multiple places where an invalid node number is
encoded as -1.  Even though implicitly understood, it is always better
to have a macro for it.  Replace these open encodings of an invalid
node number with the global macro NUMA_NO_NODE.  This helps remove
NUMA-related assumptions like 'invalid node' from various places,
redirecting them to a common definition.
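
NUMA_NO_NODE itself is defined in include/linux/numa.h as (-1), so the
replacement is purely about readability.  A minimal compilable sketch of
the before/after idiom follows; the struct below is a simplified
stand-in for the kernel's struct memory_notify, not its real definition.

#include <stdio.h>

#define NUMA_NO_NODE (-1)	/* as defined in include/linux/numa.h */

/* Simplified stand-in for the kernel's struct memory_notify. */
struct memory_notify {
	int status_change_nid;
};

int main(void)
{
	struct memory_notify arg;

	/* Before: arg.status_change_nid = -1;  the meaning was implicit. */
	arg.status_change_nid = NUMA_NO_NODE;	/* intent is explicit */

	if (arg.status_change_nid == NUMA_NO_NODE)
		printf("no node state change\n");
	return 0;
}
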
Link: http://lkml.kernel.org/r/1545127933-10711-2-git-send-email-anshuman.khandual@arm.com
Signed-off-by: Anshuman Khandual
Reviewed-by: David Hildenbrand
Acked-by: Jeff Kirsher			[ixgbe]
Acked-by: Jens Axboe			[mtip32xx]
Acked-by: Vinod Koul			[dmaengine.c]
Acked-by: Michael Ellerman		[powerpc]
Acked-by: Doug Ledford			[drivers/infiniband]
Cc: Joseph Qi
Cc: Hans Verkuil
Cc: Stephen Rothwell
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/memory_hotplug.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

(limited to 'mm/memory_hotplug.c')

diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 4f07c8ddfdd7..b3d3c64d15df 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -702,9 +702,9 @@ static void node_states_check_changes_online(unsigned long nr_pages,
 {
 	int nid = zone_to_nid(zone);
 
-	arg->status_change_nid = -1;
-	arg->status_change_nid_normal = -1;
-	arg->status_change_nid_high = -1;
+	arg->status_change_nid = NUMA_NO_NODE;
+	arg->status_change_nid_normal = NUMA_NO_NODE;
+	arg->status_change_nid_high = NUMA_NO_NODE;
 
 	if (!node_state(nid, N_MEMORY))
 		arg->status_change_nid = nid;
@@ -1509,9 +1509,9 @@ static void node_states_check_changes_offline(unsigned long nr_pages,
 	unsigned long present_pages = 0;
 	enum zone_type zt;
 
-	arg->status_change_nid = -1;
-	arg->status_change_nid_normal = -1;
-	arg->status_change_nid_high = -1;
+	arg->status_change_nid = NUMA_NO_NODE;
+	arg->status_change_nid_normal = NUMA_NO_NODE;
+	arg->status_change_nid_high = NUMA_NO_NODE;
 
 	/*
 	 * Check whether node_states[N_NORMAL_MEMORY] will be changed.
-- cgit v1.2.3


From c52e75935f8ded2bd4a75eb08e914bd96802725b Mon Sep 17 00:00:00 2001
From: Wei Yang
Date: Tue, 5 Mar 2019 15:44:01 -0800
Subject: mm: remove extra drain pages on pcp list

In the current implementation, there are two places that isolate a range
of pages: __offline_pages() and alloc_contig_range().  During this
procedure, pages on the pcp lists are drained.  Below is a brief call
flow:

  __offline_pages()/alloc_contig_range()
      start_isolate_page_range()
          set_migratetype_isolate()
              drain_all_pages()
      drain_all_pages()                 <--- A

This snippet shows that the current logic isolates and drains the pcp
list for each pageblock, and then drains the pcp list again for the
whole range.

start_isolate_page_range is responsible for isolating the given pfn
range.  One part of that job is to make sure that pages sitting on the
allocator pcp lists are properly isolated as well.  Otherwise they could
be reused and the range wouldn't be completely isolated until the memory
is freed back.  While there is no strict guarantee here, because pages
might get allocated at any time before drain_all_pages is called, there
doesn't seem to be any strong demand for such a guarantee.

In any case, draining is already done at the isolation level and there
is no need to do it again later by start_isolate_page_range callers
(memory hotplug and the CMA allocator currently).  Therefore remove the
pointless draining in the existing callers to make the code clearer and
functionally correct.
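
As a quick model of the ordering argument above, here is a runnable
sketch (hypothetical stubs, not kernel code) that traces where draining
happens; it shows every pageblock already being drained during
isolation, which is why the range-wide drain at point A adds nothing:

#include <stdio.h>

/* Hypothetical stubs standing in for the kernel functions. */
static void drain_all_pages(void)
{
	printf("      drain_all_pages()\n");
}

static void set_migratetype_isolate(int pageblock)
{
	printf("    isolate pageblock %d\n", pageblock);
	drain_all_pages();	/* pcp lists are flushed per pageblock */
}

static void start_isolate_page_range(int first, int last)
{
	int pb;

	for (pb = first; pb <= last; pb++)
		set_migratetype_isolate(pb);
}

int main(void)
{
	printf("__offline_pages()/alloc_contig_range():\n");
	start_isolate_page_range(0, 2);
	/* Point A: a second drain_all_pages(zone) here was redundant,
	 * since every pageblock was already drained above. */
	return 0;
}
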
[mhocko@suse.com: provide a clearer changelog for the last two paragraphs]
Link: http://lkml.kernel.org/r/20190105233141.2329-1-richard.weiyang@gmail.com
Signed-off-by: Wei Yang
Acked-by: Michal Hocko
Acked-by: David Hildenbrand
Reviewed-by: Oscar Salvador
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/memory_hotplug.c | 1 -
 1 file changed, 1 deletion(-)

(limited to 'mm/memory_hotplug.c')

diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index b3d3c64d15df..eb1a28469b66 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1625,7 +1625,6 @@ static int __ref __offline_pages(unsigned long start_pfn,
 
 		cond_resched();
 		lru_add_drain_all();
-		drain_all_pages(zone);
 
 		pfn = scan_movable_pages(pfn, end_pfn);
 		if (pfn) {
-- cgit v1.2.3


From daf3538ad5a4800507b13bea6f37601da9cc28d5 Mon Sep 17 00:00:00 2001
From: Oscar Salvador
Date: Tue, 5 Mar 2019 15:48:53 -0800
Subject: mm,memory_hotplug: explicitly pass the head to isolate_huge_page

isolate_huge_page() expects that we pass the head of a hugetlb page to
it:

bool isolate_huge_page(...)
{
	...
	VM_BUG_ON_PAGE(!PageHead(page), page);
	...
}

While I really cannot think of any situation where we would end up with
a non-head page in our hands in do_migrate_range(), let us make sure the
code is as sane as possible by explicitly passing the head.  Since we
already have the pointer, it does not take any extra effort.

Link: http://lkml.kernel.org/r/20190208090604.975-1-osalvador@suse.de
Signed-off-by: Oscar Salvador
Reviewed-by: Andrew Morton
Reviewed-by: David Hildenbrand
Acked-by: Michal Hocko
Cc: Anthony Yznaga
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/memory_hotplug.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'mm/memory_hotplug.c')

diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index eb1a28469b66..ebc8c1e75355 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1378,12 +1378,12 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
 
 		if (PageHuge(page)) {
 			struct page *head = compound_head(page);
-			pfn = page_to_pfn(head) + (1<<compound_order(head)) - 1;
 			if (compound_order(head) > PFN_SECTION_SHIFT) {
 				ret = -EBUSY;
 				break;
 			}
-			isolate_huge_page(page, &source);
+			pfn = page_to_pfn(head) + (1<<compound_order(head)) - 1;
+			isolate_huge_page(head, &source);
 			continue;
 		} else if (PageTransHuge(page))
-- cgit v1.2.3
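
The point of the change can be seen with a toy model of compound pages
(purely illustrative; the real struct page and isolate_huge_page() look
nothing like this): tail pages resolve to their head via
compound_head(), and passing the head keeps the PageHead check happy no
matter which sub-page we started from.

#include <stdbool.h>
#include <stdio.h>

/* Toy model of a compound page: tail pages point to their head. */
struct page {
	bool is_head;
	struct page *head;	/* points to self for a head page */
};

static struct page *compound_head(struct page *page)
{
	return page->head;
}

static void isolate_huge_page(struct page *page)
{
	/* models VM_BUG_ON_PAGE(!PageHead(page), page) */
	if (!page->is_head) {
		printf("BUG: tail page passed\n");
		return;
	}
	printf("huge page isolated via its head\n");
}

int main(void)
{
	struct page head = { .is_head = true, .head = &head };
	struct page tail = { .is_head = false, .head = &head };

	/* Always hand over the head, whatever page we started from. */
	isolate_huge_page(compound_head(&tail));
	return 0;
}
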
From: Qian Cai
Date: Tue, 5 Mar 2019 15:49:57 -0800
Subject: mm/hotplug: fix an imbalance with DEBUG_PAGEALLOC

When onlining a memory block with DEBUG_PAGEALLOC, the pages in the
block are unmapped from the kernel as they are freed.  However, those
pages were never mapped to begin with while offlining.  As a result,
onlining triggers the panic below on ppc64le, because the architecture
checks whether pages are mapped before unmapping them.  The imbalance
exists for all arches where such double-unmappings could happen.

Therefore, let the kernel map those pages in generic_online_page()
before they are freed into the page allocator for the first time, where
the page count will be set to one.

On the other hand, it works fine during boot, because at least on IBM
POWER8 the boot path does

early_setup
  early_init_mmu
    hash__early_init_mmu
      htab_initialize [1]
        htab_bolt_mapping [2]

which effectively maps all memblock regions just like
kernel_map_linear_page(), so later mem_init() -> memblock_free_all()
will unmap them just fine without any imbalance.  On other arches
without this imbalance check, pages are still unmapped at most once.

[1] for_each_memblock(memory, reg) {
        base = (unsigned long)__va(reg->base);
        size = reg->size;

        DBG("creating mapping for region: %lx..%lx (prot: %lx)\n",
            base, size, prot);

        BUG_ON(htab_bolt_mapping(base, base + size, __pa(base),
                                 prot, mmu_linear_psize, mmu_kernel_ssize));
    }

[2] linear_map_hash_slots[paddr >> PAGE_SHIFT] = ret | 0x80;

kernel BUG at arch/powerpc/mm/hash_utils_64.c:1815!
Oops: Exception in kernel mode, sig: 5 [#1]
LE SMP NR_CPUS=256 DEBUG_PAGEALLOC NUMA pSeries
CPU: 2 PID: 4298 Comm: bash Not tainted 5.0.0-rc7+ #15
NIP:  c000000000062670 LR: c00000000006265c CTR: 0000000000000000
REGS: c0000005bf8a75b0 TRAP: 0700   Not tainted  (5.0.0-rc7+)
MSR:  800000000282b033  CR: 28422842  XER: 00000000
CFAR: c000000000804f44 IRQMASK: 1
NIP [c000000000062670] __kernel_map_pages+0x2e0/0x4f0
LR [c00000000006265c] __kernel_map_pages+0x2cc/0x4f0
Call Trace:
  __kernel_map_pages+0x2cc/0x4f0
  free_unref_page_prepare+0x2f0/0x4d0
  free_unref_page+0x44/0x90
  __online_page_free+0x84/0x110
  online_pages_range+0xc0/0x150
  walk_system_ram_range+0xc8/0x120
  online_pages+0x280/0x5a0
  memory_subsys_online+0x1b4/0x270
  device_online+0xc0/0xf0
  state_store+0xc0/0x180
  dev_attr_store+0x3c/0x60
  sysfs_kf_write+0x70/0xb0
  kernfs_fop_write+0x10c/0x250
  __vfs_write+0x48/0x240
  vfs_write+0xd8/0x210
  ksys_write+0x70/0x120
  system_call+0x5c/0x70

Link: http://lkml.kernel.org/r/20190301220814.97339-1-cai@lca.pw
Signed-off-by: Qian Cai
Reviewed-by: Andrew Morton
Cc: Benjamin Herrenschmidt
Cc: Paul Mackerras
Cc: Michael Ellerman		[powerpc]
Cc: Michal Hocko
Cc: Souptick Joarder
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/memory_hotplug.c | 1 +
 1 file changed, 1 insertion(+)

(limited to 'mm/memory_hotplug.c')

diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index ebc8c1e75355..6b05576fb4ec 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -658,6 +658,7 @@ EXPORT_SYMBOL_GPL(__online_page_free);
 
 static void generic_online_page(struct page *page, unsigned int order)
 {
+	kernel_map_pages(page, 1 << order, 1);
 	__free_pages_core(page, order);
 	totalram_pages_add(1UL << order);
 #ifdef CONFIG_HIGHMEM
-- cgit v1.2.3
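
The invariant being restored can be modelled in a few lines (a
hypothetical toy with a simplified signature; the real
kernel_map_pages() takes a struct page pointer and a page count): a
page's linear mapping may only be torn down if it is currently up, so a
hot-added page must be mapped once before its first trip through the
free path.

#include <stdio.h>
#include <stdlib.h>

static int mapped;	/* 1 = page present in the linear mapping */

/* Toy stand-in for kernel_map_pages(page, 1 << order, enable). */
static void kernel_map_pages(int enable)
{
	if (!enable && !mapped) {
		/* models the ppc64le BUG_ON this patch avoids */
		fprintf(stderr, "BUG: unmapping an unmapped page\n");
		exit(1);
	}
	mapped = enable;
}

/* The free path unmaps the page under DEBUG_PAGEALLOC. */
static void free_pages_core(void)
{
	kernel_map_pages(0);
}

int main(void)
{
	/* A hot-added page starts out unmapped.  Without the fix,
	 * calling free_pages_core() here would trip the check. */
	kernel_map_pages(1);	/* what generic_online_page() now does */
	free_pages_core();
	printf("map/unmap balanced, no panic\n");
	return 0;
}
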