From ab08ebea9b12f8bda931d6107d540ab3c18f9a4e Mon Sep 17 00:00:00 2001 From: Simon Xue Date: Tue, 15 Apr 2025 10:46:04 +0800 Subject: [PATCH] mm: defer init highmem page when CONFIG_ROCKCHIP_THUNDER_BOOT_DEFER_FREE_MEMBLOCK=y Change-Id: I41527559ee66786826da7c3c9763804b91226bcf Signed-off-by: Simon Xue --- mm/internal.h | 5 +++++ mm/memblock.c | 53 ++++++++++++++++++++++++++++++++++++++++++++++++- mm/page_alloc.c | 30 ++++++++++++++++++++++++++++ 3 files changed, 87 insertions(+), 1 deletion(-) diff --git a/mm/internal.h b/mm/internal.h index 2229c9198912..ac8795a89858 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -371,6 +371,11 @@ extern void memblock_free_pages(struct page *page, unsigned long pfn, unsigned int order); #ifdef CONFIG_ROCKCHIP_THUNDER_BOOT_DEFER_FREE_MEMBLOCK extern void __init rk_free_pages_core(struct page *page, unsigned int order); +extern unsigned long __init rk_deferred_init_pages(struct zone *zone, + unsigned long pfn, + unsigned long end_pfn); +extern bool __meminit rk_defer_init_hpages(int nid, unsigned long zone_idx, + unsigned long pfn, unsigned long end_pfn); #endif extern void __free_pages_core(struct page *page, unsigned int order); extern void prep_compound_page(struct page *page, unsigned int order); diff --git a/mm/memblock.c b/mm/memblock.c index c89b37563872..5358ce52ee3c 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -124,7 +124,19 @@ static int __init early_defer_free_block_size(char *p) } early_param("defer_free_block_size", early_defer_free_block_size); -#endif + +/* deferred highpage stuff */ +struct rk_defered_highpage_info { + unsigned long pfn_start; + unsigned long pfn_end; + struct zone *zone; +}; + +#define RK_DHPI_NUM_MAX 3 + +static struct rk_defered_highpage_info rk_dhpi[RK_DHPI_NUM_MAX]; +static int rk_dhpi_num; +#endif /* CONFIG_ROCKCHIP_THUNDER_BOOT_DEFER_FREE_MEMBLOCK */ unsigned long max_low_pfn; unsigned long min_low_pfn; @@ -2106,9 +2118,48 @@ static void __init rk_free_highpages(void) #endif } +bool 
__meminit rk_defer_init_hpages(int nid, unsigned long zone_idx,
+				unsigned long pfn, unsigned long end_pfn)
+{
+	struct pglist_data *pdata = NODE_DATA(nid);
+	struct zone *zone = &pdata->node_zones[zone_idx];
+
+	if (rk_dhpi_num >= RK_DHPI_NUM_MAX) {
+		pr_err("too many deferred page zones\n");
+		return false;
+	}
+
+	if (strstr(zone->name, "HighMem")) {
+		pr_debug("%s, zone : %s, pfn[0x%lx-0x%lx]\n", __func__,
+			 zone->name, pfn, end_pfn);
+		rk_dhpi[rk_dhpi_num].pfn_start = pfn;
+		rk_dhpi[rk_dhpi_num].pfn_end = end_pfn;
+		rk_dhpi[rk_dhpi_num].zone = zone;
+		rk_dhpi_num++;
+
+		return true;
+	}
+	return false;
+}
+
 int __init defer_free_memblock(void *unused)
 {
 	int i;
+	unsigned long nr_pages = 0;
+
+	pr_debug("%s, rk_dhpi_num = %d\n", __func__, rk_dhpi_num);
+
+	for (i = 0; i < rk_dhpi_num; i++) {
+		struct zone *zone = rk_dhpi[i].zone;
+		unsigned long start_pfn = rk_dhpi[i].pfn_start;
+		unsigned long end_pfn = rk_dhpi[i].pfn_end;
+
+		pr_info("%s, zone : %s, pfn[0x%lx-0x%lx]\n", __func__,
+			zone->name, start_pfn, end_pfn);
+		nr_pages += rk_deferred_init_pages(zone, start_pfn, end_pfn);
+	}
+
+	pr_debug("%s, deferred_nr_page = 0x%lx\n", __func__, nr_pages);
 
 	for (i = 0; i < db_count; i++) {
 		pr_debug("%s: start = %ld, end = %ld\n",
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 7ecce8b38753..e463adea6504 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1822,6 +1822,31 @@ void __init rk_free_pages_core(struct page *page, unsigned int order)
 	totalhigh_pages_add(1 << order);
 #endif
 }
+
+unsigned long __init rk_deferred_init_pages(struct zone *zone,
+					    unsigned long pfn,
+					    unsigned long end_pfn)
+{
+	int nid = zone_to_nid(zone);
+	unsigned long nr_pages = 0;
+	int zid = zone_idx(zone);
+	struct page *page = NULL;
+
+	for (; pfn < end_pfn; pfn++) {
+		if (!page || pageblock_aligned(pfn))
+			page = pfn_to_page(pfn);
+		else
+			page++;
+
+		__init_single_page(page, pfn, zid, nid, true);
+		nr_pages++;
+
+		/* Call cond_resched() only once every 8 pages */
+		if ((nr_pages & 
7) == 0)
+			cond_resched();
+	}
+	return nr_pages;
+}
 #endif /* CONFIG_ROCKCHIP_THUNDER_BOOT_DEFER_FREE_MEMBLOCK */
 
 /*
@@ -6830,6 +6855,11 @@ void __meminit memmap_init_range(unsigned long size, int nid, unsigned long zone
 }
 #endif
 
+#ifdef CONFIG_ROCKCHIP_THUNDER_BOOT_DEFER_FREE_MEMBLOCK
+	if (rk_defer_init_hpages(nid, zone, start_pfn, end_pfn))
+		return;
+#endif
+
 #ifdef CONFIG_ROCKCHIP_THUNDER_BOOT
 	/* Zero all page struct in advance */
 	memset(pfn_to_page(start_pfn), 0, sizeof(struct page) * size);