diff -up ./drivers/hv/hv_balloon.c.ha00 ./drivers/hv/hv_balloon.c
--- ./drivers/hv/hv_balloon.c.ha00	2020-02-12 18:58:25.000000000 +0900
+++ ./drivers/hv/hv_balloon.c	2020-02-17 09:19:27.047752736 +0900
@@ -730,8 +730,10 @@ static void hv_mem_hot_add(unsigned long
 		dm_device.ha_waiting = !memhp_auto_online;
 
 		nid = memory_add_physaddr_to_nid(PFN_PHYS(start_pfn));
+pr_info("%s: calling add_memory(nid=%d, ((start_pfn=0x%lx) << PAGE_SHIFT)=0x%llx, (HA_CHUNK << PAGE_SHIFT)=%lu)\n", __func__, nid, start_pfn, ((unsigned long long)start_pfn << PAGE_SHIFT), (HA_CHUNK << PAGE_SHIFT));
 		ret = add_memory(nid, PFN_PHYS((start_pfn)),
 				(HA_CHUNK << PAGE_SHIFT));
+pr_info("%s: add_memory() returned %d\n", __func__, ret);
 
 		if (ret) {
 			pr_err("hot_add memory failed error is %d\n", ret);
diff -up ./mm/memory_hotplug.c.ha00 ./mm/memory_hotplug.c
--- ./mm/memory_hotplug.c.ha00	2020-02-12 18:59:43.000000000 +0900
+++ ./mm/memory_hotplug.c	2020-02-17 20:33:07.246405401 +0900
@@ -574,6 +574,7 @@ EXPORT_SYMBOL_GPL(restore_online_page_ca
 
 void generic_online_page(struct page *page, unsigned int order)
 {
+//pr_info("%s: called as (page=0x%px order=%u)\n", __func__, page, order); /*2b9c821d*/
 	kernel_map_pages(page, 1 << order, 1);
 	__free_pages_core(page, order);
 	totalram_pages_add(1UL << order);
@@ -711,15 +712,19 @@ static struct zone *default_kernel_zone_
 {
 	struct pglist_data *pgdat = NODE_DATA(nid);
 	int zid;
+	enum zone_type default_zone = ZONE_NORMAL; /*9faf47bd*/
 
-	for (zid = 0; zid <= ZONE_NORMAL; zid++) {
+#ifdef CONFIG_HIGHMEM /*9faf47bd*/
+	default_zone = ZONE_HIGHMEM; /*9faf47bd*/
+#endif /*9faf47bd*/
+	for (zid = 0; zid <= default_zone; zid++) { /*9faf47bd*/
 		struct zone *zone = &pgdat->node_zones[zid];
 
 		if (zone_intersects(zone, start_pfn, nr_pages))
 			return zone;
 	}
 
-	return &pgdat->node_zones[ZONE_NORMAL];
+	return &pgdat->node_zones[default_zone]; /*9faf47bd*/
 }
 
 static inline struct zone *default_zone_for_pfn(int nid, unsigned long start_pfn,
@@ -774,6 +779,7 @@ int __ref online_pages(unsigned long pfn
 	zone = zone_for_pfn_range(online_type, nid, pfn, nr_pages);
 	move_pfn_range_to_zone(zone, pfn, nr_pages, NULL);
+pr_info("%s: pfn: %lx - %lx (zone: %s)\n", __func__, pfn, pfn + nr_pages, zone->name); /*2b9c821d*/
 
 	arg.start_pfn = pfn;
 	arg.nr_pages = nr_pages;
 	node_states_check_changes_online(nr_pages, zone, &arg);
diff -up ./mm/sparse.c.ha00 ./mm/sparse.c
--- ./mm/sparse.c.ha00	2020-02-17 09:19:27.029752732 +0900
+++ ./mm/sparse.c	2020-02-17 16:41:45.775823324 +0900
@@ -664,22 +664,29 @@ static void free_map_bootmem(struct page
 
 struct page * __meminit populate_section_memmap(unsigned long pfn,
 		unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
 {
-	struct page *page, *ret;
+	struct page * volatile page, *ret;
 	unsigned long memmap_size = sizeof(struct page) * PAGES_PER_SECTION;
 
 	page = alloc_pages(GFP_KERNEL|__GFP_NOWARN, get_order(memmap_size));
-	if (page)
+	if (page) {
 		goto got_map_page;
+	}
+pr_info("%s: alloc_pages() returned 0x%p (should be 0), reverting to vmalloc(memmap_size=%lu)\n", __func__, page, memmap_size);
+BUG_ON(page != 0);
 
 	ret = vmalloc(memmap_size);
+pr_info("%s: vmalloc(%lu) returned 0x%p\n", __func__, memmap_size, ret);
-	if (ret)
+	if (ret) {
 		goto got_map_ptr;
+	}
 
 	return NULL;
 got_map_page:
 	ret = (struct page *)pfn_to_kaddr(page_to_pfn(page));
+pr_info("%s: allocated struct page *page=0x%p\n", __func__, page);
 got_map_ptr:
+pr_info("%s: returning struct page * =0x%p\n", __func__, ret);
 
 	return ret;
 }
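
For reference, the sparse.c hunk instruments the usual two-step memmap allocation: try physically contiguous pages with alloc_pages() first, and fall back to vmalloc() when the high-order allocation fails. Below is a minimal standalone sketch of that pattern; the helper name alloc_memmap_sketch is hypothetical (not a kernel API), and this is an illustration of the idiom, not the kernel's exact code.

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

/*
 * Hypothetical helper sketching the fallback that the patched
 * populate_section_memmap() follows.
 */
static void *alloc_memmap_sketch(unsigned long memmap_size)
{
	struct page *page;

	/*
	 * Preferred path: physically contiguous pages. __GFP_NOWARN keeps
	 * the (expected) high-order allocation failure out of the log.
	 */
	page = alloc_pages(GFP_KERNEL | __GFP_NOWARN, get_order(memmap_size));
	if (page)
		return pfn_to_kaddr(page_to_pfn(page));

	/* Fallback: virtually contiguous memory is acceptable for a memmap. */
	return vmalloc(memmap_size);
}

The pr_info()/BUG_ON() lines in the patch wrap exactly these two steps, so the log shows which path was taken and what each allocator returned.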