tree:   https://github.com/ammarfaizi2/linux-block stable/linux-stable-rc/queue/4.14
head:   637760492f234c48997e3edea1027fb576097992
commit: 153ce64d56e7654e0499b06beae5da5d4756d0c3 [3/265] mm/khugepaged: fix GUP-fast interaction by sending IPI
config: arm-randconfig-r022-20230101
compiler: arm-linux-gnueabi-gcc (GCC) 7.5.0
reproduce (this is a W=1 build):
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # https://github.com/ammarfaizi2/linux-block/commit/153ce64d56e7654e0499b06beae5da5d4756d0c3
        git remote add ammarfaizi2-block https://github.com/ammarfaizi2/linux-block
        git fetch --no-tags ammarfaizi2-block stable/linux-stable-rc/queue/4.14
        git checkout 153ce64d56e7654e0499b06beae5da5d4756d0c3
        # save the config file
        mkdir build_dir && cp config build_dir/.config
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-7.5.0 make.cross W=1 O=build_dir ARCH=arm olddefconfig
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-7.5.0 make.cross W=1 O=build_dir ARCH=arm SHELL=/bin/bash

If you fix the issue, kindly add the following tag where applicable
| Reported-by: kernel test robot

All errors (new ones prefixed by >>):

   mm/khugepaged.c: In function 'collapse_huge_page':
>> mm/khugepaged.c:1062:2: error: implicit declaration of function 'tlb_remove_table_sync_one'; did you mean 'tlb_remove_page_size'? [-Werror=implicit-function-declaration]
     tlb_remove_table_sync_one();
     ^~~~~~~~~~~~~~~~~~~~~~~~~
     tlb_remove_page_size
   cc1: some warnings being treated as errors
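The error suggests the backport adds the tlb_remove_table_sync_one() call to mm/khugepaged.c without also carrying the declaration and no-op stub that the upstream fix introduces in include/asm-generic/tlb.h. This randconfig (32-bit arm) apparently does not select HAVE_RCU_TABLE_FREE, so the symbol is never declared and the call trips -Werror=implicit-function-declaration. Below is a minimal sketch of the likely missing header hunk, not the verified backport: it assumes the 4.14 guard is CONFIG_HAVE_RCU_TABLE_FREE (mainline later uses CONFIG_MMU_GATHER_RCU_TABLE_FREE) and that the out-of-line definition, which broadcasts the IPI, lives alongside the existing tlb_remove_table() code.

        /* include/asm-generic/tlb.h -- sketch of the missing piece, unverified */
        #ifdef CONFIG_HAVE_RCU_TABLE_FREE
        /*
         * Out-of-line version sends an IPI to all CPUs, serializing against
         * any concurrent GUP-fast walker (which runs with IRQs disabled).
         */
        void tlb_remove_table_sync_one(void);
        #else
        /*
         * Without RCU page-table freeing, the architecture's TLB flush is
         * itself IPI-based and already serializes against GUP-fast, so a
         * no-op is sufficient.
         */
        static inline void tlb_remove_table_sync_one(void) { }
        #endif

With a stub like this in place, the call at mm/khugepaged.c:1062 (shown in context below) compiles on every configuration and only does real work where RCU table freeing actually requires the extra IPI.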
vim +1062 mm/khugepaged.c

   960	
   961	static void collapse_huge_page(struct mm_struct *mm,
   962					   unsigned long address,
   963					   struct page **hpage,
   964					   int node, int referenced)
   965	{
   966		pmd_t *pmd, _pmd;
   967		pte_t *pte;
   968		pgtable_t pgtable;
   969		struct page *new_page;
   970		spinlock_t *pmd_ptl, *pte_ptl;
   971		int isolated = 0, result = 0;
   972		struct mem_cgroup *memcg;
   973		struct vm_area_struct *vma;
   974		unsigned long mmun_start;	/* For mmu_notifiers */
   975		unsigned long mmun_end;		/* For mmu_notifiers */
   976		gfp_t gfp;
   977	
   978		VM_BUG_ON(address & ~HPAGE_PMD_MASK);
   979	
   980		/* Only allocate from the target node */
   981		gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
   982	
   983		/*
   984		 * Before allocating the hugepage, release the mmap_sem read lock.
   985		 * The allocation can take potentially a long time if it involves
   986		 * sync compaction, and we do not need to hold the mmap_sem during
   987		 * that. We will recheck the vma after taking it again in write mode.
   988		 */
   989		up_read(&mm->mmap_sem);
   990		new_page = khugepaged_alloc_page(hpage, gfp, node);
   991		if (!new_page) {
   992			result = SCAN_ALLOC_HUGE_PAGE_FAIL;
   993			goto out_nolock;
   994		}
   995	
   996		/* Do not oom kill for khugepaged charges */
   997		if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp | __GFP_NORETRY,
   998					   &memcg, true))) {
   999			result = SCAN_CGROUP_CHARGE_FAIL;
  1000			goto out_nolock;
  1001		}
  1002	
  1003		down_read(&mm->mmap_sem);
  1004		result = hugepage_vma_revalidate(mm, address, &vma);
  1005		if (result) {
  1006			mem_cgroup_cancel_charge(new_page, memcg, true);
  1007			up_read(&mm->mmap_sem);
  1008			goto out_nolock;
  1009		}
  1010	
  1011		pmd = mm_find_pmd(mm, address);
  1012		if (!pmd) {
  1013			result = SCAN_PMD_NULL;
  1014			mem_cgroup_cancel_charge(new_page, memcg, true);
  1015			up_read(&mm->mmap_sem);
  1016			goto out_nolock;
  1017		}
  1018	
  1019		/*
  1020		 * __collapse_huge_page_swapin always returns with mmap_sem locked.
  1021		 * If it fails, we release mmap_sem and jump out_nolock.
  1022		 * Continuing to collapse causes inconsistency.
  1023		 */
  1024		if (!__collapse_huge_page_swapin(mm, vma, address, pmd, referenced)) {
  1025			mem_cgroup_cancel_charge(new_page, memcg, true);
  1026			up_read(&mm->mmap_sem);
  1027			goto out_nolock;
  1028		}
  1029	
  1030		up_read(&mm->mmap_sem);
  1031		/*
  1032		 * Prevent all access to pagetables with the exception of
  1033		 * gup_fast later handled by the ptep_clear_flush and the VM
  1034		 * handled by the anon_vma lock + PG_lock.
  1035		 */
  1036		down_write(&mm->mmap_sem);
  1037		result = hugepage_vma_revalidate(mm, address, &vma);
  1038		if (result)
  1039			goto out;
  1040		/* check if the pmd is still valid */
  1041		if (mm_find_pmd(mm, address) != pmd)
  1042			goto out;
  1043	
  1044		anon_vma_lock_write(vma->anon_vma);
  1045	
  1046		pte = pte_offset_map(pmd, address);
  1047		pte_ptl = pte_lockptr(mm, pmd);
  1048	
  1049		mmun_start = address;
  1050		mmun_end   = address + HPAGE_PMD_SIZE;
  1051		mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
  1052		pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
  1053		/*
  1054		 * After this gup_fast can't run anymore. This also removes
  1055		 * any huge TLB entry from the CPU so we won't allow
  1056		 * huge and small TLB entries for the same virtual address
  1057		 * to avoid the risk of CPU bugs in that area.
  1058		 */
  1059		_pmd = pmdp_collapse_flush(vma, address, pmd);
  1060		spin_unlock(pmd_ptl);
  1061		mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
> 1062		tlb_remove_table_sync_one();
  1063	
  1064		spin_lock(pte_ptl);
  1065		isolated = __collapse_huge_page_isolate(vma, address, pte);
  1066		spin_unlock(pte_ptl);
  1067	
  1068		if (unlikely(!isolated)) {
  1069			pte_unmap(pte);
  1070			spin_lock(pmd_ptl);
  1071			BUG_ON(!pmd_none(*pmd));
  1072			/*
  1073			 * We can only use set_pmd_at when establishing
  1074			 * hugepmds and never for establishing regular pmds that
  1075			 * points to regular pagetables. Use pmd_populate for that
  1076			 */
  1077			pmd_populate(mm, pmd, pmd_pgtable(_pmd));
  1078			spin_unlock(pmd_ptl);
  1079			anon_vma_unlock_write(vma->anon_vma);
  1080			result = SCAN_FAIL;
  1081			goto out;
  1082		}
  1083	
  1084		/*
  1085		 * All pages are isolated and locked so anon_vma rmap
  1086		 * can't run anymore.
  1087		 */
  1088		anon_vma_unlock_write(vma->anon_vma);
  1089	
  1090		__collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl);
  1091		pte_unmap(pte);
  1092		__SetPageUptodate(new_page);
  1093		pgtable = pmd_pgtable(_pmd);
  1094	
  1095		_pmd = mk_huge_pmd(new_page, vma->vm_page_prot);
  1096		_pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
  1097	
  1098		/*
  1099		 * spin_lock() below is not the equivalent of smp_wmb(), so
  1100		 * this is needed to avoid the copy_huge_page writes to become
  1101		 * visible after the set_pmd_at() write.
  1102		 */
  1103		smp_wmb();
  1104	
  1105		spin_lock(pmd_ptl);
  1106		BUG_ON(!pmd_none(*pmd));
  1107		page_add_new_anon_rmap(new_page, vma, address, true);
  1108		mem_cgroup_commit_charge(new_page, memcg, false, true);
  1109		lru_cache_add_active_or_unevictable(new_page, vma);
  1110		pgtable_trans_huge_deposit(mm, pmd, pgtable);
  1111		set_pmd_at(mm, address, pmd, _pmd);
  1112		update_mmu_cache_pmd(vma, address, pmd);
  1113		spin_unlock(pmd_ptl);
  1114	
  1115		*hpage = NULL;
  1116	
  1117		khugepaged_pages_collapsed++;
  1118		result = SCAN_SUCCEED;
  1119	out_up_write:
  1120		up_write(&mm->mmap_sem);
  1121	out_nolock:
  1122		trace_mm_collapse_huge_page(mm, isolated, result);
  1123		return;
  1124	out:
  1125		mem_cgroup_cancel_charge(new_page, memcg, true);
  1126		goto out_up_write;
  1127	}
  1128	

--
0-DAY CI Kernel Test Service
https://01.org/lkp