Hi Muhammad,

Thank you for the patch! Perhaps something to improve:

[auto build test WARNING on next-20221109]
[also build test WARNING on v6.1-rc4]
[cannot apply to shuah-kselftest/next shuah-kselftest/fixes linus/master v6.1-rc4 v6.1-rc3 v6.1-rc2]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]

url:           https://github.com/intel-lab-lkp/linux/commits/Muhammad-Usama-Anjum/Implement-IOCTL-to-get-and-or-the-clear-info-about-PTEs/20221109-182618
patch link:    https://lore.kernel.org/r/20221109102303.851281-3-usama.anjum%40collabora.com
patch subject: [PATCH v6 2/3] fs/proc/task_mmu: Implement IOCTL to get and/or the clear info about PTEs
config: m68k-allyesconfig
compiler: m68k-linux-gcc (GCC) 12.1.0
reproduce (this is a W=1 build):
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # https://github.com/intel-lab-lkp/linux/commit/b329378abd03a741ff7250ec1b60292c893476da
        git remote add linux-review https://github.com/intel-lab-lkp/linux
        git fetch --no-tags linux-review Muhammad-Usama-Anjum/Implement-IOCTL-to-get-and-or-the-clear-info-about-PTEs/20221109-182618
        git checkout b329378abd03a741ff7250ec1b60292c893476da
        # save the config file
        mkdir build_dir && cp config build_dir/.config
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-12.1.0 make.cross W=1 O=build_dir ARCH=m68k SHELL=/bin/bash fs/proc/

If you fix the issue, kindly add following tag where applicable
| Reported-by: kernel test robot <lkp@intel.com>

All warnings (new ones prefixed by >>):

   fs/proc/task_mmu.c: In function 'pagemap_scan_pmd_entry':
   fs/proc/task_mmu.c:1882:62: error: 'HPAGE_SIZE' undeclared (first use in this function); did you mean 'PAGE_SIZE'?
    1882 |                 if ((IS_CLEAR_OP(p) && (end - addr < HPAGE_SIZE))) {
         |                                                      ^~~~~~~~~~
         |                                                      PAGE_SIZE
   fs/proc/task_mmu.c:1882:62: note: each undeclared identifier is reported only once for each function it appears in
   In file included from include/asm-generic/bug.h:5,
                    from arch/m68k/include/asm/bug.h:32,
                    from include/linux/bug.h:5,
                    from include/linux/mmdebug.h:5,
                    from include/linux/mm.h:6,
                    from include/linux/pagewalk.h:5,
                    from fs/proc/task_mmu.c:2:
   fs/proc/task_mmu.c: In function 'do_pagemap_sd_cmd':
>> fs/proc/task_mmu.c:2014:49: warning: cast to pointer from integer of different size [-Wint-to-pointer-cast]
    2014 |             ((arg->vec_len == 0) || (!access_ok((struct page_region *)arg->vec, arg->vec_len))))
         |                                                 ^
   include/linux/compiler.h:77:45: note: in definition of macro 'likely'
      77 | # define likely(x)      __builtin_expect(!!(x), 1)
         |                                             ^
   fs/proc/task_mmu.c:2014:39: note: in expansion of macro 'access_ok'
    2014 |             ((arg->vec_len == 0) || (!access_ok((struct page_region *)arg->vec, arg->vec_len))))
         |                                     ^~~~~~~~~
   fs/proc/task_mmu.c:2079:34: warning: cast to pointer from integer of different size [-Wint-to-pointer-cast]
    2079 |         if (copy_to_user((struct page_region *)arg->vec, p.vec,
         |                          ^
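Both -Wint-to-pointer-cast warnings come from casting arg->vec, a 64-bit UAPI
field, straight to a kernel pointer: on 32-bit m68k the pointer is narrower than
the integer. The usual kernel idiom for this is u64_to_user_ptr() from
linux/kernel.h, which narrows via uintptr_t and yields a void __user *. A
minimal sketch of the pattern, assuming arg->vec is declared __u64 in the UAPI
header as the casts in the listing suggest (illustrative, not the final patch):

	/*
	 * Convert the 64-bit UAPI value once, then use the typed user
	 * pointer everywhere; no -Wint-to-pointer-cast on 32-bit builds.
	 */
	struct page_region __user *vec = u64_to_user_ptr(arg->vec);

	if (IS_GET_OP(arg) &&
	    ((arg->vec_len == 0) || !access_ok(vec, arg->vec_len)))
		return -ENOMEM;

The copy_to_user() at line 2079 would then take the same vec instead of
repeating the cast.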
vim +2014 fs/proc/task_mmu.c

  1856	
  1857	static int pagemap_scan_pmd_entry(pmd_t *pmd, unsigned long addr,
  1858					  unsigned long end, struct mm_walk *walk)
  1859	{
  1860		struct pagemap_scan_private *p = walk->private;
  1861		struct vm_area_struct *vma = walk->vma;
  1862		unsigned int len;
  1863		spinlock_t *ptl;
  1864		int ret = 0;
  1865		pte_t *pte;
  1866		bool dirty_vma = (p->flags & PAGEMAP_NO_REUSED_REGIONS) ?
  1867				 (false) : (vma->vm_flags & VM_SOFTDIRTY);
  1868	
  1869		if ((walk->vma->vm_end < addr) || (p->max_pages && p->found_pages == p->max_pages))
  1870			return 0;
  1871	
  1872		end = min(end, walk->vma->vm_end);
  1873	
  1874		ptl = pmd_trans_huge_lock(pmd, vma);
  1875		if (ptl) {
  1876			if (dirty_vma || check_soft_dirty_pmd(vma, addr, pmd, false)) {
  1877				/*
  1878				 * Break huge page into small pages if operation needs to be performed is
  1879				 * on a portion of the huge page or the return buffer cannot store complete
  1880				 * data.
  1881				 */
> 1882				if ((IS_CLEAR_OP(p) && (end - addr < HPAGE_SIZE))) {
  1883					spin_unlock(ptl);
  1884					split_huge_pmd(vma, pmd, addr);
  1885					goto process_smaller_pages;
  1886				}
  1887	
  1888				if (IS_GET_OP(p)) {
  1889					len = (end - addr)/PAGE_SIZE;
  1890					if (p->max_pages && p->found_pages + len > p->max_pages)
  1891						len = p->max_pages - p->found_pages;
  1892	
  1893					ret = add_to_out(dirty_vma ||
  1894							 check_soft_dirty_pmd(vma, addr, pmd, false),
  1895							 vma->vm_file, pmd_present(*pmd), is_swap_pmd(*pmd),
  1896							 p, addr, len);
  1897				}
  1898				if (!ret && IS_CLEAR_OP(p))
  1899					check_soft_dirty_pmd(vma, addr, pmd, true);
  1900			}
  1901			spin_unlock(ptl);
  1902			return 0;
  1903		}
  1904	
  1905	process_smaller_pages:
  1906		if (pmd_trans_unstable(pmd))
  1907			return 0;
  1908	
  1909		pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
  1910		for (; addr < end && !ret && (!p->max_pages || (p->found_pages < p->max_pages))
  1911		     ; pte++, addr += PAGE_SIZE) {
  1912			if (IS_GET_OP(p))
  1913				ret = add_to_out(dirty_vma || check_soft_dirty(vma, addr, pte, false),
  1914						 vma->vm_file, pte_present(*pte),
  1915						 is_swap_pte(*pte), p, addr, 1);
  1916			if (!ret && IS_CLEAR_OP(p))
  1917				check_soft_dirty(vma, addr, pte, true);
  1918		}
  1919		pte_unmap_unlock(pte - 1, ptl);
  1920		cond_resched();
  1921	
  1922		return 0;
  1923	}
  1924	
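The HPAGE_SIZE error at line 1882 is a build break on architectures without
hugepage support: HPAGE_SIZE is arch-defined and m68k never provides it. Since
this branch only runs when pmd_trans_huge_lock() hands back a lock, i.e. under
THP, one possible shape for a fix (a sketch only, not necessarily what the
final patch should look like) is to compile the branch under
CONFIG_TRANSPARENT_HUGEPAGE and use the THP constant HPAGE_PMD_SIZE from
linux/huge_mm.h:

	#ifdef CONFIG_TRANSPARENT_HUGEPAGE
		ptl = pmd_trans_huge_lock(pmd, vma);
		if (ptl) {
			/*
			 * HPAGE_PMD_SIZE is usable whenever THP is configured,
			 * unlike HPAGE_SIZE, which m68k never defines.
			 */
			if (IS_CLEAR_OP(p) && (end - addr < HPAGE_PMD_SIZE)) {
				spin_unlock(ptl);
				split_huge_pmd(vma, pmd, addr);
				goto process_smaller_pages;
			}
			/* ... rest of the huge-PMD path as in the listing ... */
			spin_unlock(ptl);
			return 0;
		}
	#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

The process_smaller_pages label would need the same guard treatment so that
-Wunused-label stays quiet on non-THP configs.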
  1925	static int pagemap_scan_pte_hole(unsigned long addr, unsigned long end, int depth,
  1926					 struct mm_walk *walk)
  1927	{
  1928		struct pagemap_scan_private *p = walk->private;
  1929		struct vm_area_struct *vma = walk->vma;
  1930		unsigned int len;
  1931		bool sd;
  1932	
  1933		if (vma) {
  1934			/* Individual pages haven't been allocated and written */
  1935			sd = (p->flags & PAGEMAP_NO_REUSED_REGIONS) ? (false) :
  1936			     (vma->vm_flags & VM_SOFTDIRTY);
  1937	
  1938			len = (end - addr)/PAGE_SIZE;
  1939			if (p->max_pages && p->found_pages + len > p->max_pages)
  1940				len = p->max_pages - p->found_pages;
  1941	
  1942			add_to_out(sd, vma->vm_file, false, false, p, addr, len);
  1943		}
  1944	
  1945		return 0;
  1946	}
  1947	
  1948	#ifdef CONFIG_MEM_SOFT_DIRTY
  1949	static int pagemap_scan_pre_vma(unsigned long start, unsigned long end, struct mm_walk *walk)
  1950	{
  1951		struct pagemap_scan_private *p = walk->private;
  1952		struct vm_area_struct *vma = walk->vma;
  1953		unsigned long end_cut = end;
  1954		int ret;
  1955	
  1956		if (!(p->flags & PAGEMAP_NO_REUSED_REGIONS) && IS_CLEAR_OP(p) &&
  1957		    (vma->vm_flags & VM_SOFTDIRTY)) {
  1958			if (vma->vm_start < start) {
  1959				ret = split_vma(vma->vm_mm, vma, start, 1);
  1960				if (ret)
  1961					return ret;
  1962			}
  1963			/* Calculate end_cut because of max_pages */
  1964			if (IS_GET_OP(p) && p->max_pages)
  1965				end_cut = min(start + (p->max_pages - p->found_pages) * PAGE_SIZE, end);
  1966	
  1967			if (vma->vm_end > end_cut) {
  1968				ret = split_vma(vma->vm_mm, vma, end_cut, 0);
  1969				if (ret)
  1970					return ret;
  1971			}
  1972		}
  1973	
  1974		return 0;
  1975	}
  1976	
  1977	static void pagemap_scan_post_vma(struct mm_walk *walk)
  1978	{
  1979		struct pagemap_scan_private *p = walk->private;
  1980		struct vm_area_struct *vma = walk->vma;
  1981	
  1982		if (!(p->flags & PAGEMAP_NO_REUSED_REGIONS) && IS_CLEAR_OP(p) &&
  1983		    (vma->vm_flags & VM_SOFTDIRTY)) {
  1984			vma->vm_flags &= ~VM_SOFTDIRTY;
  1985			vma_set_page_prot(vma);
  1986		}
  1987	}
  1988	#endif /* CONFIG_MEM_SOFT_DIRTY */
  1989	
  1990	static const struct mm_walk_ops pagemap_scan_ops = {
  1991		.test_walk = pagemap_scan_pmd_test_walk,
  1992		.pmd_entry = pagemap_scan_pmd_entry,
  1993		.pte_hole = pagemap_scan_pte_hole,
  1994	
  1995	#ifdef CONFIG_MEM_SOFT_DIRTY
  1996		/* Only for clearing SD bit over VMAs */
  1997		.pre_vma = pagemap_scan_pre_vma,
  1998		.post_vma = pagemap_scan_post_vma,
  1999	#endif /* CONFIG_MEM_SOFT_DIRTY */
  2000	};
  2001	
  2002	static long do_pagemap_sd_cmd(struct mm_struct *mm, struct pagemap_scan_arg *arg)
  2003	{
  2004		struct mmu_notifier_range range;
  2005		unsigned long __user start, end;
  2006		struct pagemap_scan_private p;
  2007		int ret;
  2008	
  2009		start = (unsigned long)untagged_addr(arg->start);
  2010		if ((!IS_ALIGNED(start, PAGE_SIZE)) || (!access_ok((void __user *)start, arg->len)))
  2011			return -EINVAL;
  2012	
  2013		if (IS_GET_OP(arg) &&
> 2014		    ((arg->vec_len == 0) || (!access_ok((struct page_region *)arg->vec, arg->vec_len))))
  2015			return -ENOMEM;
  2016	

-- 
0-DAY CI Kernel Test Service
https://01.org/lkp