* [PATCH] support tmpfs hugepage PMD is not split when COW
From: Chen Haixiang @ 2024-01-10 9:20 UTC (permalink / raw)
To: linux-mm, akpm, hughd
Cc: louhongxiang, wangbin224, liuyuntao10, chenhaixiang3
Transparent huge pages in tmpfs can improve TLB efficiency by reducing
TLB misses. However, a copy-on-write (COW) fault on a private mapping
of such a huge page splits the huge PMD. In some scenarios, preventing
this split is desirable. Introduce a shmem_huge_fault handler that
performs the COW at PMD granularity instead of splitting, along with a
tmpfs mount parameter to enable or disable this behavior.
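
For illustration, the intended scenario is a PMD-mapped tmpfs file that
is mapped MAP_PRIVATE and then written. The sketch below is not part of
the patch: the mount point and file name are made up, and it assumes a
tmpfs mounted with huge=always plus the proposed no_split=1 option.

/*
 * Illustrative sketch only: trigger a COW write fault on a PMD-mapped
 * tmpfs file.  Assumes (hypothetically) something like:
 *   mount -t tmpfs -o huge=always,no_split=1 tmpfs /mnt/hugetmp
 */
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

#define MAP_SIZE (2UL << 20)	/* one PMD-sized (2MB) region on x86_64 */

int main(void)
{
	int fd = open("/mnt/hugetmp/testfile", O_RDWR | O_CREAT, 0600);
	unsigned long off;
	char *p;

	if (fd < 0 || ftruncate(fd, MAP_SIZE) < 0)
		return 1;

	p = mmap(NULL, MAP_SIZE, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
	if (p == MAP_FAILED)
		return 1;

	/* Read faults map the shmem huge folio read-only into the mapping. */
	for (off = 0; off < MAP_SIZE; off += 4096)
		(void)*(volatile char *)(p + off);

	/*
	 * First write: a COW fault on the huge PMD.  Without this patch
	 * the PMD is split; with no_split=1 the copy stays PMD-sized.
	 */
	p[0] = 1;

	munmap(p, MAP_SIZE);
	close(fd);
	return 0;
}
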
Signed-off-by: Chen Haixiang <chenhaixiang3@huawei.com>
---
include/linux/mm.h | 1 +
include/linux/shmem_fs.h | 1 +
mm/memory.c | 7 ++++
mm/shmem.c | 85 ++++++++++++++++++++++++++++++++++++++++
4 files changed, 94 insertions(+)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index da5219b48d52..eb44574965d6 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -573,6 +573,7 @@ struct vm_operations_struct {
unsigned long end, unsigned long newflags);
vm_fault_t (*fault)(struct vm_fault *vmf);
vm_fault_t (*huge_fault)(struct vm_fault *vmf, unsigned int order);
+ vm_fault_t (*shmem_huge_fault)(struct vm_fault *vmf, pmd_t orig_pmd);
vm_fault_t (*map_pages)(struct vm_fault *vmf,
pgoff_t start_pgoff, pgoff_t end_pgoff);
unsigned long (*pagesize)(struct vm_area_struct * area);
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index 2caa6b86106a..4484f2f33afe 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -73,6 +73,7 @@ struct shmem_sb_info {
struct list_head shrinklist; /* List of shinkable inodes */
unsigned long shrinklist_len; /* Length of shrinklist */
struct shmem_quota_limits qlimits; /* Default quota limits */
+ unsigned int no_split; /* Do not split ShmemPmdMapped in tmpfs */
};
static inline struct shmem_inode_info *SHMEM_I(struct inode *inode)
diff --git a/mm/memory.c b/mm/memory.c
index 5c757fba8858..7d27a6b5e69f 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4942,6 +4942,13 @@ static inline vm_fault_t wp_huge_pmd(struct vm_fault *vmf)
}
}
+ if (vmf->vma->vm_ops->shmem_huge_fault) {
+ vm_fault_t ret = vmf->vma->vm_ops->shmem_huge_fault(vmf, vmf->orig_pmd);
+
+ if (!(ret & VM_FAULT_FALLBACK))
+ return ret;
+ }
+
split:
/* COW or write-notify handled on pte level: split pmd. */
__split_huge_pmd(vma, vmf->pmd, vmf->address, false, NULL);
diff --git a/mm/shmem.c b/mm/shmem.c
index 0d1ce70bce38..8211211f7405 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -118,6 +118,7 @@ struct shmem_options {
umode_t mode;
bool full_inums;
int huge;
+ unsigned int no_split;
int seen;
bool noswap;
unsigned short quota_types;
@@ -128,6 +129,7 @@ struct shmem_options {
#define SHMEM_SEEN_INUMS 8
#define SHMEM_SEEN_NOSWAP 16
#define SHMEM_SEEN_QUOTA 32
+#define SHMEM_SEEN_NO_SPLIT 64
};
#ifdef CONFIG_TMPFS
@@ -2238,6 +2240,79 @@ static vm_fault_t shmem_fault(struct vm_fault *vmf)
return ret;
}
+static vm_fault_t shmem_huge_fault(struct vm_fault *vmf, pmd_t orig_pmd)
+{
+ vm_fault_t ret = VM_FAULT_FALLBACK;
+ unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
+ struct folio *old_folio, *new_folio;
+ pmd_t entry;
+ int gfp_flags = GFP_HIGHUSER_MOVABLE | __GFP_COMP;
+ struct vm_area_struct *vma = vmf->vma;
+ struct shmem_sb_info *sbinfo = NULL;
+ struct inode *inode = file_inode(vma->vm_file);
+ struct shmem_inode_info *info = SHMEM_I(inode);
+
+ sbinfo = SHMEM_SB(info->vfs_inode.i_sb);
+
+ if (sbinfo->no_split == 0)
+ return VM_FAULT_FALLBACK;
+
+ /* ShmemPmdMapped in tmpfs will not split huge pmd */
+ if (!(vmf->flags & FAULT_FLAG_WRITE)
+ || (vma->vm_flags & VM_SHARED))
+ return VM_FAULT_FALLBACK;
+
+ new_folio = vma_alloc_folio(gfp_flags, HPAGE_PMD_ORDER,
+ vmf->vma, haddr, true);
+ if (!new_folio)
+ ret = VM_FAULT_FALLBACK;
+
+ vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
+ if (pmd_none(*vmf->pmd)) {
+ ret = VM_FAULT_FALLBACK;
+ goto out;
+ }
+ if (!pmd_same(*vmf->pmd, orig_pmd)) {
+ ret = 0;
+ goto out;
+ }
+
+ if (!new_folio) {
+ count_vm_event(THP_FAULT_FALLBACK);
+ ret = VM_FAULT_FALLBACK;
+ goto out;
+ }
+ old_folio = page_folio(pmd_page(*vmf->pmd));
+ page_remove_rmap(&old_folio->page, vma, true);
+ pmdp_huge_clear_flush(vma, haddr, vmf->pmd);
+
+ __folio_set_locked(new_folio);
+ __folio_set_swapbacked(new_folio);
+ __folio_mark_uptodate(new_folio);
+
+ flush_icache_pages(vma, &new_folio->page, HPAGE_PMD_NR);
+ entry = mk_huge_pmd(&new_folio->page, vma->vm_page_prot);
+ entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
+
+ page_add_file_rmap(&new_folio->page, vma, true);
+ set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
+ update_mmu_cache_pmd(vma, haddr, vmf->pmd);
+ count_vm_event(THP_FILE_MAPPED);
+
+ folio_unlock(new_folio);
+ spin_unlock(vmf->ptl);
+ copy_user_large_folio(new_folio, old_folio, haddr, vma);
+ folio_put(old_folio);
+ ret = 0;
+ return ret;
+
+out:
+ if (new_folio)
+ folio_put(new_folio);
+ spin_unlock(vmf->ptl);
+ return ret;
+}
+
unsigned long shmem_get_unmapped_area(struct file *file,
unsigned long uaddr, unsigned long len,
unsigned long pgoff, unsigned long flags)
@@ -3869,6 +3944,7 @@ enum shmem_param {
Opt_usrquota_inode_hardlimit,
Opt_grpquota_block_hardlimit,
Opt_grpquota_inode_hardlimit,
+ Opt_no_split,
};
static const struct constant_table shmem_param_enums_huge[] = {
@@ -3900,6 +3976,7 @@ const struct fs_parameter_spec shmem_fs_parameters[] = {
fsparam_string("grpquota_block_hardlimit", Opt_grpquota_block_hardlimit),
fsparam_string("grpquota_inode_hardlimit", Opt_grpquota_inode_hardlimit),
#endif
+ fsparam_u32 ("no_split", Opt_no_split),
{}
};
@@ -4065,6 +4142,10 @@ static int shmem_parse_one(struct fs_context *fc, struct fs_parameter *param)
"Group quota inode hardlimit too large.");
ctx->qlimits.grpquota_ihardlimit = size;
break;
+ case Opt_no_split:
+ ctx->no_split = result.uint_32;
+ ctx->seen |= SHMEM_SEEN_NO_SPLIT;
+ break;
}
return 0;
@@ -4261,6 +4342,8 @@ static int shmem_show_options(struct seq_file *seq, struct dentry *root)
/* Rightly or wrongly, show huge mount option unmasked by shmem_huge */
if (sbinfo->huge)
seq_printf(seq, ",huge=%s", shmem_format_huge(sbinfo->huge));
+ if (sbinfo->huge && sbinfo->no_split)
+ seq_puts(seq, ",no_split");
#endif
mpol = shmem_get_sbmpol(sbinfo);
shmem_show_mpol(seq, mpol);
@@ -4315,6 +4398,7 @@ static int shmem_fill_super(struct super_block *sb, struct fs_context *fc)
if (!(ctx->seen & SHMEM_SEEN_INUMS))
ctx->full_inums = IS_ENABLED(CONFIG_TMPFS_INODE64);
sbinfo->noswap = ctx->noswap;
+ sbinfo->no_split = ctx->no_split;
} else {
sb->s_flags |= SB_NOUSER;
}
@@ -4568,6 +4652,7 @@ static const struct super_operations shmem_ops = {
static const struct vm_operations_struct shmem_vm_ops = {
.fault = shmem_fault,
.map_pages = filemap_map_pages,
+ .shmem_huge_fault = shmem_huge_fault,
#ifdef CONFIG_NUMA
.set_policy = shmem_set_policy,
.get_policy = shmem_get_policy,
--
2.33.0
* Re: [PATCH] support tmpfs hugepage PMD is not split when COW
From: David Hildenbrand @ 2024-01-10 12:00 UTC (permalink / raw)
To: Chen Haixiang, linux-mm, akpm, hughd
Cc: louhongxiang, wangbin224, liuyuntao10
On 10.01.24 10:20, Chen Haixiang wrote:
> Transparent huge pages in tmpfs can improve TLB efficiency by reducing
> TLB misses. However, a copy-on-write (COW) fault on a private mapping
> of such a huge page splits the huge PMD. In some scenarios, preventing
> this split is desirable. Introduce a shmem_huge_fault handler that
> performs the COW at PMD granularity instead of splitting, along with a
> tmpfs mount parameter to enable or disable this behavior.
>
I'm confused, can you describe the problem a bit better, because ...
> Signed-off-by: Chen Haixiang <chenhaixiang3@huawei.com>
> ---
[...]
>
> +static vm_fault_t shmem_huge_fault(struct vm_fault *vmf, pmd_t orig_pmd)
> +{
> + vm_fault_t ret = VM_FAULT_FALLBACK;
> + unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
> + struct folio *old_folio, *new_folio;
> + pmd_t entry;
> + int gfp_flags = GFP_HIGHUSER_MOVABLE | __GFP_COMP;
> + struct vm_area_struct *vma = vmf->vma;
> + struct shmem_sb_info *sbinfo = NULL;
> + struct inode *inode = file_inode(vma->vm_file);
> + struct shmem_inode_info *info = SHMEM_I(inode);
> +
> + sbinfo = SHMEM_SB(info->vfs_inode.i_sb);
> +
> + if (sbinfo->no_split == 0)
> + return VM_FAULT_FALLBACK;
> +
> + /* ShmemPmdMapped in tmpfs will not split huge pmd */
> + if (!(vmf->flags & FAULT_FLAG_WRITE)
> + || (vma->vm_flags & VM_SHARED))
> + return VM_FAULT_FALLBACK;
We do have a private (COW) mapping at this point, and
> +
> + new_folio = vma_alloc_folio(gfp_flags, HPAGE_PMD_ORDER,
> + vmf->vma, haddr, true);
> + if (!new_folio)
> + ret = VM_FAULT_FALLBACK;
> +
> + vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
> + if (pmd_none(*vmf->pmd)) {
> + ret = VM_FAULT_FALLBACK;
> + goto out;
> + }
> + if (!pmd_same(*vmf->pmd, orig_pmd)) {
> + ret = 0;
> + goto out;
> + }
> +
> + if (!new_folio) {
> + count_vm_event(THP_FAULT_FALLBACK);
> + ret = VM_FAULT_FALLBACK;
> + goto out;
> + }
> + old_folio = page_folio(pmd_page(*vmf->pmd));
> + page_remove_rmap(&old_folio->page, vma, true);
> + pmdp_huge_clear_flush(vma, haddr, vmf->pmd);
> +
> + __folio_set_locked(new_folio);
> + __folio_set_swapbacked(new_folio);
> + __folio_mark_uptodate(new_folio);
We allocate a fresh folio here and
> +
> + flush_icache_pages(vma, &new_folio->page, HPAGE_PMD_NR);
> + entry = mk_huge_pmd(&new_folio->page, vma->vm_page_prot);
> + entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
> +
> + page_add_file_rmap(&new_folio->page, vma, true);
it is not an anonymous one?
... but you are making it writable? In a private mapping?
> + set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
> + update_mmu_cache_pmd(vma, haddr, vmf->pmd);
> + count_vm_event(THP_FILE_MAPPED);
And still account it as a file folio? But never add it to any pagecache
structure?
I'm probably missing something, or something is completely wrong here.
--
Cheers,
David / dhildenb
* Re: [PATCH] support tmpfs hugepage PMD is not split when COW
From: Matthew Wilcox @ 2024-01-10 12:44 UTC (permalink / raw)
To: Chen Haixiang
Cc: linux-mm, akpm, hughd, louhongxiang, wangbin224, liuyuntao10
On Wed, Jan 10, 2024 at 05:20:28PM +0800, Chen Haixiang wrote:
> +++ b/include/linux/mm.h
> @@ -573,6 +573,7 @@ struct vm_operations_struct {
> unsigned long end, unsigned long newflags);
> vm_fault_t (*fault)(struct vm_fault *vmf);
> vm_fault_t (*huge_fault)(struct vm_fault *vmf, unsigned int order);
> + vm_fault_t (*shmem_huge_fault)(struct vm_fault *vmf, pmd_t orig_pmd);
No new operation for this.
> +++ b/mm/memory.c
> @@ -4942,6 +4942,13 @@ static inline vm_fault_t wp_huge_pmd(struct vm_fault *vmf)
> }
> }
>
> + if (vmf->vma->vm_ops->shmem_huge_fault) {
> + vm_fault_t ret = vmf->vma->vm_ops->shmem_huge_fault(vmf, vmf->orig_pmd);
> +
> + if (!(ret & VM_FAULT_FALLBACK))
> + return ret;
> + }
Why would you do this when ->huge_fault is called in the stanza above?
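(For reference, a rough paraphrase of that stanza as I remember it, not a
verbatim quote of mm/memory.c; details vary between kernel versions:

	if (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) {
		if (vma->vm_ops->huge_fault) {
			vm_fault_t ret = vma->vm_ops->huge_fault(vmf, PMD_ORDER);

			if (!(ret & VM_FAULT_FALLBACK))
				return ret;
		}
	}

i.e. there is already a ->huge_fault dispatch right before the split.)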
* Re: [PATCH] support tmpfs hugepage PMD is not split when COW
From: kernel test robot @ 2024-01-11 0:03 UTC (permalink / raw)
To: Chen Haixiang, linux-mm, akpm, hughd
Cc: llvm, oe-kbuild-all, louhongxiang, wangbin224, liuyuntao10,
chenhaixiang3
Hi Chen,
kernel test robot noticed the following build errors:
[auto build test ERROR on akpm-mm/mm-everything]
[also build test ERROR on linus/master v6.7 next-20240110]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]
url: https://github.com/intel-lab-lkp/linux/commits/Chen-Haixiang/support-tmpfs-hugepage-PMD-is-not-split-when-COW/20240110-172314
base: https://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm.git mm-everything
patch link: https://lore.kernel.org/r/20240110092028.1777-1-chenhaixiang3%40huawei.com
patch subject: [PATCH] support tmpfs hugepage PMD is not split when COW
config: arm-mmp2_defconfig (https://download.01.org/0day-ci/archive/20240111/202401110739.T5OMND7z-lkp@intel.com/config)
compiler: clang version 15.0.7 (https://github.com/llvm/llvm-project.git 8dfdcc7b7bf66834a761bd8de445840ef68e4d1a)
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20240111/202401110739.T5OMND7z-lkp@intel.com/reproduce)
If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202401110739.T5OMND7z-lkp@intel.com/
All errors (new ones prefixed by >>):
mm/shmem.c:2278:18: error: use of undeclared identifier 'THP_FAULT_FALLBACK'; did you mean 'VM_FAULT_FALLBACK'?
count_vm_event(THP_FAULT_FALLBACK);
^~~~~~~~~~~~~~~~~~
VM_FAULT_FALLBACK
include/linux/mm_types.h:1219:2: note: 'VM_FAULT_FALLBACK' declared here
VM_FAULT_FALLBACK = (__force vm_fault_t)0x000800,
^
mm/shmem.c:2278:18: warning: implicit conversion from enumeration type 'enum vm_fault_reason' to different enumeration type 'enum vm_event_item' [-Wenum-conversion]
count_vm_event(THP_FAULT_FALLBACK);
~~~~~~~~~~~~~~ ^~~~~~~~~~~~~~~~~~
>> mm/shmem.c:2283:2: error: call to undeclared function 'page_remove_rmap'; ISO C99 and later do not support implicit function declarations [-Werror,-Wimplicit-function-declaration]
page_remove_rmap(&old_folio->page, vma, true);
^
mm/shmem.c:2283:2: note: did you mean 'hugetlb_remove_rmap'?
include/linux/rmap.h:311:20: note: 'hugetlb_remove_rmap' declared here
static inline void hugetlb_remove_rmap(struct folio *folio)
^
>> mm/shmem.c:2291:10: error: call to undeclared function 'mk_huge_pmd'; ISO C99 and later do not support implicit function declarations [-Werror,-Wimplicit-function-declaration]
entry = mk_huge_pmd(&new_folio->page, vma->vm_page_prot);
^
>> mm/shmem.c:2292:28: error: call to undeclared function 'pmd_mkdirty'; ISO C99 and later do not support implicit function declarations [-Werror,-Wimplicit-function-declaration]
entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
^
>> mm/shmem.c:2294:2: error: call to undeclared function 'page_add_file_rmap'; ISO C99 and later do not support implicit function declarations [-Werror,-Wimplicit-function-declaration]
page_add_file_rmap(&new_folio->page, vma, true);
^
mm/shmem.c:2294:2: note: did you mean 'hugetlb_add_file_rmap'?
include/linux/rmap.h:303:20: note: 'hugetlb_add_file_rmap' declared here
static inline void hugetlb_add_file_rmap(struct folio *folio)
^
>> mm/shmem.c:2295:2: error: call to undeclared function 'set_pmd_at'; ISO C99 and later do not support implicit function declarations [-Werror,-Wimplicit-function-declaration]
set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
^
>> mm/shmem.c:2301:2: error: call to undeclared function 'copy_user_large_folio'; ISO C99 and later do not support implicit function declarations [-Werror,-Wimplicit-function-declaration]
copy_user_large_folio(new_folio, old_folio, haddr, vma);
^
1 warning and 7 errors generated.
vim +/page_remove_rmap +2283 mm/shmem.c
2239
2240 static vm_fault_t shmem_huge_fault(struct vm_fault *vmf, pmd_t orig_pmd)
2241 {
2242 vm_fault_t ret = VM_FAULT_FALLBACK;
2243 unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
2244 struct folio *old_folio, *new_folio;
2245 pmd_t entry;
2246 int gfp_flags = GFP_HIGHUSER_MOVABLE | __GFP_COMP;
2247 struct vm_area_struct *vma = vmf->vma;
2248 struct shmem_sb_info *sbinfo = NULL;
2249 struct inode *inode = file_inode(vma->vm_file);
2250 struct shmem_inode_info *info = SHMEM_I(inode);
2251
2252 sbinfo = SHMEM_SB(info->vfs_inode.i_sb);
2253
2254 if (sbinfo->no_split == 0)
2255 return VM_FAULT_FALLBACK;
2256
2257 /* ShmemPmdMapped in tmpfs will not split huge pmd */
2258 if (!(vmf->flags & FAULT_FLAG_WRITE)
2259 || (vma->vm_flags & VM_SHARED))
2260 return VM_FAULT_FALLBACK;
2261
2262 new_folio = vma_alloc_folio(gfp_flags, HPAGE_PMD_ORDER,
2263 vmf->vma, haddr, true);
2264 if (!new_folio)
2265 ret = VM_FAULT_FALLBACK;
2266
2267 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
2268 if (pmd_none(*vmf->pmd)) {
2269 ret = VM_FAULT_FALLBACK;
2270 goto out;
2271 }
2272 if (!pmd_same(*vmf->pmd, orig_pmd)) {
2273 ret = 0;
2274 goto out;
2275 }
2276
2277 if (!new_folio) {
> 2278 count_vm_event(THP_FAULT_FALLBACK);
2279 ret = VM_FAULT_FALLBACK;
2280 goto out;
2281 }
2282 old_folio = page_folio(pmd_page(*vmf->pmd));
> 2283 page_remove_rmap(&old_folio->page, vma, true);
2284 pmdp_huge_clear_flush(vma, haddr, vmf->pmd);
2285
2286 __folio_set_locked(new_folio);
2287 __folio_set_swapbacked(new_folio);
2288 __folio_mark_uptodate(new_folio);
2289
2290 flush_icache_pages(vma, &new_folio->page, HPAGE_PMD_NR);
> 2291 entry = mk_huge_pmd(&new_folio->page, vma->vm_page_prot);
> 2292 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
2293
> 2294 page_add_file_rmap(&new_folio->page, vma, true);
> 2295 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
2296 update_mmu_cache_pmd(vma, haddr, vmf->pmd);
2297 count_vm_event(THP_FILE_MAPPED);
2298
2299 folio_unlock(new_folio);
2300 spin_unlock(vmf->ptl);
> 2301 copy_user_large_folio(new_folio, old_folio, haddr, vma);
2302 folio_put(old_folio);
2303 ret = 0;
2304 return ret;
2305
2306 out:
2307 if (new_folio)
2308 folio_put(new_folio);
2309 spin_unlock(vmf->ptl);
2310 return ret;
2311 }
2312
--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki
* Re: [PATCH] support tmpfs hugepage PMD is not split when COW
From: kernel test robot @ 2024-01-11 2:10 UTC (permalink / raw)
To: Chen Haixiang, linux-mm, akpm, hughd
Cc: llvm, oe-kbuild-all, louhongxiang, wangbin224, liuyuntao10,
chenhaixiang3
Hi Chen,
kernel test robot noticed the following build errors:
[auto build test ERROR on akpm-mm/mm-everything]
[also build test ERROR on linus/master v6.7 next-20240110]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]
url: https://github.com/intel-lab-lkp/linux/commits/Chen-Haixiang/support-tmpfs-hugepage-PMD-is-not-split-when-COW/20240110-172314
base: https://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm.git mm-everything
patch link: https://lore.kernel.org/r/20240110092028.1777-1-chenhaixiang3%40huawei.com
patch subject: [PATCH] support tmpfs hugepage PMD is not split when COW
config: arm64-randconfig-002-20240110 (https://download.01.org/0day-ci/archive/20240111/202401110924.z8NSGZ0Z-lkp@intel.com/config)
compiler: clang version 18.0.0git (https://github.com/llvm/llvm-project 7388b7422f9307dd5ae3fe3876a676d83d702daf)
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20240111/202401110924.z8NSGZ0Z-lkp@intel.com/reproduce)
If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202401110924.z8NSGZ0Z-lkp@intel.com/
All errors (new ones prefixed by >>):
mm/shmem.c:2278:18: error: use of undeclared identifier 'THP_FAULT_FALLBACK'; did you mean 'VM_FAULT_FALLBACK'?
2278 | count_vm_event(THP_FAULT_FALLBACK);
| ^~~~~~~~~~~~~~~~~~
| VM_FAULT_FALLBACK
include/linux/mm_types.h:1219:2: note: 'VM_FAULT_FALLBACK' declared here
1219 | VM_FAULT_FALLBACK = (__force vm_fault_t)0x000800,
| ^
mm/shmem.c:2278:18: warning: implicit conversion from enumeration type 'enum vm_fault_reason' to different enumeration type 'enum vm_event_item' [-Wenum-conversion]
2278 | count_vm_event(THP_FAULT_FALLBACK);
| ~~~~~~~~~~~~~~ ^~~~~~~~~~~~~~~~~~
mm/shmem.c:2283:2: error: call to undeclared function 'page_remove_rmap'; ISO C99 and later do not support implicit function declarations [-Wimplicit-function-declaration]
2283 | page_remove_rmap(&old_folio->page, vma, true);
| ^
mm/shmem.c:2283:2: note: did you mean 'hugetlb_remove_rmap'?
include/linux/rmap.h:311:20: note: 'hugetlb_remove_rmap' declared here
311 | static inline void hugetlb_remove_rmap(struct folio *folio)
| ^
mm/shmem.c:2291:10: error: call to undeclared function 'mk_huge_pmd'; ISO C99 and later do not support implicit function declarations [-Wimplicit-function-declaration]
2291 | entry = mk_huge_pmd(&new_folio->page, vma->vm_page_prot);
| ^
mm/shmem.c:2291:10: note: did you mean 'mk_huge_pte'?
include/asm-generic/hugetlb.h:8:21: note: 'mk_huge_pte' declared here
8 | static inline pte_t mk_huge_pte(struct page *page, pgprot_t pgprot)
| ^
>> mm/shmem.c:2291:8: error: assigning to 'pmd_t' from incompatible type 'int'
2291 | entry = mk_huge_pmd(&new_folio->page, vma->vm_page_prot);
| ^ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
mm/shmem.c:2294:2: error: call to undeclared function 'page_add_file_rmap'; ISO C99 and later do not support implicit function declarations [-Wimplicit-function-declaration]
2294 | page_add_file_rmap(&new_folio->page, vma, true);
| ^
mm/shmem.c:2294:2: note: did you mean 'hugetlb_add_file_rmap'?
include/linux/rmap.h:303:20: note: 'hugetlb_add_file_rmap' declared here
303 | static inline void hugetlb_add_file_rmap(struct folio *folio)
| ^
1 warning and 5 errors generated.
vim +2291 mm/shmem.c
2239
2240 static vm_fault_t shmem_huge_fault(struct vm_fault *vmf, pmd_t orig_pmd)
2241 {
2242 vm_fault_t ret = VM_FAULT_FALLBACK;
2243 unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
2244 struct folio *old_folio, *new_folio;
2245 pmd_t entry;
2246 int gfp_flags = GFP_HIGHUSER_MOVABLE | __GFP_COMP;
2247 struct vm_area_struct *vma = vmf->vma;
2248 struct shmem_sb_info *sbinfo = NULL;
2249 struct inode *inode = file_inode(vma->vm_file);
2250 struct shmem_inode_info *info = SHMEM_I(inode);
2251
2252 sbinfo = SHMEM_SB(info->vfs_inode.i_sb);
2253
2254 if (sbinfo->no_split == 0)
2255 return VM_FAULT_FALLBACK;
2256
2257 /* ShmemPmdMapped in tmpfs will not split huge pmd */
2258 if (!(vmf->flags & FAULT_FLAG_WRITE)
2259 || (vma->vm_flags & VM_SHARED))
2260 return VM_FAULT_FALLBACK;
2261
2262 new_folio = vma_alloc_folio(gfp_flags, HPAGE_PMD_ORDER,
2263 vmf->vma, haddr, true);
2264 if (!new_folio)
2265 ret = VM_FAULT_FALLBACK;
2266
2267 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
2268 if (pmd_none(*vmf->pmd)) {
2269 ret = VM_FAULT_FALLBACK;
2270 goto out;
2271 }
2272 if (!pmd_same(*vmf->pmd, orig_pmd)) {
2273 ret = 0;
2274 goto out;
2275 }
2276
2277 if (!new_folio) {
2278 count_vm_event(THP_FAULT_FALLBACK);
2279 ret = VM_FAULT_FALLBACK;
2280 goto out;
2281 }
2282 old_folio = page_folio(pmd_page(*vmf->pmd));
2283 page_remove_rmap(&old_folio->page, vma, true);
2284 pmdp_huge_clear_flush(vma, haddr, vmf->pmd);
2285
2286 __folio_set_locked(new_folio);
2287 __folio_set_swapbacked(new_folio);
2288 __folio_mark_uptodate(new_folio);
2289
2290 flush_icache_pages(vma, &new_folio->page, HPAGE_PMD_NR);
> 2291 entry = mk_huge_pmd(&new_folio->page, vma->vm_page_prot);
2292 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
2293
2294 page_add_file_rmap(&new_folio->page, vma, true);
2295 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
2296 update_mmu_cache_pmd(vma, haddr, vmf->pmd);
2297 count_vm_event(THP_FILE_MAPPED);
2298
2299 folio_unlock(new_folio);
2300 spin_unlock(vmf->ptl);
2301 copy_user_large_folio(new_folio, old_folio, haddr, vma);
2302 folio_put(old_folio);
2303 ret = 0;
2304 return ret;
2305
2306 out:
2307 if (new_folio)
2308 folio_put(new_folio);
2309 spin_unlock(vmf->ptl);
2310 return ret;
2311 }
2312
--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki