linux-mm.kvack.org archive mirror
 help / color / mirror / Atom feed
From: Usama Arif <usamaarif642@gmail.com>
To: sj@kernel.org, akpm@linux-foundation.org
Cc: damon@lists.linux.dev, linux-mm@kvack.org, hannes@cmpxchg.org,
	david@redhat.com, kernel-team@meta.com,
	Usama Arif <usamaarif642@gmail.com>
Subject: [PATCH v4 2/6] mm/damon/paddr: use damon_get_folio_in_region to obtain folio
Date: Mon,  3 Feb 2025 22:55:29 +0000	[thread overview]
Message-ID: <20250203225604.44742-3-usamaarif642@gmail.com> (raw)
In-Reply-To: <20250203225604.44742-1-usamaarif642@gmail.com>

This is introduced for larger folios. If a large folio has subpages
present in multiple regions, it will be considered multiple times.
This can happen when checking access or applying DAMOS schemes. For
example, in pa_stat, folios split across N regions will be counted N
times, giving inaccurate results. Hence, consider a page for access
check/DAMOS scheme application only if the head page is part of that
region as well.

Signed-off-by: Usama Arif <usamaarif642@gmail.com>
---
 mm/damon/paddr.c | 44 ++++++++++++++++++++++++++++++++++----------
 1 file changed, 34 insertions(+), 10 deletions(-)

diff --git a/mm/damon/paddr.c b/mm/damon/paddr.c
index 0fb61f6ddb8d..3f59a3fdc391 100644
--- a/mm/damon/paddr.c
+++ b/mm/damon/paddr.c
@@ -19,6 +19,30 @@
 #include "../internal.h"
 #include "ops-common.h"
 
+/*
+ * Get an online folio for a paddr if it is on the LRU list and its head page is
+ * not before region_start; takes a folio reference. Otherwise, returns NULL.
+ */
+static struct folio *damon_get_folio_in_region(unsigned long addr, unsigned long region_start)
+{
+	struct page *page = pfn_to_online_page(PHYS_PFN(addr));
+	struct folio *folio;
+
+	if (!page)
+		return NULL;
+
+	folio = page_folio(page);
+	if (addr - folio_page_idx(folio, page) * PAGE_SIZE < region_start)
+		return NULL;
+	if (!folio_test_lru(folio) || !folio_try_get(folio))
+		return NULL;
+	if (unlikely(page_folio(page) != folio || !folio_test_lru(folio))) {
+		folio_put(folio);
+		folio = NULL;
+	}
+	return folio;
+}
+
 static bool damon_folio_mkold_one(struct folio *folio,
 		struct vm_area_struct *vma, unsigned long addr, void *arg)
 {
@@ -58,9 +82,9 @@ static void damon_folio_mkold(struct folio *folio)
 
 }
 
-static void damon_pa_mkold(unsigned long paddr)
+static void damon_pa_mkold(unsigned long paddr, unsigned long region_start)
 {
-	struct folio *folio = damon_get_folio(PHYS_PFN(paddr));
+	struct folio *folio = damon_get_folio_in_region(paddr, region_start);
 
 	if (!folio)
 		return;
@@ -73,7 +97,7 @@ static void __damon_pa_prepare_access_check(struct damon_region *r)
 {
 	r->sampling_addr = damon_rand(r->ar.start, r->ar.end);
 
-	damon_pa_mkold(r->sampling_addr);
+	damon_pa_mkold(r->sampling_addr, r->ar.start);
 }
 
 static void damon_pa_prepare_access_checks(struct damon_ctx *ctx)
@@ -148,9 +172,9 @@ static bool damon_folio_young(struct folio *folio)
 	return accessed;
 }
 
-static bool damon_pa_young(unsigned long paddr, unsigned long *folio_sz)
+static bool damon_pa_young(unsigned long paddr, unsigned long *folio_sz, unsigned long region_start)
 {
-	struct folio *folio = damon_get_folio(PHYS_PFN(paddr));
+	struct folio *folio = damon_get_folio_in_region(paddr, region_start);
 	bool accessed;
 
 	if (!folio)
@@ -176,7 +200,7 @@ static void __damon_pa_check_access(struct damon_region *r,
 		return;
 	}
 
-	last_accessed = damon_pa_young(r->sampling_addr, &last_folio_sz);
+	last_accessed = damon_pa_young(r->sampling_addr, &last_folio_sz, r->ar.start);
 	damon_update_region_access_rate(r, last_accessed, attrs);
 
 	last_addr = r->sampling_addr;
@@ -268,7 +292,7 @@ static unsigned long damon_pa_pageout(struct damon_region *r, struct damos *s,
 
 	addr = r->ar.start;
 	while (addr < r->ar.end) {
-		struct folio *folio = damon_get_folio(PHYS_PFN(addr));
+		struct folio *folio = damon_get_folio_in_region(addr, r->ar.start);
 
 		if (!folio) {
 			addr += PAGE_SIZE;
@@ -307,7 +331,7 @@ static inline unsigned long damon_pa_mark_accessed_or_deactivate(
 
 	addr = r->ar.start;
 	while (addr < r->ar.end) {
-		struct folio *folio = damon_get_folio(PHYS_PFN(addr));
+		struct folio *folio = damon_get_folio_in_region(addr, r->ar.start);
 
 		if (!folio) {
 			addr += PAGE_SIZE;
@@ -474,7 +498,7 @@ static unsigned long damon_pa_migrate(struct damon_region *r, struct damos *s,
 
 	addr = r->ar.start;
 	while (addr < r->ar.end) {
-		struct folio *folio = damon_get_folio(PHYS_PFN(addr));
+		struct folio *folio = damon_get_folio_in_region(addr, r->ar.start);
 
 		if (!folio) {
 			addr += PAGE_SIZE;
@@ -518,7 +542,7 @@ static unsigned long damon_pa_stat(struct damon_region *r, struct damos *s,
 
 	addr = r->ar.start;
 	while (addr < r->ar.end) {
-		struct folio *folio = damon_get_folio(PHYS_PFN(addr));
+		struct folio *folio = damon_get_folio_in_region(addr, r->ar.start);
 
 		if (!folio) {
 			addr += PAGE_SIZE;
-- 
2.43.5



  parent reply	other threads:[~2025-02-03 22:56 UTC|newest]

Thread overview: 21+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2025-02-03 22:55 [PATCH v4 0/6] mm/damon: add support for hugepages Usama Arif
2025-02-03 22:55 ` [PATCH v4 1/6] mm/damon: have damon_get_folio return folio even for tail pages Usama Arif
2025-02-03 22:55 ` Usama Arif [this message]
2025-02-04 23:06   ` [PATCH v4 2/6] mm/damon/paddr: use damon_get_folio_in_region to obtain folio SeongJae Park
2025-02-05 12:46     ` Usama Arif
2025-02-05 21:40       ` SeongJae Park
2025-02-03 22:55 ` [PATCH v4 3/6] mm/damon/sysfs-schemes: add files for setting damos_filter->folio_size Usama Arif
2025-02-04 23:10   ` SeongJae Park
2025-02-05 13:57     ` Usama Arif
2025-02-05 21:44       ` SeongJae Park
2025-02-03 22:55 ` [PATCH v4 4/6] mm/damon: introduce DAMOS filter type hugepage Usama Arif
2025-02-04 23:12   ` SeongJae Park
2025-02-05 13:52     ` Usama Arif
2025-02-05 22:05       ` SeongJae Park
2025-02-07 18:22     ` Usama Arif
2025-02-07 18:52       ` SeongJae Park
2025-02-03 22:55 ` [PATCH v4 5/6] Docs/ABI/damon: document DAMOS sysfs files to set the min/max folio_size Usama Arif
2025-02-04 23:13   ` SeongJae Park
2025-02-03 22:55 ` [PATCH v4 6/6] Docs/admin-guide/mm/damon/usage: Document hugepage filter type Usama Arif
2025-02-04 23:13   ` SeongJae Park
2025-02-04 23:20 ` [PATCH v4 0/6] mm/damon: add support for hugepages SeongJae Park

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20250203225604.44742-3-usamaarif642@gmail.com \
    --to=usamaarif642@gmail.com \
    --cc=akpm@linux-foundation.org \
    --cc=damon@lists.linux.dev \
    --cc=david@redhat.com \
    --cc=hannes@cmpxchg.org \
    --cc=kernel-team@meta.com \
    --cc=linux-mm@kvack.org \
    --cc=sj@kernel.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox