linux-mm.kvack.org archive mirror
From: Yu Zhao <yuzhao@google.com>
To: linux-mm@kvack.org
Cc: Alex Shi <alexs@kernel.org>, Andi Kleen <ak@linux.intel.com>,
	 Andrew Morton <akpm@linux-foundation.org>,
	Dave Chinner <david@fromorbit.com>,
	 Dave Hansen <dave.hansen@linux.intel.com>,
	Donald Carr <sirspudd@gmail.com>,
	 Hillf Danton <hdanton@sina.com>, Jens Axboe <axboe@kernel.dk>,
	Johannes Weiner <hannes@cmpxchg.org>,
	 Jonathan Corbet <corbet@lwn.net>,
	Joonsoo Kim <iamjoonsoo.kim@lge.com>,
	 Konstantin Kharlamov <hi-angel@yandex.ru>,
	Marcus Seyfarth <m.seyfarth@gmail.com>,
	 Matthew Wilcox <willy@infradead.org>,
	Mel Gorman <mgorman@suse.de>, Miaohe Lin <linmiaohe@huawei.com>,
	 Michael Larabel <michael@michaellarabel.com>,
	Michal Hocko <mhocko@suse.com>,
	 Michel Lespinasse <michel@lespinasse.org>,
	Rik van Riel <riel@surriel.com>, Roman Gushchin <guro@fb.com>,
	 Tim Chen <tim.c.chen@linux.intel.com>,
	Vlastimil Babka <vbabka@suse.cz>,  Yang Shi <shy828301@gmail.com>,
	Ying Huang <ying.huang@intel.com>, Zi Yan <ziy@nvidia.com>,
	 linux-kernel@vger.kernel.org, lkp@lists.01.org,
	page-reclaim@google.com,  Yu Zhao <yuzhao@google.com>,
	Konstantin Kharlamov <Hi-Angel@yandex.ru>
Subject: [PATCH v3 06/14] mm/workingset.c: refactor pack_shadow() and unpack_shadow()
Date: Thu, 20 May 2021 00:53:47 -0600	[thread overview]
Message-ID: <20210520065355.2736558-7-yuzhao@google.com> (raw)
In-Reply-To: <20210520065355.2736558-1-yuzhao@google.com>

This patch moves the bucket order and PageWorkingset() handling out of
pack_shadow() and unpack_shadow(). It has no merit on its own, but it
makes the upcoming changes to mm/workingset.c less diffy.
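
For reference, here is a rough userspace sketch of the new split (not
part of the patch; the shift constants are simplified stand-ins for the
kernel's MEM_CGROUP_ID_SHIFT, NODES_SHIFT and WORKINGSET_WIDTH, and
xa_mk_value()/xa_to_value() are omitted). The helpers now only pack and
unpack the memcg id and node id, while the caller folds the bucket-order
shift and the PageWorkingset() bit into the value it passes in:

/* Illustration only: a simplified userspace model of the encoding. */
#include <stdio.h>

#define MEMCG_SHIFT  16  /* stand-in for MEM_CGROUP_ID_SHIFT */
#define NODE_SHIFT    6  /* stand-in for NODES_SHIFT */
#define WS_WIDTH      1  /* stand-in for WORKINGSET_WIDTH */

/* New-style pack_shadow(): only the memcg id and node id go in here. */
static unsigned long pack(int memcg_id, int node_id, unsigned long val)
{
	val = (val << MEMCG_SHIFT) | memcg_id;
	val = (val << NODE_SHIFT) | node_id;
	return val;
}

/* New-style unpack_shadow(): peel off node id and memcg id, return the rest. */
static unsigned long unpack(unsigned long val, int *memcg_id, int *node_id)
{
	*node_id = val & ((1UL << NODE_SHIFT) - 1);
	val >>= NODE_SHIFT;
	*memcg_id = val & ((1UL << MEMCG_SHIFT) - 1);
	return val >> MEMCG_SHIFT;
}

int main(void)
{
	unsigned long eviction = 123456, val, out, ev;
	int bucket_order = 2, workingset = 1;
	int memcg_id, node_id, ws;

	/* Caller-side work that used to live inside pack_shadow(): */
	val = ((eviction >> bucket_order) << WS_WIDTH) | workingset;
	val = pack(7, 1, val);

	/* Caller-side work that used to live inside unpack_shadow(): */
	out = unpack(val, &memcg_id, &node_id);
	ws = out & ((1UL << WS_WIDTH) - 1);
	ev = (out >> WS_WIDTH) << bucket_order;

	printf("memcg=%d node=%d workingset=%d eviction=%lu\n",
	       memcg_id, node_id, ws, ev);
	return 0;
}

The round trip prints back the original memcg id, node id, workingset
bit and eviction timestamp, which is the invariant the refactoring has
to preserve.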

Signed-off-by: Yu Zhao <yuzhao@google.com>
Tested-by: Konstantin Kharlamov <Hi-Angel@yandex.ru>
---
 mm/workingset.c | 53 ++++++++++++++++++++-----------------------------
 1 file changed, 22 insertions(+), 31 deletions(-)

diff --git a/mm/workingset.c b/mm/workingset.c
index b7cdeca5a76d..edb8aed2587e 100644
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -168,9 +168,9 @@
  * refault distance will immediately activate the refaulting page.
  */
 
-#define EVICTION_SHIFT	((BITS_PER_LONG - BITS_PER_XA_VALUE) +	\
-			 1 + NODES_SHIFT + MEM_CGROUP_ID_SHIFT)
-#define EVICTION_MASK	(~0UL >> EVICTION_SHIFT)
+#define EVICTION_SHIFT		(BITS_PER_XA_VALUE - MEM_CGROUP_ID_SHIFT - NODES_SHIFT)
+#define EVICTION_MASK		(BIT(EVICTION_SHIFT) - 1)
+#define WORKINGSET_WIDTH	1
 
 /*
  * Eviction timestamps need to be able to cover the full range of
@@ -182,36 +182,23 @@
  */
 static unsigned int bucket_order __read_mostly;
 
-static void *pack_shadow(int memcgid, pg_data_t *pgdat, unsigned long eviction,
-			 bool workingset)
+static void *pack_shadow(int memcg_id, struct pglist_data *pgdat, unsigned long val)
 {
-	eviction >>= bucket_order;
-	eviction &= EVICTION_MASK;
-	eviction = (eviction << MEM_CGROUP_ID_SHIFT) | memcgid;
-	eviction = (eviction << NODES_SHIFT) | pgdat->node_id;
-	eviction = (eviction << 1) | workingset;
+	val = (val << MEM_CGROUP_ID_SHIFT) | memcg_id;
+	val = (val << NODES_SHIFT) | pgdat->node_id;
 
-	return xa_mk_value(eviction);
+	return xa_mk_value(val);
 }
 
-static void unpack_shadow(void *shadow, int *memcgidp, pg_data_t **pgdat,
-			  unsigned long *evictionp, bool *workingsetp)
+static unsigned long unpack_shadow(void *shadow, int *memcg_id, struct pglist_data **pgdat)
 {
-	unsigned long entry = xa_to_value(shadow);
-	int memcgid, nid;
-	bool workingset;
+	unsigned long val = xa_to_value(shadow);
 
-	workingset = entry & 1;
-	entry >>= 1;
-	nid = entry & ((1UL << NODES_SHIFT) - 1);
-	entry >>= NODES_SHIFT;
-	memcgid = entry & ((1UL << MEM_CGROUP_ID_SHIFT) - 1);
-	entry >>= MEM_CGROUP_ID_SHIFT;
+	*pgdat = NODE_DATA(val & (BIT(NODES_SHIFT) - 1));
+	val >>= NODES_SHIFT;
+	*memcg_id = val & (BIT(MEM_CGROUP_ID_SHIFT) - 1);
 
-	*memcgidp = memcgid;
-	*pgdat = NODE_DATA(nid);
-	*evictionp = entry << bucket_order;
-	*workingsetp = workingset;
+	return val >> MEM_CGROUP_ID_SHIFT;
 }
 
 /**
@@ -266,8 +253,10 @@ void *workingset_eviction(struct page *page, struct mem_cgroup *target_memcg)
 	/* XXX: target_memcg can be NULL, go through lruvec */
 	memcgid = mem_cgroup_id(lruvec_memcg(lruvec));
 	eviction = atomic_long_read(&lruvec->nonresident_age);
+	eviction >>= bucket_order;
+	eviction = (eviction << WORKINGSET_WIDTH) | PageWorkingset(page);
 	workingset_age_nonresident(lruvec, thp_nr_pages(page));
-	return pack_shadow(memcgid, pgdat, eviction, PageWorkingset(page));
+	return pack_shadow(memcgid, pgdat, eviction);
 }
 
 /**
@@ -294,7 +283,7 @@ void workingset_refault(struct page *page, void *shadow)
 	bool workingset;
 	int memcgid;
 
-	unpack_shadow(shadow, &memcgid, &pgdat, &eviction, &workingset);
+	eviction = unpack_shadow(shadow, &memcgid, &pgdat);
 
 	rcu_read_lock();
 	/*
@@ -318,6 +307,8 @@ void workingset_refault(struct page *page, void *shadow)
 		goto out;
 	eviction_lruvec = mem_cgroup_lruvec(eviction_memcg, pgdat);
 	refault = atomic_long_read(&eviction_lruvec->nonresident_age);
+	workingset = eviction & (BIT(WORKINGSET_WIDTH) - 1);
+	eviction = (eviction >> WORKINGSET_WIDTH) << bucket_order;
 
 	/*
 	 * Calculate the refault distance
@@ -335,7 +326,7 @@ void workingset_refault(struct page *page, void *shadow)
 	 * longest time, so the occasional inappropriate activation
 	 * leading to pressure on the active list is not a problem.
 	 */
-	refault_distance = (refault - eviction) & EVICTION_MASK;
+	refault_distance = (refault - eviction) & (EVICTION_MASK >> WORKINGSET_WIDTH);
 
 	/*
 	 * The activation decision for this page is made at the level
@@ -593,7 +584,7 @@ static int __init workingset_init(void)
 	unsigned int max_order;
 	int ret;
 
-	BUILD_BUG_ON(BITS_PER_LONG < EVICTION_SHIFT);
+	BUILD_BUG_ON(EVICTION_SHIFT < WORKINGSET_WIDTH);
 	/*
 	 * Calculate the eviction bucket size to cover the longest
 	 * actionable refault distance, which is currently half of
@@ -601,7 +592,7 @@ static int __init workingset_init(void)
 	 * some more pages at runtime, so keep working with up to
 	 * double the initial memory by using totalram_pages as-is.
 	 */
-	timestamp_bits = BITS_PER_LONG - EVICTION_SHIFT;
+	timestamp_bits = EVICTION_SHIFT - WORKINGSET_WIDTH;
 	max_order = fls_long(totalram_pages() - 1);
 	if (max_order > timestamp_bits)
 		bucket_order = max_order - timestamp_bits;
-- 
2.31.1.751.gd2f1c929bd-goog


Thread overview: 18+ messages
2021-05-20  6:53 [PATCH v3 00/14] Multigenerational LRU Framework Yu Zhao
2021-05-20  6:53 ` [PATCH v3 01/14] include/linux/memcontrol.h: do not warn in page_memcg_rcu() if !CONFIG_MEMCG Yu Zhao
2021-05-20  6:53 ` [PATCH v3 02/14] include/linux/nodemask.h: define next_memory_node() if !CONFIG_NUMA Yu Zhao
2021-05-20  6:53 ` [PATCH v3 03/14] include/linux/cgroup.h: export cgroup_mutex Yu Zhao
2021-05-20  6:53 ` [PATCH v3 04/14] mm, x86: support the access bit on non-leaf PMD entries Yu Zhao
2021-05-20  6:53 ` [PATCH v3 05/14] mm/vmscan.c: refactor shrink_node() Yu Zhao
2021-05-20  6:53 ` Yu Zhao [this message]
2021-05-20  6:53 ` [PATCH v3 07/14] mm: multigenerational lru: groundwork Yu Zhao
2021-05-20  6:53 ` [PATCH v3 08/14] mm: multigenerational lru: activation Yu Zhao
2021-05-20  6:53 ` [PATCH v3 09/14] mm: multigenerational lru: mm_struct list Yu Zhao
2021-05-20  6:53 ` [PATCH v3 10/14] mm: multigenerational lru: aging Yu Zhao
2021-05-20  6:53 ` [PATCH v3 11/14] mm: multigenerational lru: eviction Yu Zhao
2021-05-20  6:53 ` [PATCH v3 12/14] mm: multigenerational lru: user interface Yu Zhao
2021-05-20  6:53 ` [PATCH v3 13/14] mm: multigenerational lru: Kconfig Yu Zhao
2021-05-20  6:53 ` [PATCH v3 14/14] mm: multigenerational lru: documentation Yu Zhao
2021-07-28  1:59 ` [PATCH v3 00/14] Multigenerational LRU Framework Hillf Danton
2021-08-01 17:21   ` Yu Zhao
     [not found]   ` <20210807150459.294f8c03@mail.inbox.lv>
2021-08-07  7:51     ` Hillf Danton
