From: Hugh Dickins <hughd@google.com>
To: Andrew Morton <akpm@linux-foundation.org>
Cc: Andi Kleen <ak@linux.intel.com>, Christoph Lameter <cl@linux.com>,
Matthew Wilcox <willy@infradead.org>,
Mike Kravetz <mike.kravetz@oracle.com>,
David Hildenbrand <david@redhat.com>,
Suren Baghdasaryan <surenb@google.com>,
Yang Shi <shy828301@gmail.com>,
Sidhartha Kumar <sidhartha.kumar@oracle.com>,
Vishal Moola <vishal.moola@gmail.com>,
Kefeng Wang <wangkefeng.wang@huawei.com>,
Greg Kroah-Hartman <gregkh@linuxfoundation.org>,
Tejun Heo <tj@kernel.org>,
Mel Gorman <mgorman@techsingularity.net>,
Michal Hocko <mhocko@suse.com>,
"Huang, Ying" <ying.huang@intel.com>,
linux-kernel@vger.kernel.org, linux-mm@kvack.org
Subject: [PATCH v2 05/12] mempolicy trivia: slightly more consistent naming
Date: Tue, 3 Oct 2023 02:20:14 -0700 (PDT)
Message-ID: <68287974-b6ae-7df-4ba-d19ddd69cbf@google.com>
In-Reply-To: <ebc0987e-beff-8bfb-9283-234c2cbd17c5@google.com>

Before getting down to work, do a little cleanup, mainly of inconsistent
variable naming. I gave up trying to rationalize mpol versus pol versus
policy, and node versus nid, but let's avoid p and nd. Remove a few
superfluous blank lines, but add one; and here prefer vma->vm_policy to
vma_policy(vma) - the latter being appropriate in other sources, which
have to allow for !CONFIG_NUMA. That intriguing line about KERNEL_DS?
should have gone in v2.6.15, when numa_policy_init() stopped using
set_mempolicy(2)'s system call handler.
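
As an aside, for readers not familiar with the wrapper: the vma_policy()
indirection looks roughly like this (a simplified sketch of the
include/linux/mempolicy.h definitions, not part of this patch):

	#ifdef CONFIG_NUMA
	/* vm_area_struct only has a vm_policy field when CONFIG_NUMA=y */
	#define vma_policy(vma)	((vma)->vm_policy)
	#else
	#define vma_policy(vma)	NULL
	#endif

mm/mempolicy.c is built only for CONFIG_NUMA, so it can safely
dereference vma->vm_policy directly.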
Signed-off-by: Hugh Dickins <hughd@google.com>
Reviewed-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
include/linux/mempolicy.h | 11 +++----
mm/mempolicy.c | 73 +++++++++++++++++++----------------------
2 files changed, 38 insertions(+), 46 deletions(-)
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index 6c2754d7bfed..325b7200c311 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -126,10 +126,9 @@ struct shared_policy {
int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst);
void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol);
-int mpol_set_shared_policy(struct shared_policy *info,
- struct vm_area_struct *vma,
- struct mempolicy *new);
-void mpol_free_shared_policy(struct shared_policy *p);
+int mpol_set_shared_policy(struct shared_policy *sp,
+ struct vm_area_struct *vma, struct mempolicy *mpol);
+void mpol_free_shared_policy(struct shared_policy *sp);
struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
unsigned long idx);
@@ -193,7 +192,7 @@ static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
return true;
}
-static inline void mpol_put(struct mempolicy *p)
+static inline void mpol_put(struct mempolicy *pol)
{
}
@@ -212,7 +211,7 @@ static inline void mpol_shared_policy_init(struct shared_policy *sp,
{
}
-static inline void mpol_free_shared_policy(struct shared_policy *p)
+static inline void mpol_free_shared_policy(struct shared_policy *sp)
{
}
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 780498662b75..c7906a034959 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -25,7 +25,7 @@
* to the last. It would be better if bind would truly restrict
* the allocation to memory nodes instead
*
- * preferred Try a specific node first before normal fallback.
+ * preferred Try a specific node first before normal fallback.
* As a special case NUMA_NO_NODE here means do the allocation
* on the local CPU. This is normally identical to default,
* but useful to set in a VMA when you have a non default
@@ -52,7 +52,7 @@
* on systems with highmem kernel lowmem allocation don't get policied.
* Same with GFP_DMA allocations.
*
- * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
+ * For shmem/tmpfs shared memory the policy is shared between
* all users and remembered even when nobody has memory mapped.
*/
@@ -291,6 +291,7 @@ static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
return ERR_PTR(-EINVAL);
} else if (nodes_empty(*nodes))
return ERR_PTR(-EINVAL);
+
policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
if (!policy)
return ERR_PTR(-ENOMEM);
@@ -303,11 +304,11 @@ static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
}
/* Slow path of a mpol destructor. */
-void __mpol_put(struct mempolicy *p)
+void __mpol_put(struct mempolicy *pol)
{
- if (!atomic_dec_and_test(&p->refcnt))
+ if (!atomic_dec_and_test(&pol->refcnt))
return;
- kmem_cache_free(policy_cache, p);
+ kmem_cache_free(policy_cache, pol);
}
static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes)
@@ -364,7 +365,6 @@ static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask)
*
* Called with task's alloc_lock held.
*/
-
void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
{
mpol_rebind_policy(tsk->mempolicy, new);
@@ -375,7 +375,6 @@ void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
*
* Call holding a reference to mm. Takes mm->mmap_lock during call.
*/
-
void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
struct vm_area_struct *vma;
@@ -757,7 +756,7 @@ queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
* This must be called with the mmap_lock held for writing.
*/
static int vma_replace_policy(struct vm_area_struct *vma,
- struct mempolicy *pol)
+ struct mempolicy *pol)
{
int err;
struct mempolicy *old;
@@ -803,7 +802,7 @@ static int mbind_range(struct vma_iterator *vmi, struct vm_area_struct *vma,
vmstart = vma->vm_start;
}
- if (mpol_equal(vma_policy(vma), new_pol)) {
+ if (mpol_equal(vma->vm_policy, new_pol)) {
*prev = vma;
return 0;
}
@@ -875,18 +874,18 @@ static long do_set_mempolicy(unsigned short mode, unsigned short flags,
*
* Called with task's alloc_lock held
*/
-static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
+static void get_policy_nodemask(struct mempolicy *pol, nodemask_t *nodes)
{
nodes_clear(*nodes);
- if (p == &default_policy)
+ if (pol == &default_policy)
return;
- switch (p->mode) {
+ switch (pol->mode) {
case MPOL_BIND:
case MPOL_INTERLEAVE:
case MPOL_PREFERRED:
case MPOL_PREFERRED_MANY:
- *nodes = p->nodes;
+ *nodes = pol->nodes;
break;
case MPOL_LOCAL:
/* return empty node mask for local allocation */
@@ -1654,7 +1653,6 @@ static int kernel_migrate_pages(pid_t pid, unsigned long maxnode,
out_put:
put_task_struct(task);
goto out;
-
}
SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
@@ -1664,7 +1662,6 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
return kernel_migrate_pages(pid, maxnode, old_nodes, new_nodes);
}
-
/* Retrieve NUMA policy */
static int kernel_get_mempolicy(int __user *policy,
unsigned long __user *nmask,
@@ -1847,10 +1844,10 @@ nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
* policy_node() is always coupled with policy_nodemask(), which
* secures the nodemask limit for 'bind' and 'prefer-many' policy.
*/
-static int policy_node(gfp_t gfp, struct mempolicy *policy, int nd)
+static int policy_node(gfp_t gfp, struct mempolicy *policy, int nid)
{
if (policy->mode == MPOL_PREFERRED) {
- nd = first_node(policy->nodes);
+ nid = first_node(policy->nodes);
} else {
/*
* __GFP_THISNODE shouldn't even be used with the bind policy
@@ -1865,19 +1862,18 @@ static int policy_node(gfp_t gfp, struct mempolicy *policy, int nd)
policy->home_node != NUMA_NO_NODE)
return policy->home_node;
- return nd;
+ return nid;
}
/* Do dynamic interleaving for a process */
-static unsigned interleave_nodes(struct mempolicy *policy)
+static unsigned int interleave_nodes(struct mempolicy *policy)
{
- unsigned next;
- struct task_struct *me = current;
+ unsigned int nid;
- next = next_node_in(me->il_prev, policy->nodes);
- if (next < MAX_NUMNODES)
- me->il_prev = next;
- return next;
+ nid = next_node_in(current->il_prev, policy->nodes);
+ if (nid < MAX_NUMNODES)
+ current->il_prev = nid;
+ return nid;
}
/*
@@ -2367,7 +2363,7 @@ unsigned long alloc_pages_bulk_array_mempolicy(gfp_t gfp,
int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
{
- struct mempolicy *pol = mpol_dup(vma_policy(src));
+ struct mempolicy *pol = mpol_dup(src->vm_policy);
if (IS_ERR(pol))
return PTR_ERR(pol);
@@ -2791,40 +2787,40 @@ void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
}
}
-int mpol_set_shared_policy(struct shared_policy *info,
- struct vm_area_struct *vma, struct mempolicy *npol)
+int mpol_set_shared_policy(struct shared_policy *sp,
+ struct vm_area_struct *vma, struct mempolicy *pol)
{
int err;
struct sp_node *new = NULL;
unsigned long sz = vma_pages(vma);
- if (npol) {
- new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
+ if (pol) {
+ new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, pol);
if (!new)
return -ENOMEM;
}
- err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
+ err = shared_policy_replace(sp, vma->vm_pgoff, vma->vm_pgoff + sz, new);
if (err && new)
sp_free(new);
return err;
}
/* Free a backing policy store on inode delete. */
-void mpol_free_shared_policy(struct shared_policy *p)
+void mpol_free_shared_policy(struct shared_policy *sp)
{
struct sp_node *n;
struct rb_node *next;
- if (!p->root.rb_node)
+ if (!sp->root.rb_node)
return;
- write_lock(&p->lock);
- next = rb_first(&p->root);
+ write_lock(&sp->lock);
+ next = rb_first(&sp->root);
while (next) {
n = rb_entry(next, struct sp_node, nd);
next = rb_next(&n->nd);
- sp_delete(p, n);
+ sp_delete(sp, n);
}
- write_unlock(&p->lock);
+ write_unlock(&sp->lock);
}
#ifdef CONFIG_NUMA_BALANCING
@@ -2874,7 +2870,6 @@ static inline void __init check_numabalancing_enable(void)
}
#endif /* CONFIG_NUMA_BALANCING */
-/* assumes fs == KERNEL_DS */
void __init numa_policy_init(void)
{
nodemask_t interleave_nodes;
@@ -2937,7 +2932,6 @@ void numa_default_policy(void)
/*
* Parse and format mempolicy from/to strings
*/
-
static const char * const policy_modes[] =
{
[MPOL_DEFAULT] = "default",
@@ -2948,7 +2942,6 @@ static const char * const policy_modes[] =
[MPOL_PREFERRED_MANY] = "prefer (many)",
};
-
#ifdef CONFIG_TMPFS
/**
* mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option.
--
2.35.3
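
For context, the renamed mpol_set_shared_policy() is reached via tmpfs's
set_policy vm operation, roughly like this (paraphrased from mm/shmem.c
for illustration; not part of this patch):

	static int shmem_set_policy(struct vm_area_struct *vma,
				    struct mempolicy *mpol)
	{
		struct inode *inode = file_inode(vma->vm_file);

		return mpol_set_shared_policy(&SHMEM_I(inode)->policy,
					      vma, mpol);
	}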