From: Matteo Rizzo <matteorizzo@google.com>
To: cl@linux.com, penberg@kernel.org, rientjes@google.com,
iamjoonsoo.kim@lge.com, akpm@linux-foundation.org,
vbabka@suse.cz, roman.gushchin@linux.dev, 42.hyeyoo@gmail.com,
keescook@chromium.org, linux-kernel@vger.kernel.org,
linux-doc@vger.kernel.org, linux-mm@kvack.org,
linux-hardening@vger.kernel.org, tglx@linutronix.de,
mingo@redhat.com, bp@alien8.de, dave.hansen@linux.intel.com,
x86@kernel.org, hpa@zytor.com, corbet@lwn.net, luto@kernel.org,
peterz@infradead.org
Cc: jannh@google.com, matteorizzo@google.com, evn@google.com,
poprdi@google.com, jordyzomer@google.com
Subject: [RFC PATCH 03/14] mm/slub: move kmem_cache_order_objects to slab.h
Date: Fri, 15 Sep 2023 10:59:22 +0000
Message-ID: <20230915105933.495735-4-matteorizzo@google.com>
In-Reply-To: <20230915105933.495735-1-matteorizzo@google.com>
From: Jann Horn <jannh@google.com>

This is a refactoring in preparation for SLAB_VIRTUAL. The
implementation needs to know the order of the virtual memory region
allocated to each slab in order to know how much physical memory to
allocate when the slab is reused. We reuse kmem_cache_order_objects
for this, so it has to be defined before struct slab.
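
As a reference for reviewers (not part of the patch): a minimal
sketch of the encoding, based on the OO_SHIFT/OO_MASK definitions and
the oo_order()/oo_objects() helpers being moved, with oo_make() in
mm/slub.c doing the corresponding packing. The example values
(order 3, 512 objects) are made up for illustration:

	/* both fields packed into one atomically-updatable word */
	struct kmem_cache_order_objects oo = {
		.x = (3 << OO_SHIFT) + 512
	};
	oo_order(oo);	/* -> 3: upper bits, x >> OO_SHIFT */
	oo_objects(oo);	/* -> 512: lower 16 bits, x & OO_MASK */
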
Signed-off-by: Jann Horn <jannh@google.com>
Co-developed-by: Matteo Rizzo <matteorizzo@google.com>
Signed-off-by: Matteo Rizzo <matteorizzo@google.com>
---
include/linux/slub_def.h | 9 ---------
mm/slab.h | 22 ++++++++++++++++++++++
mm/slub.c | 12 ------------
3 files changed, 22 insertions(+), 21 deletions(-)
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index deb90cf4bffb..0adf5ba8241b 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -83,15 +83,6 @@ struct kmem_cache_cpu {
 #define slub_percpu_partial_read_once(c)	NULL
 #endif // CONFIG_SLUB_CPU_PARTIAL
 
-/*
- * Word size structure that can be atomically updated or read and that
- * contains both the order and the number of objects that a slab of the
- * given order would contain.
- */
-struct kmem_cache_order_objects {
-	unsigned int x;
-};
-
 /*
  * Slab cache management.
  */
diff --git a/mm/slab.h b/mm/slab.h
index 25e41dd6087e..3fe0d1e26e26 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -38,6 +38,15 @@ typedef union {
 	freelist_full_t full;
 } freelist_aba_t;
 
+/*
+ * Word size structure that can be atomically updated or read and that
+ * contains both the order and the number of objects that a slab of the
+ * given order would contain.
+ */
+struct kmem_cache_order_objects {
+	unsigned int x;
+};
+
 /* Reuses the bits in struct page */
 struct slab {
 	unsigned long __page_flags;
@@ -227,6 +236,19 @@ static inline struct slab *virt_to_slab(const void *addr)
 	return folio_slab(folio);
 }
 
+#define OO_SHIFT	16
+#define OO_MASK		((1 << OO_SHIFT) - 1)
+
+static inline unsigned int oo_order(struct kmem_cache_order_objects x)
+{
+	return x.x >> OO_SHIFT;
+}
+
+static inline unsigned int oo_objects(struct kmem_cache_order_objects x)
+{
+	return x.x & OO_MASK;
+}
+
 static inline int slab_order(const struct slab *slab)
 {
 	return folio_order((struct folio *)slab_folio(slab));
diff --git a/mm/slub.c b/mm/slub.c
index b69916ab7aa8..df2529c03bd3 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -284,8 +284,6 @@ static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
  */
 #define DEBUG_METADATA_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
 
-#define OO_SHIFT	16
-#define OO_MASK		((1 << OO_SHIFT) - 1)
 #define MAX_OBJS_PER_PAGE	32767 /* since slab.objects is u15 */
 
 /* Internal SLUB flags */
@@ -473,16 +471,6 @@ static inline struct kmem_cache_order_objects oo_make(unsigned int order,
 	return x;
 }
 
-static inline unsigned int oo_order(struct kmem_cache_order_objects x)
-{
-	return x.x >> OO_SHIFT;
-}
-
-static inline unsigned int oo_objects(struct kmem_cache_order_objects x)
-{
-	return x.x & OO_MASK;
-}
-
 #ifdef CONFIG_SLUB_CPU_PARTIAL
 static void slub_set_cpu_partial(struct kmem_cache *s, unsigned int nr_objects)
 {
--
2.42.0.459.ge4e396fd5e-goog