From: Peter Zijlstra <a.p.zijlstra@chello.nl>
To: linux-kernel@vger.kernel.org, linux-mm@kvack.org
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>,
David Miller <davem@davemloft.net>,
Andrew Morton <akpm@linux-foundation.org>,
Daniel Phillips <phillips@google.com>,
Pekka Enberg <penberg@cs.helsinki.fi>,
Christoph Lameter <clameter@sgi.com>,
Matt Mackall <mpm@selenic.com>,
Lee Schermerhorn <Lee.Schermerhorn@hp.com>,
Steve Dickson <SteveD@redhat.com>
Subject: [PATCH 06/10] mm: kmem_estimate_pages()
Date: Mon, 06 Aug 2007 12:29:28 +0200 [thread overview]
Message-ID: <20070806103658.990602000@chello.nl> (raw)
In-Reply-To: <20070806102922.907530000@chello.nl>
[-- Attachment #1: mm-kmem_estimate_pages.patch --]
[-- Type: text/plain, Size: 4335 bytes --]
Provide a method to get the upper bound on the pages needed to allocate
a given number of objects from a given kmem_cache.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Christoph Lameter <clameter@sgi.com>
---
include/linux/slab.h | 3 +
mm/slub.c | 90 +++++++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 93 insertions(+)
Index: linux-2.6-2/include/linux/slab.h
===================================================================
--- linux-2.6-2.orig/include/linux/slab.h
+++ linux-2.6-2/include/linux/slab.h
@@ -58,6 +58,7 @@ void kmem_cache_free(struct kmem_cache *
unsigned int kmem_cache_size(struct kmem_cache *);
const char *kmem_cache_name(struct kmem_cache *);
int kmem_ptr_validate(struct kmem_cache *cachep, const void *ptr);
+unsigned kmem_estimate_pages(struct kmem_cache *cachep, gfp_t flags, int objects);
/*
* Please use this macro to create slab caches. Simply specify the
@@ -92,6 +93,8 @@ int kmem_ptr_validate(struct kmem_cache
void * __must_check krealloc(const void *, size_t, gfp_t);
void kfree(const void *);
size_t ksize(const void *);
+unsigned kestimate_single(size_t, gfp_t, int);
+unsigned kestimate(gfp_t, size_t);
/*
* Allocator specific definitions. These are mainly used to establish optimized
Index: linux-2.6-2/mm/slub.c
===================================================================
--- linux-2.6-2.orig/mm/slub.c
+++ linux-2.6-2/mm/slub.c
@@ -2206,6 +2206,45 @@ const char *kmem_cache_name(struct kmem_
EXPORT_SYMBOL(kmem_cache_name);
/*
+ * return the max number of pages required to allocate count
+ * objects from the given cache
+ */
+unsigned kmem_estimate_pages(struct kmem_cache *s, gfp_t flags, int objects)
+{
+ unsigned long slabs;
+
+ if (WARN_ON(!s) || WARN_ON(!s->objects))
+ return 0;
+
+ slabs = DIV_ROUND_UP(objects, s->objects);
+
+ /*
+ * Account the possible additional overhead if the slab holds more than
+ * one object.
+ */
+ if (s->objects > 1) {
+ if (!(gfp_to_alloc_flags(flags) & ALLOC_NO_WATERMARKS)) {
+ /*
+ * Account the possible additional overhead if per cpu
+ * slabs are currently empty and have to be allocated.
+ * This is very unlikely but a possible scenario
+ * immediately after kmem_cache_shrink.
+ */
+ slabs += num_online_cpus();
+ } else {
+ /*
+ * when using the reserves there will be only a single
+ * slab per kmem_cache.
+ */
+ slabs += 1;
+ }
+ }
+
+ return slabs << s->order;
+}
+EXPORT_SYMBOL_GPL(kmem_estimate_pages);
+
+/*
* Attempt to free all slabs on a node. Return the number of slabs we
* were unable to free.
*/
@@ -2508,6 +2547,57 @@ void kfree(const void *x)
EXPORT_SYMBOL(kfree);
/*
+ * return the max number of pages required to allocate @count objects
+ * of @size bytes from kmalloc given @flags.
+ */
+unsigned kestimate_single(size_t size, gfp_t flags, int count)
+{
+ struct kmem_cache *s = get_slab(size, flags);
+ if (!s)
+ return 0;
+
+ return kmem_estimate_pages(s, flags, count);
+
+}
+EXPORT_SYMBOL_GPL(kestimate_single);
+
+/*
+ * return the max number of pages required to allocate @bytes from kmalloc
+ * in an unspecified number of allocations of heterogeneous sizes.
+ */
+unsigned kestimate(gfp_t flags, size_t bytes)
+{
+ int i;
+ unsigned long pages;
+
+ /*
+ * multiply by two, in order to account the worst case slack space
+ * due to the power-of-two allocation sizes.
+ */
+ pages = DIV_ROUND_UP(2 * bytes, PAGE_SIZE);
+
+ /*
+ * add the kmem_cache overhead of each possible kmalloc cache
+ */
+ for (i = 1; i < KMALLOC_SHIFT_HIGH; i++) {
+ struct kmem_cache *s;
+
+#ifdef CONFIG_ZONE_DMA
+ if (unlikely(flags & SLUB_DMA))
+ s = &dma_kmalloc_cache(i, flags);
+ else
+#endif
+ s = &kmalloc_caches[i];
+
+ if (s)
+ pages += kmem_estimate_pages(s, flags, 0);
+ }
+
+ return pages;
+}
+EXPORT_SYMBOL_GPL(kestimate);
+
+/*
* kmem_cache_shrink removes empty slabs from the partial lists and sorts
* the remaining slabs by the number of items in use. The slabs with the
* most items in use come first. New allocations will then fill those up
--
--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org. For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href="mailto:dont@kvack.org">email@kvack.org</a>
next prev parent reply other threads:[~2007-08-06 10:29 UTC|newest]
Thread overview: 85+ messages / expand[flat|nested] mbox.gz Atom feed top
2007-08-06 10:29 [PATCH 00/10] foundations for reserve-based allocation Peter Zijlstra
2007-08-06 10:29 ` [PATCH 01/10] mm: gfp_to_alloc_flags() Peter Zijlstra
2007-08-06 10:29 ` [PATCH 02/10] mm: system wide ALLOC_NO_WATERMARK Peter Zijlstra
2007-08-06 18:11 ` Christoph Lameter
2007-08-06 18:21 ` Daniel Phillips
2007-08-06 18:31 ` Peter Zijlstra
2007-08-06 18:43 ` Daniel Phillips
2007-08-06 19:11 ` Christoph Lameter
2007-08-06 19:31 ` Peter Zijlstra
2007-08-06 20:12 ` Christoph Lameter
2007-08-06 18:42 ` Christoph Lameter
2007-08-06 18:48 ` Daniel Phillips
2007-08-06 18:51 ` Christoph Lameter
2007-08-06 19:15 ` Daniel Phillips
2007-08-06 20:12 ` Matt Mackall
2007-08-06 20:19 ` Christoph Lameter
2007-08-06 20:26 ` Peter Zijlstra
2007-08-06 21:05 ` Christoph Lameter
2007-08-06 22:59 ` Daniel Phillips
2007-08-06 23:14 ` Christoph Lameter
2007-08-06 23:49 ` Daniel Phillips
2007-08-07 22:18 ` Christoph Lameter
2007-08-08 7:24 ` Peter Zijlstra
2007-08-08 18:06 ` Christoph Lameter
2007-08-08 7:37 ` Daniel Phillips
2007-08-08 18:09 ` Christoph Lameter
2007-08-09 18:41 ` Daniel Phillips
2007-08-09 18:49 ` Christoph Lameter
2007-08-10 0:17 ` Daniel Phillips
2007-08-10 1:48 ` Christoph Lameter
2007-08-10 3:34 ` Daniel Phillips
2007-08-10 3:48 ` Christoph Lameter
2007-08-10 8:15 ` Daniel Phillips
2007-08-10 17:46 ` Christoph Lameter
2007-08-10 23:25 ` Daniel Phillips
2007-08-13 6:55 ` Daniel Phillips
2007-08-13 23:04 ` Christoph Lameter
2007-08-06 20:27 ` Andrew Morton
2007-08-06 23:16 ` Daniel Phillips
2007-08-06 22:47 ` Daniel Phillips
2007-08-06 10:29 ` [PATCH 03/10] mm: tag reseve pages Peter Zijlstra
2007-08-06 18:11 ` Christoph Lameter
2007-08-06 18:13 ` Daniel Phillips
2007-08-06 18:28 ` Peter Zijlstra
2007-08-06 19:34 ` Andi Kleen
2007-08-06 18:43 ` Christoph Lameter
2007-08-06 18:47 ` Peter Zijlstra
2007-08-06 18:59 ` Andi Kleen
2007-08-06 19:09 ` Christoph Lameter
2007-08-06 19:10 ` Andrew Morton
2007-08-06 19:16 ` Christoph Lameter
2007-08-06 19:38 ` Matt Mackall
2007-08-06 20:18 ` Andi Kleen
2007-08-06 10:29 ` [PATCH 04/10] mm: slub: add knowledge of reserve pages Peter Zijlstra
2007-08-08 0:13 ` Christoph Lameter
2007-08-08 1:44 ` Matt Mackall
2007-08-08 17:13 ` Christoph Lameter
2007-08-08 17:39 ` Andrew Morton
2007-08-08 17:57 ` Christoph Lameter
2007-08-08 18:46 ` Andrew Morton
2007-08-10 1:54 ` Daniel Phillips
2007-08-10 2:01 ` Christoph Lameter
2007-08-20 7:38 ` Peter Zijlstra
2007-08-20 7:43 ` Peter Zijlstra
2007-08-20 9:12 ` Pekka J Enberg
2007-08-20 9:17 ` Peter Zijlstra
2007-08-20 9:28 ` Pekka Enberg
2007-08-20 19:26 ` Christoph Lameter
2007-08-20 20:08 ` Peter Zijlstra
2007-08-06 10:29 ` [PATCH 05/10] mm: allow mempool to fall back to memalloc reserves Peter Zijlstra
2007-08-06 10:29 ` Peter Zijlstra [this message]
2007-08-06 10:29 ` [PATCH 07/10] mm: allow PF_MEMALLOC from softirq context Peter Zijlstra
2007-08-06 10:29 ` [PATCH 08/10] mm: serialize access to min_free_kbytes Peter Zijlstra
2007-08-06 10:29 ` [PATCH 09/10] mm: emergency pool Peter Zijlstra
2007-08-06 10:29 ` [PATCH 10/10] mm: __GFP_MEMALLOC Peter Zijlstra
2007-08-06 17:35 ` [PATCH 00/10] foundations for reserve-based allocation Daniel Phillips
2007-08-06 18:17 ` Peter Zijlstra
2007-08-06 18:40 ` Daniel Phillips
2007-08-06 19:31 ` Daniel Phillips
2007-08-06 19:36 ` Peter Zijlstra
2007-08-06 19:53 ` Daniel Phillips
2007-08-06 17:56 ` Christoph Lameter
2007-08-06 18:33 ` Peter Zijlstra
2007-08-06 20:23 ` Matt Mackall
2007-08-07 0:09 ` Daniel Phillips
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20070806103658.990602000@chello.nl \
--to=a.p.zijlstra@chello.nl \
--cc=Lee.Schermerhorn@hp.com \
--cc=SteveD@redhat.com \
--cc=akpm@linux-foundation.org \
--cc=clameter@sgi.com \
--cc=davem@davemloft.net \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=mpm@selenic.com \
--cc=penberg@cs.helsinki.fi \
--cc=phillips@google.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox