From: Mel Gorman <mgorman@suse.de>
To: Linux-MM <linux-mm@kvack.org>
Cc: Linux-FSDevel <linux-fsdevel@vger.kernel.org>
Subject: [PATCH 06/16] mm: page_alloc: Calculate classzone_idx once from the zonelist ref
Date: Fri, 18 Apr 2014 15:50:33 +0100
Message-ID: <1397832643-14275-7-git-send-email-mgorman@suse.de>
In-Reply-To: <1397832643-14275-1-git-send-email-mgorman@suse.de>
There is no need to calculate zone_idx(preferred_zone) multiple times
or to use the pgdat to figure it out. first_zones_zonelist() already
returns the preferred zoneref, which carries the zone index, so look it
up once in __alloc_pages_nodemask() and pass classzone_idx down to the
allocator helpers.
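
To illustrate the idea (a condensed sketch of the new flow, not a hunk
from this patch), the fast path ends up looking roughly like:

	struct zoneref *preferred_zoneref;
	struct zone *preferred_zone;
	int classzone_idx;

	/* Look up the preferred zone once; the zoneref carries its index */
	preferred_zoneref = first_zones_zonelist(zonelist, high_zoneidx,
				nodemask ? : &cpuset_current_mems_allowed,
				&preferred_zone);
	classzone_idx = zonelist_zone_idx(preferred_zoneref);

	/* classzone_idx is passed down instead of each callee redoing
	 * zone_idx(preferred_zone) */
	page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order,
				zonelist, high_zoneidx, alloc_flags,
				preferred_zone, classzone_idx, migratetype);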
Signed-off-by: Mel Gorman <mgorman@suse.de>
---
mm/page_alloc.c | 43 ++++++++++++++++++++++++-------------------
1 file changed, 24 insertions(+), 19 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 3f2a9dd..88a6dac 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1893,17 +1893,15 @@ static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
static struct page *
get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
struct zonelist *zonelist, int high_zoneidx, int alloc_flags,
- struct zone *preferred_zone, int migratetype)
+ struct zone *preferred_zone, int classzone_idx, int migratetype)
{
struct zoneref *z;
struct page *page = NULL;
- int classzone_idx;
struct zone *zone;
nodemask_t *allowednodes = NULL;/* zonelist_cache approximation */
int zlc_active = 0; /* set if using zonelist_cache */
int did_zlc_setup = 0; /* just call zlc_setup() one time */
- classzone_idx = zone_idx(preferred_zone);
zonelist_scan:
/*
* Scan zonelist, looking for a zone with enough free.
@@ -2160,7 +2158,7 @@ static inline struct page *
__alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
struct zonelist *zonelist, enum zone_type high_zoneidx,
nodemask_t *nodemask, struct zone *preferred_zone,
- int migratetype)
+ int classzone_idx, int migratetype)
{
struct page *page;
@@ -2178,7 +2176,7 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask,
order, zonelist, high_zoneidx,
ALLOC_WMARK_HIGH|ALLOC_CPUSET,
- preferred_zone, migratetype);
+ preferred_zone, classzone_idx, migratetype);
if (page)
goto out;
@@ -2213,7 +2211,7 @@ static struct page *
__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
struct zonelist *zonelist, enum zone_type high_zoneidx,
nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
- int migratetype, bool sync_migration,
+ int classzone_idx, int migratetype, bool sync_migration,
bool *contended_compaction, bool *deferred_compaction,
unsigned long *did_some_progress)
{
@@ -2241,7 +2239,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
page = get_page_from_freelist(gfp_mask, nodemask,
order, zonelist, high_zoneidx,
alloc_flags & ~ALLOC_NO_WATERMARKS,
- preferred_zone, migratetype);
+ preferred_zone, classzone_idx, migratetype);
if (page) {
preferred_zone->compact_blockskip_flush = false;
compaction_defer_reset(preferred_zone, order, true);
@@ -2314,7 +2312,7 @@ static inline struct page *
__alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
struct zonelist *zonelist, enum zone_type high_zoneidx,
nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
- int migratetype, unsigned long *did_some_progress)
+ int classzone_idx, int migratetype, unsigned long *did_some_progress)
{
struct page *page = NULL;
bool drained = false;
@@ -2332,7 +2330,8 @@ retry:
page = get_page_from_freelist(gfp_mask, nodemask, order,
zonelist, high_zoneidx,
alloc_flags & ~ALLOC_NO_WATERMARKS,
- preferred_zone, migratetype);
+ preferred_zone, classzone_idx,
+ migratetype);
/*
* If an allocation failed after direct reclaim, it could be because
@@ -2355,14 +2354,14 @@ static inline struct page *
__alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order,
struct zonelist *zonelist, enum zone_type high_zoneidx,
nodemask_t *nodemask, struct zone *preferred_zone,
- int migratetype)
+ int classzone_idx, int migratetype)
{
struct page *page;
do {
page = get_page_from_freelist(gfp_mask, nodemask, order,
zonelist, high_zoneidx, ALLOC_NO_WATERMARKS,
- preferred_zone, migratetype);
+ preferred_zone, classzone_idx, migratetype);
if (!page && gfp_mask & __GFP_NOFAIL)
wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50);
@@ -2463,7 +2462,7 @@ static inline struct page *
__alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
struct zonelist *zonelist, enum zone_type high_zoneidx,
nodemask_t *nodemask, struct zone *preferred_zone,
- int migratetype)
+ int classzone_idx, int migratetype)
{
const gfp_t wait = gfp_mask & __GFP_WAIT;
struct page *page = NULL;
@@ -2520,7 +2519,7 @@ rebalance:
/* This is the last chance, in general, before the goto nopage. */
page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist,
high_zoneidx, alloc_flags & ~ALLOC_NO_WATERMARKS,
- preferred_zone, migratetype);
+ preferred_zone, classzone_idx, migratetype);
if (page)
goto got_pg;
@@ -2535,7 +2534,7 @@ rebalance:
page = __alloc_pages_high_priority(gfp_mask, order,
zonelist, high_zoneidx, nodemask,
- preferred_zone, migratetype);
+ preferred_zone, classzone_idx, migratetype);
if (page) {
goto got_pg;
}
@@ -2568,6 +2567,7 @@ rebalance:
zonelist, high_zoneidx,
nodemask,
alloc_flags, preferred_zone,
+ classzone_idx,
migratetype, sync_migration,
&contended_compaction,
&deferred_compaction,
@@ -2591,7 +2591,8 @@ rebalance:
zonelist, high_zoneidx,
nodemask,
alloc_flags, preferred_zone,
- migratetype, &did_some_progress);
+ classzone_idx, migratetype,
+ &did_some_progress);
if (page)
goto got_pg;
@@ -2610,7 +2611,7 @@ rebalance:
page = __alloc_pages_may_oom(gfp_mask, order,
zonelist, high_zoneidx,
nodemask, preferred_zone,
- migratetype);
+ classzone_idx, migratetype);
if (page)
goto got_pg;
@@ -2653,6 +2654,7 @@ rebalance:
zonelist, high_zoneidx,
nodemask,
alloc_flags, preferred_zone,
+ classzone_idx,
migratetype, sync_migration,
&contended_compaction,
&deferred_compaction,
@@ -2680,11 +2682,13 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
{
enum zone_type high_zoneidx = gfp_zone(gfp_mask);
struct zone *preferred_zone;
+ struct zoneref *preferred_zoneref;
struct page *page = NULL;
int migratetype = allocflags_to_migratetype(gfp_mask);
unsigned int cpuset_mems_cookie;
int alloc_flags = ALLOC_WMARK_LOW|ALLOC_CPUSET|ALLOC_FAIR;
struct mem_cgroup *memcg = NULL;
+ int classzone_idx;
gfp_mask &= gfp_allowed_mask;
@@ -2714,11 +2718,12 @@ retry_cpuset:
cpuset_mems_cookie = read_mems_allowed_begin();
/* The preferred zone is used for statistics later */
- first_zones_zonelist(zonelist, high_zoneidx,
+ preferred_zoneref = first_zones_zonelist(zonelist, high_zoneidx,
nodemask ? : &cpuset_current_mems_allowed,
&preferred_zone);
if (!preferred_zone)
goto out;
+ classzone_idx = zonelist_zone_idx(preferred_zoneref);
#ifdef CONFIG_CMA
if (allocflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
@@ -2728,7 +2733,7 @@ retry:
/* First allocation attempt */
page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order,
zonelist, high_zoneidx, alloc_flags,
- preferred_zone, migratetype);
+ preferred_zone, classzone_idx, migratetype);
if (unlikely(!page)) {
/*
* The first pass makes sure allocations are spread
@@ -2754,7 +2759,7 @@ retry:
gfp_mask = memalloc_noio_flags(gfp_mask);
page = __alloc_pages_slowpath(gfp_mask, order,
zonelist, high_zoneidx, nodemask,
- preferred_zone, migratetype);
+ preferred_zone, classzone_idx, migratetype);
}
trace_mm_page_alloc(page, order, gfp_mask, migratetype);
--
1.8.4.5