* Re: [PATCH 23/31] mm, vmscan: Avoid passing in classzone_idx unnecessarily to shrink_node
[not found] <00e301d1d68b$121ffee0$365ffca0$@alibaba-inc.com>
@ 2016-07-05 7:04 ` Hillf Danton
0 siblings, 0 replies; 3+ messages in thread
From: Hillf Danton @ 2016-07-05 7:04 UTC (permalink / raw)
To: Mel Gorman; +Cc: linux-kernel, linux-mm, Andrew Morton
>
> shrink_node receives all information it needs about classzone_idx
> from sc->reclaim_idx so remove the aliases.
>
> Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
> ---
Acked-by: Hillf Danton <hillf.zj@alibaba-inc.com>
> mm/vmscan.c | 20 +++++++++-----------
> 1 file changed, 9 insertions(+), 11 deletions(-)
>
--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org. For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href="mailto:dont@kvack.org"> dont@kvack.org </a>
^ permalink raw reply [flat|nested] 3+ messages in thread
* [PATCH 23/31] mm, vmscan: Avoid passing in classzone_idx unnecessarily to shrink_node
2016-07-01 20:01 [PATCH 00/31] Move LRU page reclaim from zones to nodes v8 Mel Gorman
@ 2016-07-01 20:01 ` Mel Gorman
0 siblings, 0 replies; 3+ messages in thread
From: Mel Gorman @ 2016-07-01 20:01 UTC (permalink / raw)
To: Andrew Morton, Linux-MM
Cc: Rik van Riel, Vlastimil Babka, Johannes Weiner, LKML, Mel Gorman
shrink_node receives all information it needs about classzone_idx
from sc->reclaim_idx so remove the aliases.
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
---
mm/vmscan.c | 20 +++++++++-----------
1 file changed, 9 insertions(+), 11 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index a6b30fe1de89..6534fbe1b96f 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2426,8 +2426,7 @@ static inline bool should_continue_reclaim(struct pglist_data *pgdat,
return true;
}
-static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc,
- enum zone_type classzone_idx)
+static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
{
struct reclaim_state *reclaim_state = current->reclaim_state;
unsigned long nr_reclaimed, nr_scanned;
@@ -2653,7 +2652,7 @@ static void shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
/* need some check for avoid more shrink_zone() */
}
- shrink_node(zone->zone_pgdat, sc, classzone_idx);
+ shrink_node(zone->zone_pgdat, sc);
}
/*
@@ -3077,7 +3076,6 @@ static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, long remaining,
* This is used to determine if the scanning priority needs to be raised.
*/
static bool kswapd_shrink_node(pg_data_t *pgdat,
- int classzone_idx,
struct scan_control *sc)
{
struct zone *zone;
@@ -3085,7 +3083,7 @@ static bool kswapd_shrink_node(pg_data_t *pgdat,
/* Reclaim a number of pages proportional to the number of zones */
sc->nr_to_reclaim = 0;
- for (z = 0; z <= classzone_idx; z++) {
+ for (z = 0; z <= sc->reclaim_idx; z++) {
zone = pgdat->node_zones + z;
if (!populated_zone(zone))
continue;
@@ -3097,7 +3095,7 @@ static bool kswapd_shrink_node(pg_data_t *pgdat,
* Historically care was taken to put equal pressure on all zones but
* now pressure is applied based on node LRU order.
*/
- shrink_node(pgdat, sc, classzone_idx);
+ shrink_node(pgdat, sc);
/*
* Fragmentation may mean that the system cannot be rebalanced for
@@ -3159,7 +3157,7 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
if (!populated_zone(zone))
continue;
- classzone_idx = i;
+ sc.reclaim_idx = i;
break;
}
}
@@ -3169,12 +3167,12 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
* high to low zone to avoid prematurely clearing pgdat
* congested state.
*/
- for (i = classzone_idx; i >= 0; i--) {
+ for (i = sc.reclaim_idx; i >= 0; i--) {
zone = pgdat->node_zones + i;
if (!populated_zone(zone))
continue;
- if (zone_balanced(zone, sc.order, classzone_idx))
+ if (zone_balanced(zone, sc.order, sc.reclaim_idx))
goto out;
}
@@ -3205,7 +3203,7 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
* enough pages are already being scanned that that high
* watermark would be met at 100% efficiency.
*/
- if (kswapd_shrink_node(pgdat, classzone_idx, &sc))
+ if (kswapd_shrink_node(pgdat, &sc))
raise_priority = false;
/*
@@ -3677,7 +3675,7 @@ static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned in
* priorities until we have enough memory freed.
*/
do {
- shrink_node(pgdat, &sc, classzone_idx);
+ shrink_node(pgdat, &sc);
} while (sc.nr_reclaimed < nr_pages && --sc.priority >= 0);
}
--
2.6.4
--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org. For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href="mailto:dont@kvack.org"> dont@kvack.org </a>
^ permalink raw reply [flat|nested] 3+ messages in thread
* [PATCH 23/31] mm, vmscan: Avoid passing in classzone_idx unnecessarily to shrink_node
2016-07-01 15:37 [PATCH 00/31] Move LRU page reclaim from zones to nodes v8 Mel Gorman
@ 2016-07-01 15:37 ` Mel Gorman
0 siblings, 0 replies; 3+ messages in thread
From: Mel Gorman @ 2016-07-01 15:37 UTC (permalink / raw)
To: Andrew Morton, Linux-MM
Cc: Rik van Riel, Vlastimil Babka, Johannes Weiner, LKML, Mel Gorman
shrink_node receives all information it needs about classzone_idx
from sc->reclaim_idx so remove the aliases.
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
---
mm/vmscan.c | 20 +++++++++-----------
1 file changed, 9 insertions(+), 11 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index a6b30fe1de89..6534fbe1b96f 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2426,8 +2426,7 @@ static inline bool should_continue_reclaim(struct pglist_data *pgdat,
return true;
}
-static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc,
- enum zone_type classzone_idx)
+static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
{
struct reclaim_state *reclaim_state = current->reclaim_state;
unsigned long nr_reclaimed, nr_scanned;
@@ -2653,7 +2652,7 @@ static void shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
/* need some check for avoid more shrink_zone() */
}
- shrink_node(zone->zone_pgdat, sc, classzone_idx);
+ shrink_node(zone->zone_pgdat, sc);
}
/*
@@ -3077,7 +3076,6 @@ static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, long remaining,
* This is used to determine if the scanning priority needs to be raised.
*/
static bool kswapd_shrink_node(pg_data_t *pgdat,
- int classzone_idx,
struct scan_control *sc)
{
struct zone *zone;
@@ -3085,7 +3083,7 @@ static bool kswapd_shrink_node(pg_data_t *pgdat,
/* Reclaim a number of pages proportional to the number of zones */
sc->nr_to_reclaim = 0;
- for (z = 0; z <= classzone_idx; z++) {
+ for (z = 0; z <= sc->reclaim_idx; z++) {
zone = pgdat->node_zones + z;
if (!populated_zone(zone))
continue;
@@ -3097,7 +3095,7 @@ static bool kswapd_shrink_node(pg_data_t *pgdat,
* Historically care was taken to put equal pressure on all zones but
* now pressure is applied based on node LRU order.
*/
- shrink_node(pgdat, sc, classzone_idx);
+ shrink_node(pgdat, sc);
/*
* Fragmentation may mean that the system cannot be rebalanced for
@@ -3159,7 +3157,7 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
if (!populated_zone(zone))
continue;
- classzone_idx = i;
+ sc.reclaim_idx = i;
break;
}
}
@@ -3169,12 +3167,12 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
* high to low zone to avoid prematurely clearing pgdat
* congested state.
*/
- for (i = classzone_idx; i >= 0; i--) {
+ for (i = sc.reclaim_idx; i >= 0; i--) {
zone = pgdat->node_zones + i;
if (!populated_zone(zone))
continue;
- if (zone_balanced(zone, sc.order, classzone_idx))
+ if (zone_balanced(zone, sc.order, sc.reclaim_idx))
goto out;
}
@@ -3205,7 +3203,7 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
* enough pages are already being scanned that that high
* watermark would be met at 100% efficiency.
*/
- if (kswapd_shrink_node(pgdat, classzone_idx, &sc))
+ if (kswapd_shrink_node(pgdat, &sc))
raise_priority = false;
/*
@@ -3677,7 +3675,7 @@ static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned in
* priorities until we have enough memory freed.
*/
do {
- shrink_node(pgdat, &sc, classzone_idx);
+ shrink_node(pgdat, &sc);
} while (sc.nr_reclaimed < nr_pages && --sc.priority >= 0);
}
--
2.6.4
--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org. For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href="mailto:dont@kvack.org"> dont@kvack.org </a>
^ permalink raw reply [flat|nested] 3+ messages in thread
end of thread, other threads:[~2016-07-05 7:05 UTC | newest]
Thread overview: 3+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
[not found] <00e301d1d68b$121ffee0$365ffca0$@alibaba-inc.com>
2016-07-05 7:04 ` [PATCH 23/31] mm, vmscan: Avoid passing in classzone_idx unnecessarily to shrink_node Hillf Danton
2016-07-01 20:01 [PATCH 00/31] Move LRU page reclaim from zones to nodes v8 Mel Gorman
2016-07-01 20:01 ` [PATCH 23/31] mm, vmscan: Avoid passing in classzone_idx unnecessarily to shrink_node Mel Gorman
-- strict thread matches above, loose matches on Subject: below --
2016-07-01 15:37 [PATCH 00/31] Move LRU page reclaim from zones to nodes v8 Mel Gorman
2016-07-01 15:37 ` [PATCH 23/31] mm, vmscan: Avoid passing in classzone_idx unnecessarily to shrink_node Mel Gorman
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox