From: Marek Szyprowski <m.szyprowski@samsung.com>
To: linux-kernel@vger.kernel.org,
linux-arm-kernel@lists.infradead.org,
linux-samsung-soc@vger.kernel.org, linux-media@vger.kernel.org,
linux-mm@kvack.org
Cc: Michal Nazarewicz <mina86@mina86.com>,
Marek Szyprowski <m.szyprowski@samsung.com>,
Kyungmin Park <kyungmin.park@samsung.com>,
Andrew Morton <akpm@linux-foundation.org>,
KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>,
Ankita Garg <ankita@in.ibm.com>,
Daniel Walker <dwalker@codeaurora.org>,
Johan MOSSBERG <johan.xx.mossberg@stericsson.com>,
Mel Gorman <mel@csn.ul.ie>, Pawel Osciak <pawel@osciak.com>
Subject: [PATCH 03/12] mm: move some functions from memory_hotplug.c to page_isolation.c
Date: Thu, 31 Mar 2011 15:15:59 +0200
Message-ID: <1301577368-16095-4-git-send-email-m.szyprowski@samsung.com>
In-Reply-To: <1301577368-16095-1-git-send-email-m.szyprowski@samsung.com>
From: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Memory hotplug is logic for making pages unused in a specified range of
pfns. Some of its core logic can therefore be reused for other purposes,
such as allocating a very large contiguous memory block.

This patch moves some functions from mm/memory_hotplug.c to
mm/page_isolation.c, which makes it possible to later add a
large-allocation function to page_isolation.c based on the memory-unplug
technique. (A rough usage sketch follows the '---' separator below.)
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
[m.nazarewicz: reworded commit message]
Signed-off-by: Michal Nazarewicz <m.nazarewicz@samsung.com>
Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
[m.szyprowski: rebase and updated to v2.6.39-rc1 changes]
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
CC: Michal Nazarewicz <mina86@mina86.com>
---
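Note (not part of the commit message): the sketch below is a rough,
hypothetical illustration of how a large-allocation path could drive the
three functions this patch exports, loosely following what offline_pages()
does in mm/memory_hotplug.c. evacuate_range() does not exist anywhere;
start_isolate_page_range() and undo_isolate_page_range() are the existing
entry points from <linux/page-isolation.h>, and they expect
pageblock-aligned boundaries.

#include <linux/mm.h>
#include <linux/page-isolation.h>

/* Hypothetical helper: migrate all LRU pages out of [start_pfn, end_pfn). */
static int evacuate_range(unsigned long start_pfn, unsigned long end_pfn)
{
        unsigned long pfn;
        int ret;

        /* The moved helpers assume the whole range lies within one zone. */
        if (!test_pages_in_a_zone(start_pfn, end_pfn))
                return -EINVAL;

        /* Keep the page allocator from handing out pages in the range. */
        ret = start_isolate_page_range(start_pfn, end_pfn);
        if (ret)
                return ret;

        /*
         * do_migrate_range() moves at most NR_OFFLINE_AT_ONCE_PAGES pages
         * per call, so rescan until no LRU page is left in the range.
         */
        for (pfn = scan_lru_pages(start_pfn, end_pfn);
             pfn && pfn < end_pfn;
             pfn = scan_lru_pages(pfn, end_pfn)) {
                ret = do_migrate_range(pfn, end_pfn);
                if (ret)
                        break;  /* a real caller would retry here */
        }

        if (ret)
                undo_isolate_page_range(start_pfn, end_pfn);
        /* On success the range stays isolated for the caller to claim. */
        return ret;
}

A real allocator would additionally have to drain per-cpu lists and pull
the now-free pages off the buddy free lists; that is left to the follow-up
patches in this series.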
 include/linux/page-isolation.h |    7 +++
 mm/memory_hotplug.c            |  109 ---------------------------------------
 mm/page_isolation.c            |  111 ++++++++++++++++++++++++++++++++++++++++
 3 files changed, 118 insertions(+), 109 deletions(-)
diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h
index 051c1b1..58cdbac 100644
--- a/include/linux/page-isolation.h
+++ b/include/linux/page-isolation.h
@@ -33,5 +33,12 @@ test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn);
extern int set_migratetype_isolate(struct page *page);
extern void unset_migratetype_isolate(struct page *page);
+/*
+ * Helpers for migrating pages out of a pfn range.
+ */
+
+int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn);
+unsigned long scan_lru_pages(unsigned long start, unsigned long end);
+int do_migrate_range(unsigned long start_pfn, unsigned long end_pfn);
#endif
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 321fc74..746f6ed 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -640,115 +640,6 @@ int is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages)
}
/*
- * Confirm all pages in a range [start, end) is belongs to the same zone.
- */
-static int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn)
-{
- unsigned long pfn;
- struct zone *zone = NULL;
- struct page *page;
- int i;
- for (pfn = start_pfn;
- pfn < end_pfn;
- pfn += MAX_ORDER_NR_PAGES) {
- i = 0;
- /* This is just a CONFIG_HOLES_IN_ZONE check.*/
- while ((i < MAX_ORDER_NR_PAGES) && !pfn_valid_within(pfn + i))
- i++;
- if (i == MAX_ORDER_NR_PAGES)
- continue;
- page = pfn_to_page(pfn + i);
- if (zone && page_zone(page) != zone)
- return 0;
- zone = page_zone(page);
- }
- return 1;
-}
-
-/*
- * Scanning pfn is much easier than scanning lru list.
- * Scan pfn from start to end and Find LRU page.
- */
-static unsigned long scan_lru_pages(unsigned long start, unsigned long end)
-{
- unsigned long pfn;
- struct page *page;
- for (pfn = start; pfn < end; pfn++) {
- if (pfn_valid(pfn)) {
- page = pfn_to_page(pfn);
- if (PageLRU(page))
- return pfn;
- }
- }
- return 0;
-}
-
-static struct page *
-hotremove_migrate_alloc(struct page *page, unsigned long private, int **x)
-{
- /* This should be improooooved!! */
- return alloc_page(GFP_HIGHUSER_MOVABLE);
-}
-
-#define NR_OFFLINE_AT_ONCE_PAGES (256)
-static int
-do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
-{
- unsigned long pfn;
- struct page *page;
- int move_pages = NR_OFFLINE_AT_ONCE_PAGES;
- int not_managed = 0;
- int ret = 0;
- LIST_HEAD(source);
-
- for (pfn = start_pfn; pfn < end_pfn && move_pages > 0; pfn++) {
- if (!pfn_valid(pfn))
- continue;
- page = pfn_to_page(pfn);
- if (!page_count(page))
- continue;
- /*
- * We can skip free pages. And we can only deal with pages on
- * LRU.
- */
- ret = isolate_lru_page(page);
- if (!ret) { /* Success */
- list_add_tail(&page->lru, &source);
- move_pages--;
- inc_zone_page_state(page, NR_ISOLATED_ANON +
- page_is_file_cache(page));
-
- } else {
-#ifdef CONFIG_DEBUG_VM
- printk(KERN_ALERT "removing pfn %lx from LRU failed\n",
- pfn);
- dump_page(page);
-#endif
- /* Becasue we don't have big zone->lock. we should
- check this again here. */
- if (page_count(page)) {
- not_managed++;
- ret = -EBUSY;
- break;
- }
- }
- }
- if (!list_empty(&source)) {
- if (not_managed) {
- putback_lru_pages(&source);
- goto out;
- }
- /* this function returns # of failed pages */
- ret = migrate_pages(&source, hotremove_migrate_alloc, 0,
- true, true);
- if (ret)
- putback_lru_pages(&source);
- }
-out:
- return ret;
-}
-
-/*
* remove from free_area[] and mark all as Reserved.
*/
static int
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index 4ae42bb..8a3122c 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -5,6 +5,9 @@
#include <linux/mm.h>
#include <linux/page-isolation.h>
#include <linux/pageblock-flags.h>
+#include <linux/memcontrol.h>
+#include <linux/migrate.h>
+#include <linux/mm_inline.h>
#include "internal.h"
static inline struct page *
@@ -139,3 +142,111 @@ int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn)
spin_unlock_irqrestore(&zone->lock, flags);
return ret ? 0 : -EBUSY;
}
+
+
+/*
+ * Confirm all pages in a range [start, end) belong to the same zone.
+ */
+int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn)
+{
+ unsigned long pfn;
+ struct zone *zone = NULL;
+ struct page *page;
+ int i;
+ for (pfn = start_pfn;
+ pfn < end_pfn;
+ pfn += MAX_ORDER_NR_PAGES) {
+ i = 0;
+ /* This is just a CONFIG_HOLES_IN_ZONE check. */
+ while ((i < MAX_ORDER_NR_PAGES) && !pfn_valid_within(pfn + i))
+ i++;
+ if (i == MAX_ORDER_NR_PAGES)
+ continue;
+ page = pfn_to_page(pfn + i);
+ if (zone && page_zone(page) != zone)
+ return 0;
+ zone = page_zone(page);
+ }
+ return 1;
+}
+
+/*
+ * Scanning pfns is much easier than scanning the LRU list.
+ * Scan pfns from start to end and return the first LRU page found.
+ */
+unsigned long scan_lru_pages(unsigned long start, unsigned long end)
+{
+ unsigned long pfn;
+ struct page *page;
+ for (pfn = start; pfn < end; pfn++) {
+ if (pfn_valid(pfn)) {
+ page = pfn_to_page(pfn);
+ if (PageLRU(page))
+ return pfn;
+ }
+ }
+ return 0;
+}
+
+static struct page *
+hotremove_migrate_alloc(struct page *page, unsigned long private, int **x)
+{
+ /* This should be improooooved!! */
+ return alloc_page(GFP_HIGHUSER_MOVABLE);
+}
+
+#define NR_OFFLINE_AT_ONCE_PAGES (256)
+int do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
+{
+ unsigned long pfn;
+ struct page *page;
+ int move_pages = NR_OFFLINE_AT_ONCE_PAGES;
+ int not_managed = 0;
+ int ret = 0;
+ LIST_HEAD(source);
+
+ for (pfn = start_pfn; pfn < end_pfn && move_pages > 0; pfn++) {
+ if (!pfn_valid(pfn))
+ continue;
+ page = pfn_to_page(pfn);
+ if (!page_count(page))
+ continue;
+ /*
+ * We can skip free pages. And we can only deal with pages on
+ * LRU.
+ */
+ ret = isolate_lru_page(page);
+ if (!ret) { /* Success */
+ list_add_tail(&page->lru, &source);
+ move_pages--;
+ inc_zone_page_state(page, NR_ISOLATED_ANON +
+ page_is_file_cache(page));
+
+ } else {
+#ifdef CONFIG_DEBUG_VM
+ printk(KERN_ALERT "removing pfn %lx from LRU failed\n",
+ pfn);
+ dump_page(page);
+#endif
+ /* Because we don't have the big zone->lock, we should
+ check this again here. */
+ if (page_count(page)) {
+ not_managed++;
+ ret = -EBUSY;
+ break;
+ }
+ }
+ }
+ if (!list_empty(&source)) {
+ if (not_managed) {
+ putback_lru_pages(&source);
+ goto out;
+ }
+ /* this function returns # of failed pages */
+ ret = migrate_pages(&source, hotremove_migrate_alloc, 0, true, true);
+ if (ret)
+ putback_lru_pages(&source);
+ }
+out:
+ return ret;
+}
--
1.7.1.569.g6f426