From: Heesub Shin <heesub.shin@samsung.com>
To: Andrew Morton <akpm@linux-foundation.org>,
Seth Jennings <sjennings@variantweb.net>
Cc: Nitin Gupta <ngupta@vflare.org>,
Dan Streetman <ddstreet@ieee.org>,
linux-mm@kvack.org, linux-kernel@vger.kernel.org,
Sunae Seo <sunae.seo@samsung.com>,
Heesub Shin <heesub.shin@samsung.com>
Subject: [RFC PATCH 6/9] mm/zbud: remove list_head for buddied list from zbud_header
Date: Tue, 14 Oct 2014 20:59:25 +0900
Message-ID: <1413287968-13940-7-git-send-email-heesub.shin@samsung.com>
In-Reply-To: <1413287968-13940-1-git-send-email-heesub.shin@samsung.com>
The zbud allocator links _unbuddied_ zbud pages into the unbuddied
lists in the pool. On allocation, these lists are searched first for
the best possible fit. To build the lists, the current implementation
embeds a list_head in the zbud_header structure.

This patch instead overlays the list node on the second double word of
struct page, so the list_head can be removed from zbud_header. Reusing
the _index and _mapcount fields (and _count as well on 64-bit machines)
of struct page for list management looks a bit odd, but there is no
better option for now, given that page->lru is already in use.
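
For illustration only (not part of the patch), here is a minimal sketch
of the idea, assuming the 3.x-era struct page layout where ->index
starts the second double word; the demo_* helper names are made up:

#include <linux/list.h>
#include <linux/mm_types.h>

/* Treat the second double word of struct page as a list node. */
static inline struct list_head *zbud_page_list(struct page *page)
{
	return (struct list_head *)&page->index;
}

/* Link a zbud page into an unbuddied list without a zbud_header. */
static void demo_add_unbuddied(struct list_head *unbuddied,
			       struct page *page)
{
	list_add(zbud_page_list(page), unbuddied);
}

/* Take the first page back off such a list. */
static struct page *demo_pop_unbuddied(struct list_head *unbuddied)
{
	struct page *page;

	if (list_empty(unbuddied))
		return NULL;

	/* container_of back from the embedded list node to the page */
	page = list_entry((unsigned long *)unbuddied->next,
			  struct page, index);
	list_del(zbud_page_list(page));
	return page;
}

Since _mapcount (and _count) are clobbered while the page sits on such
a list, they must be restored with page_mapcount_reset() and
init_page_count() before the page goes back to the page allocator, as
done in free_zbud_page() below.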
Signed-off-by: Heesub Shin <heesub.shin@samsung.com>
---
mm/zbud.c | 36 +++++++++++++++++++-----------------
1 file changed, 19 insertions(+), 17 deletions(-)
diff --git a/mm/zbud.c b/mm/zbud.c
index 383bab0..8a6dd6b 100644
--- a/mm/zbud.c
+++ b/mm/zbud.c
@@ -99,10 +99,8 @@ struct zbud_pool {
/*
* struct zbud_header - zbud page metadata occupying the first chunk of each
* zbud page.
- * @buddy: links the zbud page into the unbuddied lists in the pool
*/
struct zbud_header {
- struct list_head buddy;
bool under_reclaim;
};
@@ -223,21 +221,24 @@ static size_t get_num_chunks(struct page *page, enum buddy bud)
for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)
/* Initializes the zbud header of a newly allocated zbud page */
-static struct zbud_header *init_zbud_page(struct page *page)
+static void init_zbud_page(struct page *page)
{
struct zbud_header *zhdr = page_address(page);
set_num_chunks(page, FIRST, 0);
set_num_chunks(page, LAST, 0);
- INIT_LIST_HEAD(&zhdr->buddy);
+ INIT_LIST_HEAD((struct list_head *) &page->index);
INIT_LIST_HEAD(&page->lru);
zhdr->under_reclaim = 0;
- return zhdr;
}
/* Resets the struct page fields and frees the page */
static void free_zbud_page(struct zbud_header *zhdr)
{
- __free_page(virt_to_page(zhdr));
+ struct page *page = virt_to_page(zhdr);
+
+ init_page_count(page);
+ page_mapcount_reset(page);
+ __free_page(page);
}
static int is_last_chunk(unsigned long handle)
@@ -341,7 +342,6 @@ int zbud_alloc(struct zbud_pool *pool, size_t size, gfp_t gfp,
unsigned long *handle)
{
int chunks, i, freechunks;
- struct zbud_header *zhdr = NULL;
enum buddy bud;
struct page *page;
@@ -355,10 +355,9 @@ int zbud_alloc(struct zbud_pool *pool, size_t size, gfp_t gfp,
/* First, try to find an unbuddied zbud page. */
for_each_unbuddied_list(i, chunks) {
if (!list_empty(&pool->unbuddied[i])) {
- zhdr = list_first_entry(&pool->unbuddied[i],
- struct zbud_header, buddy);
- page = virt_to_page(zhdr);
- list_del(&zhdr->buddy);
+ page = list_entry((unsigned long *)
+ pool->unbuddied[i].next, struct page, index);
+ list_del((struct list_head *) &page->index);
goto found;
}
}
@@ -370,7 +369,7 @@ int zbud_alloc(struct zbud_pool *pool, size_t size, gfp_t gfp,
return -ENOMEM;
spin_lock(&pool->lock);
pool->pages_nr++;
- zhdr = init_zbud_page(page);
+ init_zbud_page(page);
found:
if (get_num_chunks(page, FIRST) == 0)
@@ -384,7 +383,8 @@ found:
get_num_chunks(page, LAST) == 0) {
/* Add to unbuddied list */
freechunks = num_free_chunks(page);
- list_add(&zhdr->buddy, &pool->unbuddied[freechunks]);
+ list_add((struct list_head *) &page->index,
+ &pool->unbuddied[freechunks]);
}
/* Add/move zbud page to beginning of LRU */
@@ -433,14 +433,15 @@ void zbud_free(struct zbud_pool *pool, unsigned long handle)
freechunks = num_free_chunks(page);
if (freechunks == NCHUNKS) {
/* Remove from existing unbuddied list */
- list_del(&zhdr->buddy);
+ list_del((struct list_head *) &page->index);
/* zbud page is empty, free */
list_del(&page->lru);
free_zbud_page(zhdr);
pool->pages_nr--;
} else {
/* Add to unbuddied list */
- list_add(&zhdr->buddy, &pool->unbuddied[freechunks]);
+ list_add((struct list_head *) &page->index,
+ &pool->unbuddied[freechunks]);
}
spin_unlock(&pool->lock);
@@ -501,7 +502,7 @@ int zbud_reclaim_page(struct zbud_pool *pool, unsigned int retries)
page = list_tail_entry(&pool->lru, struct page, lru);
zhdr = page_address(page);
list_del(&page->lru);
- list_del(&zhdr->buddy);
+ list_del((struct list_head *) &page->index);
/* Protect zbud page against free */
zhdr->under_reclaim = true;
/*
@@ -543,7 +544,8 @@ next:
} else if (get_num_chunks(page, FIRST) == 0 ||
get_num_chunks(page, LAST) == 0) {
/* add to unbuddied list */
- list_add(&zhdr->buddy, &pool->unbuddied[freechunks]);
+ list_add((struct list_head *) &page->index,
+ &pool->unbuddied[freechunks]);
}
/* add to beginning of LRU */
--
1.9.1