From: Hirokazu Takahashi <taka@valinux.co.jp>
To: linux-kernel@vger.kernel.org, lhms-devel@lists.sourceforge.net
Cc: linux-mm@kvack.org
Subject: [PATCH] memory hotremoval for linux-2.6.7 [11/16]
Date: Wed, 14 Jul 2004 23:05:43 +0900 (JST) [thread overview]
Message-ID: <20040714.230543.13595439.taka@valinux.co.jp> (raw)
In-Reply-To: <20040714.224138.95803956.taka@valinux.co.jp>
--- linux-2.6.7.ORG/include/linux/hugetlb.h Mon Jul 5 14:05:39 2032
+++ linux-2.6.7/include/linux/hugetlb.h Mon Jul 5 14:06:19 2032
@@ -27,6 +27,7 @@ struct page *follow_huge_pmd(struct mm_s
pmd_t *pmd, int write);
extern int hugetlb_fault(struct mm_struct *, struct vm_area_struct *,
int, unsigned long);
+int try_to_unmap_hugepage(struct page *page, struct vm_area_struct *vma, struct list_head *force);
int is_aligned_hugepage_range(unsigned long addr, unsigned long len);
int pmd_huge(pmd_t pmd);
struct page *alloc_huge_page(void);
@@ -84,6 +85,7 @@ static inline unsigned long hugetlb_tota
#define alloc_huge_page() ({ NULL; })
#define free_huge_page(p) ({ (void)(p); BUG(); })
#define hugetlb_fault(mm, vma, write, addr) 0
+#define try_to_unmap_hugepage(page, vma, force) 0
#ifndef HPAGE_MASK
#define HPAGE_MASK 0 /* Keep the compiler happy */
--- linux-2.6.7.ORG/mm/rmap.c Mon Jul 5 14:01:22 2032
+++ linux-2.6.7/mm/rmap.c Mon Jul 5 14:06:19 2032
@@ -27,6 +27,7 @@
* on the mm->page_table_lock
*/
#include <linux/mm.h>
+#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
@@ -441,6 +442,13 @@ static int try_to_unmap_one(struct page
address = vma_address(page, vma);
if (address == -EFAULT)
goto out;
+
+ /*
+ * Is there any better way to check whether the page is
+ * HugePage or not?
+ */
+ if (vma && is_vm_hugetlb_page(vma))
+ return try_to_unmap_hugepage(page, vma, force);
/*
* We need the page_table_lock to protect us from page faults,
--- linux-2.6.7.ORG/arch/i386/mm/hugetlbpage.c Mon Jul 5 14:05:39 2032
+++ linux-2.6.7/arch/i386/mm/hugetlbpage.c Mon Jul 5 14:06:19 2032
@@ -10,6 +10,7 @@
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
+#include <linux/rmap.h>
#include <linux/smp_lock.h>
#include <linux/slab.h>
#include <linux/err.h>
@@ -83,6 +84,7 @@ int copy_hugetlb_page_range(struct mm_st
if (!pte_none(entry)) {
ptepage = pte_page(entry);
get_page(ptepage);
+ page_dup_rmap(ptepage);
dst->rss += (HPAGE_SIZE / PAGE_SIZE);
}
set_pte(dst_pte, entry);
@@ -234,6 +236,7 @@ void unmap_hugepage_range(struct vm_area
if (pte_none(pte))
continue;
page = pte_page(pte);
+ page_remove_rmap(page);
put_page(page);
mm->rss -= (HPAGE_SIZE / PAGE_SIZE);
}
@@ -288,6 +291,7 @@ again:
spin_lock(&mm->page_table_lock);
if (pte_none(*pte)) {
set_huge_pte(mm, vma, page, pte, vma->vm_flags & VM_WRITE);
+ page_add_file_rmap(page);
flush_tlb_page(vma, address);
update_mmu_cache(vma, address, *pte);
} else {
@@ -332,3 +336,87 @@ int hugetlb_prefault(struct address_spac
#endif
return ret;
}
+
+/*
+ * At what user virtual address is page expected in vma?
+ */
+static inline unsigned long
+huge_vma_address(struct page *page, struct vm_area_struct *vma)
+{
+ pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+ unsigned long address;
+
+ address = vma->vm_start + ((pgoff - vma->vm_pgoff) << HPAGE_SHIFT);
+ if (unlikely(address < vma->vm_start || address >= vma->vm_end)) {
+ /* page should be within any vma from prio_tree_next */
+ BUG_ON(!PageAnon(page));
+ return -EFAULT;
+ }
+ return address;
+}
+
+/*
+ * Try to clear the PTE which maps the hugepage.
+ */
+int try_to_unmap_hugepage(struct page *page, struct vm_area_struct *vma,
+ struct list_head *force)
+{
+ pte_t *pte;
+ pte_t pteval;
+ int ret = SWAP_AGAIN;
+ struct mm_struct *mm = vma->vm_mm;
+ unsigned long address;
+
+ address = huge_vma_address(page, vma);
+ if (address == -EFAULT)
+ goto out;
+
+ /*
+ * We need the page_table_lock to protect us from page faults,
+ * munmap, fork, etc...
+ */
+ if (!spin_trylock(&mm->page_table_lock))
+ goto out;
+
+ pte = huge_pte_offset(mm, address);
+ if (!pte || pte_none(*pte))
+ goto out_unlock;
+ if (!pte_present(*pte))
+ goto out_unlock;
+
+ if (page_to_pfn(page) != pte_pfn(*pte))
+ goto out_unlock;
+
+ BUG_ON(!vma);
+
+#if 0
+ if ((vma->vm_flags & (VM_LOCKED|VM_RESERVED)) ||
+ ptep_test_and_clear_young(pte)) {
+ ret = SWAP_FAIL;
+ goto out_unlock;
+ }
+#endif
+
+ /* Nuke the page table entry. */
+ flush_cache_page(vma, address);
+ pteval = ptep_get_and_clear(pte);
+ flush_tlb_range(vma, address, address + HPAGE_SIZE);
+
+ /* Move the dirty bit to the physical page now the pte is gone. */
+ if (pte_dirty(pteval))
+ set_page_dirty(page);
+
+ BUG_ON(PageAnon(page));
+
+ mm->rss -= (HPAGE_SIZE / PAGE_SIZE);
+ BUG_ON(!page->mapcount);
+ page->mapcount--;
+ page_cache_release(page);
+
+out_unlock:
+ spin_unlock(&mm->page_table_lock);
+
+out:
+ return ret;
+}
+
--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org. For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href="mailto:aart@kvack.org">aart@kvack.org</a>
next prev parent reply other threads:[~2004-07-14 14:05 UTC|newest]
Thread overview: 17+ messages / expand[flat|nested] mbox.gz Atom feed top
2004-07-14 13:41 [PATCH] memory hotremoval for linux-2.6.7 [0/16] Hirokazu Takahashi
2004-07-14 14:02 ` [PATCH] memory hotremoval for linux-2.6.7 [1/16] Hirokazu Takahashi
2004-07-14 14:02 ` [PATCH] memory hotremoval for linux-2.6.7 [2/16] Hirokazu Takahashi
2004-07-14 14:03 ` [PATCH] memory hotremoval for linux-2.6.7 [3/16] Hirokazu Takahashi
2004-07-14 14:03 ` [PATCH] memory hotremoval for linux-2.6.7 [4/16] Hirokazu Takahashi
2004-07-14 14:03 ` [PATCH] memory hotremoval for linux-2.6.7 [5/16] Hirokazu Takahashi
2004-07-14 14:04 ` [PATCH] memory hotremoval for linux-2.6.7 [6/16] Hirokazu Takahashi
2004-07-14 14:04 ` [PATCH] memory hotremoval for linux-2.6.7 [7/16] Hirokazu Takahashi
2004-07-14 14:04 ` [PATCH] memory hotremoval for linux-2.6.7 [8/16] Hirokazu Takahashi
2004-07-14 14:05 ` [PATCH] memory hotremoval for linux-2.6.7 [9/16] Hirokazu Takahashi
2004-07-14 14:05 ` [PATCH] memory hotremoval for linux-2.6.7 [10/16] Hirokazu Takahashi
2004-07-14 14:05 ` Hirokazu Takahashi [this message]
2004-07-14 14:05 ` [BUG][PATCH] memory hotremoval for linux-2.6.7 [12/16] Hirokazu Takahashi
2004-07-14 14:06 ` [PATCH] memory hotremoval for linux-2.6.7 [13/16] Hirokazu Takahashi
2004-07-14 14:06 ` [PATCH] memory hotremoval for linux-2.6.7 [14/16] Hirokazu Takahashi
2004-07-14 14:06 ` [BUG] [PATCH] memory hotremoval for linux-2.6.7 [15/16] Hirokazu Takahashi
2004-07-14 14:06 ` [PATCH] memory hotremoval for linux-2.6.7 [16/16] Hirokazu Takahashi
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20040714.230543.13595439.taka@valinux.co.jp \
--to=taka@valinux.co.jp \
--cc=lhms-devel@lists.sourceforge.net \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-mm@kvack.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox