From: Rohit Seth <rohitseth@google.com>
To: Andrew Morton <akpm@osdl.org>
Cc: Linux-mm@kvack.org, Linux-kernel@vger.kernel.org
Subject: [PATCH]: Adding a counter in vma to indicate the number of physical pages backing it
Date: Fri, 09 Jun 2006 18:33:55 -0700
Message-ID: <1149903235.31417.84.camel@galaxy.corp.google.com>
Below is a patch that adds to each vma a count of the physical pages
currently backing it in a process. This information is exported to user
space through the /proc/<pid>/maps interface.

There is currently /proc/<pid>/smaps, which prints detailed information
about physical page usage, but it is a very expensive operation because
it traverses all the page tables, which is overkill for someone who only
wants that count for each vma.
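With the patch applied, the count appears as an extra field between the
offset and the device numbers in each maps line, for example (values are
made up):

08048000-0804c000 r-xp 00000000 00000004 03:01 12345      /bin/cat

A minimal user-space sketch (not part of the patch; it simply assumes the
patched maps format above) that picks up the new field without the
page-table walk that smaps requires could look like:

#include <stdio.h>

int main(void)
{
        /* Illustrative only: assumes each line reads
         * "start-end perms offset nphys major:minor inode [path]" */
        FILE *f = fopen("/proc/self/maps", "r");
        char line[512], perms[5];
        unsigned long start, end, off, nphys, ino;
        unsigned int major, minor;

        if (!f)
                return 1;
        while (fgets(line, sizeof(line), f)) {
                if (sscanf(line, "%lx-%lx %4s %lx %lu %x:%x %lu",
                           &start, &end, perms, &off, &nphys,
                           &major, &minor, &ino) == 8)
                        printf("%08lx-%08lx: %lu physical pages\n",
                               start, end, nphys);
        }
        fclose(f);
        return 0;
}
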
Signed-off-by: Rohit Seth <rohitseth@google.com>
fs/exec.c | 1 +
fs/proc/task_mmu.c | 4 ++--
include/linux/mm.h | 1 +
mm/fremap.c | 2 ++
mm/hugetlb.c | 3 +++
mm/memory.c | 5 +++++
mm/migrate.c | 1 +
mm/rmap.c | 2 ++
mm/swapfile.c | 1 +
9 files changed, 18 insertions(+), 2 deletions(-)
--- linux-2.6.17-rc5-mm3.org/fs/exec.c 2006-06-05 11:08:40.000000000 -0700
+++ linux-2.6.17-rc5-mm3/fs/exec.c 2006-06-05 15:56:42.000000000 -0700
@@ -326,6 +326,7 @@
set_pte_at(mm, address, pte, pte_mkdirty(pte_mkwrite(mk_pte(
page, vma->vm_page_prot))));
page_add_new_anon_rmap(page, vma, address);
+ vma->nphys++;
pte_unmap_unlock(pte, ptl);
/* no need for flush_tlb */
--- linux-2.6.17-rc5-mm3.org/fs/proc/task_mmu.c 2006-06-05 11:08:40.000000000 -0700
+++ linux-2.6.17-rc5-mm3/fs/proc/task_mmu.c 2006-06-06 14:23:48.000000000 -0700
@@ -145,14 +145,14 @@
ino = inode->i_ino;
}
- seq_printf(m, "%08lx-%08lx %c%c%c%c %08lx %02x:%02x %lu %n",
+ seq_printf(m, "%08lx-%08lx %c%c%c%c %08lx %08lu %02x:%02x %lu %n",
vma->vm_start,
vma->vm_end,
flags & VM_READ ? 'r' : '-',
flags & VM_WRITE ? 'w' : '-',
flags & VM_EXEC ? 'x' : '-',
flags & VM_MAYSHARE ? 's' : 'p',
- vma->vm_pgoff << PAGE_SHIFT,
+ vma->vm_pgoff << PAGE_SHIFT, vma->nphys,
MAJOR(dev), MINOR(dev), ino, &len);
/*
--- linux-2.6.17-rc5-mm3.org/include/linux/mm.h 2006-06-05 11:08:40.000000000 -0700
+++ linux-2.6.17-rc5-mm3/include/linux/mm.h 2006-06-05 16:27:05.000000000 -0700
@@ -111,6 +111,7 @@
#ifdef CONFIG_NUMA
struct mempolicy *vm_policy; /* NUMA policy for the VMA */
#endif
+ unsigned long nphys; /* Num phys pages backing this vma */
};
/*
--- linux-2.6.17-rc5-mm3.org/mm/migrate.c 2006-06-05 11:08:40.000000000 -0700
+++ linux-2.6.17-rc5-mm3/mm/migrate.c 2006-06-09 17:17:31.000000000 -0700
@@ -181,6 +181,7 @@
/* No need to invalidate - it was non-present before */
update_mmu_cache(vma, addr, pte);
lazy_mmu_prot_update(pte);
+ vma->nphys++;
out:
pte_unmap_unlock(ptep, ptl);
--- linux-2.6.17-rc5-mm3.org/mm/swapfile.c 2006-06-05 11:08:40.000000000 -0700
+++ linux-2.6.17-rc5-mm3/mm/swapfile.c 2006-06-09 17:24:24.000000000 -0700
@@ -500,6 +500,7 @@
* immediately swapped out again after swapon.
*/
activate_page(page);
+ vma->nphys++;
}
static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
--- linux-2.6.17-rc5-mm3.org/mm/rmap.c 2006-06-05 11:08:40.000000000 -0700
+++ linux-2.6.17-rc5-mm3/mm/rmap.c 2006-06-09 17:22:59.000000000 -0700
@@ -620,6 +620,7 @@
dec_mm_counter(mm, file_rss);
+ vma->nphys--;
page_remove_rmap(page);
page_cache_release(page);
@@ -710,6 +711,7 @@
if (pte_dirty(pteval))
set_page_dirty(page);
+ vma->nphys--;
page_remove_rmap(page);
page_cache_release(page);
dec_mm_counter(mm, file_rss);
--- linux-2.6.17-rc5-mm3.org/mm/memory.c 2006-06-05 11:08:40.000000000 -0700
+++ linux-2.6.17-rc5-mm3/mm/memory.c 2006-06-05 15:57:16.000000000 -0700
@@ -677,6 +677,7 @@
mark_page_accessed(page);
file_rss--;
}
+ vma->nphys--;
page_remove_rmap(page);
tlb_remove_page(tlb, page);
continue;
@@ -2001,6 +2002,7 @@
/* No need to invalidate - it was non-present before */
update_mmu_cache(vma, address, pte);
lazy_mmu_prot_update(pte);
+ vma->nphys++;
unlock:
pte_unmap_unlock(page_table, ptl);
out:
@@ -2063,6 +2065,7 @@
/* No need to invalidate - it was non-present before */
update_mmu_cache(vma, address, entry);
lazy_mmu_prot_update(entry);
+ vma->nphys++;
unlock:
pte_unmap_unlock(page_table, ptl);
return VM_FAULT_MINOR;
@@ -2201,6 +2204,7 @@
/* no need to invalidate: a not-present page shouldn't be cached */
update_mmu_cache(vma, address, entry);
lazy_mmu_prot_update(entry);
+ vma->nphys++;
unlock:
pte_unmap_unlock(page_table, ptl);
return ret;
@@ -2480,6 +2484,7 @@
gate_vma.vm_end = FIXADDR_USER_END;
gate_vma.vm_page_prot = PAGE_READONLY;
gate_vma.vm_flags = 0;
+ gate_vma.nphys = 1;
return 0;
}
__initcall(gate_vma_init);
--- linux-2.6.17-rc5-mm3.org/mm/fremap.c 2006-06-05 11:08:40.000000000 -0700
+++ linux-2.6.17-rc5-mm3/mm/fremap.c 2006-06-08 15:00:11.000000000 -0700
@@ -35,6 +35,7 @@
set_page_dirty(page);
page_remove_rmap(page);
page_cache_release(page);
+ vma->nphys--;
}
} else {
if (!pte_file(pte))
@@ -84,6 +85,7 @@
pte_val = *pte;
update_mmu_cache(vma, addr, pte_val);
lazy_mmu_prot_update(pte_val);
+ vma->nphys++;
err = 0;
unlock:
pte_unmap_unlock(pte, ptl);
--- linux-2.6.17-rc5-mm3.org/mm/hugetlb.c 2006-06-05 11:08:40.000000000 -0700
+++ linux-2.6.17-rc5-mm3/mm/hugetlb.c 2006-06-09 18:23:56.000000000 -0700
@@ -346,6 +346,7 @@
get_page(ptepage);
add_mm_counter(dst, file_rss, HPAGE_SIZE / PAGE_SIZE);
set_huge_pte_at(dst, addr, dst_pte, entry);
+ vma->nphys += (HPAGE_SIZE / PAGE_SIZE);
}
spin_unlock(&src->page_table_lock);
spin_unlock(&dst->page_table_lock);
@@ -386,6 +387,7 @@
page = pte_page(pte);
put_page(page);
add_mm_counter(mm, file_rss, (int) -(HPAGE_SIZE / PAGE_SIZE));
+ vma->nphys -= (HPAGE_SIZE / PAGE_SIZE);
}
spin_unlock(&mm->page_table_lock);
@@ -493,6 +495,7 @@
&& (vma->vm_flags & VM_SHARED)));
set_huge_pte_at(mm, address, ptep, new_pte);
+ vma->nphys += (HPAGE_SIZE / PAGE_SIZE);
if (write_access && !(vma->vm_flags & VM_SHARED)) {
/* Optimization, do the COW without a second fault */
ret = hugetlb_cow(mm, vma, address, ptep, new_pte);
--