From: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
To: linux-mm@kvack.org
Cc: Andrew Morton <akpm@linux-foundation.org>,
Matt Mackall <mpm@selenic.com>, Cliff Wickman <cpw@sgi.com>,
KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>,
Johannes Weiner <hannes@cmpxchg.org>,
KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>,
Michal Hocko <mhocko@suse.cz>,
"Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>,
Pavel Emelyanov <xemul@parallels.com>,
Rik van Riel <riel@redhat.com>,
kirill.shutemov@linux.intel.com, linux-kernel@vger.kernel.org
Subject: [PATCH 07/11] memcg: redefine callback functions for page table walker
Date: Wed, 11 Dec 2013 17:09:03 -0500
Message-ID: <1386799747-31069-8-git-send-email-n-horiguchi@ah.jp.nec.com>
In-Reply-To: <1386799747-31069-1-git-send-email-n-horiguchi@ah.jp.nec.com>
Move the code around the pte loop in mem_cgroup_count_precharge_pte_range()
into mem_cgroup_count_precharge_pte(), connected to the walker's pte_entry()
callback; the pmd-level part becomes mem_cgroup_count_precharge_pmd().
We leave the callback mem_cgroup_move_charge_pte_range() unchanged for now,
because the same replacement is not easy to do there due to its 'goto retry'.
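For reference, the callback shape this converts to looks roughly like the
sketch below. It is simplified from the patch itself (the function names
are illustrative, not the real ones), and it assumes walk->vma and
walk_page_vma() as introduced earlier in this series:

static int count_precharge_pte(pte_t *pte, unsigned long addr,
			       unsigned long end, struct mm_walk *walk)
{
	/* called once per pte; walk->vma is filled in by the walker core */
	if (get_mctgt_type(walk->vma, addr, *pte, NULL))
		mc.precharge++;
	return 0;
}

static void count_precharge(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	struct mm_walk walk = {
		.pte_entry = count_precharge_pte,
		.mm = mm,
	};

	down_read(&mm->mmap_sem);
	for (vma = mm->mmap; vma; vma = vma->vm_next)
		walk_page_vma(vma, &walk);
	up_read(&mm->mmap_sem);
}

The pte-loop boilerplate (pte_offset_map_lock(), the per-page loop,
cond_resched()) now lives in the walker core, so each user only supplies
the per-entry check.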
ChangeLog v2:
- rebase onto mmots
Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
---
mm/memcontrol.c | 71 ++++++++++++++++++++++-----------------------------------
1 file changed, 27 insertions(+), 44 deletions(-)
diff --git v3.13-rc3-mmots-2013-12-10-16-38.orig/mm/memcontrol.c v3.13-rc3-mmots-2013-12-10-16-38/mm/memcontrol.c
index cbac7219fa69..3ffc36c8db9e 100644
--- v3.13-rc3-mmots-2013-12-10-16-38.orig/mm/memcontrol.c
+++ v3.13-rc3-mmots-2013-12-10-16-38/mm/memcontrol.c
@@ -6915,30 +6915,29 @@ static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
}
#endif
-static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
+static int mem_cgroup_count_precharge_pte(pte_t *pte,
unsigned long addr, unsigned long end,
struct mm_walk *walk)
{
- struct vm_area_struct *vma = walk->private;
- pte_t *pte;
+ if (get_mctgt_type(walk->vma, addr, *pte, NULL))
+ mc.precharge++; /* increment precharge temporarily */
+ return 0;
+}
+
+static int mem_cgroup_count_precharge_pmd(pmd_t *pmd,
+ unsigned long addr, unsigned long end,
+ struct mm_walk *walk)
+{
+ struct vm_area_struct *vma = walk->vma;
spinlock_t *ptl;
if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
mc.precharge += HPAGE_PMD_NR;
spin_unlock(ptl);
- return 0;
+ /* don't call mem_cgroup_count_precharge_pte() */
+ walk->skip = 1;
}
-
- if (pmd_trans_unstable(pmd))
- return 0;
- pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
- for (; addr != end; pte++, addr += PAGE_SIZE)
- if (get_mctgt_type(vma, addr, *pte, NULL))
- mc.precharge++; /* increment precharge temporarily */
- pte_unmap_unlock(pte - 1, ptl);
- cond_resched();
-
return 0;
}
@@ -6947,18 +6946,14 @@ static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
unsigned long precharge;
struct vm_area_struct *vma;
+ struct mm_walk mem_cgroup_count_precharge_walk = {
+ .pmd_entry = mem_cgroup_count_precharge_pmd,
+ .pte_entry = mem_cgroup_count_precharge_pte,
+ .mm = mm,
+ };
down_read(&mm->mmap_sem);
- for (vma = mm->mmap; vma; vma = vma->vm_next) {
- struct mm_walk mem_cgroup_count_precharge_walk = {
- .pmd_entry = mem_cgroup_count_precharge_pte_range,
- .mm = mm,
- .private = vma,
- };
- if (is_vm_hugetlb_page(vma))
- continue;
- walk_page_range(vma->vm_start, vma->vm_end,
- &mem_cgroup_count_precharge_walk);
- }
+ for (vma = mm->mmap; vma; vma = vma->vm_next)
+ walk_page_vma(vma, &mem_cgroup_count_precharge_walk);
up_read(&mm->mmap_sem);
precharge = mc.precharge;
@@ -7097,7 +7092,7 @@ static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
struct mm_walk *walk)
{
int ret = 0;
- struct vm_area_struct *vma = walk->private;
+ struct vm_area_struct *vma = walk->vma;
pte_t *pte;
spinlock_t *ptl;
enum mc_target_type target_type;
@@ -7198,6 +7193,10 @@ put: /* get_mctgt_type() gets the page */
static void mem_cgroup_move_charge(struct mm_struct *mm)
{
struct vm_area_struct *vma;
+ struct mm_walk mem_cgroup_move_charge_walk = {
+ .pmd_entry = mem_cgroup_move_charge_pte_range,
+ .mm = mm,
+ };
lru_add_drain_all();
retry:
@@ -7213,24 +7212,8 @@ static void mem_cgroup_move_charge(struct mm_struct *mm)
cond_resched();
goto retry;
}
- for (vma = mm->mmap; vma; vma = vma->vm_next) {
- int ret;
- struct mm_walk mem_cgroup_move_charge_walk = {
- .pmd_entry = mem_cgroup_move_charge_pte_range,
- .mm = mm,
- .private = vma,
- };
- if (is_vm_hugetlb_page(vma))
- continue;
- ret = walk_page_range(vma->vm_start, vma->vm_end,
- &mem_cgroup_move_charge_walk);
- if (ret)
- /*
- * means we have consumed all precharges and failed in
- * doing additional charge. Just abandon here.
- */
- break;
- }
+ for (vma = mm->mmap; vma; vma = vma->vm_next)
+ walk_page_vma(vma, &mem_cgroup_move_charge_walk);
up_read(&mm->mmap_sem);
}
--
1.8.3.1
--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org. For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a>