diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index b73988a..c208154 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -182,6 +182,29 @@ struct mem_cgroup_per_node {
 	struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
 };
 
+<<<<<<< HEAD
+=======
+/*
+ * Cgroups above their limits are maintained in a RB-Tree, independent of
+ * their hierarchy representation
+ */
+
+struct mem_cgroup_tree_per_zone {
+	struct rb_root rb_root;
+	spinlock_t lock;
+};
+
+struct mem_cgroup_tree_per_node {
+	struct mem_cgroup_tree_per_zone rb_tree_per_zone[MAX_NR_ZONES];
+};
+
+struct mem_cgroup_tree {
+	struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
+};
+
+static struct mem_cgroup_tree soft_limit_tree __read_mostly;
+
+>>>>>>> tj-cgroups/for-3.12
 struct mem_cgroup_threshold {
 	struct eventfd_ctx *eventfd;
 	u64 threshold;
@@ -255,7 +278,10 @@ struct mem_cgroup {
 
 	bool		oom_lock;
 	atomic_t	under_oom;
+<<<<<<< HEAD
 	atomic_t	oom_wakeups;
+=======
+>>>>>>> tj-cgroups/for-3.12
 
 	int	swappiness;
 	/* OOM-Killer disable */
@@ -323,6 +349,7 @@ struct mem_cgroup {
 	 */
 	spinlock_t soft_lock;
 
+<<<<<<< HEAD
 	/*
 	 * If true then this group has increased parents' children_in_excess
 	 * when it got over the soft limit.
 	 */
 	bool soft_contributed;
 
 	/* Number of children that are in soft limit excess */
 	atomic_t children_in_excess;
+=======
+>>>>>>> tj-cgroups/for-3.12
 	struct mem_cgroup_per_node *nodeinfo[0];
 	/* WARNING: nodeinfo must be the last member here */
 };
@@ -3573,9 +3602,15 @@ __memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **_memcg, int order)
  * the page allocator. Therefore, the following sequence when backed by
  * the SLUB allocator:
  *
+<<<<<<< HEAD
  *	memcg_stop_kmem_account();
  *	kmalloc()
  *	memcg_resume_kmem_account();
+=======
+ *	memcg_stop_kmem_account();
+ *	kmalloc()
+ *	memcg_resume_kmem_account();
+>>>>>>> tj-cgroups/for-3.12
 *
 * would effectively ignore the fact that we should skip accounting,
 * since it will drive us directly to this function without passing
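
For reference, the soft-limit bookkeeping reintroduced on the tj-cgroups/for-3.12 side of the first hunk is a global tree indexed first by NUMA node and then by zone, with a per-zone spinlock so contention stays local to a single zone's tree. Below is a minimal sketch of the lookup, assuming only the structures shown in that hunk; the helper name soft_limit_tree_node_zone() is illustrative and does not appear in this diff:

	/*
	 * Sketch: resolve a (node, zone) pair to its soft-limit RB-tree.
	 * Assumes rb_tree_per_node[nid] was allocated at init time, one
	 * mem_cgroup_tree_per_node per NUMA node.
	 */
	static struct mem_cgroup_tree_per_zone *
	soft_limit_tree_node_zone(int nid, int zid)
	{
		return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
	}

A caller would take tree->lock before touching tree->rb_root, which is why the spinlock sits next to each per-zone root rather than in the top-level mem_cgroup_tree.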