memcg: free memcg_caches slot on css offline

A kmem_cache needs to be looked up in the ->memcg_params.memcg_caches
arrays only on allocation, so there is no need to keep the array entries
set until css free - we can clear them on css offline instead. This will
allow us to reuse array entries more efficiently and avoid costly array
relocations.
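
To make the idea concrete, below is a minimal userspace sketch (illustrative
only, not kernel code): get_cache() and deactivate_kmem() are hypothetical
stand-ins for __memcg_kmem_get_cache() and memcg_deactivate_kmem() in the
patch, and the toy ignores the ACCESS_ONCE snapshot and slab_mutex locking
that the real code relies on. It only shows how per-cgroup cache slots can
be emptied on offline while allocations fall back to the root cache.

/*
 * Toy model: each root cache has a memcg_caches[] array indexed by
 * kmemcg_id; allocations fall back to the root cache when the slot is
 * empty, so the slots can be cleared as soon as the cgroup goes offline.
 */
#include <stdio.h>
#include <stdlib.h>

#define MAX_MEMCGS 4

struct kmem_cache {
	const char *name;
	struct kmem_cache *memcg_caches[MAX_MEMCGS]; /* per-cgroup copies */
};

struct mem_cgroup {
	int kmemcg_id;		/* -1 if kmem accounting never activated */
	int kmem_acct_active;	/* cleared on css offline */
};

/* Allocation path: use the per-cgroup copy only if its slot is populated. */
static struct kmem_cache *get_cache(struct kmem_cache *root,
				    struct mem_cgroup *memcg)
{
	int id = memcg->kmemcg_id;	/* the kernel snapshots this once */

	if (id < 0 || !root->memcg_caches[id])
		return root;		/* fall back to the root cache */
	return root->memcg_caches[id];
}

/* css offline: empty the slot; later lookups fall back to the root cache. */
static void deactivate_kmem(struct kmem_cache *root, struct mem_cgroup *memcg)
{
	if (!memcg->kmem_acct_active)
		return;
	memcg->kmem_acct_active = 0;	/* forbid creating new copies */
	free(root->memcg_caches[memcg->kmemcg_id]);
	root->memcg_caches[memcg->kmemcg_id] = NULL;
}

int main(void)
{
	struct kmem_cache root = { .name = "dentry" };
	struct mem_cgroup memcg = { .kmemcg_id = 0, .kmem_acct_active = 1 };
	struct kmem_cache *copy = calloc(1, sizeof(*copy));

	copy->name = "dentry(0)";
	root.memcg_caches[0] = copy;

	printf("online:  %s\n", get_cache(&root, &memcg)->name);
	deactivate_kmem(&root, &memcg);
	printf("offline: %s\n", get_cache(&root, &memcg)->name);
	return 0;
}
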
Signed-off-by: Vladimir Davydov <vdavydov@parallels.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Tejun Heo <tj@kernel.org>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Dave Chinner <david@fromorbit.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 6f3c0fc..abfe013 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -334,6 +334,7 @@
#if defined(CONFIG_MEMCG_KMEM)
/* Index in the kmem_cache->memcg_params.memcg_caches array */
int kmemcg_id;
+ bool kmem_acct_active;
#endif
int last_scanned_node;
@@ -354,7 +355,7 @@
#ifdef CONFIG_MEMCG_KMEM
bool memcg_kmem_is_active(struct mem_cgroup *memcg)
{
- return memcg->kmemcg_id >= 0;
+ return memcg->kmem_acct_active;
}
#endif
@@ -585,7 +586,7 @@
static void disarm_kmem_keys(struct mem_cgroup *memcg)
{
- if (memcg_kmem_is_active(memcg)) {
+ if (memcg->kmemcg_id >= 0) {
static_key_slow_dec(&memcg_kmem_enabled_key);
memcg_free_cache_id(memcg->kmemcg_id);
}
@@ -2666,6 +2667,7 @@
{
struct mem_cgroup *memcg;
struct kmem_cache *memcg_cachep;
+ int kmemcg_id;
VM_BUG_ON(!is_root_cache(cachep));
@@ -2673,10 +2675,11 @@
return cachep;
memcg = get_mem_cgroup_from_mm(current->mm);
- if (!memcg_kmem_is_active(memcg))
+ kmemcg_id = ACCESS_ONCE(memcg->kmemcg_id);
+ if (kmemcg_id < 0)
goto out;
- memcg_cachep = cache_from_memcg_idx(cachep, memcg_cache_id(memcg));
+ memcg_cachep = cache_from_memcg_idx(cachep, kmemcg_id);
if (likely(memcg_cachep))
return memcg_cachep;
@@ -3318,8 +3321,8 @@
int err = 0;
int memcg_id;
- if (memcg_kmem_is_active(memcg))
- return 0;
+ BUG_ON(memcg->kmemcg_id >= 0);
+ BUG_ON(memcg->kmem_acct_active);
/*
* For simplicity, we won't allow this to be disabled. It also can't
@@ -3362,6 +3365,7 @@
* patched.
*/
memcg->kmemcg_id = memcg_id;
+ memcg->kmem_acct_active = true;
out:
return err;
}
@@ -4041,6 +4045,22 @@
return mem_cgroup_sockets_init(memcg, ss);
}
+static void memcg_deactivate_kmem(struct mem_cgroup *memcg)
+{
+ if (!memcg->kmem_acct_active)
+ return;
+
+ /*
+ * Clear the 'active' flag before clearing memcg_caches array entries.
+ * Since we take the slab_mutex in memcg_deactivate_kmem_caches(), it
+ * guarantees no cache will be created for this cgroup after we are
+ * done (see memcg_create_kmem_cache()).
+ */
+ memcg->kmem_acct_active = false;
+
+ memcg_deactivate_kmem_caches(memcg);
+}
+
static void memcg_destroy_kmem(struct mem_cgroup *memcg)
{
memcg_destroy_kmem_caches(memcg);
@@ -4052,6 +4072,10 @@
return 0;
}
+static void memcg_deactivate_kmem(struct mem_cgroup *memcg)
+{
+}
+
static void memcg_destroy_kmem(struct mem_cgroup *memcg)
{
}
@@ -4608,6 +4632,8 @@
spin_unlock(&memcg->event_list_lock);
vmpressure_cleanup(&memcg->vmpressure);
+
+ memcg_deactivate_kmem(memcg);
}
static void mem_cgroup_css_free(struct cgroup_subsys_state *css)