// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
 * Authors: David Chinner and Glauber Costa
 *
 * Generic LRU infrastructure
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/list_lru.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/memcontrol.h>
#include "slab.h"
#include "internal.h"

#ifdef CONFIG_MEMCG_KMEM
static LIST_HEAD(memcg_list_lrus);
static DEFINE_MUTEX(list_lrus_mutex);

static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
	return lru->memcg_aware;
}

static void list_lru_register(struct list_lru *lru)
{
	if (!list_lru_memcg_aware(lru))
		return;

	mutex_lock(&list_lrus_mutex);
	list_add(&lru->list, &memcg_list_lrus);
	mutex_unlock(&list_lrus_mutex);
}

static void list_lru_unregister(struct list_lru *lru)
{
	if (!list_lru_memcg_aware(lru))
		return;

	mutex_lock(&list_lrus_mutex);
	list_del(&lru->list);
	mutex_unlock(&list_lrus_mutex);
}

static int lru_shrinker_id(struct list_lru *lru)
{
	return lru->shrinker_id;
}

static inline struct list_lru_one *
list_lru_from_memcg_idx(struct list_lru *lru, int nid, int idx)
{
	if (list_lru_memcg_aware(lru) && idx >= 0) {
		struct list_lru_memcg *mlru = xa_load(&lru->xa, idx);

		return mlru ? &mlru->node[nid] : NULL;
	}
	return &lru->node[nid].lru;
}

static inline struct list_lru_one *
list_lru_from_kmem(struct list_lru *lru, int nid, void *ptr,
		   struct mem_cgroup **memcg_ptr)
{
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l = &nlru->lru;
	struct mem_cgroup *memcg = NULL;

	if (!list_lru_memcg_aware(lru))
		goto out;

	memcg = mem_cgroup_from_obj(ptr);
	if (!memcg)
		goto out;

	l = list_lru_from_memcg_idx(lru, nid, memcg_kmem_id(memcg));
out:
	if (memcg_ptr)
		*memcg_ptr = memcg;
	return l;
}
#else
static void list_lru_register(struct list_lru *lru)
{
}

static void list_lru_unregister(struct list_lru *lru)
{
}

static int lru_shrinker_id(struct list_lru *lru)
{
	return -1;
}

static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
	return false;
}

static inline struct list_lru_one *
list_lru_from_memcg_idx(struct list_lru *lru, int nid, int idx)
{
	return &lru->node[nid].lru;
}

static inline struct list_lru_one *
list_lru_from_kmem(struct list_lru *lru, int nid, void *ptr,
		   struct mem_cgroup **memcg_ptr)
{
	if (memcg_ptr)
		*memcg_ptr = NULL;
	return &lru->node[nid].lru;
}
#endif /* CONFIG_MEMCG_KMEM */

bool list_lru_add(struct list_lru *lru, struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));
	struct list_lru_node *nlru = &lru->node[nid];
	struct mem_cgroup *memcg;
	struct list_lru_one *l;

	spin_lock(&nlru->lock);
	if (list_empty(item)) {
		l = list_lru_from_kmem(lru, nid, item, &memcg);
		list_add_tail(item, &l->list);
		/* Set shrinker bit if the first element was added */
		if (!l->nr_items++)
			set_shrinker_bit(memcg, nid,
					 lru_shrinker_id(lru));
		nlru->nr_items++;
		spin_unlock(&nlru->lock);
		return true;
	}
	spin_unlock(&nlru->lock);
	return false;
}
EXPORT_SYMBOL_GPL(list_lru_add);
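
/*
 * Usage sketch (hypothetical caller, not part of this file): the caller
 * embeds a list_head in its object, initialized with INIT_LIST_HEAD() so
 * that list_empty() is initially true, and moves the object on and off
 * the LRU as it becomes unused or is reused. list_lru_add() returns true
 * only if the item was not already on a list; list_lru_del() below is
 * the symmetric removal:
 *
 *	struct demo_object {
 *		struct list_head lru;
 *	};
 *
 *	if (list_lru_add(&demo_lru, &obj->lru))
 *		demo_nr_unused++;
 */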

bool list_lru_del(struct list_lru *lru, struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;

	spin_lock(&nlru->lock);
	if (!list_empty(item)) {
		l = list_lru_from_kmem(lru, nid, item, NULL);
		list_del_init(item);
		l->nr_items--;
		nlru->nr_items--;
		spin_unlock(&nlru->lock);
		return true;
	}
	spin_unlock(&nlru->lock);
	return false;
}
EXPORT_SYMBOL_GPL(list_lru_del);

void list_lru_isolate(struct list_lru_one *list, struct list_head *item)
{
	list_del_init(item);
	list->nr_items--;
}
EXPORT_SYMBOL_GPL(list_lru_isolate);

void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
			   struct list_head *head)
{
	list_move(item, head);
	list->nr_items--;
}
EXPORT_SYMBOL_GPL(list_lru_isolate_move);

unsigned long list_lru_count_one(struct list_lru *lru,
				 int nid, struct mem_cgroup *memcg)
{
	struct list_lru_one *l;
	long count;

	rcu_read_lock();
	l = list_lru_from_memcg_idx(lru, nid, memcg_kmem_id(memcg));
	count = l ? READ_ONCE(l->nr_items) : 0;
	rcu_read_unlock();

	if (unlikely(count < 0))
		count = 0;

	return count;
}
EXPORT_SYMBOL_GPL(list_lru_count_one);
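
/*
 * A typical consumer is a memcg-aware shrinker's ->count_objects()
 * callback. A minimal sketch, assuming the list_lru_shrink_count()
 * wrapper from <linux/list_lru.h>, which forwards sc->nid and sc->memcg
 * to list_lru_count_one() (the "demo" names are hypothetical):
 *
 *	static unsigned long demo_count(struct shrinker *shrink,
 *					struct shrink_control *sc)
 *	{
 *		return list_lru_shrink_count(&demo_lru, sc);
 *	}
 */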

unsigned long list_lru_count_node(struct list_lru *lru, int nid)
{
	struct list_lru_node *nlru;

	nlru = &lru->node[nid];
	return nlru->nr_items;
}
EXPORT_SYMBOL_GPL(list_lru_count_node);

static unsigned long
__list_lru_walk_one(struct list_lru *lru, int nid, int memcg_idx,
		    list_lru_walk_cb isolate, void *cb_arg,
		    unsigned long *nr_to_walk)
{
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;
	struct list_head *item, *n;
	unsigned long isolated = 0;

restart:
	l = list_lru_from_memcg_idx(lru, nid, memcg_idx);
	if (!l)
		goto out;

	list_for_each_safe(item, n, &l->list) {
		enum lru_status ret;

		/*
		 * decrement nr_to_walk first so that we don't livelock if we
		 * get stuck on large numbers of LRU_RETRY items
		 */
		if (!*nr_to_walk)
			break;
		--*nr_to_walk;

		ret = isolate(item, l, &nlru->lock, cb_arg);
		switch (ret) {
		case LRU_REMOVED_RETRY:
			assert_spin_locked(&nlru->lock);
			fallthrough;
		case LRU_REMOVED:
			isolated++;
			nlru->nr_items--;
			/*
			 * If the lru lock has been dropped, our list
			 * traversal is now invalid and so we have to
			 * restart from scratch.
			 */
			if (ret == LRU_REMOVED_RETRY)
				goto restart;
			break;
		case LRU_ROTATE:
			list_move_tail(item, &l->list);
			break;
		case LRU_SKIP:
			break;
		case LRU_RETRY:
			/*
			 * The lru lock has been dropped, our list traversal is
			 * now invalid and so we have to restart from scratch.
			 */
			assert_spin_locked(&nlru->lock);
			goto restart;
		default:
			BUG();
		}
	}
out:
	return isolated;
}

unsigned long
list_lru_walk_one(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
		  list_lru_walk_cb isolate, void *cb_arg,
		  unsigned long *nr_to_walk)
{
	struct list_lru_node *nlru = &lru->node[nid];
	unsigned long ret;

	spin_lock(&nlru->lock);
	ret = __list_lru_walk_one(lru, nid, memcg_kmem_id(memcg), isolate,
				  cb_arg, nr_to_walk);
	spin_unlock(&nlru->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(list_lru_walk_one);
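
/*
 * The isolate callback decides the fate of each item while the per-node
 * lock is held. A minimal sketch of a callback that detaches every item
 * onto a private dispose list via list_lru_isolate_move() above (the
 * "demo" names are hypothetical):
 *
 *	static enum lru_status demo_isolate(struct list_head *item,
 *					    struct list_lru_one *list,
 *					    spinlock_t *lock, void *cb_arg)
 *	{
 *		struct list_head *dispose = cb_arg;
 *
 *		list_lru_isolate_move(list, item, dispose);
 *		return LRU_REMOVED;
 *	}
 *
 * The caller frees everything on the dispose list once the walk has
 * dropped the lock.
 */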

unsigned long
list_lru_walk_one_irq(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
		      list_lru_walk_cb isolate, void *cb_arg,
		      unsigned long *nr_to_walk)
{
	struct list_lru_node *nlru = &lru->node[nid];
	unsigned long ret;

	spin_lock_irq(&nlru->lock);
	ret = __list_lru_walk_one(lru, nid, memcg_kmem_id(memcg), isolate,
				  cb_arg, nr_to_walk);
	spin_unlock_irq(&nlru->lock);
	return ret;
}

unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
				 list_lru_walk_cb isolate, void *cb_arg,
				 unsigned long *nr_to_walk)
{
	long isolated = 0;

	isolated += list_lru_walk_one(lru, nid, NULL, isolate, cb_arg,
				      nr_to_walk);

#ifdef CONFIG_MEMCG_KMEM
	if (*nr_to_walk > 0 && list_lru_memcg_aware(lru)) {
		struct list_lru_memcg *mlru;
		unsigned long index;

		xa_for_each(&lru->xa, index, mlru) {
			struct list_lru_node *nlru = &lru->node[nid];

			spin_lock(&nlru->lock);
			isolated += __list_lru_walk_one(lru, nid, index,
							isolate, cb_arg,
							nr_to_walk);
			spin_unlock(&nlru->lock);

			if (*nr_to_walk <= 0)
				break;
		}
	}
#endif

	return isolated;
}
EXPORT_SYMBOL_GPL(list_lru_walk_node);
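
/*
 * list_lru_walk_node() visits the root list first and then, for a
 * memcg-aware lru, each per-memcg list, all under the same per-node
 * lock. A caller draining a node therefore typically loops in small
 * batches so the lock is not held for too long (a sketch; the "demo"
 * names are hypothetical):
 *
 *	unsigned long nr, isolated;
 *
 *	do {
 *		nr = 32;
 *		isolated = list_lru_walk_node(&demo_lru, nid, demo_isolate,
 *					      &dispose, &nr);
 *		demo_dispose_all(&dispose);
 *	} while (isolated);
 */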

static void init_one_lru(struct list_lru_one *l)
{
	INIT_LIST_HEAD(&l->list);
	l->nr_items = 0;
}

#ifdef CONFIG_MEMCG_KMEM
static struct list_lru_memcg *memcg_init_list_lru_one(gfp_t gfp)
{
	int nid;
	struct list_lru_memcg *mlru;

	mlru = kmalloc(struct_size(mlru, node, nr_node_ids), gfp);
	if (!mlru)
		return NULL;

	for_each_node(nid)
		init_one_lru(&mlru->node[nid]);

	return mlru;
}

static void memcg_list_lru_free(struct list_lru *lru, int src_idx)
{
	struct list_lru_memcg *mlru = xa_erase_irq(&lru->xa, src_idx);

	/*
	 * __list_lru_walk_one() may still be walking this node's list
	 * concurrently, so the mlru must be freed with kvfree_rcu().
	 * The walk runs under lru->node[nid].lock, which serves as an
	 * RCU read-side critical section.
	 */
	if (mlru)
		kvfree_rcu(mlru, rcu);
}

static inline void memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
{
	if (memcg_aware)
		xa_init_flags(&lru->xa, XA_FLAGS_LOCK_IRQ);
	lru->memcg_aware = memcg_aware;
}

static void memcg_destroy_list_lru(struct list_lru *lru)
{
	XA_STATE(xas, &lru->xa, 0);
	struct list_lru_memcg *mlru;

	if (!list_lru_memcg_aware(lru))
		return;

	xas_lock_irq(&xas);
	xas_for_each(&xas, mlru, ULONG_MAX) {
		kfree(mlru);
		xas_store(&xas, NULL);
	}
	xas_unlock_irq(&xas);
}

static void memcg_reparent_list_lru_node(struct list_lru *lru, int nid,
					 int src_idx, struct mem_cgroup *dst_memcg)
{
	struct list_lru_node *nlru = &lru->node[nid];
	int dst_idx = dst_memcg->kmemcg_id;
	struct list_lru_one *src, *dst;

	/*
	 * Since list_lru_{add,del} may be called under an IRQ-safe lock,
	 * we have to use IRQ-safe primitives here to avoid deadlock.
	 */
	spin_lock_irq(&nlru->lock);

	src = list_lru_from_memcg_idx(lru, nid, src_idx);
	if (!src)
		goto out;
	dst = list_lru_from_memcg_idx(lru, nid, dst_idx);

	list_splice_init(&src->list, &dst->list);

	if (src->nr_items) {
		dst->nr_items += src->nr_items;
		set_shrinker_bit(dst_memcg, nid, lru_shrinker_id(lru));
		src->nr_items = 0;
	}
out:
	spin_unlock_irq(&nlru->lock);
}

static void memcg_reparent_list_lru(struct list_lru *lru,
				    int src_idx, struct mem_cgroup *dst_memcg)
{
	int i;

	for_each_node(i)
		memcg_reparent_list_lru_node(lru, i, src_idx, dst_memcg);

	memcg_list_lru_free(lru, src_idx);
}

void memcg_reparent_list_lrus(struct mem_cgroup *memcg, struct mem_cgroup *parent)
{
	struct cgroup_subsys_state *css;
	struct list_lru *lru;
	int src_idx = memcg->kmemcg_id;

	/*
	 * Change the kmemcg_id of this cgroup and all its descendants to the
	 * parent's id, and then move all entries from this cgroup's list_lrus
	 * to the parent's.
	 *
	 * After we have finished, all list_lrus corresponding to this cgroup
	 * are guaranteed to remain empty, so we can safely free this cgroup's
	 * list lrus in memcg_list_lru_free().
	 *
	 * Changing ->kmemcg_id to the parent's also prevents
	 * memcg_list_lru_alloc() from allocating list lrus for this cgroup
	 * after the memcg_list_lru_free() call.
	 */
	rcu_read_lock();
	css_for_each_descendant_pre(css, &memcg->css) {
		struct mem_cgroup *child;

		child = mem_cgroup_from_css(css);
		WRITE_ONCE(child->kmemcg_id, parent->kmemcg_id);
	}
	rcu_read_unlock();

	mutex_lock(&list_lrus_mutex);
	list_for_each_entry(lru, &memcg_list_lrus, list)
		memcg_reparent_list_lru(lru, src_idx, parent);
	mutex_unlock(&list_lrus_mutex);
}
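
/*
 * Reparenting is expected to be driven by the memcg offline path (e.g.
 * memcg_offline_kmem()): once every descendant's kmemcg_id has been
 * redirected to the parent above, no new items can show up under the old
 * id, so splicing the lists and freeing the old list_lru_memcg entries
 * in memcg_reparent_list_lru() is safe.
 */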

static inline bool memcg_list_lru_allocated(struct mem_cgroup *memcg,
					    struct list_lru *lru)
{
	int idx = memcg->kmemcg_id;

	return idx < 0 || xa_load(&lru->xa, idx);
}

int memcg_list_lru_alloc(struct mem_cgroup *memcg, struct list_lru *lru,
			 gfp_t gfp)
{
	int i;
	unsigned long flags;
	struct list_lru_memcg_table {
		struct list_lru_memcg *mlru;
		struct mem_cgroup *memcg;
	} *table;
	XA_STATE(xas, &lru->xa, 0);

	if (!list_lru_memcg_aware(lru) || memcg_list_lru_allocated(memcg, lru))
		return 0;

	gfp &= GFP_RECLAIM_MASK;
	table = kmalloc_array(memcg->css.cgroup->level, sizeof(*table), gfp);
	if (!table)
		return -ENOMEM;

	/*
	 * Because the list_lru can be reparented to the parent cgroup's
	 * list_lru, we should make sure that this cgroup and all its
	 * ancestors have allocated list_lru_memcg.
	 */
	for (i = 0; memcg; memcg = parent_mem_cgroup(memcg), i++) {
		if (memcg_list_lru_allocated(memcg, lru))
			break;

		table[i].memcg = memcg;
		table[i].mlru = memcg_init_list_lru_one(gfp);
		if (!table[i].mlru) {
			while (i--)
				kfree(table[i].mlru);
			kfree(table);
			return -ENOMEM;
		}
	}

	xas_lock_irqsave(&xas, flags);
	while (i--) {
		int index = READ_ONCE(table[i].memcg->kmemcg_id);
		struct list_lru_memcg *mlru = table[i].mlru;

		xas_set(&xas, index);
retry:
		if (unlikely(index < 0 || xas_error(&xas) || xas_load(&xas))) {
			kfree(mlru);
		} else {
			xas_store(&xas, mlru);
			if (xas_error(&xas) == -ENOMEM) {
				xas_unlock_irqrestore(&xas, flags);
				if (xas_nomem(&xas, gfp))
					xas_set_err(&xas, 0);
				xas_lock_irqsave(&xas, flags);
				/*
				 * The xas lock was dropped above, so this
				 * memcg may have been reparented in the
				 * meantime; reload the memcg id. For more
				 * details, see the comments in
				 * memcg_reparent_list_lrus().
				 */
				index = READ_ONCE(table[i].memcg->kmemcg_id);
				if (index < 0)
					xas_set_err(&xas, 0);
				else if (!xas_error(&xas) && index != xas.xa_index)
					xas_set(&xas, index);
				goto retry;
			}
		}
	}
	/*
	 * Here xas_nomem() is called to free any memory it preallocated,
	 * not to allocate more.
	 */
	if (xas.xa_alloc)
		xas_nomem(&xas, gfp);
	xas_unlock_irqrestore(&xas, flags);
	kfree(table);

	return xas_error(&xas);
}
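
/*
 * memcg_list_lru_alloc() runs on the allocation side, before an object
 * that may end up on @lru is handed out, so that list_lru_add() itself
 * never needs to allocate. A minimal sketch of the intended flow,
 * assuming the kmem_cache_alloc_lru() entry point that passes the lru
 * down to this function (the "demo" names are hypothetical):
 *
 *	obj = kmem_cache_alloc_lru(demo_cachep, &demo_lru, GFP_KERNEL);
 *	...
 *	list_lru_add(&demo_lru, &obj->lru);
 */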
#else
static inline void memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
{
}

static void memcg_destroy_list_lru(struct list_lru *lru)
{
}
#endif /* CONFIG_MEMCG_KMEM */

int __list_lru_init(struct list_lru *lru, bool memcg_aware,
		    struct lock_class_key *key, struct shrinker *shrinker)
{
	int i;

#ifdef CONFIG_MEMCG_KMEM
	if (shrinker)
		lru->shrinker_id = shrinker->id;
	else
		lru->shrinker_id = -1;
#endif

	lru->node = kcalloc(nr_node_ids, sizeof(*lru->node), GFP_KERNEL);
	if (!lru->node)
		return -ENOMEM;

	for_each_node(i) {
		spin_lock_init(&lru->node[i].lock);
		if (key)
			lockdep_set_class(&lru->node[i].lock, key);
		init_one_lru(&lru->node[i].lru);
	}

	memcg_init_list_lru(lru, memcg_aware);
	list_lru_register(lru);

	return 0;
}
EXPORT_SYMBOL_GPL(__list_lru_init);
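
/*
 * Callers normally use the wrappers from <linux/list_lru.h> rather than
 * calling __list_lru_init() directly; at the time of writing they expand
 * roughly as sketched below (error handling elided, "demo" names
 * hypothetical):
 *
 *	static struct list_lru demo_lru;
 *
 *	err = list_lru_init(&demo_lru);
 *	err = list_lru_init_memcg(&demo_lru, &demo_shrinker);
 *	...
 *	list_lru_destroy(&demo_lru);
 */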

void list_lru_destroy(struct list_lru *lru)
{
	/* Already destroyed or not yet initialized? */
	if (!lru->node)
		return;

	list_lru_unregister(lru);

	memcg_destroy_list_lru(lru);
	kfree(lru->node);
	lru->node = NULL;

#ifdef CONFIG_MEMCG_KMEM
	lru->shrinker_id = -1;
#endif
}
EXPORT_SYMBOL_GPL(list_lru_destroy);