// SPDX-License-Identifier: GPL-2.0
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/shrinker.h>
#include <linux/memcontrol.h>
#include <linux/srcu.h>

/* defined in vmscan.c */
extern struct mutex shrinker_mutex;
extern struct list_head shrinker_list;
extern struct srcu_struct shrinker_srcu;

static DEFINE_IDA(shrinker_debugfs_ida);
static struct dentry *shrinker_debugfs_root;

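/*
 * Collect per-node object counts for one shrinker/memcg pair.
 * Non-NUMA-aware shrinkers are only queried on node 0; the remaining
 * slots of count_per_node[] are set to zero.  Returns the total count.
 */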
static unsigned long shrinker_count_objects(struct shrinker *shrinker,
                                            struct mem_cgroup *memcg,
                                            unsigned long *count_per_node)
{
        unsigned long nr, total = 0;
        int nid;

        for_each_node(nid) {
                if (nid == 0 || (shrinker->flags & SHRINKER_NUMA_AWARE)) {
                        struct shrink_control sc = {
                                .gfp_mask = GFP_KERNEL,
                                .nid = nid,
                                .memcg = memcg,
                        };

                        nr = shrinker->count_objects(shrinker, &sc);
                        if (nr == SHRINK_EMPTY)
                                nr = 0;
                } else {
                        nr = 0;
                }

                count_per_node[nid] = nr;
                total += nr;
        }

        return total;
}

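/*
 * Back the "count" debugfs file: for each online memcg (or just once for
 * memcg-unaware shrinkers) print the cgroup inode number followed by the
 * object count on every NUMA node, one cgroup per line.  Cgroups with a
 * zero total are skipped.
 */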
static int shrinker_debugfs_count_show(struct seq_file *m, void *v)
{
        struct shrinker *shrinker = m->private;
        unsigned long *count_per_node;
        struct mem_cgroup *memcg;
        unsigned long total;
        bool memcg_aware;
        int ret = 0, nid, srcu_idx;

        count_per_node = kcalloc(nr_node_ids, sizeof(unsigned long), GFP_KERNEL);
        if (!count_per_node)
                return -ENOMEM;

        srcu_idx = srcu_read_lock(&shrinker_srcu);

        memcg_aware = shrinker->flags & SHRINKER_MEMCG_AWARE;

        memcg = mem_cgroup_iter(NULL, NULL, NULL);
        do {
                if (memcg && !mem_cgroup_online(memcg))
                        continue;

                total = shrinker_count_objects(shrinker,
                                               memcg_aware ? memcg : NULL,
                                               count_per_node);
                if (total) {
                        seq_printf(m, "%lu", mem_cgroup_ino(memcg));
                        for_each_node(nid)
                                seq_printf(m, " %lu", count_per_node[nid]);
                        seq_putc(m, '\n');
                }

                if (!memcg_aware) {
                        mem_cgroup_iter_break(NULL, memcg);
                        break;
                }

                if (signal_pending(current)) {
                        mem_cgroup_iter_break(NULL, memcg);
                        ret = -EINTR;
                        break;
                }
        } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL);

        srcu_read_unlock(&shrinker_srcu, srcu_idx);

        kfree(count_per_node);
        return ret;
}
DEFINE_SHOW_ATTRIBUTE(shrinker_debugfs_count);

static int shrinker_debugfs_scan_open(struct inode *inode, struct file *file)
{
        file->private_data = inode->i_private;
        return nonseekable_open(inode, file);
}

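/*
 * Back the "scan" debugfs file: parse "<cgroup inode> <numa id> <nr_to_scan>"
 * from userspace and call ->scan_objects() once with those parameters.
 * For memcg-unaware shrinkers the cgroup inode must be 0.
 */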
static ssize_t shrinker_debugfs_scan_write(struct file *file,
                                           const char __user *buf,
                                           size_t size, loff_t *pos)
{
        struct shrinker *shrinker = file->private_data;
        unsigned long nr_to_scan = 0, ino, read_len;
        struct shrink_control sc = {
                .gfp_mask = GFP_KERNEL,
        };
        struct mem_cgroup *memcg = NULL;
        int nid, srcu_idx;
        char kbuf[72];

        read_len = size < (sizeof(kbuf) - 1) ? size : (sizeof(kbuf) - 1);
        if (copy_from_user(kbuf, buf, read_len))
                return -EFAULT;
        kbuf[read_len] = '\0';

        if (sscanf(kbuf, "%lu %d %lu", &ino, &nid, &nr_to_scan) != 3)
                return -EINVAL;

        if (nid < 0 || nid >= nr_node_ids)
                return -EINVAL;

        if (nr_to_scan == 0)
                return size;

        if (shrinker->flags & SHRINKER_MEMCG_AWARE) {
                memcg = mem_cgroup_get_from_ino(ino);
                if (!memcg || IS_ERR(memcg))
                        return -ENOENT;

                if (!mem_cgroup_online(memcg)) {
                        mem_cgroup_put(memcg);
                        return -ENOENT;
                }
        } else if (ino != 0) {
                return -EINVAL;
        }

        srcu_idx = srcu_read_lock(&shrinker_srcu);

        sc.nid = nid;
        sc.memcg = memcg;
        sc.nr_to_scan = nr_to_scan;
        sc.nr_scanned = nr_to_scan;

        shrinker->scan_objects(shrinker, &sc);

        srcu_read_unlock(&shrinker_srcu, srcu_idx);
        mem_cgroup_put(memcg);

        return size;
}

static const struct file_operations shrinker_debugfs_scan_fops = {
        .owner   = THIS_MODULE,
        .open    = shrinker_debugfs_scan_open,
        .write   = shrinker_debugfs_scan_write,
};

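/*
 * Create the "<name>-<id>" debugfs directory for a newly registered
 * shrinker, containing the "count" and "scan" files.  Called with
 * shrinker_mutex held.
 */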
int shrinker_debugfs_add(struct shrinker *shrinker)
{
        struct dentry *entry;
        char buf[128];
        int id;

        lockdep_assert_held(&shrinker_mutex);

        /* debugfs isn't initialized yet, add debugfs entries later. */
        if (!shrinker_debugfs_root)
                return 0;

        id = ida_alloc(&shrinker_debugfs_ida, GFP_KERNEL);
        if (id < 0)
                return id;
        shrinker->debugfs_id = id;

        snprintf(buf, sizeof(buf), "%s-%d", shrinker->name, id);

        /* create debugfs entry */
        entry = debugfs_create_dir(buf, shrinker_debugfs_root);
        if (IS_ERR(entry)) {
                ida_free(&shrinker_debugfs_ida, id);
                return PTR_ERR(entry);
        }
        shrinker->debugfs_entry = entry;

        debugfs_create_file("count", 0440, entry, shrinker,
                            &shrinker_debugfs_count_fops);
        debugfs_create_file("scan", 0220, entry, shrinker,
                            &shrinker_debugfs_scan_fops);
        return 0;
}

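/*
 * Rename the shrinker and its debugfs directory; the numeric id is kept.
 */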
int shrinker_debugfs_rename(struct shrinker *shrinker, const char *fmt, ...)
{
        struct dentry *entry;
        char buf[128];
        const char *new, *old;
        va_list ap;
        int ret = 0;

        va_start(ap, fmt);
        new = kvasprintf_const(GFP_KERNEL, fmt, ap);
        va_end(ap);

        if (!new)
                return -ENOMEM;

        mutex_lock(&shrinker_mutex);

        old = shrinker->name;
        shrinker->name = new;

        if (shrinker->debugfs_entry) {
                snprintf(buf, sizeof(buf), "%s-%d", shrinker->name,
                         shrinker->debugfs_id);

                entry = debugfs_rename(shrinker_debugfs_root,
                                       shrinker->debugfs_entry,
                                       shrinker_debugfs_root, buf);
                if (IS_ERR(entry))
                        ret = PTR_ERR(entry);
                else
                        shrinker->debugfs_entry = entry;
        }

        mutex_unlock(&shrinker_mutex);

        kfree_const(old);

        return ret;
}
EXPORT_SYMBOL(shrinker_debugfs_rename);

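/*
 * Detach the debugfs entry and release the id.  The dentry is returned so
 * that the caller can remove it after dropping shrinker_mutex.
 */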
struct dentry *shrinker_debugfs_remove(struct shrinker *shrinker)
{
        struct dentry *entry = shrinker->debugfs_entry;

        lockdep_assert_held(&shrinker_mutex);

        kfree_const(shrinker->name);
        shrinker->name = NULL;

        if (entry) {
                ida_free(&shrinker_debugfs_ida, shrinker->debugfs_id);
                shrinker->debugfs_entry = NULL;
        }

        return entry;
}

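/*
 * Create the debugfs root and add entries for shrinkers that were
 * registered before debugfs became available.
 */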
static int __init shrinker_debugfs_init(void)
{
        struct shrinker *shrinker;
        struct dentry *dentry;
        int ret = 0;

        dentry = debugfs_create_dir("shrinker", NULL);
        if (IS_ERR(dentry))
                return PTR_ERR(dentry);
        shrinker_debugfs_root = dentry;

        /* Create debugfs entries for shrinkers registered at boot */
        mutex_lock(&shrinker_mutex);
        list_for_each_entry(shrinker, &shrinker_list, list)
                if (!shrinker->debugfs_entry) {
                        ret = shrinker_debugfs_add(shrinker);
                        if (ret)
                                break;
                }
        mutex_unlock(&shrinker_mutex);

        return ret;
}
late_initcall(shrinker_debugfs_init);