// SPDX-License-Identifier: GPL-2.0
/*
 * DAMON-based page reclamation
 *
 * Author: SeongJae Park <sj@kernel.org>
 */

#define pr_fmt(fmt) "damon-reclaim: " fmt

#include <linux/damon.h>
#include <linux/kstrtox.h>
#include <linux/module.h>

#include "modules-common.h"

#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "damon_reclaim."
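
/*
 * With the prefix above, each module_param() in this file can be set as
 * ``damon_reclaim.<name>`` on the kernel command line and, assuming
 * DAMON_RECLAIM is built into the kernel, read or written under
 * /sys/module/damon_reclaim/parameters/ at runtime.
 */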

/*
 * Enable or disable DAMON_RECLAIM.
 *
 * You can enable DAMON_RECLAIM by setting the value of this parameter as
 * ``Y``.  Setting it as ``N`` disables DAMON_RECLAIM.  Note that
 * DAMON_RECLAIM could do no real monitoring and reclamation due to the
 * watermarks-based activation condition.  Refer to the descriptions of the
 * watermark parameters below for this.
 */
static bool enabled __read_mostly;

/*
 * Make DAMON_RECLAIM read the input parameters again, except ``enabled``.
 *
 * Input parameters that are updated while DAMON_RECLAIM is running are not
 * applied by default.  Once this parameter is set as ``Y``, DAMON_RECLAIM
 * reads the values of the parameters, except ``enabled``, again.  Once the
 * re-reading is done, this parameter is set as ``N``.  If invalid parameters
 * are found during the re-reading, DAMON_RECLAIM will be disabled.
 */
static bool commit_inputs __read_mostly;
module_param(commit_inputs, bool, 0600);
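
/*
 * A minimal usage sketch (paths assume the ``damon_reclaim.`` parameter
 * prefix above and a kernel with DAMON_RECLAIM built in): update a
 * parameter, then commit it while DAMON_RECLAIM is running, e.g.
 *
 *	# echo 30000000 > /sys/module/damon_reclaim/parameters/min_age
 *	# echo Y > /sys/module/damon_reclaim/parameters/commit_inputs
 */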

/*
 * Time threshold for cold memory region identification in microseconds.
 *
 * If a memory region is not accessed for this or longer time, DAMON_RECLAIM
 * identifies the region as cold, and reclaims it.  120 seconds by default.
 */
static unsigned long min_age __read_mostly = 120000000;
module_param(min_age, ulong, 0600);

static struct damos_quota damon_reclaim_quota = {
	/* use up to 10 ms of CPU time, reclaim up to 128 MiB per 1 sec by default */
	.ms = 10,
	.sz = 128 * 1024 * 1024,
	.reset_interval = 1000,
	/* Within the quota, page out older regions first. */
	.weight_sz = 0,
	.weight_nr_accesses = 0,
	.weight_age = 1
};
DEFINE_DAMON_MODULES_DAMOS_QUOTAS(damon_reclaim_quota);
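
/*
 * The defaults above mean: within every 1000 ms reset interval, spend at
 * most 10 ms of CPU time applying the scheme and page out at most 128 MiB.
 * The DEFINE_DAMON_MODULES_DAMOS_QUOTAS() helper (see modules-common.h) is
 * expected to expose these as the ``quota_ms``, ``quota_sz`` and
 * ``quota_reset_interval_ms`` module parameters.
 */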

static struct damos_watermarks damon_reclaim_wmarks = {
	.metric = DAMOS_WMARK_FREE_MEM_RATE,
	.interval = 5000000,	/* 5 seconds */
	.high = 500,		/* 50 percent */
	.mid = 400,		/* 40 percent */
	.low = 200,		/* 20 percent */
};
DEFINE_DAMON_MODULES_WMARKS_PARAMS(damon_reclaim_wmarks);
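
/*
 * The watermark values are per-thousand of the free memory rate (500 means
 * 50 percent), checked every 5 seconds.  Roughly, the scheme is activated
 * once free memory drops below the mid watermark, and deactivated again
 * when it rises above the high watermark or falls below the low watermark,
 * leaving further reclamation to the regular LRU-based path.
 */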

static struct damon_attrs damon_reclaim_mon_attrs = {
	.sample_interval = 5000,	/* 5 ms */
	.aggr_interval = 100000,	/* 100 ms */
	.ops_update_interval = 0,
	.min_nr_regions = 10,
	.max_nr_regions = 1000,
};
DEFINE_DAMON_MODULES_MON_ATTRS_PARAMS(damon_reclaim_mon_attrs);
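
/*
 * With the defaults above, each region is checked once per 5 ms sampling
 * interval and the per-region access counts are aggregated every 100 ms,
 * so a region's nr_accesses can range from 0 to 20 per aggregation
 * interval.  The scheme built below looks only for regions with
 * nr_accesses == 0.
 */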

/*
 * Start of the target memory region in physical address.
 *
 * The start physical address of the memory region that DAMON_RECLAIM will
 * work on.  By default, the biggest System RAM region is used.
 */
static unsigned long monitor_region_start __read_mostly;
module_param(monitor_region_start, ulong, 0600);

/*
 * End of the target memory region in physical address.
 *
 * The end physical address of the memory region that DAMON_RECLAIM will
 * work on.  By default, the biggest System RAM region is used.
 */
static unsigned long monitor_region_end __read_mostly;
module_param(monitor_region_end, ulong, 0600);
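
/*
 * If both monitor_region_start and monitor_region_end are left as 0 (the
 * defaults), damon_set_region_biggest_system_ram_default() below picks the
 * biggest System RAM region.  To restrict DAMON_RECLAIM to a specific
 * range, for example one NUMA node's memory, the two parameters are
 * presumably meant to be set together.
 */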

/*
 * Skip reclamation of anonymous pages.
 *
 * If this parameter is set as ``Y``, DAMON_RECLAIM does not reclaim anonymous
 * pages.  By default, ``N``.
 */
static bool skip_anon __read_mostly;
module_param(skip_anon, bool, 0600);

/*
 * PID of the DAMON thread.
 *
 * If DAMON_RECLAIM is enabled, this becomes the PID of the worker thread.
 * Else, -1.
 */
static int kdamond_pid __read_mostly = -1;
module_param(kdamond_pid, int, 0400);

static struct damos_stat damon_reclaim_stat;
DEFINE_DAMON_MODULES_DAMOS_STATS_PARAMS(damon_reclaim_stat,
		reclaim_tried_regions, reclaimed_regions, quota_exceeds);
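
/*
 * DEFINE_DAMON_MODULES_DAMOS_STATS_PARAMS() (see modules-common.h) is
 * expected to expose the statistics as read-only (0400) module parameters,
 * presumably named nr_reclaim_tried_regions, bytes_reclaim_tried_regions,
 * nr_reclaimed_regions, bytes_reclaimed_regions and nr_quota_exceeds.
 */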

static struct damon_ctx *ctx;
static struct damon_target *target;

static struct damos *damon_reclaim_new_scheme(void)
{
	struct damos_access_pattern pattern = {
		/* Find regions having PAGE_SIZE or larger size */
		.min_sz_region = PAGE_SIZE,
		.max_sz_region = ULONG_MAX,
		/* and not accessed at all */
		.min_nr_accesses = 0,
		.max_nr_accesses = 0,
		/* for min_age or more micro-seconds */
		.min_age_region = min_age /
			damon_reclaim_mon_attrs.aggr_interval,
		.max_age_region = UINT_MAX,
	};

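	/*
	 * With the default min_age (120000000 us) and aggregation interval
	 * (100000 us), min_age_region above evaluates to 1200 aggregation
	 * intervals worth of "not accessed at all".
	 */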
	return damon_new_scheme(
			&pattern,
			/* page out those regions as soon as found */
			DAMOS_PAGEOUT,
			/* under the quota. */
			&damon_reclaim_quota,
			/* (De)activate this according to the watermarks. */
			&damon_reclaim_wmarks);
}

static int damon_reclaim_apply_parameters(void)
{
	struct damos *scheme;
	struct damos_filter *filter;
	int err = 0;

	err = damon_set_attrs(ctx, &damon_reclaim_mon_attrs);
	if (err)
		return err;

	/* Will be freed by next 'damon_set_schemes()' below */
	scheme = damon_reclaim_new_scheme();
	if (!scheme)
		return -ENOMEM;
	if (skip_anon) {
		filter = damos_new_filter(DAMOS_FILTER_TYPE_ANON, true);
		if (!filter) {
			/* Will be freed by next 'damon_set_schemes()' below */
			damon_destroy_scheme(scheme);
			return -ENOMEM;
		}
		damos_add_filter(scheme, filter);
	}
	damon_set_schemes(ctx, &scheme, 1);

	return damon_set_region_biggest_system_ram_default(target,
					&monitor_region_start,
					&monitor_region_end);
}

static int damon_reclaim_turn(bool on)
{
	int err;

	if (!on) {
		err = damon_stop(&ctx, 1);
		if (!err)
			kdamond_pid = -1;
		return err;
	}

	err = damon_reclaim_apply_parameters();
	if (err)
		return err;

	err = damon_start(&ctx, 1, true);
	if (err)
		return err;
	kdamond_pid = ctx->kdamond->pid;
	return 0;
}

static int damon_reclaim_enabled_store(const char *val,
		const struct kernel_param *kp)
{
	bool is_enabled = enabled;
	bool enable;
	int err;

	err = kstrtobool(val, &enable);
	if (err)
		return err;

	if (is_enabled == enable)
		return 0;

	/* Called before init function.  The function will handle this. */
	if (!ctx)
		goto set_param_out;

	err = damon_reclaim_turn(enable);
	if (err)
		return err;

set_param_out:
	enabled = enable;
	return err;
}

static const struct kernel_param_ops enabled_param_ops = {
	.set = damon_reclaim_enabled_store,
	.get = param_get_bool,
};

module_param_cb(enabled, &enabled_param_ops, &enabled, 0600);
MODULE_PARM_DESC(enabled,
	"Enable or disable DAMON_RECLAIM (default: disabled)");
238
SeongJae Parkf25ab3b2022-06-06 18:23:07 +0000239static int damon_reclaim_handle_commit_inputs(void)
240{
241 int err;
242
243 if (!commit_inputs)
244 return 0;
245
246 err = damon_reclaim_apply_parameters();
247 commit_inputs = false;
248 return err;
249}
250
SeongJae Park60e52e72022-01-14 14:10:23 -0800251static int damon_reclaim_after_aggregation(struct damon_ctx *c)
252{
253 struct damos *s;
254
255 /* update the stats parameter */
SeongJae Parkb71f3ea2022-09-13 17:44:43 +0000256 damon_for_each_scheme(s, c)
257 damon_reclaim_stat = s->stat;
SeongJae Parke035c282022-05-09 18:20:56 -0700258
SeongJae Parkf25ab3b2022-06-06 18:23:07 +0000259 return damon_reclaim_handle_commit_inputs();
SeongJae Parke035c282022-05-09 18:20:56 -0700260}
261
262static int damon_reclaim_after_wmarks_check(struct damon_ctx *c)
263{
SeongJae Parkf25ab3b2022-06-06 18:23:07 +0000264 return damon_reclaim_handle_commit_inputs();
SeongJae Park60e52e72022-01-14 14:10:23 -0800265}
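
/*
 * Handling commit_inputs from both callbacks is presumably so that
 * parameter updates are applied not only after each aggregation interval
 * while the scheme is active, but also right after the watermark checks,
 * i.e. even while the scheme is deactivated by the watermarks.
 */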

static int __init damon_reclaim_init(void)
{
	int err = damon_modules_new_paddr_ctx_target(&ctx, &target);

	if (err)
		return err;

	ctx->callback.after_wmarks_check = damon_reclaim_after_wmarks_check;
	ctx->callback.after_aggregation = damon_reclaim_after_aggregation;

	/* 'enabled' may have been set before this function, probably via command line */
	if (enabled)
		err = damon_reclaim_turn(true);

	return err;
}

module_init(damon_reclaim_init);