// SPDX-License-Identifier: GPL-2.0
/*
 * DAMON-based LRU-lists Sorting
 *
 * Author: SeongJae Park <sj@kernel.org>
 */

#define pr_fmt(fmt) "damon-lru-sort: " fmt

#include <linux/damon.h>
#include <linux/kstrtox.h>
#include <linux/module.h>

#include "modules-common.h"

#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "damon_lru_sort."
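/*
 * Because of the prefix above, the parameters below are expected to appear as
 * damon_lru_sort.<param> on the kernel command line and, when this file is
 * built in, under /sys/module/damon_lru_sort/parameters/ in sysfs.
 */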

/*
 * Enable or disable DAMON_LRU_SORT.
 *
 * You can enable DAMON_LRU_SORT by setting the value of this parameter as
 * ``Y``. Setting it as ``N`` disables DAMON_LRU_SORT. Note that even when
 * enabled, DAMON_LRU_SORT may do no real monitoring and LRU-lists sorting due
 * to the watermarks-based activation condition. Refer to the description of
 * the watermarks parameters below for this.
 */
static bool enabled __read_mostly;

/*
 * Make DAMON_LRU_SORT read the input parameters again, except ``enabled``.
 *
 * Input parameters that are updated while DAMON_LRU_SORT is running are not
 * applied by default. Once this parameter is set as ``Y``, DAMON_LRU_SORT
 * reads values of the parameters except ``enabled`` again. Once the
 * re-reading is done, this parameter is set as ``N``. If invalid parameters
 * are found during the re-reading, DAMON_LRU_SORT will be disabled.
 */
static bool commit_inputs __read_mostly;
module_param(commit_inputs, bool, 0600);

/*
 * Access frequency threshold for hot memory regions identification in permil.
 *
 * If a memory region is accessed at this frequency or higher, DAMON_LRU_SORT
 * identifies the region as hot and marks it as accessed on the LRU list, so
 * that it is less likely to be reclaimed under memory pressure. 50% by
 * default.
 */
static unsigned long hot_thres_access_freq = 500;
module_param(hot_thres_access_freq, ulong, 0600);

/*
 * Time threshold for cold memory regions identification in microseconds.
 *
 * If a memory region is not accessed for this or a longer time, DAMON_LRU_SORT
 * identifies the region as cold and marks it as unaccessed on the LRU list, so
 * that it can be reclaimed first under memory pressure. 120 seconds by
 * default.
 */
static unsigned long cold_min_age __read_mostly = 120000000;
module_param(cold_min_age, ulong, 0600);

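/*
 * Time quota for the LRU-lists sorting. The quota is shared by the hot and
 * cold schemes below, each taking half of it (see
 * damon_lru_sort_new_scheme()). The DEFINE_DAMON_MODULES_DAMOS_TIME_QUOTA()
 * call following the definition is expected to expose the time quota knobs as
 * module parameters (see modules-common.h).
 */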
static struct damos_quota damon_lru_sort_quota = {
	/* Use up to 10 ms per 1 sec, by default */
	.ms = 10,
	.sz = 0,
	.reset_interval = 1000,
	/* Within the quota, mark hotter regions accessed first. */
	.weight_sz = 0,
	.weight_nr_accesses = 1,
	.weight_age = 0,
};
DEFINE_DAMON_MODULES_DAMOS_TIME_QUOTA(damon_lru_sort_quota);

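/*
 * Watermarks for automatic activation and deactivation of the schemes.
 * Roughly, the schemes get activated when the system's free memory rate drops
 * below the mid watermark (15% by default) and deactivated when it goes above
 * the high watermark (20%) or below the low watermark (5%); refer to the
 * DAMOS watermarks handling in the DAMON core for the exact behavior.
 */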
static struct damos_watermarks damon_lru_sort_wmarks = {
	.metric = DAMOS_WMARK_FREE_MEM_RATE,
	.interval = 5000000,	/* 5 seconds */
	.high = 200,		/* 20 percent */
	.mid = 150,		/* 15 percent */
	.low = 50,		/* 5 percent */
};
DEFINE_DAMON_MODULES_WMARKS_PARAMS(damon_lru_sort_wmarks);

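/*
 * Monitoring attributes: sample each region every 5 ms, aggregate the results
 * every 100 ms, and keep the number of monitoring regions between 10 and
 * 1000. The macro below is expected to expose these as module parameters
 * (see modules-common.h).
 */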
static struct damon_attrs damon_lru_sort_mon_attrs = {
	.sample_interval = 5000,	/* 5 ms */
	.aggr_interval = 100000,	/* 100 ms */
	.ops_update_interval = 0,
	.min_nr_regions = 10,
	.max_nr_regions = 1000,
};
DEFINE_DAMON_MODULES_MON_ATTRS_PARAMS(damon_lru_sort_mon_attrs);

/*
 * Start of the target memory region in physical address.
 *
 * The start physical address of the memory region that DAMON_LRU_SORT will do
 * work against. By default, the biggest System RAM region is used.
 */
static unsigned long monitor_region_start __read_mostly;
module_param(monitor_region_start, ulong, 0600);

/*
 * End of the target memory region in physical address.
 *
 * The end physical address of the memory region that DAMON_LRU_SORT will do
 * work against. By default, the biggest System RAM region is used.
 */
static unsigned long monitor_region_end __read_mostly;
module_param(monitor_region_end, ulong, 0600);

/*
 * PID of the DAMON thread.
 *
 * If DAMON_LRU_SORT is enabled, this becomes the PID of the worker thread.
 * Else, -1.
 */
static int kdamond_pid __read_mostly = -1;
module_param(kdamond_pid, int, 0400);

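/*
 * Statistics of the hot and cold schemes, updated after every aggregation
 * interval by damon_lru_sort_after_aggregation() and exposed as module
 * parameters by the macros below.
 */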
static struct damos_stat damon_lru_sort_hot_stat;
DEFINE_DAMON_MODULES_DAMOS_STATS_PARAMS(damon_lru_sort_hot_stat,
		lru_sort_tried_hot_regions, lru_sorted_hot_regions,
		hot_quota_exceeds);

static struct damos_stat damon_lru_sort_cold_stat;
DEFINE_DAMON_MODULES_DAMOS_STATS_PARAMS(damon_lru_sort_cold_stat,
		lru_sort_tried_cold_regions, lru_sorted_cold_regions,
		cold_quota_exceeds);

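/*
 * Access pattern template that matches every region. The hot and cold
 * schemes below start from this and only tighten the nr_accesses or age
 * bounds.
 */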
static struct damos_access_pattern damon_lru_sort_stub_pattern = {
	/* Find regions having PAGE_SIZE or larger size */
	.min_sz_region = PAGE_SIZE,
	.max_sz_region = ULONG_MAX,
	/* no matter its access frequency */
	.min_nr_accesses = 0,
	.max_nr_accesses = UINT_MAX,
	/* no matter its age */
	.min_age_region = 0,
	.max_age_region = UINT_MAX,
};

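/* The DAMON monitoring context and the physical address space target of this module */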
static struct damon_ctx *ctx;
static struct damon_target *target;

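/*
 * Create a DAMON-based operation scheme that applies @action to regions of
 * @pattern, using half of the total time quota and the common watermarks.
 */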
static struct damos *damon_lru_sort_new_scheme(
		struct damos_access_pattern *pattern, enum damos_action action)
{
	struct damos_quota quota = damon_lru_sort_quota;

	/* Use half of total quota for hot/cold pages sorting */
	quota.ms = quota.ms / 2;

	return damon_new_scheme(
			/* find the pattern, and */
			pattern,
			/* (de)prioritize on LRU-lists */
			action,
			/* under the quota. */
			&quota,
			/* (De)activate this according to the watermarks. */
			&damon_lru_sort_wmarks);
}

/* Create a DAMON-based operation scheme for hot memory regions */
static struct damos *damon_lru_sort_new_hot_scheme(unsigned int hot_thres)
{
	struct damos_access_pattern pattern = damon_lru_sort_stub_pattern;

	pattern.min_nr_accesses = hot_thres;
	return damon_lru_sort_new_scheme(&pattern, DAMOS_LRU_PRIO);
}

/* Create a DAMON-based operation scheme for cold memory regions */
static struct damos *damon_lru_sort_new_cold_scheme(unsigned int cold_thres)
{
	struct damos_access_pattern pattern = damon_lru_sort_stub_pattern;

	pattern.max_nr_accesses = 0;
	pattern.min_age_region = cold_thres;
	return damon_lru_sort_new_scheme(&pattern, DAMOS_LRU_DEPRIO);
}

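/*
 * Apply the current module parameters to the monitoring context: update the
 * monitoring attributes, install one hot-sorting and one cold-sorting scheme
 * with thresholds converted from the parameters, and set the monitoring
 * target to the requested physical address range (or the biggest System RAM
 * region by default).
 */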
static int damon_lru_sort_apply_parameters(void)
{
	struct damos *scheme;
	unsigned int hot_thres, cold_thres;
	int err = 0;

	err = damon_set_attrs(ctx, &damon_lru_sort_mon_attrs);
	if (err)
		return err;

	/* aggr_interval / sample_interval is the maximum nr_accesses */
	hot_thres = damon_lru_sort_mon_attrs.aggr_interval /
		damon_lru_sort_mon_attrs.sample_interval *
		hot_thres_access_freq / 1000;
	scheme = damon_lru_sort_new_hot_scheme(hot_thres);
	if (!scheme)
		return -ENOMEM;
	damon_set_schemes(ctx, &scheme, 1);

	cold_thres = cold_min_age / damon_lru_sort_mon_attrs.aggr_interval;
	scheme = damon_lru_sort_new_cold_scheme(cold_thres);
	if (!scheme)
		return -ENOMEM;
	damon_add_scheme(ctx, scheme);

	return damon_set_region_biggest_system_ram_default(target,
					&monitor_region_start,
					&monitor_region_end);
}

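/*
 * Turn DAMON_LRU_SORT on or off. Turning it on applies the parameters first
 * and then starts DAMON; turning it off stops the running DAMON context.
 * kdamond_pid is updated accordingly.
 */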
static int damon_lru_sort_turn(bool on)
{
	int err;

	if (!on) {
		err = damon_stop(&ctx, 1);
		if (!err)
			kdamond_pid = -1;
		return err;
	}

	err = damon_lru_sort_apply_parameters();
	if (err)
		return err;

	err = damon_start(&ctx, 1, true);
	if (err)
		return err;
	kdamond_pid = ctx->kdamond->pid;
	return 0;
}

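/*
 * 'enabled' parameter set callback. Turns DAMON_LRU_SORT on or off right
 * away, unless it is called before damon_lru_sort_init() has allocated the
 * context, in which case only the parameter value is recorded and the init
 * function does the actual turning on.
 */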
static int damon_lru_sort_enabled_store(const char *val,
		const struct kernel_param *kp)
{
	bool is_enabled = enabled;
	bool enable;
	int err;

	err = kstrtobool(val, &enable);
	if (err)
		return err;

	if (is_enabled == enable)
		return 0;

	/* Called before init function. The function will handle this. */
	if (!ctx)
		goto set_param_out;

	err = damon_lru_sort_turn(enable);
	if (err)
		return err;

set_param_out:
	enabled = enable;
	return err;
}

static const struct kernel_param_ops enabled_param_ops = {
	.set = damon_lru_sort_enabled_store,
	.get = param_get_bool,
};

module_param_cb(enabled, &enabled_param_ops, &enabled, 0600);
MODULE_PARM_DESC(enabled,
	"Enable or disable DAMON_LRU_SORT (default: disabled)");

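/*
 * If the user has requested it via the 'commit_inputs' parameter, re-apply
 * the current parameter values and clear the request.
 */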
static int damon_lru_sort_handle_commit_inputs(void)
{
	int err;

	if (!commit_inputs)
		return 0;

	err = damon_lru_sort_apply_parameters();
	commit_inputs = false;
	return err;
}

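/*
 * DAMON callback that runs after each aggregation interval: snapshot the
 * per-scheme statistics and handle any pending 'commit_inputs' request.
 */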
static int damon_lru_sort_after_aggregation(struct damon_ctx *c)
{
	struct damos *s;

	/* update the stats parameter */
	damon_for_each_scheme(s, c) {
		if (s->action == DAMOS_LRU_PRIO)
			damon_lru_sort_hot_stat = s->stat;
		else if (s->action == DAMOS_LRU_DEPRIO)
			damon_lru_sort_cold_stat = s->stat;
	}

	return damon_lru_sort_handle_commit_inputs();
}

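/*
 * DAMON callback that runs after each watermarks check. This handles
 * 'commit_inputs' here as well, presumably so that parameter updates are
 * applied even while the schemes are deactivated by the watermarks.
 */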
static int damon_lru_sort_after_wmarks_check(struct damon_ctx *c)
{
	return damon_lru_sort_handle_commit_inputs();
}

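/*
 * Module initialization: allocate the monitoring context and target, register
 * the callbacks, and honor an 'enabled' value that was set before
 * initialization, e.g. via the kernel command line.
 */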
static int __init damon_lru_sort_init(void)
{
	int err = damon_modules_new_paddr_ctx_target(&ctx, &target);

	if (err)
		return err;

	ctx->callback.after_wmarks_check = damon_lru_sort_after_wmarks_check;
	ctx->callback.after_aggregation = damon_lru_sort_after_aggregation;

	/* 'enabled' may have been set before this, e.g. via the command line */
	if (enabled)
		err = damon_lru_sort_turn(true);

	return err;
}

module_init(damon_lru_sort_init);