// SPDX-License-Identifier: GPL-2.0
/*
 * Pressure stall information for CPU, memory and IO
 *
 * Copyright (c) 2018 Facebook, Inc.
 * Author: Johannes Weiner <hannes@cmpxchg.org>
 *
 * Polling support by Suren Baghdasaryan <surenb@google.com>
 * Copyright (c) 2018 Google, Inc.
 *
 * When CPU, memory and IO are contended, tasks experience delays that
 * reduce throughput and introduce latencies into the workload. Memory
 * and IO contention, in addition, can cause a full loss of forward
 * progress in which the CPU goes idle.
 *
 * This code aggregates individual task delays into resource pressure
 * metrics that indicate problems with both workload health and
 * resource utilization.
 *
 * Model
 *
 * The time in which a task can execute on a CPU is our baseline for
 * productivity. Pressure expresses the amount of time in which this
 * potential cannot be realized due to resource contention.
 *
 * This concept of productivity has two components: the workload and
 * the CPU. To measure the impact of pressure on both, we define two
 * contention states for a resource: SOME and FULL.
 *
 * In the SOME state of a given resource, one or more tasks are
 * delayed on that resource. This affects the workload's ability to
 * perform work, but the CPU may still be executing other tasks.
 *
 * In the FULL state of a given resource, all non-idle tasks are
 * delayed on that resource such that nobody is advancing and the CPU
 * goes idle. This leaves both workload and CPU unproductive.
 *
 *	SOME = nr_delayed_tasks != 0
 *	FULL = nr_delayed_tasks != 0 && nr_productive_tasks == 0
 *
 * What it means for a task to be productive is defined differently
 * for each resource. For IO, productive means a running task. For
 * memory, productive means a running task that isn't a reclaimer. For
 * CPU, productive means an oncpu task.
 *
 * Naturally, the FULL state doesn't exist for the CPU resource at the
 * system level, but it does exist at the cgroup level. At the cgroup
 * level, FULL means all non-idle tasks in the cgroup are delayed on
 * the CPU resource which is being used by others outside of the
 * cgroup or throttled by the cgroup's cpu.max configuration.
 *
 * The percentage of wallclock time spent in those compound stall
 * states gives pressure numbers between 0 and 100 for each resource,
 * where the SOME percentage indicates workload slowdowns and the FULL
 * percentage indicates reduced CPU utilization:
 *
 *	%SOME = time(SOME) / period
 *	%FULL = time(FULL) / period
 *
 * Multiple CPUs
 *
 * The more tasks and available CPUs there are, the more work can be
 * performed concurrently. This means that the potential that can go
 * unrealized due to resource contention *also* scales with non-idle
 * tasks and CPUs.
 *
 * Consider a scenario where 257 number crunching tasks are trying to
 * run concurrently on 256 CPUs. If we simply aggregated the task
 * states, we would have to conclude a CPU SOME pressure number of
 * 100%, since *somebody* is waiting on a runqueue at all
 * times. However, that is clearly not the amount of contention the
 * workload is experiencing: only one out of 256 possible execution
 * threads will be contended at any given time, or about 0.4%.
 *
 * Conversely, consider a scenario of 4 tasks and 4 CPUs where at any
 * given time *one* of the tasks is delayed due to a lack of memory.
 * Again, looking purely at the task state would yield a memory FULL
 * pressure number of 0%, since *somebody* is always making forward
 * progress. But again this wouldn't capture the amount of execution
 * potential lost, which is 1 out of 4 CPUs, or 25%.
 *
 * To calculate wasted potential (pressure) with multiple processors,
 * we have to base our calculation on the number of non-idle tasks in
 * conjunction with the number of available CPUs, which is the number
 * of potential execution threads. SOME becomes then the proportion of
 * delayed tasks to possible threads, and FULL is the share of possible
 * threads that are unproductive due to delays:
 *
 *	threads = min(nr_nonidle_tasks, nr_cpus)
 *	   SOME = min(nr_delayed_tasks / threads, 1)
 *	   FULL = (threads - min(nr_productive_tasks, threads)) / threads
 *
 * For the 257 number crunchers on 256 CPUs, this yields:
 *
 *	threads = min(257, 256)
 *	   SOME = min(1 / 256, 1)             =  0.4%
 *	   FULL = (256 - min(256, 256)) / 256 =  0%
 *
 * For the 1 out of 4 memory-delayed tasks, this yields:
 *
 *	threads = min(4, 4)
 *	   SOME = min(1 / 4, 1)               = 25%
 *	   FULL = (4 - min(3, 4)) / 4         = 25%
 *
 * [ Substitute nr_cpus with 1, and you can see that it's a natural
 *   extension of the single-CPU model. ]
 *
 * Implementation
 *
 * To assess the precise time spent in each such state, we would have
 * to freeze the system on task changes and start/stop the state
 * clocks accordingly. Obviously that doesn't scale in practice.
 *
 * Because the scheduler aims to distribute the compute load evenly
 * among the available CPUs, we can track task state locally to each
 * CPU and, at much lower frequency, extrapolate the global state for
 * the cumulative stall times and the running averages.
 *
 * For each runqueue, we track:
 *
 *	   tSOME[cpu] = time(nr_delayed_tasks[cpu] != 0)
 *	   tFULL[cpu] = time(nr_delayed_tasks[cpu] && !nr_productive_tasks[cpu])
 *	tNONIDLE[cpu] = time(nr_nonidle_tasks[cpu] != 0)
 *
 * and then periodically aggregate:
 *
 *	tNONIDLE = sum(tNONIDLE[i])
 *
 *	   tSOME = sum(tSOME[i] * tNONIDLE[i]) / tNONIDLE
 *	   tFULL = sum(tFULL[i] * tNONIDLE[i]) / tNONIDLE
 *
 *	   %SOME = tSOME / period
 *	   %FULL = tFULL / period
 *
 * This gives us an approximation of pressure that is practical
 * cost-wise, yet way more sensitive and accurate than periodic
 * sampling of the aggregate task states would be.
 */
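
/*
 * A quick way to sanity-check the model above outside the kernel is
 * the userspace-style sketch below. It is illustrative only and
 * excluded from the build; the function names are made up, but the
 * math is exactly the SOME/FULL formulas documented above, and it
 * reproduces the two worked examples.
 */
#if 0
#include <stdio.h>

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

/* SOME = min(nr_delayed_tasks / threads, 1) */
static double model_some(unsigned int nr_delayed, unsigned int nr_nonidle,
			 unsigned int nr_cpus)
{
	unsigned int threads = min_u(nr_nonidle, nr_cpus);
	double some = (double)nr_delayed / threads;

	return some > 1.0 ? 1.0 : some;
}

/* FULL = (threads - min(nr_productive_tasks, threads)) / threads */
static double model_full(unsigned int nr_productive, unsigned int nr_nonidle,
			 unsigned int nr_cpus)
{
	unsigned int threads = min_u(nr_nonidle, nr_cpus);

	return (double)(threads - min_u(nr_productive, threads)) / threads;
}

int main(void)
{
	/* 257 number crunchers on 256 CPUs: SOME=0.4%, FULL=0% */
	printf("%.1f%% %.1f%%\n", 100 * model_some(1, 257, 256),
	       100 * model_full(256, 257, 256));
	/* 1 of 4 tasks memory-delayed on 4 CPUs: SOME=25%, FULL=25% */
	printf("%.1f%% %.1f%%\n", 100 * model_some(1, 4, 4),
	       100 * model_full(3, 4, 4));
	return 0;
}
#endif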

static int psi_bug __read_mostly;

DEFINE_STATIC_KEY_FALSE(psi_disabled);
DEFINE_STATIC_KEY_TRUE(psi_cgroups_enabled);

#ifdef CONFIG_PSI_DEFAULT_DISABLED
static bool psi_enable;
#else
static bool psi_enable = true;
#endif
static int __init setup_psi(char *str)
{
	return kstrtobool(str, &psi_enable) == 0;
}
__setup("psi=", setup_psi);

/* Running averages - we need to be higher-res than loadavg */
#define PSI_FREQ	(2*HZ+1)	/* 2 sec intervals */
#define EXP_10s		1677		/* 1/exp(2s/10s) as fixed-point */
#define EXP_60s		1981		/* 1/exp(2s/60s) */
#define EXP_300s	2034		/* 1/exp(2s/300s) */
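
/*
 * The decay factors above follow the loadavg fixed-point convention:
 * with FIXED_1 == 2048, EXP_10s == 2048/exp(2s/10s) ~= 2048 * 0.8187
 * ~= 1677, and likewise 2048/exp(2s/60s) ~= 1981 and
 * 2048/exp(2s/300s) ~= 2034. They are consumed by calc_avgs() below.
 */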

/* PSI trigger definitions */
#define WINDOW_MIN_US 500000	/* Min window size is 500ms */
#define WINDOW_MAX_US 10000000	/* Max window size is 10s */
#define UPDATES_PER_WINDOW 10	/* 10 updates per window */

/* Sampling frequency in nanoseconds */
static u64 psi_period __read_mostly;

/* System-level pressure and stall tracking */
static DEFINE_PER_CPU(struct psi_group_cpu, system_group_pcpu);
struct psi_group psi_system = {
	.pcpu = &system_group_pcpu,
};

static void psi_avgs_work(struct work_struct *work);

static void poll_timer_fn(struct timer_list *t);

static void group_init(struct psi_group *group)
{
	int cpu;

	for_each_possible_cpu(cpu)
		seqcount_init(&per_cpu_ptr(group->pcpu, cpu)->seq);
	group->avg_last_update = sched_clock();
	group->avg_next_update = group->avg_last_update + psi_period;
	INIT_DELAYED_WORK(&group->avgs_work, psi_avgs_work);
	mutex_init(&group->avgs_lock);
	/* Init trigger-related members */
	mutex_init(&group->trigger_lock);
	INIT_LIST_HEAD(&group->triggers);
	group->poll_min_period = U32_MAX;
	group->polling_next_update = ULLONG_MAX;
	init_waitqueue_head(&group->poll_wait);
	timer_setup(&group->poll_timer, poll_timer_fn, 0);
	rcu_assign_pointer(group->poll_task, NULL);
}

void __init psi_init(void)
{
	if (!psi_enable) {
		static_branch_enable(&psi_disabled);
		return;
	}

	if (!cgroup_psi_enabled())
		static_branch_disable(&psi_cgroups_enabled);

	psi_period = jiffies_to_nsecs(PSI_FREQ);
	group_init(&psi_system);
}

static bool test_state(unsigned int *tasks, enum psi_states state)
{
	switch (state) {
	case PSI_IO_SOME:
		return unlikely(tasks[NR_IOWAIT]);
	case PSI_IO_FULL:
		return unlikely(tasks[NR_IOWAIT] && !tasks[NR_RUNNING]);
	case PSI_MEM_SOME:
		return unlikely(tasks[NR_MEMSTALL]);
	case PSI_MEM_FULL:
		return unlikely(tasks[NR_MEMSTALL] &&
			tasks[NR_RUNNING] == tasks[NR_MEMSTALL_RUNNING]);
	case PSI_CPU_SOME:
		return unlikely(tasks[NR_RUNNING] > tasks[NR_ONCPU]);
	case PSI_CPU_FULL:
		return unlikely(tasks[NR_RUNNING] && !tasks[NR_ONCPU]);
	case PSI_NONIDLE:
		return tasks[NR_IOWAIT] || tasks[NR_MEMSTALL] ||
			tasks[NR_RUNNING];
	default:
		return false;
	}
}
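
/*
 * For example, a CPU with one task blocked on IO and no runnable
 * tasks has both PSI_IO_SOME and PSI_IO_FULL set, while a CPU with
 * two runnable tasks of which one is oncpu sets PSI_CPU_SOME but not
 * PSI_CPU_FULL.
 */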

static void get_recent_times(struct psi_group *group, int cpu,
			     enum psi_aggregators aggregator, u32 *times,
			     u32 *pchanged_states)
{
	struct psi_group_cpu *groupc = per_cpu_ptr(group->pcpu, cpu);
	u64 now, state_start;
	enum psi_states s;
	unsigned int seq;
	u32 state_mask;

	*pchanged_states = 0;

	/* Snapshot a coherent view of the CPU state */
	do {
		seq = read_seqcount_begin(&groupc->seq);
		now = cpu_clock(cpu);
		memcpy(times, groupc->times, sizeof(groupc->times));
		state_mask = groupc->state_mask;
		state_start = groupc->state_start;
	} while (read_seqcount_retry(&groupc->seq, seq));

	/* Calculate state time deltas against the previous snapshot */
	for (s = 0; s < NR_PSI_STATES; s++) {
		u32 delta;
		/*
		 * In addition to already concluded states, we also
		 * incorporate currently active states on the CPU,
		 * since states may last for many sampling periods.
		 *
		 * This way we keep our delta sampling buckets small
		 * (u32) and our reported pressure close to what's
		 * actually happening.
		 */
		if (state_mask & (1 << s))
			times[s] += now - state_start;

		delta = times[s] - groupc->times_prev[aggregator][s];
		groupc->times_prev[aggregator][s] = times[s];

		times[s] = delta;
		if (delta)
			*pchanged_states |= (1 << s);
	}
}

static void calc_avgs(unsigned long avg[3], int missed_periods,
		      u64 time, u64 period)
{
	unsigned long pct;

	/* Fill in zeroes for periods of no activity */
	if (missed_periods) {
		avg[0] = calc_load_n(avg[0], EXP_10s, 0, missed_periods);
		avg[1] = calc_load_n(avg[1], EXP_60s, 0, missed_periods);
		avg[2] = calc_load_n(avg[2], EXP_300s, 0, missed_periods);
	}

	/* Sample the most recent active period */
	pct = div_u64(time * 100, period);
	pct *= FIXED_1;
	avg[0] = calc_load(avg[0], EXP_10s, pct);
	avg[1] = calc_load(avg[1], EXP_60s, pct);
	avg[2] = calc_load(avg[2], EXP_300s, pct);
}
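
/*
 * For example, starting from avg10 == 0, one fully stalled 2s period
 * (pct == 100 * FIXED_1) yields 100% * (FIXED_1 - EXP_10s) / FIXED_1
 * == 100% * 371 / 2048 ~= 18% (see calc_load()), and subsequent
 * stalled periods keep converging toward 100%.
 */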

static void collect_percpu_times(struct psi_group *group,
				 enum psi_aggregators aggregator,
				 u32 *pchanged_states)
{
	u64 deltas[NR_PSI_STATES - 1] = { 0, };
	unsigned long nonidle_total = 0;
	u32 changed_states = 0;
	int cpu;
	int s;

	/*
	 * Collect the per-cpu time buckets and average them into a
	 * single time sample that is normalized to wallclock time.
	 *
	 * For averaging, each CPU is weighted by its non-idle time in
	 * the sampling period. This eliminates artifacts from uneven
	 * loading, or even entirely idle CPUs.
	 */
	for_each_possible_cpu(cpu) {
		u32 times[NR_PSI_STATES];
		u32 nonidle;
		u32 cpu_changed_states;

		get_recent_times(group, cpu, aggregator, times,
				 &cpu_changed_states);
		changed_states |= cpu_changed_states;

		nonidle = nsecs_to_jiffies(times[PSI_NONIDLE]);
		nonidle_total += nonidle;

		for (s = 0; s < PSI_NONIDLE; s++)
			deltas[s] += (u64)times[s] * nonidle;
	}

	/*
	 * Integrate the sample into the running statistics that are
	 * reported to userspace: the cumulative stall times and the
	 * decaying averages.
	 *
	 * Pressure percentages are sampled at PSI_FREQ. We might be
	 * called more often when the user polls more frequently than
	 * that; we might be called less often when there is no task
	 * activity, thus no data, and clock ticks are sporadic. The
	 * below handles both.
	 */

	/* total= */
	for (s = 0; s < NR_PSI_STATES - 1; s++)
		group->total[aggregator][s] +=
				div_u64(deltas[s], max(nonidle_total, 1UL));

	if (pchanged_states)
		*pchanged_states = changed_states;
}

static u64 update_averages(struct psi_group *group, u64 now)
{
	unsigned long missed_periods = 0;
	u64 expires, period;
	u64 avg_next_update;
	int s;

	/* avgX= */
	expires = group->avg_next_update;
	if (now - expires >= psi_period)
		missed_periods = div_u64(now - expires, psi_period);

	/*
	 * The periodic clock tick can get delayed for various
	 * reasons, especially on loaded systems. To avoid clock
	 * drift, we schedule the clock in fixed psi_period intervals.
	 * But the deltas we sample out of the per-cpu buckets above
	 * are based on the actual time elapsing between clock ticks.
	 */
	avg_next_update = expires + ((1 + missed_periods) * psi_period);
	period = now - (group->avg_last_update + (missed_periods * psi_period));
	group->avg_last_update = now;

	for (s = 0; s < NR_PSI_STATES - 1; s++) {
		u32 sample;

		sample = group->total[PSI_AVGS][s] - group->avg_total[s];
		/*
		 * Due to the lockless sampling of the time buckets,
		 * recorded time deltas can slip into the next period,
		 * which under full pressure can result in samples in
		 * excess of the period length.
		 *
		 * We don't want to report non-sensical pressures in
		 * excess of 100%, nor do we want to drop such events
		 * on the floor. Instead we punt any overage into the
		 * future until pressure subsides. By doing this we
		 * don't underreport the occurring pressure curve, we
		 * just report it delayed by one period length.
		 *
		 * The error isn't cumulative. As soon as another
		 * delta slips from a period P to P+1, by definition
		 * it frees up its time T in P.
		 */
		if (sample > period)
			sample = period;
		group->avg_total[s] += sample;
		calc_avgs(group->avg[s], missed_periods, sample, period);
	}

	return avg_next_update;
}

static void psi_avgs_work(struct work_struct *work)
{
	struct delayed_work *dwork;
	struct psi_group *group;
	u32 changed_states;
	bool nonidle;
	u64 now;

	dwork = to_delayed_work(work);
	group = container_of(dwork, struct psi_group, avgs_work);

	mutex_lock(&group->avgs_lock);

	now = sched_clock();

	collect_percpu_times(group, PSI_AVGS, &changed_states);
	nonidle = changed_states & (1 << PSI_NONIDLE);
	/*
	 * If there is task activity, periodically fold the per-cpu
	 * times and feed samples into the running averages. If things
	 * are idle and there is no data to process, stop the clock.
	 * Once restarted, we'll catch up the running averages in one
	 * go - see calc_avgs() and missed_periods.
	 */
	if (now >= group->avg_next_update)
		group->avg_next_update = update_averages(group, now);

	if (nonidle) {
		schedule_delayed_work(dwork, nsecs_to_jiffies(
				group->avg_next_update - now) + 1);
	}

	mutex_unlock(&group->avgs_lock);
}

/* Trigger tracking window manipulations */
static void window_reset(struct psi_window *win, u64 now, u64 value,
			 u64 prev_growth)
{
	win->start_time = now;
	win->start_value = value;
	win->prev_growth = prev_growth;
}

/*
 * PSI growth tracking window update and growth calculation routine.
 *
 * This approximates a sliding tracking window by interpolating
 * partially elapsed windows using historical growth data from the
 * previous intervals. This minimizes memory requirements (by not storing
 * all the intermediate values in the previous window) and simplifies
 * the calculations. It works well because PSI signal changes only in
 * positive direction and over relatively small window sizes the growth
 * is close to linear.
 */
static u64 window_update(struct psi_window *win, u64 now, u64 value)
{
	u64 elapsed;
	u64 growth;

	elapsed = now - win->start_time;
	growth = value - win->start_value;
	/*
	 * After each tracking window passes win->start_value and
	 * win->start_time get reset and win->prev_growth stores
	 * the average per-window growth of the previous window.
	 * win->prev_growth is then used to interpolate additional
	 * growth from the previous window assuming it was linear.
	 */
	if (elapsed > win->size)
		window_reset(win, now, value, growth);
	else {
		u32 remaining;

		remaining = win->size - elapsed;
		growth += div64_u64(win->prev_growth * remaining, win->size);
	}

	return growth;
}
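
/*
 * For example, with a 1s window that is 250ms into its current
 * interval, growth is the stall time observed in those 250ms plus
 * 750ms/1s == 3/4 of the previous window's growth as the linear
 * estimate for the not-yet-elapsed remainder of the sliding window.
 */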

static void init_triggers(struct psi_group *group, u64 now)
{
	struct psi_trigger *t;

	list_for_each_entry(t, &group->triggers, node)
		window_reset(&t->win, now,
				group->total[PSI_POLL][t->state], 0);
	memcpy(group->polling_total, group->total[PSI_POLL],
		   sizeof(group->polling_total));
	group->polling_next_update = now + group->poll_min_period;
}

static u64 update_triggers(struct psi_group *group, u64 now)
{
	struct psi_trigger *t;
	bool update_total = false;
	u64 *total = group->total[PSI_POLL];

	/*
	 * On subsequent updates, calculate growth deltas and let
	 * watchers know when their specified thresholds are exceeded.
	 */
	list_for_each_entry(t, &group->triggers, node) {
		u64 growth;
		bool new_stall;

		new_stall = group->polling_total[t->state] != total[t->state];

		/* Check for stall activity or a previous threshold breach */
		if (!new_stall && !t->pending_event)
			continue;
		/*
		 * Check for new stall activity, as well as deferred
		 * events that occurred in the last window after the
		 * trigger had already fired (we want to ratelimit
		 * events without dropping any).
		 */
		if (new_stall) {
			/*
			 * Multiple triggers might be looking at the same state,
			 * remember to update group->polling_total[] once we've
			 * been through all of them. Also remember to extend the
			 * polling time if we see new stall activity.
			 */
			update_total = true;

			/* Calculate growth since last update */
			growth = window_update(&t->win, now, total[t->state]);
			if (growth < t->threshold)
				continue;

			t->pending_event = true;
		}
		/* Limit event signaling to once per window */
		if (now < t->last_event_time + t->win.size)
			continue;

		/* Generate an event */
		if (cmpxchg(&t->event, 0, 1) == 0)
			wake_up_interruptible(&t->event_wait);
		t->last_event_time = now;
		/* Reset threshold breach flag once event got generated */
		t->pending_event = false;
	}

	if (update_total)
		memcpy(group->polling_total, total,
				sizeof(group->polling_total));

	return now + group->poll_min_period;
}

/* Schedule polling if it's not already scheduled. */
static void psi_schedule_poll_work(struct psi_group *group, unsigned long delay)
{
	struct task_struct *task;

	/*
	 * Do not reschedule if already scheduled.
	 * Possible race with a timer scheduled after this check but before
	 * mod_timer below can be tolerated because group->polling_next_update
	 * will keep updates on schedule.
	 */
	if (timer_pending(&group->poll_timer))
		return;

	rcu_read_lock();

	task = rcu_dereference(group->poll_task);
	/*
	 * kworker might be NULL in case psi_trigger_destroy races with
	 * psi_task_change (hotpath) which can't use locks
	 */
	if (likely(task))
		mod_timer(&group->poll_timer, jiffies + delay);

	rcu_read_unlock();
}

static void psi_poll_work(struct psi_group *group)
{
	u32 changed_states;
	u64 now;

	mutex_lock(&group->trigger_lock);

	now = sched_clock();

	collect_percpu_times(group, PSI_POLL, &changed_states);

	if (changed_states & group->poll_states) {
		/* Initialize trigger windows when entering polling mode */
		if (now > group->polling_until)
			init_triggers(group, now);

		/*
		 * Keep the monitor active for at least the duration of the
		 * minimum tracking window as long as monitor states are
		 * changing.
		 */
		group->polling_until = now +
			group->poll_min_period * UPDATES_PER_WINDOW;
	}

	if (now > group->polling_until) {
		group->polling_next_update = ULLONG_MAX;
		goto out;
	}

	if (now >= group->polling_next_update)
		group->polling_next_update = update_triggers(group, now);

	psi_schedule_poll_work(group,
		nsecs_to_jiffies(group->polling_next_update - now) + 1);

out:
	mutex_unlock(&group->trigger_lock);
}

static int psi_poll_worker(void *data)
{
	struct psi_group *group = (struct psi_group *)data;

	sched_set_fifo_low(current);

	while (true) {
		wait_event_interruptible(group->poll_wait,
				atomic_cmpxchg(&group->poll_wakeup, 1, 0) ||
				kthread_should_stop());
		if (kthread_should_stop())
			break;

		psi_poll_work(group);
	}
	return 0;
}

static void poll_timer_fn(struct timer_list *t)
{
	struct psi_group *group = from_timer(group, t, poll_timer);

	atomic_set(&group->poll_wakeup, 1);
	wake_up_interruptible(&group->poll_wait);
}

static void record_times(struct psi_group_cpu *groupc, u64 now)
{
	u32 delta;

	delta = now - groupc->state_start;
	groupc->state_start = now;

	if (groupc->state_mask & (1 << PSI_IO_SOME)) {
		groupc->times[PSI_IO_SOME] += delta;
		if (groupc->state_mask & (1 << PSI_IO_FULL))
			groupc->times[PSI_IO_FULL] += delta;
	}

	if (groupc->state_mask & (1 << PSI_MEM_SOME)) {
		groupc->times[PSI_MEM_SOME] += delta;
		if (groupc->state_mask & (1 << PSI_MEM_FULL))
			groupc->times[PSI_MEM_FULL] += delta;
	}

	if (groupc->state_mask & (1 << PSI_CPU_SOME)) {
		groupc->times[PSI_CPU_SOME] += delta;
		if (groupc->state_mask & (1 << PSI_CPU_FULL))
			groupc->times[PSI_CPU_FULL] += delta;
	}

	if (groupc->state_mask & (1 << PSI_NONIDLE))
		groupc->times[PSI_NONIDLE] += delta;
}

static void psi_group_change(struct psi_group *group, int cpu,
			     unsigned int clear, unsigned int set, u64 now,
			     bool wake_clock)
{
	struct psi_group_cpu *groupc;
	u32 state_mask = 0;
	unsigned int t, m;
	enum psi_states s;

	groupc = per_cpu_ptr(group->pcpu, cpu);

	/*
	 * First we assess the aggregate resource states this CPU's
	 * tasks have been in since the last change, and account any
	 * SOME and FULL time these may have resulted in.
	 *
	 * Then we update the task counts according to the state
	 * change requested through the @clear and @set bits.
	 */
	write_seqcount_begin(&groupc->seq);

	record_times(groupc, now);

	for (t = 0, m = clear; m; m &= ~(1 << t), t++) {
		if (!(m & (1 << t)))
			continue;
		if (groupc->tasks[t]) {
			groupc->tasks[t]--;
		} else if (!psi_bug) {
			printk_deferred(KERN_ERR "psi: task underflow! cpu=%d t=%d tasks=[%u %u %u %u %u] clear=%x set=%x\n",
					cpu, t, groupc->tasks[0],
					groupc->tasks[1], groupc->tasks[2],
					groupc->tasks[3], groupc->tasks[4],
					clear, set);
			psi_bug = 1;
		}
	}

	for (t = 0; set; set &= ~(1 << t), t++)
		if (set & (1 << t))
			groupc->tasks[t]++;

	/* Calculate state mask representing active states */
	for (s = 0; s < NR_PSI_STATES; s++) {
		if (test_state(groupc->tasks, s))
			state_mask |= (1 << s);
	}

	/*
	 * Since we care about lost potential, a memstall is FULL
	 * when there are no other working tasks, but also when
	 * the CPU is actively reclaiming and nothing productive
	 * could run even if it were runnable. So when the current
	 * task in a cgroup is in_memstall, the corresponding groupc
	 * on that cpu is in PSI_MEM_FULL state.
	 */
	if (unlikely(groupc->tasks[NR_ONCPU] && cpu_curr(cpu)->in_memstall))
		state_mask |= (1 << PSI_MEM_FULL);

	groupc->state_mask = state_mask;

	write_seqcount_end(&groupc->seq);

	if (state_mask & group->poll_states)
		psi_schedule_poll_work(group, 1);

	if (wake_clock && !delayed_work_pending(&group->avgs_work))
		schedule_delayed_work(&group->avgs_work, PSI_FREQ);
}

static struct psi_group *iterate_groups(struct task_struct *task, void **iter)
{
	if (*iter == &psi_system)
		return NULL;

#ifdef CONFIG_CGROUPS
	if (static_branch_likely(&psi_cgroups_enabled)) {
		struct cgroup *cgroup = NULL;

		if (!*iter)
			cgroup = task->cgroups->dfl_cgrp;
		else
			cgroup = cgroup_parent(*iter);

		if (cgroup && cgroup_parent(cgroup)) {
			*iter = cgroup;
			return cgroup_psi(cgroup);
		}
	}
#endif
	*iter = &psi_system;
	return &psi_system;
}

static void psi_flags_change(struct task_struct *task, int clear, int set)
{
	if (((task->psi_flags & set) ||
	     (task->psi_flags & clear) != clear) &&
	    !psi_bug) {
		printk_deferred(KERN_ERR "psi: inconsistent task state! task=%d:%s cpu=%d psi_flags=%x clear=%x set=%x\n",
				task->pid, task->comm, task_cpu(task),
				task->psi_flags, clear, set);
		psi_bug = 1;
	}

	task->psi_flags &= ~clear;
	task->psi_flags |= set;
}

void psi_task_change(struct task_struct *task, int clear, int set)
{
	int cpu = task_cpu(task);
	struct psi_group *group;
	bool wake_clock = true;
	void *iter = NULL;
	u64 now;

	if (!task->pid)
		return;

	psi_flags_change(task, clear, set);

	now = cpu_clock(cpu);
	/*
	 * Periodic aggregation shuts off if there is a period of no
	 * task changes, so we wake it back up if necessary. However,
	 * don't do this if the task change is the aggregation worker
	 * itself going to sleep, or we'll ping-pong forever.
	 */
	if (unlikely((clear & TSK_RUNNING) &&
		     (task->flags & PF_WQ_WORKER) &&
		     wq_worker_last_func(task) == psi_avgs_work))
		wake_clock = false;

	while ((group = iterate_groups(task, &iter)))
		psi_group_change(group, cpu, clear, set, now, wake_clock);
}

void psi_task_switch(struct task_struct *prev, struct task_struct *next,
		     bool sleep)
{
	struct psi_group *group, *common = NULL;
	int cpu = task_cpu(prev);
	void *iter;
	u64 now = cpu_clock(cpu);

	if (next->pid) {
		bool identical_state;

		psi_flags_change(next, 0, TSK_ONCPU);
		/*
		 * When switching between tasks that have an identical
		 * runtime state, the cgroup that contains both tasks
		 * does not change: we can stop updating the tree once
		 * we reach the first common ancestor. Iterate @next's
		 * ancestors only until we encounter @prev's ONCPU.
		 */
		identical_state = prev->psi_flags == next->psi_flags;
		iter = NULL;
		while ((group = iterate_groups(next, &iter))) {
			if (identical_state &&
			    per_cpu_ptr(group->pcpu, cpu)->tasks[NR_ONCPU]) {
				common = group;
				break;
			}

			psi_group_change(group, cpu, 0, TSK_ONCPU, now, true);
		}
	}

	if (prev->pid) {
		int clear = TSK_ONCPU, set = 0;

		/*
		 * When we're going to sleep, psi_dequeue() lets us
		 * handle TSK_RUNNING, TSK_MEMSTALL_RUNNING and
		 * TSK_IOWAIT here, where we can combine it with
		 * TSK_ONCPU and save walking common ancestors twice.
		 */
		if (sleep) {
			clear |= TSK_RUNNING;
			if (prev->in_memstall)
				clear |= TSK_MEMSTALL_RUNNING;
			if (prev->in_iowait)
				set |= TSK_IOWAIT;
		}

		psi_flags_change(prev, clear, set);

		iter = NULL;
		while ((group = iterate_groups(prev, &iter)) && group != common)
			psi_group_change(group, cpu, clear, set, now, true);

		/*
		 * TSK_ONCPU is handled up to the common ancestor. If we're tasked
		 * with dequeuing too, finish that for the rest of the hierarchy.
		 */
		if (sleep) {
			clear &= ~TSK_ONCPU;
			for (; group; group = iterate_groups(prev, &iter))
				psi_group_change(group, cpu, clear, set, now, true);
		}
	}
}

/**
 * psi_memstall_enter - mark the beginning of a memory stall section
 * @flags: flags to handle nested sections
 *
 * Marks the calling task as being stalled due to a lack of memory,
 * such as waiting for a refault or performing reclaim.
 */
void psi_memstall_enter(unsigned long *flags)
{
	struct rq_flags rf;
	struct rq *rq;

	if (static_branch_likely(&psi_disabled))
		return;

	*flags = current->in_memstall;
	if (*flags)
		return;
	/*
	 * in_memstall setting & accounting needs to be atomic wrt
	 * changes to the task's scheduling state, otherwise we can
	 * race with CPU migration.
	 */
	rq = this_rq_lock_irq(&rf);

	current->in_memstall = 1;
	psi_task_change(current, 0, TSK_MEMSTALL | TSK_MEMSTALL_RUNNING);

	rq_unlock_irq(rq, &rf);
}
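
/*
 * Callers bracket the stalled operation and hand the same flags word
 * to both functions so that nested sections stay balanced, e.g.:
 *
 *	unsigned long pflags;
 *
 *	psi_memstall_enter(&pflags);
 *	... wait for a refault or perform reclaim ...
 *	psi_memstall_leave(&pflags);
 */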

/**
 * psi_memstall_leave - mark the end of a memory stall section
 * @flags: flags to handle nested memory stall sections
 *
 * Marks the calling task as no longer stalled due to lack of memory.
 */
void psi_memstall_leave(unsigned long *flags)
{
	struct rq_flags rf;
	struct rq *rq;

	if (static_branch_likely(&psi_disabled))
		return;

	if (*flags)
		return;
	/*
	 * in_memstall clearing & accounting needs to be atomic wrt
	 * changes to the task's scheduling state, otherwise we could
	 * race with CPU migration.
	 */
	rq = this_rq_lock_irq(&rf);

	current->in_memstall = 0;
	psi_task_change(current, TSK_MEMSTALL | TSK_MEMSTALL_RUNNING, 0);

	rq_unlock_irq(rq, &rf);
}

#ifdef CONFIG_CGROUPS
int psi_cgroup_alloc(struct cgroup *cgroup)
{
	if (static_branch_likely(&psi_disabled))
		return 0;

	cgroup->psi = kzalloc(sizeof(struct psi_group), GFP_KERNEL);
	if (!cgroup->psi)
		return -ENOMEM;

	cgroup->psi->pcpu = alloc_percpu(struct psi_group_cpu);
	if (!cgroup->psi->pcpu) {
		kfree(cgroup->psi);
		return -ENOMEM;
	}
	group_init(cgroup->psi);
	return 0;
}

void psi_cgroup_free(struct cgroup *cgroup)
{
	if (static_branch_likely(&psi_disabled))
		return;

	cancel_delayed_work_sync(&cgroup->psi->avgs_work);
	free_percpu(cgroup->psi->pcpu);
	/* All triggers must be removed by now */
	WARN_ONCE(cgroup->psi->poll_states, "psi: trigger leak\n");
	kfree(cgroup->psi);
}

/**
 * cgroup_move_task - move task to a different cgroup
 * @task: the task
 * @to: the target css_set
 *
 * Move task to a new cgroup and safely migrate its associated stall
 * state between the different groups.
 *
 * This function acquires the task's rq lock to lock out concurrent
 * changes to the task's scheduling state and - in case the task is
 * running - concurrent changes to its stall state.
 */
void cgroup_move_task(struct task_struct *task, struct css_set *to)
{
	unsigned int task_flags;
	struct rq_flags rf;
	struct rq *rq;

	if (static_branch_likely(&psi_disabled)) {
		/*
		 * Lame to do this here, but the scheduler cannot be locked
		 * from the outside, so we move cgroups from inside sched/.
		 */
		rcu_assign_pointer(task->cgroups, to);
		return;
	}

	rq = task_rq_lock(task, &rf);

	/*
	 * We may race with schedule() dropping the rq lock between
	 * deactivating prev and switching to next. Because the psi
	 * updates from the deactivation are deferred to the switch
	 * callback to save cgroup tree updates, the task's scheduling
	 * state here is not coherent with its psi state:
	 *
	 * schedule()                   cgroup_move_task()
	 *   rq_lock()
	 *   deactivate_task()
	 *     p->on_rq = 0
	 *     psi_dequeue() // defers TSK_RUNNING & TSK_IOWAIT updates
	 *   pick_next_task()
	 *     rq_unlock()
	 *                                rq_lock()
	 *                                psi_task_change() // old cgroup
	 *                                task->cgroups = to
	 *                                psi_task_change() // new cgroup
	 *                                rq_unlock()
	 *     rq_lock()
	 *     psi_sched_switch() // does deferred updates in new cgroup
	 *
	 * Don't rely on the scheduling state. Use psi_flags instead.
	 */
	task_flags = task->psi_flags;

	if (task_flags)
		psi_task_change(task, task_flags, 0);

	/* See comment above */
	rcu_assign_pointer(task->cgroups, to);

	if (task_flags)
		psi_task_change(task, 0, task_flags);

	task_rq_unlock(rq, task, &rf);
}
#endif /* CONFIG_CGROUPS */

int psi_show(struct seq_file *m, struct psi_group *group, enum psi_res res)
{
	int full;
	u64 now;

	if (static_branch_likely(&psi_disabled))
		return -EOPNOTSUPP;

	/* Update averages before reporting them */
	mutex_lock(&group->avgs_lock);
	now = sched_clock();
	collect_percpu_times(group, PSI_AVGS, NULL);
	if (now >= group->avg_next_update)
		group->avg_next_update = update_averages(group, now);
	mutex_unlock(&group->avgs_lock);

	for (full = 0; full < 2; full++) {
		unsigned long avg[3] = { 0, };
		u64 total = 0;
		int w;

		/* CPU FULL is undefined at the system level */
		if (!(group == &psi_system && res == PSI_CPU && full)) {
			for (w = 0; w < 3; w++)
				avg[w] = group->avg[res * 2 + full][w];
			total = div_u64(group->total[PSI_AVGS][res * 2 + full],
					NSEC_PER_USEC);
		}

		seq_printf(m, "%s avg10=%lu.%02lu avg60=%lu.%02lu avg300=%lu.%02lu total=%llu\n",
			   full ? "full" : "some",
			   LOAD_INT(avg[0]), LOAD_FRAC(avg[0]),
			   LOAD_INT(avg[1]), LOAD_FRAC(avg[1]),
			   LOAD_INT(avg[2]), LOAD_FRAC(avg[2]),
			   total);
	}

	return 0;
}
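
/*
 * This produces the familiar /proc/pressure/* format, e.g. (values
 * illustrative):
 *
 *	some avg10=2.04 avg60=0.75 avg300=0.40 total=157656722
 *	full avg10=0.00 avg60=0.05 avg300=0.06 total=51024512
 *
 * where the averages are percentages and total is the cumulative
 * stall time in microseconds.
 */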

struct psi_trigger *psi_trigger_create(struct psi_group *group,
			char *buf, enum psi_res res)
{
	struct psi_trigger *t;
	enum psi_states state;
	u32 threshold_us;
	u32 window_us;

	if (static_branch_likely(&psi_disabled))
		return ERR_PTR(-EOPNOTSUPP);

	if (sscanf(buf, "some %u %u", &threshold_us, &window_us) == 2)
		state = PSI_IO_SOME + res * 2;
	else if (sscanf(buf, "full %u %u", &threshold_us, &window_us) == 2)
		state = PSI_IO_FULL + res * 2;
	else
		return ERR_PTR(-EINVAL);

	if (state >= PSI_NONIDLE)
		return ERR_PTR(-EINVAL);

	if (window_us < WINDOW_MIN_US ||
	    window_us > WINDOW_MAX_US)
		return ERR_PTR(-EINVAL);

	/* Check threshold */
	if (threshold_us == 0 || threshold_us > window_us)
		return ERR_PTR(-EINVAL);

	t = kmalloc(sizeof(*t), GFP_KERNEL);
	if (!t)
		return ERR_PTR(-ENOMEM);

	t->group = group;
	t->state = state;
	t->threshold = threshold_us * NSEC_PER_USEC;
	t->win.size = window_us * NSEC_PER_USEC;
	window_reset(&t->win, sched_clock(),
			group->total[PSI_POLL][t->state], 0);

	t->event = 0;
	t->last_event_time = 0;
	init_waitqueue_head(&t->event_wait);
	t->pending_event = false;

	mutex_lock(&group->trigger_lock);

	if (!rcu_access_pointer(group->poll_task)) {
		struct task_struct *task;

		task = kthread_create(psi_poll_worker, group, "psimon");
		if (IS_ERR(task)) {
			kfree(t);
			mutex_unlock(&group->trigger_lock);
			return ERR_CAST(task);
		}
		atomic_set(&group->poll_wakeup, 0);
		wake_up_process(task);
		rcu_assign_pointer(group->poll_task, task);
	}

	list_add(&t->node, &group->triggers);
	group->poll_min_period = min(group->poll_min_period,
		div_u64(t->win.size, UPDATES_PER_WINDOW));
	group->nr_triggers[t->state]++;
	group->poll_states |= (1 << t->state);

	mutex_unlock(&group->trigger_lock);

	return t;
}
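
/*
 * An illustrative userspace consumer of the trigger interface (see
 * Documentation/accounting/psi.rst): write "<some|full> <threshold_us>
 * <window_us>" to a pressure file, then poll the fd for POLLPRI.
 * E.g. to be woken when memory stalls exceed 150ms per 1s window:
 *
 *	const char trig[] = "some 150000 1000000";
 *	struct pollfd fds;
 *	int fd;
 *
 *	fd = open("/proc/pressure/memory", O_RDWR | O_NONBLOCK);
 *	write(fd, trig, strlen(trig) + 1);
 *	fds.fd = fd;
 *	fds.events = POLLPRI;
 *	poll(&fds, 1, -1);
 */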

void psi_trigger_destroy(struct psi_trigger *t)
{
	struct psi_group *group;
	struct task_struct *task_to_destroy = NULL;

	/*
	 * We do not check psi_disabled since it might have been disabled after
	 * the trigger got created.
	 */
	if (!t)
		return;

	group = t->group;
	/*
	 * Wakeup waiters to stop polling. Can happen if cgroup is deleted
	 * from under a polling process.
	 */
	wake_up_interruptible(&t->event_wait);

	mutex_lock(&group->trigger_lock);

	if (!list_empty(&t->node)) {
		struct psi_trigger *tmp;
		u64 period = ULLONG_MAX;

		list_del(&t->node);
		group->nr_triggers[t->state]--;
		if (!group->nr_triggers[t->state])
			group->poll_states &= ~(1 << t->state);
		/* reset min update period for the remaining triggers */
		list_for_each_entry(tmp, &group->triggers, node)
			period = min(period, div_u64(tmp->win.size,
					UPDATES_PER_WINDOW));
		group->poll_min_period = period;
		/* Destroy poll_task when the last trigger is destroyed */
		if (group->poll_states == 0) {
			group->polling_until = 0;
			task_to_destroy = rcu_dereference_protected(
					group->poll_task,
					lockdep_is_held(&group->trigger_lock));
			rcu_assign_pointer(group->poll_task, NULL);
			del_timer(&group->poll_timer);
		}
	}

	mutex_unlock(&group->trigger_lock);

	/*
	 * Wait for psi_schedule_poll_work RCU to complete its read-side
	 * critical section before destroying the trigger and optionally the
	 * poll_task.
	 */
	synchronize_rcu();
	/*
	 * Stop kthread 'psimon' after releasing trigger_lock to prevent a
	 * deadlock while waiting for psi_poll_work to acquire trigger_lock
	 */
	if (task_to_destroy) {
		/*
		 * After the RCU grace period has expired, the worker
		 * can no longer be found through group->poll_task.
		 */
		kthread_stop(task_to_destroy);
	}
	kfree(t);
}

__poll_t psi_trigger_poll(void **trigger_ptr,
			  struct file *file, poll_table *wait)
{
	__poll_t ret = DEFAULT_POLLMASK;
	struct psi_trigger *t;

	if (static_branch_likely(&psi_disabled))
		return DEFAULT_POLLMASK | EPOLLERR | EPOLLPRI;

	t = smp_load_acquire(trigger_ptr);
	if (!t)
		return DEFAULT_POLLMASK | EPOLLERR | EPOLLPRI;

	poll_wait(file, &t->event_wait, wait);

	if (cmpxchg(&t->event, 1, 0) == 1)
		ret |= EPOLLPRI;

	return ret;
}

#ifdef CONFIG_PROC_FS
static int psi_io_show(struct seq_file *m, void *v)
{
	return psi_show(m, &psi_system, PSI_IO);
}

static int psi_memory_show(struct seq_file *m, void *v)
{
	return psi_show(m, &psi_system, PSI_MEM);
}

static int psi_cpu_show(struct seq_file *m, void *v)
{
	return psi_show(m, &psi_system, PSI_CPU);
}

static int psi_open(struct file *file, int (*psi_show)(struct seq_file *, void *))
{
	if (file->f_mode & FMODE_WRITE && !capable(CAP_SYS_RESOURCE))
		return -EPERM;

	return single_open(file, psi_show, NULL);
}

static int psi_io_open(struct inode *inode, struct file *file)
{
	return psi_open(file, psi_io_show);
}

static int psi_memory_open(struct inode *inode, struct file *file)
{
	return psi_open(file, psi_memory_show);
}

static int psi_cpu_open(struct inode *inode, struct file *file)
{
	return psi_open(file, psi_cpu_show);
}

static ssize_t psi_write(struct file *file, const char __user *user_buf,
			 size_t nbytes, enum psi_res res)
{
	char buf[32];
	size_t buf_size;
	struct seq_file *seq;
	struct psi_trigger *new;

	if (static_branch_likely(&psi_disabled))
		return -EOPNOTSUPP;

	if (!nbytes)
		return -EINVAL;

	buf_size = min(nbytes, sizeof(buf));
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size - 1] = '\0';

	seq = file->private_data;

	/* Take seq->lock to protect seq->private from concurrent writes */
	mutex_lock(&seq->lock);

	/* Allow only one trigger per file descriptor */
	if (seq->private) {
		mutex_unlock(&seq->lock);
		return -EBUSY;
	}

	new = psi_trigger_create(&psi_system, buf, res);
	if (IS_ERR(new)) {
		mutex_unlock(&seq->lock);
		return PTR_ERR(new);
	}

	smp_store_release(&seq->private, new);
	mutex_unlock(&seq->lock);

	return nbytes;
}

static ssize_t psi_io_write(struct file *file, const char __user *user_buf,
			    size_t nbytes, loff_t *ppos)
{
	return psi_write(file, user_buf, nbytes, PSI_IO);
}

static ssize_t psi_memory_write(struct file *file, const char __user *user_buf,
				size_t nbytes, loff_t *ppos)
{
	return psi_write(file, user_buf, nbytes, PSI_MEM);
}

static ssize_t psi_cpu_write(struct file *file, const char __user *user_buf,
			     size_t nbytes, loff_t *ppos)
{
	return psi_write(file, user_buf, nbytes, PSI_CPU);
}

static __poll_t psi_fop_poll(struct file *file, poll_table *wait)
{
	struct seq_file *seq = file->private_data;

	return psi_trigger_poll(&seq->private, file, wait);
}

static int psi_fop_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq = file->private_data;

	psi_trigger_destroy(seq->private);
	return single_release(inode, file);
}

static const struct proc_ops psi_io_proc_ops = {
	.proc_open	= psi_io_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_write	= psi_io_write,
	.proc_poll	= psi_fop_poll,
	.proc_release	= psi_fop_release,
};

static const struct proc_ops psi_memory_proc_ops = {
	.proc_open	= psi_memory_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_write	= psi_memory_write,
	.proc_poll	= psi_fop_poll,
	.proc_release	= psi_fop_release,
};

static const struct proc_ops psi_cpu_proc_ops = {
	.proc_open	= psi_cpu_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_write	= psi_cpu_write,
	.proc_poll	= psi_fop_poll,
	.proc_release	= psi_fop_release,
};

static int __init psi_proc_init(void)
{
	if (psi_enable) {
		proc_mkdir("pressure", NULL);
		proc_create("pressure/io", 0666, NULL, &psi_io_proc_ops);
		proc_create("pressure/memory", 0666, NULL, &psi_memory_proc_ops);
		proc_create("pressure/cpu", 0666, NULL, &psi_cpu_proc_ops);
	}
	return 0;
}
module_init(psi_proc_init);

#endif /* CONFIG_PROC_FS */