Thomas Gleixner | 7170066 | 2019-05-19 15:51:55 +0200 | [diff] [blame] | 1 | // SPDX-License-Identifier: GPL-2.0-or-later |
Shailabh Nagar | ca74e92 | 2006-07-14 00:24:36 -0700 | [diff] [blame] | 2 | /* delayacct.c - per-task delay accounting |
| 3 | * |
| 4 | * Copyright (C) Shailabh Nagar, IBM Corp. 2006 |
Shailabh Nagar | ca74e92 | 2006-07-14 00:24:36 -0700 | [diff] [blame] | 5 | */ |
| 6 | |
| 7 | #include <linux/sched.h> |
Ingo Molnar | 9164bb4 | 2017-02-04 01:20:53 +0100 | [diff] [blame] | 8 | #include <linux/sched/task.h> |
Ingo Molnar | 32ef551 | 2017-02-05 11:48:36 +0100 | [diff] [blame] | 9 | #include <linux/sched/cputime.h> |
Peter Zijlstra | 4b7a08a | 2021-05-04 22:43:48 +0200 | [diff] [blame] | 10 | #include <linux/sched/clock.h> |
Shailabh Nagar | ca74e92 | 2006-07-14 00:24:36 -0700 | [diff] [blame] | 11 | #include <linux/slab.h> |
Alexey Dobriyan | 6952b61 | 2009-09-18 23:55:55 +0400 | [diff] [blame] | 12 | #include <linux/taskstats.h> |
Shailabh Nagar | ca74e92 | 2006-07-14 00:24:36 -0700 | [diff] [blame] | 13 | #include <linux/sysctl.h> |
| 14 | #include <linux/delayacct.h> |
Glauber Costa | c9aaa89 | 2011-07-11 15:28:14 -0400 | [diff] [blame] | 15 | #include <linux/module.h> |
Shailabh Nagar | ca74e92 | 2006-07-14 00:24:36 -0700 | [diff] [blame] | 16 | |
/* Patched-in fast-path check: hooks are no-ops while this key is false. */
DEFINE_STATIC_KEY_FALSE(delayacct_key);
int delayacct_on __read_mostly;	/* Delay accounting turned on/off */
/* Slab cache for per-task struct task_delay_info (see delayacct_init()). */
struct kmem_cache *delayacct_cache;
Shailabh Nagar | ca74e92 | 2006-07-14 00:24:36 -0700 | [diff] [blame] | 20 | |
/*
 * Flip both the fast-path static key and the delayacct_on flag.
 *
 * Note the asymmetric ordering: on enable the key is switched on before
 * the flag is set, on disable the flag is cleared before the key is
 * switched off — presumably so delayacct_on == 1 is never observed while
 * the key is inactive (NOTE(review): rationale inferred from the ordering).
 */
static void set_delayacct(bool enabled)
{
	if (enabled) {
		static_branch_enable(&delayacct_key);
		delayacct_on = 1;
	} else {
		delayacct_on = 0;
		static_branch_disable(&delayacct_key);
	}
}
| 31 | |
/*
 * "delayacct" boot parameter: request delay accounting.  Only the flag is
 * set here; the static key is synced later by delayacct_init() via
 * set_delayacct(delayacct_on).
 */
static int __init delayacct_setup_enable(char *str)
{
	delayacct_on = 1;
	return 1;	/* parameter consumed */
}
__setup("delayacct", delayacct_setup_enable);
Shailabh Nagar | ca74e92 | 2006-07-14 00:24:36 -0700 | [diff] [blame] | 38 | |
/*
 * Boot-time setup: create the task_delay_info cache, give the boot task
 * its delay structure, then sync the static key with delayacct_on (which
 * the "delayacct" boot parameter may already have set).
 */
void delayacct_init(void)
{
	/* SLAB_PANIC: failing to create this cache at boot is fatal. */
	delayacct_cache = KMEM_CACHE(task_delay_info, SLAB_PANIC|SLAB_ACCOUNT);
	delayacct_tsk_init(&init_task);
	set_delayacct(delayacct_on);
}
| 45 | |
Peter Zijlstra | 0cd7c74 | 2021-05-10 14:01:00 +0200 | [diff] [blame] | 46 | #ifdef CONFIG_PROC_SYSCTL |
tangmeng | 1186618 | 2022-02-18 18:59:36 +0800 | [diff] [blame] | 47 | static int sysctl_delayacct(struct ctl_table *table, int write, void *buffer, |
Peter Zijlstra | 0cd7c74 | 2021-05-10 14:01:00 +0200 | [diff] [blame] | 48 | size_t *lenp, loff_t *ppos) |
| 49 | { |
| 50 | int state = delayacct_on; |
| 51 | struct ctl_table t; |
| 52 | int err; |
| 53 | |
| 54 | if (write && !capable(CAP_SYS_ADMIN)) |
| 55 | return -EPERM; |
| 56 | |
| 57 | t = *table; |
| 58 | t.data = &state; |
| 59 | err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos); |
| 60 | if (err < 0) |
| 61 | return err; |
| 62 | if (write) |
| 63 | set_delayacct(state); |
| 64 | return err; |
| 65 | } |
tangmeng | 1186618 | 2022-02-18 18:59:36 +0800 | [diff] [blame] | 66 | |
/* /proc/sys/kernel/task_delayacct: 0/1 toggle, writable by root only. */
static struct ctl_table kern_delayacct_table[] = {
	{
		.procname	= "task_delayacct",
		.data		= NULL,	/* unused: handler works on a local copy */
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= sysctl_delayacct,
		.extra1		= SYSCTL_ZERO,	/* clamp input to 0..1 */
		.extra2		= SYSCTL_ONE,
	},
	{ }	/* sentinel */
};
| 79 | |
/* Register the delay-accounting sysctl under /proc/sys/kernel/. */
static __init int kernel_delayacct_sysctls_init(void)
{
	register_sysctl_init("kernel", kern_delayacct_table);
	return 0;
}
late_initcall(kernel_delayacct_sysctls_init);
Peter Zijlstra | 0cd7c74 | 2021-05-10 14:01:00 +0200 | [diff] [blame] | 86 | #endif |
| 87 | |
Shailabh Nagar | ca74e92 | 2006-07-14 00:24:36 -0700 | [diff] [blame] | 88 | void __delayacct_tsk_init(struct task_struct *tsk) |
| 89 | { |
Christoph Lameter | e94b176 | 2006-12-06 20:33:17 -0800 | [diff] [blame] | 90 | tsk->delays = kmem_cache_zalloc(delayacct_cache, GFP_KERNEL); |
Shailabh Nagar | ca74e92 | 2006-07-14 00:24:36 -0700 | [diff] [blame] | 91 | if (tsk->delays) |
Sebastian Andrzej Siewior | 02acc80 | 2018-04-23 18:10:23 +0200 | [diff] [blame] | 92 | raw_spin_lock_init(&tsk->delays->lock); |
Shailabh Nagar | ca74e92 | 2006-07-14 00:24:36 -0700 | [diff] [blame] | 93 | } |
| 94 | |
Shailabh Nagar | ca74e92 | 2006-07-14 00:24:36 -0700 | [diff] [blame] | 95 | /* |
Thomas Gleixner | 9667a23 | 2014-07-16 21:04:35 +0000 | [diff] [blame] | 96 | * Finish delay accounting for a statistic using its timestamps (@start), |
| 97 | * accumalator (@total) and @count |
Shailabh Nagar | ca74e92 | 2006-07-14 00:24:36 -0700 | [diff] [blame] | 98 | */ |
Peter Zijlstra | 4b7a08a | 2021-05-04 22:43:48 +0200 | [diff] [blame] | 99 | static void delayacct_end(raw_spinlock_t *lock, u64 *start, u64 *total, u32 *count) |
Shailabh Nagar | ca74e92 | 2006-07-14 00:24:36 -0700 | [diff] [blame] | 100 | { |
Peter Zijlstra | 4b7a08a | 2021-05-04 22:43:48 +0200 | [diff] [blame] | 101 | s64 ns = local_clock() - *start; |
Peter Zijlstra | 64efade | 2006-11-05 23:52:10 -0800 | [diff] [blame] | 102 | unsigned long flags; |
Shailabh Nagar | ca74e92 | 2006-07-14 00:24:36 -0700 | [diff] [blame] | 103 | |
Thomas Gleixner | 9667a23 | 2014-07-16 21:04:35 +0000 | [diff] [blame] | 104 | if (ns > 0) { |
Sebastian Andrzej Siewior | 02acc80 | 2018-04-23 18:10:23 +0200 | [diff] [blame] | 105 | raw_spin_lock_irqsave(lock, flags); |
Thomas Gleixner | 9667a23 | 2014-07-16 21:04:35 +0000 | [diff] [blame] | 106 | *total += ns; |
| 107 | (*count)++; |
Sebastian Andrzej Siewior | 02acc80 | 2018-04-23 18:10:23 +0200 | [diff] [blame] | 108 | raw_spin_unlock_irqrestore(lock, flags); |
Thomas Gleixner | 9667a23 | 2014-07-16 21:04:35 +0000 | [diff] [blame] | 109 | } |
Shailabh Nagar | ca74e92 | 2006-07-14 00:24:36 -0700 | [diff] [blame] | 110 | } |
| 111 | |
/* Timestamp the start of a block-I/O delay for the current task. */
void __delayacct_blkio_start(void)
{
	current->delays->blkio_start = local_clock();
}
| 116 | |
/*
 * Close out the block-I/O delay interval opened by __delayacct_blkio_start()
 * and fold it into @p's blkio totals.
 *
 * We cannot rely on the `current` macro, as we haven't yet switched back to
 * the process being woken.
 */
void __delayacct_blkio_end(struct task_struct *p)
{
	delayacct_end(&p->delays->lock,
		      &p->delays->blkio_start,
		      &p->delays->blkio_delay,
		      &p->delays->blkio_count);
}
Shailabh Nagar | 6f44993 | 2006-07-14 00:24:41 -0700 | [diff] [blame] | 128 | |
/*
 * Fill in the delay-accounting fields of taskstats @d from @tsk.
 *
 * Every accumulation below uses the same saturation idiom: the sum is
 * computed in a signed temporary and, if it wrapped (became smaller than
 * the old value), the total is reset to 0 rather than left garbage.
 *
 * Returns 0.
 */
int delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk)
{
	u64 utime, stime, stimescaled, utimescaled;
	unsigned long long t2, t3;
	unsigned long flags, t1;
	s64 tmp;

	/* Real (and scaled) CPU run time, accumulated with overflow reset. */
	task_cputime(tsk, &utime, &stime);
	tmp = (s64)d->cpu_run_real_total;
	tmp += utime + stime;
	d->cpu_run_real_total = (tmp < (s64)d->cpu_run_real_total) ? 0 : tmp;

	task_cputime_scaled(tsk, &utimescaled, &stimescaled);
	tmp = (s64)d->cpu_scaled_run_real_total;
	tmp += utimescaled + stimescaled;
	d->cpu_scaled_run_real_total =
		(tmp < (s64)d->cpu_scaled_run_real_total) ? 0 : tmp;

	/*
	 * No locking available for sched_info (and too expensive to add one)
	 * Mitigate by taking snapshot of values
	 */
	t1 = tsk->sched_info.pcount;
	t2 = tsk->sched_info.run_delay;
	t3 = tsk->se.sum_exec_runtime;

	d->cpu_count += t1;

	tmp = (s64)d->cpu_delay_total + t2;
	d->cpu_delay_total = (tmp < (s64)d->cpu_delay_total) ? 0 : tmp;

	tmp = (s64)d->cpu_run_virtual_total + t3;
	d->cpu_run_virtual_total =
		(tmp < (s64)d->cpu_run_virtual_total) ? 0 : tmp;

	/* Tasks without a delay structure contribute only the stats above. */
	if (!tsk->delays)
		return 0;

	/* zero XXX_total, non-zero XXX_count implies XXX stat overflowed */

	/* Snapshot all per-task delay counters under the task's delay lock. */
	raw_spin_lock_irqsave(&tsk->delays->lock, flags);
	tmp = d->blkio_delay_total + tsk->delays->blkio_delay;
	d->blkio_delay_total = (tmp < d->blkio_delay_total) ? 0 : tmp;
	tmp = d->swapin_delay_total + tsk->delays->swapin_delay;
	d->swapin_delay_total = (tmp < d->swapin_delay_total) ? 0 : tmp;
	tmp = d->freepages_delay_total + tsk->delays->freepages_delay;
	d->freepages_delay_total = (tmp < d->freepages_delay_total) ? 0 : tmp;
	tmp = d->thrashing_delay_total + tsk->delays->thrashing_delay;
	d->thrashing_delay_total = (tmp < d->thrashing_delay_total) ? 0 : tmp;
	tmp = d->compact_delay_total + tsk->delays->compact_delay;
	d->compact_delay_total = (tmp < d->compact_delay_total) ? 0 : tmp;
	tmp = d->wpcopy_delay_total + tsk->delays->wpcopy_delay;
	d->wpcopy_delay_total = (tmp < d->wpcopy_delay_total) ? 0 : tmp;
	tmp = d->irq_delay_total + tsk->delays->irq_delay;
	d->irq_delay_total = (tmp < d->irq_delay_total) ? 0 : tmp;
	d->blkio_count += tsk->delays->blkio_count;
	d->swapin_count += tsk->delays->swapin_count;
	d->freepages_count += tsk->delays->freepages_count;
	d->thrashing_count += tsk->delays->thrashing_count;
	d->compact_count += tsk->delays->compact_count;
	d->wpcopy_count += tsk->delays->wpcopy_count;
	d->irq_count += tsk->delays->irq_count;
	raw_spin_unlock_irqrestore(&tsk->delays->lock, flags);

	return 0;
}
Shailabh Nagar | 2589045 | 2006-07-14 00:24:43 -0700 | [diff] [blame] | 195 | |
| 196 | __u64 __delayacct_blkio_ticks(struct task_struct *tsk) |
| 197 | { |
| 198 | __u64 ret; |
Peter Zijlstra | 64efade | 2006-11-05 23:52:10 -0800 | [diff] [blame] | 199 | unsigned long flags; |
Shailabh Nagar | 2589045 | 2006-07-14 00:24:43 -0700 | [diff] [blame] | 200 | |
Sebastian Andrzej Siewior | 02acc80 | 2018-04-23 18:10:23 +0200 | [diff] [blame] | 201 | raw_spin_lock_irqsave(&tsk->delays->lock, flags); |
Yang Yang | a3d5dc9 | 2022-01-19 18:10:02 -0800 | [diff] [blame] | 202 | ret = nsec_to_clock_t(tsk->delays->blkio_delay); |
Sebastian Andrzej Siewior | 02acc80 | 2018-04-23 18:10:23 +0200 | [diff] [blame] | 203 | raw_spin_unlock_irqrestore(&tsk->delays->lock, flags); |
Shailabh Nagar | 2589045 | 2006-07-14 00:24:43 -0700 | [diff] [blame] | 204 | return ret; |
| 205 | } |
| 206 | |
/* Timestamp the start of a memory-reclaim (freepages) delay. */
void __delayacct_freepages_start(void)
{
	current->delays->freepages_start = local_clock();
}
| 211 | |
/* Fold the reclaim delay since __delayacct_freepages_start() into totals. */
void __delayacct_freepages_end(void)
{
	delayacct_end(&current->delays->lock,
		      &current->delays->freepages_start,
		      &current->delays->freepages_delay,
		      &current->delays->freepages_count);
}
| 219 | |
/*
 * Timestamp the start of a thrashing delay.  *@in_thrashing records whether
 * the task was already inside a thrashing section, so that nested calls do
 * not restart the timestamp (the matching _end call checks the same flag).
 */
void __delayacct_thrashing_start(bool *in_thrashing)
{
	*in_thrashing = !!current->in_thrashing;
	if (*in_thrashing)
		return;	/* already timing an outer thrashing section */

	current->in_thrashing = 1;
	current->delays->thrashing_start = local_clock();
}
| 229 | |
/*
 * Close out a thrashing delay.  @in_thrashing is the value saved by the
 * matching __delayacct_thrashing_start(); nested (inner) sections return
 * early so only the outermost pair is accounted.
 */
void __delayacct_thrashing_end(bool *in_thrashing)
{
	if (*in_thrashing)
		return;

	current->in_thrashing = 0;
	delayacct_end(&current->delays->lock,
		      &current->delays->thrashing_start,
		      &current->delays->thrashing_delay,
		      &current->delays->thrashing_count);
}
Yang Yang | a3d5dc9 | 2022-01-19 18:10:02 -0800 | [diff] [blame] | 241 | |
/* Timestamp the start of a swap-in delay. */
void __delayacct_swapin_start(void)
{
	current->delays->swapin_start = local_clock();
}
| 246 | |
/* Fold the swap-in delay since __delayacct_swapin_start() into totals. */
void __delayacct_swapin_end(void)
{
	delayacct_end(&current->delays->lock,
		      &current->delays->swapin_start,
		      &current->delays->swapin_delay,
		      &current->delays->swapin_count);
}
wangyong | 5bf1828 | 2022-01-19 18:10:15 -0800 | [diff] [blame] | 254 | |
/* Timestamp the start of a memory-compaction delay. */
void __delayacct_compact_start(void)
{
	current->delays->compact_start = local_clock();
}
| 259 | |
/* Fold the compaction delay since __delayacct_compact_start() into totals. */
void __delayacct_compact_end(void)
{
	delayacct_end(&current->delays->lock,
		      &current->delays->compact_start,
		      &current->delays->compact_delay,
		      &current->delays->compact_count);
}
Yang Yang | 662ce1d | 2022-06-01 15:55:25 -0700 | [diff] [blame] | 267 | |
/* Timestamp the start of a write-protect copy (COW fault) delay. */
void __delayacct_wpcopy_start(void)
{
	current->delays->wpcopy_start = local_clock();
}
| 272 | |
/* Fold the COW-copy delay since __delayacct_wpcopy_start() into totals. */
void __delayacct_wpcopy_end(void)
{
	delayacct_end(&current->delays->lock,
		      &current->delays->wpcopy_start,
		      &current->delays->wpcopy_delay,
		      &current->delays->wpcopy_count);
}
Yang Yang | a3b2aea | 2023-04-08 17:28:35 +0800 | [diff] [blame] | 280 | |
/*
 * Account @delta nanoseconds of IRQ/softirq delay to @task.  Unlike the
 * start/end pairs above, the caller supplies the already-measured delta,
 * so only the locked accumulate is needed here.
 */
void __delayacct_irq(struct task_struct *task, u32 delta)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&task->delays->lock, flags);
	task->delays->irq_delay += delta;
	task->delays->irq_count++;
	raw_spin_unlock_irqrestore(&task->delays->lock, flags);
}
| 290 | |