#ifdef CONFIG_SCHEDSTATS

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{
	if (rq) {
		rq->rq_sched_info.run_delay += delta;
		rq->rq_sched_info.pcount++;
	}
}

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{
	if (rq)
		rq->rq_cpu_time += delta;
}

static inline void
rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
{
	if (rq)
		rq->rq_sched_info.run_delay += delta;
}
# define schedstat_inc(rq, field)	do { (rq)->field++; } while (0)
# define schedstat_add(rq, field, amt)	do { (rq)->field += (amt); } while (0)
# define schedstat_set(var, val)	do { var = (val); } while (0)
#else /* !CONFIG_SCHEDSTATS */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{}
static inline void
rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
{}
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{}
# define schedstat_inc(rq, field)	do { } while (0)
# define schedstat_add(rq, field, amt)	do { } while (0)
# define schedstat_set(var, val)	do { } while (0)
#endif
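
/*
 * Illustrative usage of the schedstat helpers (a sketch, not taken from this
 * file; the field names are assumptions standing in for whatever per-rq or
 * per-entity counters a caller maintains):
 *
 *	schedstat_inc(rq, yld_count);				// bump a counter
 *	schedstat_add(rq, rq_sched_info.run_delay, delta);	// accumulate a delta
 *	schedstat_set(se->statistics.wait_start, rq_clock(rq));	// record a timestamp
 *
 * With CONFIG_SCHEDSTATS=n all three expand to empty statements, so callers
 * need no #ifdefs of their own.
 */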

#ifdef CONFIG_SCHED_INFO
static inline void sched_info_reset_dequeued(struct task_struct *t)
{
	t->sched_info.last_queued = 0;
}

/*
 * We are interested in knowing how long it was from the *first* time a
 * task was queued to the time that it finally hit a CPU. We call this
 * routine from dequeue_task() to account for possible rq->clock skew
 * across CPUs; the delta taken on each CPU would annul the skew.
 */
static inline void sched_info_dequeued(struct rq *rq, struct task_struct *t)
{
	unsigned long long now = rq_clock(rq), delta = 0;

	if (unlikely(sched_info_on()))
		if (t->sched_info.last_queued)
			delta = now - t->sched_info.last_queued;
	sched_info_reset_dequeued(t);
	t->sched_info.run_delay += delta;

	rq_sched_info_dequeued(rq, delta);
}

/*
 * Called when a task finally hits the cpu. We can now calculate how
 * long it was waiting to run. We also note when it began so that we
 * can keep stats on how long its timeslice is.
 */
static void sched_info_arrive(struct rq *rq, struct task_struct *t)
{
	unsigned long long now = rq_clock(rq), delta = 0;

	if (t->sched_info.last_queued)
		delta = now - t->sched_info.last_queued;
	sched_info_reset_dequeued(t);
	t->sched_info.run_delay += delta;
	t->sched_info.last_arrival = now;
	t->sched_info.pcount++;

	rq_sched_info_arrive(rq, delta);
}

/*
 * This function is only called from enqueue_task(), but it only updates
 * the timestamp if it is not already set. It's assumed that
 * sched_info_dequeued() will clear that stamp when appropriate.
 */
static inline void sched_info_queued(struct rq *rq, struct task_struct *t)
{
	if (unlikely(sched_info_on()))
		if (!t->sched_info.last_queued)
			t->sched_info.last_queued = rq_clock(rq);
}
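
/*
 * How these hooks are expected to fit together (a sketch of the callers in
 * kernel/sched/core.c, not the exact code):
 *
 *	enqueue_task()	-> sched_info_queued(rq, p)	task becomes runnable
 *	dequeue_task()	-> sched_info_dequeued(rq, p)	task stops being runnable
 *	context switch	-> sched_info_switch(rq, prev, next)
 *						prev departs, next arrives
 */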

/*
 * Called when a process ceases to be the currently running process,
 * typically involuntarily because its time slice expired (this may also
 * be called when switching to the idle task). Now we can calculate how
 * long we ran. Also, if the process is still in the TASK_RUNNING state,
 * call sched_info_queued() to mark that it has now again started waiting
 * on the runqueue.
 */
static inline void sched_info_depart(struct rq *rq, struct task_struct *t)
{
	unsigned long long delta = rq_clock(rq) -
					t->sched_info.last_arrival;

	rq_sched_info_depart(rq, delta);

	if (t->state == TASK_RUNNING)
		sched_info_queued(rq, t);
}
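
/*
 * Illustrative timeline for one task (made-up numbers, in rq_clock() units):
 *
 *	t=100	sched_info_queued()	last_queued = 100
 *	t=400	sched_info_arrive()	run_delay += 300, last_arrival = 400,
 *					pcount++
 *	t=900	sched_info_depart()	rq_sched_info_depart(rq, 500)
 *
 * run_delay thus accumulates time spent runnable but not running, while
 * the depart delta is the time actually spent on the CPU.
 */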

/*
 * Called when tasks are switched involuntarily, typically because their
 * time slice expired. (This may also be called when switching to or from
 * the idle task.) We are only called when prev != next.
 */
static inline void
__sched_info_switch(struct rq *rq,
		    struct task_struct *prev, struct task_struct *next)
{
	/*
	 * prev now departs the cpu.  It's not interesting to record
	 * stats about how efficient we were at scheduling the idle
	 * process, however.
	 */
	if (prev != rq->idle)
		sched_info_depart(rq, prev);

	if (next != rq->idle)
		sched_info_arrive(rq, next);
}
static inline void
sched_info_switch(struct rq *rq,
		  struct task_struct *prev, struct task_struct *next)
{
	if (unlikely(sched_info_on()))
		__sched_info_switch(rq, prev, next);
}
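
/*
 * A sketch of how the core scheduler is expected to use this hook during a
 * context switch (simplified; the real caller is the context-switch path in
 * kernel/sched/core.c, e.g. prepare_task_switch()):
 *
 *	static inline void
 *	prepare_task_switch(struct rq *rq, struct task_struct *prev,
 *			    struct task_struct *next)
 *	{
 *		sched_info_switch(rq, prev, next);	// account wait/run time
 *		...					// perf, rcu, arch hooks
 *	}
 *
 * Because sched_info_switch() checks sched_info_on() itself, the caller
 * does not need to guard the call.
 */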
#else /* !CONFIG_SCHED_INFO */
#define sched_info_queued(rq, t)		do { } while (0)
#define sched_info_reset_dequeued(t)		do { } while (0)
#define sched_info_dequeued(rq, t)		do { } while (0)
#define sched_info_depart(rq, t)		do { } while (0)
#define sched_info_arrive(rq, next)		do { } while (0)
#define sched_info_switch(rq, t, next)		do { } while (0)
#endif /* CONFIG_SCHED_INFO */

/*
 * The following are functions that support scheduler-internal time accounting.
 * These functions are generally called at the timer tick.  None of this depends
 * on CONFIG_SCHEDSTATS.
 */

/**
 * cputimer_running - return true if cputimer is running
 *
 * @tsk:	Pointer to target task.
 */
static inline bool cputimer_running(struct task_struct *tsk)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

	/* Check if cputimer isn't running. This is accessed without locking. */
	if (!READ_ONCE(cputimer->running))
		return false;

	/*
	 * After we flush the task's sum_exec_runtime to sig->sum_sched_runtime
	 * in __exit_signal(), we won't account any further cputime consumed by
	 * that task to the signal struct, even though the task can still be
	 * ticking after __exit_signal().
	 *
	 * To keep thread group cputime and thread group cputimer accounting
	 * consistent, let's also ignore the cputime elapsing after
	 * __exit_signal() in any running thread group timer.
	 *
	 * This makes sure that POSIX CPU clocks and timers are synchronized, so
	 * that a POSIX CPU timer won't expire while the corresponding POSIX CPU
	 * clock delta is behind the expiring timer value.
	 */
	if (unlikely(!tsk->sighand))
		return false;

	return true;
}

/**
 * account_group_user_time - Maintain utime for a thread group.
 *
 * @tsk:	Pointer to task structure.
 * @cputime:	Time value by which to increment the utime field of the
 *		thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the utime field there.
 */
static inline void account_group_user_time(struct task_struct *tsk,
					   cputime_t cputime)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

	if (!cputimer_running(tsk))
		return;

	atomic64_add(cputime, &cputimer->cputime_atomic.utime);
}
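
/*
 * A sketch of the expected caller: the per-task tick accounting path
 * (account_user_time() in kernel/sched/cputime.c, simplified) charges the
 * task first and then lets the thread group accumulate the same value:
 *
 *	p->utime += cputime;			// per-task accounting
 *	account_group_user_time(p, cputime);	// thread-group accounting
 *
 * account_group_system_time() below is used the same way from the
 * system-time half of the tick path.
 */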

/**
 * account_group_system_time - Maintain stime for a thread group.
 *
 * @tsk:	Pointer to task structure.
 * @cputime:	Time value by which to increment the stime field of the
 *		thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the stime field there.
 */
static inline void account_group_system_time(struct task_struct *tsk,
					     cputime_t cputime)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

	if (!cputimer_running(tsk))
		return;

	atomic64_add(cputime, &cputimer->cputime_atomic.stime);
}

/**
 * account_group_exec_runtime - Maintain exec runtime for a thread group.
 *
 * @tsk:	Pointer to task structure.
 * @ns:		Time value by which to increment the sum_exec_runtime field
 *		of the thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the sum_exec_runtime field there.
 */
static inline void account_group_exec_runtime(struct task_struct *tsk,
					      unsigned long long ns)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

	if (!cputimer_running(tsk))
		return;

	atomic64_add(ns, &cputimer->cputime_atomic.sum_exec_runtime);
}
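
/*
 * A sketch of the expected caller: whenever a scheduling class updates a
 * task's runtime (e.g. update_curr() in kernel/sched/fair.c, simplified),
 * it feeds the same delta to the thread group, so process-wide POSIX CPU
 * clocks see execution time as it accrues rather than only at task exit:
 *
 *	curr->sum_exec_runtime += delta_exec;			// per-entity runtime
 *	account_group_exec_runtime(curtask, delta_exec);	// thread-group runtime
 */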