/*
 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
 * policies)
 */
5
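/*
 * Illustrative userspace companion (not part of this scheduler file): a
 * minimal sketch of how a task ends up in this class from the other side
 * of the syscall boundary, by requesting SCHED_FIFO with
 * sched_setscheduler(). Assumes CAP_SYS_NICE (or root); priority choice
 * and error handling are kept to the bare minimum.
 */
#include <sched.h>
#include <stdio.h>

int main(void)
{
	struct sched_param sp = { .sched_priority = 10 };

	/* Request the RT class; the task is then queued on rq->rt below. */
	if (sched_setscheduler(0, SCHED_FIFO, &sp) == -1) {
		perror("sched_setscheduler");
		return 1;
	}
	printf("policy is now %d (SCHED_FIFO=%d)\n",
	       sched_getscheduler(0), SCHED_FIFO);
	return 0;
}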
Gregory Haskins398a1532009-01-14 09:10:04 -05006static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
7{
8 return container_of(rt_se, struct task_struct, rt);
9}
10
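/*
 * A minimal userspace sketch of the container_of() idiom rt_task_of()
 * relies on: recover the outer structure from a pointer to a member
 * embedded inside it. The struct names here are invented for the example;
 * only the pointer arithmetic mirrors the kernel macro.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct entity { int prio; };
struct task  { int pid; struct entity rt; };

int main(void)
{
	struct task t = { .pid = 42, .rt = { .prio = 10 } };
	struct entity *se = &t.rt;		/* all we are handed        */
	struct task *p = container_of(se, struct task, rt);

	printf("recovered pid %d\n", p->pid);	/* prints 42                */
	return 0;
}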
11#ifdef CONFIG_RT_GROUP_SCHED
12
Peter Zijlstraa1ba4d82009-04-01 18:40:15 +020013#define rt_entity_is_task(rt_se) (!(rt_se)->my_q)
14
Gregory Haskins398a1532009-01-14 09:10:04 -050015static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
16{
17 return rt_rq->rq;
18}
19
20static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
21{
22 return rt_se->rt_rq;
23}
24
25#else /* CONFIG_RT_GROUP_SCHED */
26
Peter Zijlstraa1ba4d82009-04-01 18:40:15 +020027#define rt_entity_is_task(rt_se) (1)
28
Gregory Haskins398a1532009-01-14 09:10:04 -050029static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
30{
31 return container_of(rt_rq, struct rq, rt);
32}
33
34static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
35{
36 struct task_struct *p = rt_task_of(rt_se);
37 struct rq *rq = task_rq(p);
38
39 return &rq->rt;
40}
41
42#endif /* CONFIG_RT_GROUP_SCHED */
43
Steven Rostedt4fd29172008-01-25 21:08:06 +010044#ifdef CONFIG_SMP
Ingo Molnar84de4272008-01-25 21:08:15 +010045
Gregory Haskins637f5082008-01-25 21:08:18 +010046static inline int rt_overloaded(struct rq *rq)
Steven Rostedt4fd29172008-01-25 21:08:06 +010047{
Gregory Haskins637f5082008-01-25 21:08:18 +010048 return atomic_read(&rq->rd->rto_count);
Steven Rostedt4fd29172008-01-25 21:08:06 +010049}
Ingo Molnar84de4272008-01-25 21:08:15 +010050
Steven Rostedt4fd29172008-01-25 21:08:06 +010051static inline void rt_set_overload(struct rq *rq)
52{
Gregory Haskins1f11eb6a2008-06-04 15:04:05 -040053 if (!rq->online)
54 return;
55
Rusty Russellc6c49272008-11-25 02:35:05 +103056 cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
Steven Rostedt4fd29172008-01-25 21:08:06 +010057 /*
58 * Make sure the mask is visible before we set
59 * the overload count. That is checked to determine
60 * if we should look at the mask. It would be a shame
61 * if we looked at the mask, but the mask was not
62 * updated yet.
63 */
64 wmb();
Gregory Haskins637f5082008-01-25 21:08:18 +010065 atomic_inc(&rq->rd->rto_count);
Steven Rostedt4fd29172008-01-25 21:08:06 +010066}
Ingo Molnar84de4272008-01-25 21:08:15 +010067
Steven Rostedt4fd29172008-01-25 21:08:06 +010068static inline void rt_clear_overload(struct rq *rq)
69{
Gregory Haskins1f11eb6a2008-06-04 15:04:05 -040070 if (!rq->online)
71 return;
72
Steven Rostedt4fd29172008-01-25 21:08:06 +010073 /* the order here really doesn't matter */
Gregory Haskins637f5082008-01-25 21:08:18 +010074 atomic_dec(&rq->rd->rto_count);
Rusty Russellc6c49272008-11-25 02:35:05 +103075 cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
Steven Rostedt4fd29172008-01-25 21:08:06 +010076}
Gregory Haskins73fe6aae2008-01-25 21:08:07 +010077
Gregory Haskins398a1532009-01-14 09:10:04 -050078static void update_rt_migration(struct rt_rq *rt_rq)
Gregory Haskins73fe6aae2008-01-25 21:08:07 +010079{
Peter Zijlstraa1ba4d82009-04-01 18:40:15 +020080 if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
Gregory Haskins398a1532009-01-14 09:10:04 -050081 if (!rt_rq->overloaded) {
82 rt_set_overload(rq_of_rt_rq(rt_rq));
83 rt_rq->overloaded = 1;
Gregory Haskinscdc8eb92008-01-25 21:08:23 +010084 }
Gregory Haskins398a1532009-01-14 09:10:04 -050085 } else if (rt_rq->overloaded) {
86 rt_clear_overload(rq_of_rt_rq(rt_rq));
87 rt_rq->overloaded = 0;
Gregory Haskins637f5082008-01-25 21:08:18 +010088 }
Gregory Haskins73fe6aae2008-01-25 21:08:07 +010089}
Steven Rostedt4fd29172008-01-25 21:08:06 +010090
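/*
 * Tiny standalone model (not kernel code) of the rule update_rt_migration()
 * enforces above: a runqueue advertises itself in rd->rto_mask only while it
 * has more than one queued RT task and at least one of them may migrate.
 */
#include <assert.h>
#include <stdbool.h>

static bool rt_overload(unsigned nr_total, unsigned nr_migratory)
{
	return nr_migratory && nr_total > 1;
}

int main(void)
{
	assert(!rt_overload(1, 1));	/* single task: nothing to push     */
	assert(!rt_overload(3, 0));	/* all tasks pinned: cannot push    */
	assert( rt_overload(2, 1));	/* spare, migratable task: overload */
	return 0;
}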
Gregory Haskins398a1532009-01-14 09:10:04 -050091static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
Peter Zijlstrafa85ae22008-01-25 21:08:29 +010092{
Peter Zijlstraa1ba4d82009-04-01 18:40:15 +020093 if (!rt_entity_is_task(rt_se))
94 return;
95
96 rt_rq = &rq_of_rt_rq(rt_rq)->rt;
97
98 rt_rq->rt_nr_total++;
Gregory Haskins398a1532009-01-14 09:10:04 -050099 if (rt_se->nr_cpus_allowed > 1)
100 rt_rq->rt_nr_migratory++;
101
102 update_rt_migration(rt_rq);
Peter Zijlstra6f505b12008-01-25 21:08:30 +0100103}
104
Gregory Haskins398a1532009-01-14 09:10:04 -0500105static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
106{
Peter Zijlstraa1ba4d82009-04-01 18:40:15 +0200107 if (!rt_entity_is_task(rt_se))
108 return;
109
110 rt_rq = &rq_of_rt_rq(rt_rq)->rt;
111
112 rt_rq->rt_nr_total--;
Gregory Haskins398a1532009-01-14 09:10:04 -0500113 if (rt_se->nr_cpus_allowed > 1)
114 rt_rq->rt_nr_migratory--;
115
116 update_rt_migration(rt_rq);
117}
118
Gregory Haskins917b6272008-12-29 09:39:53 -0500119static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
120{
121 plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
122 plist_node_init(&p->pushable_tasks, p->prio);
123 plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
124}
125
126static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
127{
128 plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
129}
130
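/*
 * Userspace sketch of what the pushable_tasks plist buys us: entries kept
 * sorted by numeric priority (lower value = more urgent), so the best
 * candidate to push is always at the head. The node type is invented for
 * the example; the kernel uses plist_node/plist_head instead.
 */
#include <stdio.h>

struct pnode { int prio; struct pnode *next; };

static void plist_style_add(struct pnode **head, struct pnode *n)
{
	while (*head && (*head)->prio <= n->prio)
		head = &(*head)->next;	/* keep FIFO order within a prio   */
	n->next = *head;
	*head = n;
}

int main(void)
{
	struct pnode a = { .prio = 30 }, b = { .prio = 10 }, c = { .prio = 20 };
	struct pnode *head = NULL;

	plist_style_add(&head, &a);
	plist_style_add(&head, &b);
	plist_style_add(&head, &c);

	for (struct pnode *p = head; p; p = p->next)
		printf("%d ", p->prio);	/* prints: 10 20 30                */
	printf("\n");
	return 0;
}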
131#else
132
Peter Zijlstraceacc2c2009-01-16 14:46:40 +0100133static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
134{
135}
136
137static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
138{
139}
140
Gregory Haskinsb07430a2009-01-14 08:55:39 -0500141static inline
Peter Zijlstraceacc2c2009-01-16 14:46:40 +0100142void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
143{
144}
145
Gregory Haskinsb07430a2009-01-14 08:55:39 -0500146static inline
Peter Zijlstraceacc2c2009-01-16 14:46:40 +0100147void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
148{
149}
Gregory Haskins917b6272008-12-29 09:39:53 -0500150
Ingo Molnarbb44e5d2007-07-09 18:51:58 +0200151#endif /* CONFIG_SMP */
152
Peter Zijlstra6f505b12008-01-25 21:08:30 +0100153static inline int on_rt_rq(struct sched_rt_entity *rt_se)
154{
155 return !list_empty(&rt_se->run_list);
156}
157
Peter Zijlstra052f1dc2008-02-13 15:45:40 +0100158#ifdef CONFIG_RT_GROUP_SCHED
Peter Zijlstra6f505b12008-01-25 21:08:30 +0100159
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +0100160static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
Peter Zijlstra6f505b12008-01-25 21:08:30 +0100161{
162 if (!rt_rq->tg)
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +0100163 return RUNTIME_INF;
Peter Zijlstra6f505b12008-01-25 21:08:30 +0100164
Peter Zijlstraac086bc2008-04-19 19:44:58 +0200165 return rt_rq->rt_runtime;
166}
167
168static inline u64 sched_rt_period(struct rt_rq *rt_rq)
169{
170 return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
Peter Zijlstra6f505b12008-01-25 21:08:30 +0100171}
172
173#define for_each_leaf_rt_rq(rt_rq, rq) \
Bharata B Rao80f40ee2008-12-15 11:56:48 +0530174 list_for_each_entry_rcu(rt_rq, &rq->leaf_rt_rq_list, leaf_rt_rq_list)
Peter Zijlstra6f505b12008-01-25 21:08:30 +0100175
Peter Zijlstra6f505b12008-01-25 21:08:30 +0100176#define for_each_sched_rt_entity(rt_se) \
177 for (; rt_se; rt_se = rt_se->parent)
178
179static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
180{
181 return rt_se->my_q;
182}
183
184static void enqueue_rt_entity(struct sched_rt_entity *rt_se);
185static void dequeue_rt_entity(struct sched_rt_entity *rt_se);
186
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +0100187static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
Peter Zijlstra6f505b12008-01-25 21:08:30 +0100188{
Dario Faggiolif6121f42008-10-03 17:40:46 +0200189 struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
Peter Zijlstra6f505b12008-01-25 21:08:30 +0100190 struct sched_rt_entity *rt_se = rt_rq->rt_se;
191
Dario Faggiolif6121f42008-10-03 17:40:46 +0200192 if (rt_rq->rt_nr_running) {
193 if (rt_se && !on_rt_rq(rt_se))
194 enqueue_rt_entity(rt_se);
Gregory Haskinse864c492008-12-29 09:39:49 -0500195 if (rt_rq->highest_prio.curr < curr->prio)
Peter Zijlstra10203872008-01-25 21:08:32 +0100196 resched_task(curr);
Peter Zijlstra6f505b12008-01-25 21:08:30 +0100197 }
198}
199
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +0100200static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
Peter Zijlstra6f505b12008-01-25 21:08:30 +0100201{
202 struct sched_rt_entity *rt_se = rt_rq->rt_se;
203
204 if (rt_se && on_rt_rq(rt_se))
205 dequeue_rt_entity(rt_se);
206}
207
Peter Zijlstra23b0fdf2008-02-13 15:45:39 +0100208static inline int rt_rq_throttled(struct rt_rq *rt_rq)
209{
210 return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
211}
212
213static int rt_se_boosted(struct sched_rt_entity *rt_se)
214{
215 struct rt_rq *rt_rq = group_rt_rq(rt_se);
216 struct task_struct *p;
217
218 if (rt_rq)
219 return !!rt_rq->rt_nr_boosted;
220
221 p = rt_task_of(rt_se);
222 return p->prio != p->normal_prio;
223}
224
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +0200225#ifdef CONFIG_SMP
Rusty Russellc6c49272008-11-25 02:35:05 +1030226static inline const struct cpumask *sched_rt_period_mask(void)
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +0200227{
228 return cpu_rq(smp_processor_id())->rd->span;
229}
230#else
Rusty Russellc6c49272008-11-25 02:35:05 +1030231static inline const struct cpumask *sched_rt_period_mask(void)
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +0200232{
Rusty Russellc6c49272008-11-25 02:35:05 +1030233 return cpu_online_mask;
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +0200234}
235#endif
236
237static inline
238struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
239{
240 return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
241}
242
Peter Zijlstraac086bc2008-04-19 19:44:58 +0200243static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
244{
245 return &rt_rq->tg->rt_bandwidth;
246}
247
Dhaval Giani55e12e52008-06-24 23:39:43 +0530248#else /* !CONFIG_RT_GROUP_SCHED */
Peter Zijlstra6f505b12008-01-25 21:08:30 +0100249
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +0100250static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
Peter Zijlstra6f505b12008-01-25 21:08:30 +0100251{
Peter Zijlstraac086bc2008-04-19 19:44:58 +0200252 return rt_rq->rt_runtime;
253}
254
255static inline u64 sched_rt_period(struct rt_rq *rt_rq)
256{
257 return ktime_to_ns(def_rt_bandwidth.rt_period);
Peter Zijlstra6f505b12008-01-25 21:08:30 +0100258}
259
260#define for_each_leaf_rt_rq(rt_rq, rq) \
261 for (rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
262
Peter Zijlstra6f505b12008-01-25 21:08:30 +0100263#define for_each_sched_rt_entity(rt_se) \
264 for (; rt_se; rt_se = NULL)
265
266static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
267{
268 return NULL;
269}
270
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +0100271static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
Peter Zijlstra6f505b12008-01-25 21:08:30 +0100272{
John Blackwoodf3ade832008-08-26 15:09:43 -0400273 if (rt_rq->rt_nr_running)
274 resched_task(rq_of_rt_rq(rt_rq)->curr);
Peter Zijlstra6f505b12008-01-25 21:08:30 +0100275}
276
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +0100277static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
Peter Zijlstra6f505b12008-01-25 21:08:30 +0100278{
279}
280
Peter Zijlstra23b0fdf2008-02-13 15:45:39 +0100281static inline int rt_rq_throttled(struct rt_rq *rt_rq)
282{
283 return rt_rq->rt_throttled;
284}
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +0200285
Rusty Russellc6c49272008-11-25 02:35:05 +1030286static inline const struct cpumask *sched_rt_period_mask(void)
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +0200287{
Rusty Russellc6c49272008-11-25 02:35:05 +1030288 return cpu_online_mask;
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +0200289}
290
291static inline
292struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
293{
294 return &cpu_rq(cpu)->rt;
295}
296
Peter Zijlstraac086bc2008-04-19 19:44:58 +0200297static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
298{
299 return &def_rt_bandwidth;
300}
301
Dhaval Giani55e12e52008-06-24 23:39:43 +0530302#endif /* CONFIG_RT_GROUP_SCHED */
Peter Zijlstra6f505b12008-01-25 21:08:30 +0100303
Peter Zijlstrab79f3832008-06-19 14:22:25 +0200304#ifdef CONFIG_SMP
/*
 * We ran out of runtime; see if we can borrow some from our neighbours.
 */
Peter Zijlstrab79f3832008-06-19 14:22:25 +0200308static int do_balance_runtime(struct rt_rq *rt_rq)
Peter Zijlstraac086bc2008-04-19 19:44:58 +0200309{
310 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
311 struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
312 int i, weight, more = 0;
313 u64 rt_period;
314
Rusty Russellc6c49272008-11-25 02:35:05 +1030315 weight = cpumask_weight(rd->span);
Peter Zijlstraac086bc2008-04-19 19:44:58 +0200316
317 spin_lock(&rt_b->rt_runtime_lock);
318 rt_period = ktime_to_ns(rt_b->rt_period);
Rusty Russellc6c49272008-11-25 02:35:05 +1030319 for_each_cpu(i, rd->span) {
Peter Zijlstraac086bc2008-04-19 19:44:58 +0200320 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
321 s64 diff;
322
323 if (iter == rt_rq)
324 continue;
325
326 spin_lock(&iter->rt_runtime_lock);
		/*
		 * Either all rqs have inf runtime and there's nothing to steal
		 * or __disable_runtime() below sets a specific rq to inf to
		 * indicate it's been disabled and disallow stealing.
		 */
Peter Zijlstra7def2be2008-06-05 14:49:58 +0200332 if (iter->rt_runtime == RUNTIME_INF)
333 goto next;
334
Peter Zijlstra78333cd2008-09-23 15:33:43 +0200335 /*
336 * From runqueues with spare time, take 1/n part of their
337 * spare time, but no more than our period.
338 */
Peter Zijlstraac086bc2008-04-19 19:44:58 +0200339 diff = iter->rt_runtime - iter->rt_time;
340 if (diff > 0) {
Peter Zijlstra58838cf2008-07-24 12:43:13 +0200341 diff = div_u64((u64)diff, weight);
Peter Zijlstraac086bc2008-04-19 19:44:58 +0200342 if (rt_rq->rt_runtime + diff > rt_period)
343 diff = rt_period - rt_rq->rt_runtime;
344 iter->rt_runtime -= diff;
345 rt_rq->rt_runtime += diff;
346 more = 1;
347 if (rt_rq->rt_runtime == rt_period) {
348 spin_unlock(&iter->rt_runtime_lock);
349 break;
350 }
351 }
Peter Zijlstra7def2be2008-06-05 14:49:58 +0200352next:
Peter Zijlstraac086bc2008-04-19 19:44:58 +0200353 spin_unlock(&iter->rt_runtime_lock);
354 }
355 spin_unlock(&rt_b->rt_runtime_lock);
356
357 return more;
358}
Peter Zijlstra7def2be2008-06-05 14:49:58 +0200359
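/*
 * Standalone sketch of the borrowing rule in do_balance_runtime() above:
 * from every neighbour with spare budget take 1/n of its spare time
 * (n = CPUs in the domain), but never let our own runtime exceed the
 * period. Plain arrays stand in for the per-cpu rt_rq structures and the
 * numbers are arbitrary.
 */
#include <stdio.h>

int main(void)
{
	long long period = 1000000;		/* ns, for the example       */
	long long runtime[4] = { 300000, 900000, 800000, 950000 };
	long long rt_time[4] = { 300000, 100000, 200000, 950000 };
	int weight = 4, us = 0;

	for (int i = 0; i < weight; i++) {
		long long diff;

		if (i == us)
			continue;
		diff = runtime[i] - rt_time[i];	/* neighbour's spare budget  */
		if (diff <= 0)
			continue;
		diff /= weight;			/* take only 1/n of it       */
		if (runtime[us] + diff > period)
			diff = period - runtime[us];
		runtime[i]  -= diff;
		runtime[us] += diff;
	}
	printf("cpu0 runtime after borrowing: %lld ns\n", runtime[us]);
	return 0;
}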
/*
 * Ensure this RQ takes back all the runtime it lent to its neighbours.
 */
Peter Zijlstra7def2be2008-06-05 14:49:58 +0200363static void __disable_runtime(struct rq *rq)
364{
365 struct root_domain *rd = rq->rd;
366 struct rt_rq *rt_rq;
367
368 if (unlikely(!scheduler_running))
369 return;
370
371 for_each_leaf_rt_rq(rt_rq, rq) {
372 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
373 s64 want;
374 int i;
375
376 spin_lock(&rt_b->rt_runtime_lock);
377 spin_lock(&rt_rq->rt_runtime_lock);
Peter Zijlstra78333cd2008-09-23 15:33:43 +0200378 /*
379 * Either we're all inf and nobody needs to borrow, or we're
380 * already disabled and thus have nothing to do, or we have
381 * exactly the right amount of runtime to take out.
382 */
Peter Zijlstra7def2be2008-06-05 14:49:58 +0200383 if (rt_rq->rt_runtime == RUNTIME_INF ||
384 rt_rq->rt_runtime == rt_b->rt_runtime)
385 goto balanced;
386 spin_unlock(&rt_rq->rt_runtime_lock);
387
		/*
		 * Calculate the difference between what we started out with
		 * and what we currently have; that's the amount of runtime
		 * we lent out and now have to reclaim.
		 */
Peter Zijlstra7def2be2008-06-05 14:49:58 +0200393 want = rt_b->rt_runtime - rt_rq->rt_runtime;
394
Peter Zijlstra78333cd2008-09-23 15:33:43 +0200395 /*
396 * Greedy reclaim, take back as much as we can.
397 */
Rusty Russellc6c49272008-11-25 02:35:05 +1030398 for_each_cpu(i, rd->span) {
Peter Zijlstra7def2be2008-06-05 14:49:58 +0200399 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
400 s64 diff;
401
Peter Zijlstra78333cd2008-09-23 15:33:43 +0200402 /*
403 * Can't reclaim from ourselves or disabled runqueues.
404 */
Peter Zijlstraf1679d02008-08-14 15:49:00 +0200405 if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
Peter Zijlstra7def2be2008-06-05 14:49:58 +0200406 continue;
407
408 spin_lock(&iter->rt_runtime_lock);
409 if (want > 0) {
410 diff = min_t(s64, iter->rt_runtime, want);
411 iter->rt_runtime -= diff;
412 want -= diff;
413 } else {
414 iter->rt_runtime -= want;
415 want -= want;
416 }
417 spin_unlock(&iter->rt_runtime_lock);
418
419 if (!want)
420 break;
421 }
422
423 spin_lock(&rt_rq->rt_runtime_lock);
Peter Zijlstra78333cd2008-09-23 15:33:43 +0200424 /*
425 * We cannot be left wanting - that would mean some runtime
426 * leaked out of the system.
427 */
Peter Zijlstra7def2be2008-06-05 14:49:58 +0200428 BUG_ON(want);
429balanced:
Peter Zijlstra78333cd2008-09-23 15:33:43 +0200430 /*
431 * Disable all the borrow logic by pretending we have inf
432 * runtime - in which case borrowing doesn't make sense.
433 */
Peter Zijlstra7def2be2008-06-05 14:49:58 +0200434 rt_rq->rt_runtime = RUNTIME_INF;
435 spin_unlock(&rt_rq->rt_runtime_lock);
436 spin_unlock(&rt_b->rt_runtime_lock);
437 }
438}
439
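/*
 * Sketch of the greedy reclaim loop in __disable_runtime() above: we lent
 * out (rt_b->rt_runtime - rt_rq->rt_runtime) worth of budget and now walk
 * the neighbours taking back min(their runtime, what we still want) until
 * the debt is repaid. The values are arbitrary example numbers.
 */
#include <assert.h>

int main(void)
{
	long long want = 250000;		/* budget we lent out        */
	long long iter_runtime[3] = { 100000, 400000, 50000 };

	for (int i = 0; i < 3 && want > 0; i++) {
		long long diff = iter_runtime[i] < want ? iter_runtime[i] : want;

		iter_runtime[i] -= diff;
		want -= diff;
	}
	assert(want == 0);			/* nothing may leak          */
	return 0;
}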
440static void disable_runtime(struct rq *rq)
441{
442 unsigned long flags;
443
444 spin_lock_irqsave(&rq->lock, flags);
445 __disable_runtime(rq);
446 spin_unlock_irqrestore(&rq->lock, flags);
447}
448
449static void __enable_runtime(struct rq *rq)
450{
Peter Zijlstra7def2be2008-06-05 14:49:58 +0200451 struct rt_rq *rt_rq;
452
453 if (unlikely(!scheduler_running))
454 return;
455
Peter Zijlstra78333cd2008-09-23 15:33:43 +0200456 /*
457 * Reset each runqueue's bandwidth settings
458 */
Peter Zijlstra7def2be2008-06-05 14:49:58 +0200459 for_each_leaf_rt_rq(rt_rq, rq) {
460 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
461
462 spin_lock(&rt_b->rt_runtime_lock);
463 spin_lock(&rt_rq->rt_runtime_lock);
464 rt_rq->rt_runtime = rt_b->rt_runtime;
465 rt_rq->rt_time = 0;
Zhang, Yanminbaf25732008-09-09 11:26:33 +0800466 rt_rq->rt_throttled = 0;
Peter Zijlstra7def2be2008-06-05 14:49:58 +0200467 spin_unlock(&rt_rq->rt_runtime_lock);
468 spin_unlock(&rt_b->rt_runtime_lock);
469 }
470}
471
472static void enable_runtime(struct rq *rq)
473{
474 unsigned long flags;
475
476 spin_lock_irqsave(&rq->lock, flags);
477 __enable_runtime(rq);
478 spin_unlock_irqrestore(&rq->lock, flags);
479}
480
Peter Zijlstraeff65492008-06-19 14:22:26 +0200481static int balance_runtime(struct rt_rq *rt_rq)
482{
483 int more = 0;
484
485 if (rt_rq->rt_time > rt_rq->rt_runtime) {
486 spin_unlock(&rt_rq->rt_runtime_lock);
487 more = do_balance_runtime(rt_rq);
488 spin_lock(&rt_rq->rt_runtime_lock);
489 }
490
491 return more;
492}
Dhaval Giani55e12e52008-06-24 23:39:43 +0530493#else /* !CONFIG_SMP */
Peter Zijlstraeff65492008-06-19 14:22:26 +0200494static inline int balance_runtime(struct rt_rq *rt_rq)
495{
496 return 0;
497}
Dhaval Giani55e12e52008-06-24 23:39:43 +0530498#endif /* CONFIG_SMP */
Peter Zijlstra6f505b12008-01-25 21:08:30 +0100499
500static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
501{
502 int i, idle = 1;
Rusty Russellc6c49272008-11-25 02:35:05 +1030503 const struct cpumask *span;
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +0200504
Peter Zijlstra0b148fa2008-08-19 12:33:04 +0200505 if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +0200506 return 1;
507
508 span = sched_rt_period_mask();
Rusty Russellc6c49272008-11-25 02:35:05 +1030509 for_each_cpu(i, span) {
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +0200510 int enqueue = 0;
511 struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
512 struct rq *rq = rq_of_rt_rq(rt_rq);
513
514 spin_lock(&rq->lock);
515 if (rt_rq->rt_time) {
516 u64 runtime;
517
518 spin_lock(&rt_rq->rt_runtime_lock);
Peter Zijlstraeff65492008-06-19 14:22:26 +0200519 if (rt_rq->rt_throttled)
520 balance_runtime(rt_rq);
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +0200521 runtime = rt_rq->rt_runtime;
522 rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
523 if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
524 rt_rq->rt_throttled = 0;
525 enqueue = 1;
526 }
527 if (rt_rq->rt_time || rt_rq->rt_nr_running)
528 idle = 0;
529 spin_unlock(&rt_rq->rt_runtime_lock);
Peter Zijlstra8a8cde12008-06-19 14:22:28 +0200530 } else if (rt_rq->rt_nr_running)
531 idle = 0;
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +0200532
533 if (enqueue)
534 sched_rt_rq_enqueue(rt_rq);
535 spin_unlock(&rq->lock);
536 }
537
538 return idle;
539}
540
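/*
 * Arithmetic sketch of the replenishment done by do_sched_rt_period_timer()
 * above: each period refunds up to overrun * runtime of accumulated rt_time,
 * and a throttled group is released once rt_time drops below its runtime
 * again. Numbers are illustrative only.
 */
#include <stdio.h>

int main(void)
{
	unsigned long long rt_time = 1400000;	/* consumed, ns              */
	unsigned long long runtime =  950000;	/* budget per period, ns     */
	int overrun = 1, throttled = 1;

	unsigned long long refund = (unsigned long long)overrun * runtime;
	rt_time -= rt_time < refund ? rt_time : refund;

	if (throttled && rt_time < runtime)
		throttled = 0;			/* re-enqueue the group      */

	printf("rt_time=%llu throttled=%d\n", rt_time, throttled);
	return 0;
}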
Peter Zijlstra6f505b12008-01-25 21:08:30 +0100541static inline int rt_se_prio(struct sched_rt_entity *rt_se)
542{
Peter Zijlstra052f1dc2008-02-13 15:45:40 +0100543#ifdef CONFIG_RT_GROUP_SCHED
Peter Zijlstra6f505b12008-01-25 21:08:30 +0100544 struct rt_rq *rt_rq = group_rt_rq(rt_se);
545
546 if (rt_rq)
Gregory Haskinse864c492008-12-29 09:39:49 -0500547 return rt_rq->highest_prio.curr;
Peter Zijlstra6f505b12008-01-25 21:08:30 +0100548#endif
549
550 return rt_task_of(rt_se)->prio;
551}
552
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +0100553static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
Peter Zijlstra6f505b12008-01-25 21:08:30 +0100554{
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +0100555 u64 runtime = sched_rt_runtime(rt_rq);
Peter Zijlstrafa85ae22008-01-25 21:08:29 +0100556
Peter Zijlstrafa85ae22008-01-25 21:08:29 +0100557 if (rt_rq->rt_throttled)
Peter Zijlstra23b0fdf2008-02-13 15:45:39 +0100558 return rt_rq_throttled(rt_rq);
Peter Zijlstrafa85ae22008-01-25 21:08:29 +0100559
Peter Zijlstraac086bc2008-04-19 19:44:58 +0200560 if (sched_rt_runtime(rt_rq) >= sched_rt_period(rt_rq))
561 return 0;
562
Peter Zijlstrab79f3832008-06-19 14:22:25 +0200563 balance_runtime(rt_rq);
564 runtime = sched_rt_runtime(rt_rq);
565 if (runtime == RUNTIME_INF)
566 return 0;
Peter Zijlstraac086bc2008-04-19 19:44:58 +0200567
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +0100568 if (rt_rq->rt_time > runtime) {
Peter Zijlstra6f505b12008-01-25 21:08:30 +0100569 rt_rq->rt_throttled = 1;
Peter Zijlstra23b0fdf2008-02-13 15:45:39 +0100570 if (rt_rq_throttled(rt_rq)) {
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +0100571 sched_rt_rq_dequeue(rt_rq);
Peter Zijlstra23b0fdf2008-02-13 15:45:39 +0100572 return 1;
573 }
Peter Zijlstrafa85ae22008-01-25 21:08:29 +0100574 }
575
576 return 0;
577}
578
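/*
 * The budget compared against in sched_rt_runtime_exceeded() ultimately
 * comes from the RT bandwidth sysctls. A small userspace sketch that just
 * reads them back; on most systems the defaults are 950000us of RT runtime
 * per 1000000us period, i.e. non-RT tasks keep roughly 5% of each second.
 */
#include <stdio.h>

static long read_long(const char *path)
{
	long v = -1;
	FILE *f = fopen(path, "r");

	if (f) {
		if (fscanf(f, "%ld", &v) != 1)
			v = -1;
		fclose(f);
	}
	return v;
}

int main(void)
{
	printf("sched_rt_runtime_us = %ld\n",
	       read_long("/proc/sys/kernel/sched_rt_runtime_us"));
	printf("sched_rt_period_us  = %ld\n",
	       read_long("/proc/sys/kernel/sched_rt_period_us"));
	return 0;
}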
Ingo Molnarbb44e5d2007-07-09 18:51:58 +0200579/*
580 * Update the current task's runtime statistics. Skip current tasks that
581 * are not in our scheduling class.
582 */
Alexey Dobriyana9957442007-10-15 17:00:13 +0200583static void update_curr_rt(struct rq *rq)
Ingo Molnarbb44e5d2007-07-09 18:51:58 +0200584{
585 struct task_struct *curr = rq->curr;
Peter Zijlstra6f505b12008-01-25 21:08:30 +0100586 struct sched_rt_entity *rt_se = &curr->rt;
587 struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
Ingo Molnarbb44e5d2007-07-09 18:51:58 +0200588 u64 delta_exec;
589
590 if (!task_has_rt_policy(curr))
591 return;
592
Ingo Molnard2819182007-08-09 11:16:47 +0200593 delta_exec = rq->clock - curr->se.exec_start;
Ingo Molnarbb44e5d2007-07-09 18:51:58 +0200594 if (unlikely((s64)delta_exec < 0))
595 delta_exec = 0;
Ingo Molnar6cfb0d52007-08-02 17:41:40 +0200596
597 schedstat_set(curr->se.exec_max, max(curr->se.exec_max, delta_exec));
Ingo Molnarbb44e5d2007-07-09 18:51:58 +0200598
599 curr->se.sum_exec_runtime += delta_exec;
Frank Mayharf06febc2008-09-12 09:54:39 -0700600 account_group_exec_runtime(curr, delta_exec);
601
Ingo Molnard2819182007-08-09 11:16:47 +0200602 curr->se.exec_start = rq->clock;
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +0100603 cpuacct_charge(curr, delta_exec);
Peter Zijlstrafa85ae22008-01-25 21:08:29 +0100604
Peter Zijlstra0b148fa2008-08-19 12:33:04 +0200605 if (!rt_bandwidth_enabled())
606 return;
607
Dhaval Giani354d60c2008-04-19 19:44:59 +0200608 for_each_sched_rt_entity(rt_se) {
609 rt_rq = rt_rq_of_se(rt_se);
610
Peter Zijlstracc2991c2008-08-19 12:33:03 +0200611 if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
Dimitri Sivaniche113a742008-10-31 08:03:41 -0500612 spin_lock(&rt_rq->rt_runtime_lock);
Peter Zijlstracc2991c2008-08-19 12:33:03 +0200613 rt_rq->rt_time += delta_exec;
614 if (sched_rt_runtime_exceeded(rt_rq))
615 resched_task(curr);
Dimitri Sivaniche113a742008-10-31 08:03:41 -0500616 spin_unlock(&rt_rq->rt_runtime_lock);
Peter Zijlstracc2991c2008-08-19 12:33:03 +0200617 }
Dhaval Giani354d60c2008-04-19 19:44:59 +0200618 }
Ingo Molnarbb44e5d2007-07-09 18:51:58 +0200619}
620
Gregory Haskins398a1532009-01-14 09:10:04 -0500621#if defined CONFIG_SMP
Gregory Haskinse864c492008-12-29 09:39:49 -0500622
623static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu);
624
625static inline int next_prio(struct rq *rq)
Steven Rostedt63489e42008-01-25 21:08:03 +0100626{
Gregory Haskinse864c492008-12-29 09:39:49 -0500627 struct task_struct *next = pick_next_highest_task_rt(rq, rq->cpu);
Gregory Haskins1f11eb6a2008-06-04 15:04:05 -0400628
Gregory Haskinse864c492008-12-29 09:39:49 -0500629 if (next && rt_prio(next->prio))
630 return next->prio;
631 else
632 return MAX_RT_PRIO;
633}
Gregory Haskinse864c492008-12-29 09:39:49 -0500634
Gregory Haskins398a1532009-01-14 09:10:04 -0500635static void
636inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
Ingo Molnarbb44e5d2007-07-09 18:51:58 +0200637{
Gregory Haskins4d984272008-12-29 09:39:49 -0500638 struct rq *rq = rq_of_rt_rq(rt_rq);
Gregory Haskins4d984272008-12-29 09:39:49 -0500639
Gregory Haskins398a1532009-01-14 09:10:04 -0500640 if (prio < prev_prio) {
Peter Zijlstra6f505b12008-01-25 21:08:30 +0100641
Gregory Haskinse864c492008-12-29 09:39:49 -0500642 /*
643 * If the new task is higher in priority than anything on the
Gregory Haskins398a1532009-01-14 09:10:04 -0500644 * run-queue, we know that the previous high becomes our
645 * next-highest.
Gregory Haskinse864c492008-12-29 09:39:49 -0500646 */
Gregory Haskins398a1532009-01-14 09:10:04 -0500647 rt_rq->highest_prio.next = prev_prio;
648
Gregory Haskins1f11eb6a2008-06-04 15:04:05 -0400649 if (rq->online)
Gregory Haskins4d984272008-12-29 09:39:49 -0500650 cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
Ingo Molnar1100ac92008-06-05 12:25:37 +0200651
Gregory Haskinse864c492008-12-29 09:39:49 -0500652 } else if (prio == rt_rq->highest_prio.curr)
653 /*
654 * If the next task is equal in priority to the highest on
655 * the run-queue, then we implicitly know that the next highest
656 * task cannot be any lower than current
657 */
658 rt_rq->highest_prio.next = prio;
659 else if (prio < rt_rq->highest_prio.next)
660 /*
661 * Otherwise, we need to recompute next-highest
662 */
663 rt_rq->highest_prio.next = next_prio(rq);
Steven Rostedt63489e42008-01-25 21:08:03 +0100664}
Peter Zijlstra6f505b12008-01-25 21:08:30 +0100665
Gregory Haskins398a1532009-01-14 09:10:04 -0500666static void
667dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
Steven Rostedt63489e42008-01-25 21:08:03 +0100668{
Gregory Haskins4d984272008-12-29 09:39:49 -0500669 struct rq *rq = rq_of_rt_rq(rt_rq);
Gregory Haskins6e0534f2008-05-12 21:21:01 +0200670
Gregory Haskins398a1532009-01-14 09:10:04 -0500671 if (rt_rq->rt_nr_running && (prio <= rt_rq->highest_prio.next))
672 rt_rq->highest_prio.next = next_prio(rq);
673
674 if (rq->online && rt_rq->highest_prio.curr != prev_prio)
675 cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
676}
677
678#else /* CONFIG_SMP */
679
680static inline
681void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
682static inline
683void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
684
685#endif /* CONFIG_SMP */
686
Steven Rostedt63489e42008-01-25 21:08:03 +0100687#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
Gregory Haskins398a1532009-01-14 09:10:04 -0500688static void
689inc_rt_prio(struct rt_rq *rt_rq, int prio)
690{
691 int prev_prio = rt_rq->highest_prio.curr;
Steven Rostedt63489e42008-01-25 21:08:03 +0100692
Gregory Haskins398a1532009-01-14 09:10:04 -0500693 if (prio < prev_prio)
694 rt_rq->highest_prio.curr = prio;
695
696 inc_rt_prio_smp(rt_rq, prio, prev_prio);
697}
698
699static void
700dec_rt_prio(struct rt_rq *rt_rq, int prio)
701{
702 int prev_prio = rt_rq->highest_prio.curr;
703
704 if (rt_rq->rt_nr_running) {
705
706 WARN_ON(prio < prev_prio);
Gregory Haskinse864c492008-12-29 09:39:49 -0500707
708 /*
Gregory Haskins398a1532009-01-14 09:10:04 -0500709 * This may have been our highest task, and therefore
710 * we may have some recomputation to do
Gregory Haskinse864c492008-12-29 09:39:49 -0500711 */
Gregory Haskins398a1532009-01-14 09:10:04 -0500712 if (prio == prev_prio) {
Gregory Haskinse864c492008-12-29 09:39:49 -0500713 struct rt_prio_array *array = &rt_rq->active;
714
715 rt_rq->highest_prio.curr =
Steven Rostedt764a9d62008-01-25 21:08:04 +0100716 sched_find_first_bit(array->bitmap);
Gregory Haskinse864c492008-12-29 09:39:49 -0500717 }
718
Steven Rostedt764a9d62008-01-25 21:08:04 +0100719 } else
Gregory Haskinse864c492008-12-29 09:39:49 -0500720 rt_rq->highest_prio.curr = MAX_RT_PRIO;
Gregory Haskins73fe6aae2008-01-25 21:08:07 +0100721
Gregory Haskins398a1532009-01-14 09:10:04 -0500722 dec_rt_prio_smp(rt_rq, prio, prev_prio);
723}
Gregory Haskins1f11eb6a2008-06-04 15:04:05 -0400724
Gregory Haskins398a1532009-01-14 09:10:04 -0500725#else
726
727static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
728static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}
729
730#endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */
731
Gregory Haskins73fe6aae2008-01-25 21:08:07 +0100732#ifdef CONFIG_RT_GROUP_SCHED
Gregory Haskins398a1532009-01-14 09:10:04 -0500733
734static void
735inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
736{
Gregory Haskins73fe6aae2008-01-25 21:08:07 +0100737 if (rt_se_boosted(rt_se))
Steven Rostedt764a9d62008-01-25 21:08:04 +0100738 rt_rq->rt_nr_boosted++;
Peter Zijlstra052f1dc2008-02-13 15:45:40 +0100739
Peter Zijlstra23b0fdf2008-02-13 15:45:39 +0100740 if (rt_rq->tg)
741 start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
Gregory Haskins398a1532009-01-14 09:10:04 -0500742}
743
744static void
745dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
746{
Peter Zijlstra23b0fdf2008-02-13 15:45:39 +0100747 if (rt_se_boosted(rt_se))
748 rt_rq->rt_nr_boosted--;
749
750 WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
Gregory Haskins398a1532009-01-14 09:10:04 -0500751}
752
753#else /* CONFIG_RT_GROUP_SCHED */
754
755static void
756inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
757{
Ingo Molnarbb44e5d2007-07-09 18:51:58 +0200758 start_rt_bandwidth(&def_rt_bandwidth);
Gregory Haskins398a1532009-01-14 09:10:04 -0500759}
760
761static inline
762void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}
763
764#endif /* CONFIG_RT_GROUP_SCHED */
765
766static inline
767void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
768{
769 int prio = rt_se_prio(rt_se);
770
771 WARN_ON(!rt_prio(prio));
772 rt_rq->rt_nr_running++;
773
774 inc_rt_prio(rt_rq, prio);
775 inc_rt_migration(rt_se, rt_rq);
776 inc_rt_group(rt_se, rt_rq);
Ingo Molnarbb44e5d2007-07-09 18:51:58 +0200777}
778
Peter Zijlstra23b0fdf2008-02-13 15:45:39 +0100779static inline
780void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
781{
Ingo Molnarbb44e5d2007-07-09 18:51:58 +0200782 WARN_ON(!rt_prio(rt_se_prio(rt_se)));
Peter Zijlstra6f505b12008-01-25 21:08:30 +0100783 WARN_ON(!rt_rq->rt_nr_running);
Ingo Molnarbb44e5d2007-07-09 18:51:58 +0200784 rt_rq->rt_nr_running--;
Ingo Molnarbb44e5d2007-07-09 18:51:58 +0200785
Gregory Haskins398a1532009-01-14 09:10:04 -0500786 dec_rt_prio(rt_rq, rt_se_prio(rt_se));
787 dec_rt_migration(rt_se, rt_rq);
788 dec_rt_group(rt_se, rt_rq);
Ingo Molnarbb44e5d2007-07-09 18:51:58 +0200789}
790
Peter Zijlstraad2a3f12008-06-19 09:06:57 +0200791static void __enqueue_rt_entity(struct sched_rt_entity *rt_se)
Ingo Molnarbb44e5d2007-07-09 18:51:58 +0200792{
Peter Zijlstra6f505b12008-01-25 21:08:30 +0100793 struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
794 struct rt_prio_array *array = &rt_rq->active;
795 struct rt_rq *group_rq = group_rt_rq(rt_se);
Dmitry Adamushko20b63312008-06-11 00:58:30 +0200796 struct list_head *queue = array->queue + rt_se_prio(rt_se);
Ingo Molnarbb44e5d2007-07-09 18:51:58 +0200797
	/*
	 * Don't enqueue the group if it's throttled, or when empty.
	 * The latter is a consequence of the former when a child group
	 * gets throttled and the current group doesn't have any other
	 * active members.
	 */
804 if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
Peter Zijlstra6f505b12008-01-25 21:08:30 +0100805 return;
Ingo Molnarbb44e5d2007-07-09 18:51:58 +0200806
Dmitry Adamushko7ebefa82008-07-01 23:32:15 +0200807 list_add_tail(&rt_se->run_list, queue);
Peter Zijlstra6f505b12008-01-25 21:08:30 +0100808 __set_bit(rt_se_prio(rt_se), array->bitmap);
Peter Zijlstra78f2c7d2008-01-25 21:08:27 +0100809
Peter Zijlstra6f505b12008-01-25 21:08:30 +0100810 inc_rt_tasks(rt_se, rt_rq);
811}
812
Peter Zijlstraad2a3f12008-06-19 09:06:57 +0200813static void __dequeue_rt_entity(struct sched_rt_entity *rt_se)
Peter Zijlstra6f505b12008-01-25 21:08:30 +0100814{
815 struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
816 struct rt_prio_array *array = &rt_rq->active;
817
818 list_del_init(&rt_se->run_list);
819 if (list_empty(array->queue + rt_se_prio(rt_se)))
820 __clear_bit(rt_se_prio(rt_se), array->bitmap);
821
822 dec_rt_tasks(rt_se, rt_rq);
823}
824
/*
 * Because the prio of an upper entry depends on the lower
 * entries, we must remove entries top-down.
 */
Peter Zijlstraad2a3f12008-06-19 09:06:57 +0200829static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
Peter Zijlstra6f505b12008-01-25 21:08:30 +0100830{
Peter Zijlstraad2a3f12008-06-19 09:06:57 +0200831 struct sched_rt_entity *back = NULL;
Peter Zijlstra6f505b12008-01-25 21:08:30 +0100832
Peter Zijlstra58d6c2d2008-04-19 19:45:00 +0200833 for_each_sched_rt_entity(rt_se) {
834 rt_se->back = back;
835 back = rt_se;
836 }
837
838 for (rt_se = back; rt_se; rt_se = rt_se->back) {
839 if (on_rt_rq(rt_se))
Peter Zijlstraad2a3f12008-06-19 09:06:57 +0200840 __dequeue_rt_entity(rt_se);
841 }
842}
843
844static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
845{
846 dequeue_rt_stack(rt_se);
847 for_each_sched_rt_entity(rt_se)
848 __enqueue_rt_entity(rt_se);
849}
850
851static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
852{
853 dequeue_rt_stack(rt_se);
854
855 for_each_sched_rt_entity(rt_se) {
856 struct rt_rq *rt_rq = group_rt_rq(rt_se);
857
858 if (rt_rq && rt_rq->rt_nr_running)
859 __enqueue_rt_entity(rt_se);
Peter Zijlstra58d6c2d2008-04-19 19:45:00 +0200860 }
Ingo Molnarbb44e5d2007-07-09 18:51:58 +0200861}
862
863/*
864 * Adding/removing a task to/from a priority array:
865 */
Peter Zijlstra6f505b12008-01-25 21:08:30 +0100866static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
867{
868 struct sched_rt_entity *rt_se = &p->rt;
869
870 if (wakeup)
871 rt_se->timeout = 0;
872
Peter Zijlstraad2a3f12008-06-19 09:06:57 +0200873 enqueue_rt_entity(rt_se);
Peter Zijlstrac09595f2008-06-27 13:41:14 +0200874
Gregory Haskins917b6272008-12-29 09:39:53 -0500875 if (!task_current(rq, p) && p->rt.nr_cpus_allowed > 1)
876 enqueue_pushable_task(rq, p);
877
Peter Zijlstrac09595f2008-06-27 13:41:14 +0200878 inc_cpu_load(rq, p->se.load.weight);
Peter Zijlstra6f505b12008-01-25 21:08:30 +0100879}
880
Ingo Molnarbb44e5d2007-07-09 18:51:58 +0200881static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
882{
Peter Zijlstra6f505b12008-01-25 21:08:30 +0100883 struct sched_rt_entity *rt_se = &p->rt;
Ingo Molnarbb44e5d2007-07-09 18:51:58 +0200884
885 update_curr_rt(rq);
Peter Zijlstraad2a3f12008-06-19 09:06:57 +0200886 dequeue_rt_entity(rt_se);
Peter Zijlstrac09595f2008-06-27 13:41:14 +0200887
Gregory Haskins917b6272008-12-29 09:39:53 -0500888 dequeue_pushable_task(rq, p);
889
Peter Zijlstrac09595f2008-06-27 13:41:14 +0200890 dec_cpu_load(rq, p->se.load.weight);
Ingo Molnarbb44e5d2007-07-09 18:51:58 +0200891}
892
893/*
894 * Put task to the end of the run list without the overhead of dequeue
895 * followed by enqueue.
896 */
Dmitry Adamushko7ebefa82008-07-01 23:32:15 +0200897static void
898requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
Ingo Molnarbb44e5d2007-07-09 18:51:58 +0200899{
Ingo Molnar1cdad712008-06-19 09:09:15 +0200900 if (on_rt_rq(rt_se)) {
Dmitry Adamushko7ebefa82008-07-01 23:32:15 +0200901 struct rt_prio_array *array = &rt_rq->active;
902 struct list_head *queue = array->queue + rt_se_prio(rt_se);
903
904 if (head)
905 list_move(&rt_se->run_list, queue);
906 else
907 list_move_tail(&rt_se->run_list, queue);
Ingo Molnar1cdad712008-06-19 09:09:15 +0200908 }
Ingo Molnarbb44e5d2007-07-09 18:51:58 +0200909}
910
Dmitry Adamushko7ebefa82008-07-01 23:32:15 +0200911static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
Peter Zijlstra6f505b12008-01-25 21:08:30 +0100912{
913 struct sched_rt_entity *rt_se = &p->rt;
914 struct rt_rq *rt_rq;
915
916 for_each_sched_rt_entity(rt_se) {
917 rt_rq = rt_rq_of_se(rt_se);
Dmitry Adamushko7ebefa82008-07-01 23:32:15 +0200918 requeue_rt_entity(rt_rq, rt_se, head);
Peter Zijlstra6f505b12008-01-25 21:08:30 +0100919 }
920}
921
922static void yield_task_rt(struct rq *rq)
Ingo Molnarbb44e5d2007-07-09 18:51:58 +0200923{
Dmitry Adamushko7ebefa82008-07-01 23:32:15 +0200924 requeue_task_rt(rq, rq->curr, 0);
Ingo Molnarbb44e5d2007-07-09 18:51:58 +0200925}
926
Gregory Haskinse7693a32008-01-25 21:08:09 +0100927#ifdef CONFIG_SMP
Gregory Haskins318e0892008-01-25 21:08:10 +0100928static int find_lowest_rq(struct task_struct *task);
929
Gregory Haskinse7693a32008-01-25 21:08:09 +0100930static int select_task_rq_rt(struct task_struct *p, int sync)
931{
Gregory Haskins318e0892008-01-25 21:08:10 +0100932 struct rq *rq = task_rq(p);
933
	/*
	 * If the current task is an RT task, then
	 * try to see if we can wake this RT task up on another
	 * runqueue. Otherwise simply start this RT task
	 * on its current runqueue.
	 *
	 * We want to avoid overloading runqueues, even if the woken
	 * RT task is of higher priority than the current RT task.
	 * RT tasks behave differently from other tasks. If
	 * one gets preempted, we try to push it off to another queue.
	 * So trying to keep a preempting RT task on the same
	 * cache hot CPU will force the running RT task to
	 * a cold CPU. So we waste all the cache for the lower
	 * RT task in hopes of saving some for an RT task
	 * that is just being woken and probably will have a
	 * cold cache anyway.
	 */
Gregory Haskins17b32792008-01-25 21:08:13 +0100951 if (unlikely(rt_task(rq->curr)) &&
Peter Zijlstra6f505b12008-01-25 21:08:30 +0100952 (p->rt.nr_cpus_allowed > 1)) {
Gregory Haskins318e0892008-01-25 21:08:10 +0100953 int cpu = find_lowest_rq(p);
954
955 return (cpu == -1) ? task_cpu(p) : cpu;
956 }
957
958 /*
959 * Otherwise, just let it ride on the affined RQ and the
960 * post-schedule router will push the preempted task away
961 */
Gregory Haskinse7693a32008-01-25 21:08:09 +0100962 return task_cpu(p);
963}
Dmitry Adamushko7ebefa82008-07-01 23:32:15 +0200964
965static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
966{
Dmitry Adamushko7ebefa82008-07-01 23:32:15 +0200967 if (rq->curr->rt.nr_cpus_allowed == 1)
968 return;
969
Rusty Russell13b8bd02009-03-25 15:01:22 +1030970 if (p->rt.nr_cpus_allowed != 1
971 && cpupri_find(&rq->rd->cpupri, p, NULL))
Dmitry Adamushko7ebefa82008-07-01 23:32:15 +0200972 return;
973
Rusty Russell13b8bd02009-03-25 15:01:22 +1030974 if (!cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
975 return;
Dmitry Adamushko7ebefa82008-07-01 23:32:15 +0200976
	/*
	 * There appear to be other cpus that can accept
	 * current and none to run 'p', so let's reschedule
	 * to try and push current away:
	 */
982 requeue_task_rt(rq, p, 1);
983 resched_task(rq->curr);
984}
985
Gregory Haskinse7693a32008-01-25 21:08:09 +0100986#endif /* CONFIG_SMP */
987
Ingo Molnarbb44e5d2007-07-09 18:51:58 +0200988/*
989 * Preempt the current task with a newly woken task if needed:
990 */
Peter Zijlstra15afe092008-09-20 23:38:02 +0200991static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int sync)
Ingo Molnarbb44e5d2007-07-09 18:51:58 +0200992{
Gregory Haskins45c01e82008-05-12 21:20:41 +0200993 if (p->prio < rq->curr->prio) {
Ingo Molnarbb44e5d2007-07-09 18:51:58 +0200994 resched_task(rq->curr);
Gregory Haskins45c01e82008-05-12 21:20:41 +0200995 return;
996 }
997
998#ifdef CONFIG_SMP
999 /*
1000 * If:
1001 *
1002 * - the newly woken task is of equal priority to the current task
1003 * - the newly woken task is non-migratable while current is migratable
1004 * - current will be preempted on the next reschedule
1005 *
1006 * we should check to see if current can readily move to a different
1007 * cpu. If so, we will reschedule to allow the push logic to try
1008 * to move current somewhere else, making room for our non-migratable
1009 * task.
1010 */
Dmitry Adamushko7ebefa82008-07-01 23:32:15 +02001011 if (p->prio == rq->curr->prio && !need_resched())
1012 check_preempt_equal_prio(rq, p);
Gregory Haskins45c01e82008-05-12 21:20:41 +02001013#endif
Ingo Molnarbb44e5d2007-07-09 18:51:58 +02001014}
1015
Peter Zijlstra6f505b12008-01-25 21:08:30 +01001016static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
1017 struct rt_rq *rt_rq)
Ingo Molnarbb44e5d2007-07-09 18:51:58 +02001018{
Peter Zijlstra6f505b12008-01-25 21:08:30 +01001019 struct rt_prio_array *array = &rt_rq->active;
1020 struct sched_rt_entity *next = NULL;
Ingo Molnarbb44e5d2007-07-09 18:51:58 +02001021 struct list_head *queue;
1022 int idx;
1023
1024 idx = sched_find_first_bit(array->bitmap);
Peter Zijlstra6f505b12008-01-25 21:08:30 +01001025 BUG_ON(idx >= MAX_RT_PRIO);
Ingo Molnarbb44e5d2007-07-09 18:51:58 +02001026
1027 queue = array->queue + idx;
Peter Zijlstra6f505b12008-01-25 21:08:30 +01001028 next = list_entry(queue->next, struct sched_rt_entity, run_list);
Dmitry Adamushko326587b2008-01-25 21:08:34 +01001029
Ingo Molnarbb44e5d2007-07-09 18:51:58 +02001030 return next;
1031}
1032
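/*
 * Userspace model of the O(1) pick done by pick_next_rt_entity() above:
 * one bit per priority level, set while that level's queue is non-empty,
 * and the next entity comes from the lowest set bit. sched_find_first_bit()
 * is approximated here by a linear word scan over a 100-bit map
 * (MAX_RT_PRIO levels).
 */
#include <stdio.h>
#include <string.h>

#define MAX_RT_PRIO	100
#define BITS_PER_LONG	(8 * (int)sizeof(unsigned long))
#define NWORDS		((MAX_RT_PRIO + BITS_PER_LONG - 1) / BITS_PER_LONG)

static unsigned long bitmap[NWORDS];

static void set_prio(int prio)
{
	bitmap[prio / BITS_PER_LONG] |= 1UL << (prio % BITS_PER_LONG);
}

static int find_first_prio(void)
{
	for (int w = 0; w < NWORDS; w++)
		if (bitmap[w])
			return w * BITS_PER_LONG + __builtin_ctzl(bitmap[w]);
	return MAX_RT_PRIO;	/* empty: no runnable RT task             */
}

int main(void)
{
	memset(bitmap, 0, sizeof(bitmap));
	set_prio(70);
	set_prio(15);		/* numerically lower = more urgent        */
	printf("next prio to run: %d\n", find_first_prio());	/* 15      */
	return 0;
}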
Gregory Haskins917b6272008-12-29 09:39:53 -05001033static struct task_struct *_pick_next_task_rt(struct rq *rq)
Peter Zijlstra6f505b12008-01-25 21:08:30 +01001034{
1035 struct sched_rt_entity *rt_se;
1036 struct task_struct *p;
1037 struct rt_rq *rt_rq;
1038
Peter Zijlstra6f505b12008-01-25 21:08:30 +01001039 rt_rq = &rq->rt;
1040
1041 if (unlikely(!rt_rq->rt_nr_running))
1042 return NULL;
1043
Peter Zijlstra23b0fdf2008-02-13 15:45:39 +01001044 if (rt_rq_throttled(rt_rq))
Peter Zijlstra6f505b12008-01-25 21:08:30 +01001045 return NULL;
1046
1047 do {
1048 rt_se = pick_next_rt_entity(rq, rt_rq);
Dmitry Adamushko326587b2008-01-25 21:08:34 +01001049 BUG_ON(!rt_se);
Peter Zijlstra6f505b12008-01-25 21:08:30 +01001050 rt_rq = group_rt_rq(rt_se);
1051 } while (rt_rq);
1052
1053 p = rt_task_of(rt_se);
1054 p->se.exec_start = rq->clock;
Gregory Haskins917b6272008-12-29 09:39:53 -05001055
1056 return p;
1057}
1058
1059static struct task_struct *pick_next_task_rt(struct rq *rq)
1060{
1061 struct task_struct *p = _pick_next_task_rt(rq);
1062
1063 /* The running task is never eligible for pushing */
1064 if (p)
1065 dequeue_pushable_task(rq, p);
1066
Peter Zijlstra6f505b12008-01-25 21:08:30 +01001067 return p;
1068}
1069
Ingo Molnar31ee5292007-08-09 11:16:49 +02001070static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
Ingo Molnarbb44e5d2007-07-09 18:51:58 +02001071{
Ingo Molnarf1e14ef2007-08-09 11:16:48 +02001072 update_curr_rt(rq);
Ingo Molnarbb44e5d2007-07-09 18:51:58 +02001073 p->se.exec_start = 0;
Gregory Haskins917b6272008-12-29 09:39:53 -05001074
1075 /*
1076 * The previous task needs to be made eligible for pushing
1077 * if it is still active
1078 */
1079 if (p->se.on_rq && p->rt.nr_cpus_allowed > 1)
1080 enqueue_pushable_task(rq, p);
Ingo Molnarbb44e5d2007-07-09 18:51:58 +02001081}
1082
Peter Williams681f3e62007-10-24 18:23:51 +02001083#ifdef CONFIG_SMP
Peter Zijlstra6f505b12008-01-25 21:08:30 +01001084
Steven Rostedte8fa1362008-01-25 21:08:05 +01001085/* Only try algorithms three times */
1086#define RT_MAX_TRIES 3
1087
Steven Rostedte8fa1362008-01-25 21:08:05 +01001088static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep);
1089
Steven Rostedtf65eda42008-01-25 21:08:07 +01001090static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
1091{
1092 if (!task_running(rq, p) &&
Rusty Russell96f874e22008-11-25 02:35:14 +10301093 (cpu < 0 || cpumask_test_cpu(cpu, &p->cpus_allowed)) &&
Peter Zijlstra6f505b12008-01-25 21:08:30 +01001094 (p->rt.nr_cpus_allowed > 1))
Steven Rostedtf65eda42008-01-25 21:08:07 +01001095 return 1;
1096 return 0;
1097}
1098
Steven Rostedte8fa1362008-01-25 21:08:05 +01001099/* Return the second highest RT task, NULL otherwise */
Ingo Molnar79064fb2008-01-25 21:08:14 +01001100static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
Steven Rostedte8fa1362008-01-25 21:08:05 +01001101{
Peter Zijlstra6f505b12008-01-25 21:08:30 +01001102 struct task_struct *next = NULL;
1103 struct sched_rt_entity *rt_se;
1104 struct rt_prio_array *array;
1105 struct rt_rq *rt_rq;
Steven Rostedte8fa1362008-01-25 21:08:05 +01001106 int idx;
1107
Peter Zijlstra6f505b12008-01-25 21:08:30 +01001108 for_each_leaf_rt_rq(rt_rq, rq) {
1109 array = &rt_rq->active;
1110 idx = sched_find_first_bit(array->bitmap);
1111 next_idx:
1112 if (idx >= MAX_RT_PRIO)
1113 continue;
1114 if (next && next->prio < idx)
1115 continue;
1116 list_for_each_entry(rt_se, array->queue + idx, run_list) {
1117 struct task_struct *p = rt_task_of(rt_se);
1118 if (pick_rt_task(rq, p, cpu)) {
1119 next = p;
1120 break;
1121 }
1122 }
1123 if (!next) {
1124 idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx+1);
1125 goto next_idx;
1126 }
Steven Rostedte8fa1362008-01-25 21:08:05 +01001127 }
1128
Steven Rostedte8fa1362008-01-25 21:08:05 +01001129 return next;
1130}
1131
Rusty Russell0e3900e2008-11-25 02:35:13 +10301132static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
Steven Rostedte8fa1362008-01-25 21:08:05 +01001133
Mike Travisd38b2232009-01-10 21:58:11 -08001134static inline int pick_optimal_cpu(int this_cpu,
1135 const struct cpumask *mask)
Gregory Haskins6e1254d2008-01-25 21:08:11 +01001136{
1137 int first;
1138
1139 /* "this_cpu" is cheaper to preempt than a remote processor */
Mike Travisd38b2232009-01-10 21:58:11 -08001140 if ((this_cpu != -1) && cpumask_test_cpu(this_cpu, mask))
Gregory Haskins6e1254d2008-01-25 21:08:11 +01001141 return this_cpu;
1142
Rusty Russell3d398702009-01-31 23:21:24 +10301143 first = cpumask_first(mask);
1144 if (first < nr_cpu_ids)
Gregory Haskins6e1254d2008-01-25 21:08:11 +01001145 return first;
1146
1147 return -1;
1148}
1149
1150static int find_lowest_rq(struct task_struct *task)
1151{
1152 struct sched_domain *sd;
Rusty Russell96f874e22008-11-25 02:35:14 +10301153 struct cpumask *lowest_mask = __get_cpu_var(local_cpu_mask);
Gregory Haskins6e1254d2008-01-25 21:08:11 +01001154 int this_cpu = smp_processor_id();
1155 int cpu = task_cpu(task);
Mike Travisd38b2232009-01-10 21:58:11 -08001156 cpumask_var_t domain_mask;
Gregory Haskins6e1254d2008-01-25 21:08:11 +01001157
Gregory Haskins6e0534f2008-05-12 21:21:01 +02001158 if (task->rt.nr_cpus_allowed == 1)
1159 return -1; /* No other targets possible */
1160
1161 if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
Gregory Haskins06f90db2008-01-25 21:08:13 +01001162 return -1; /* No targets found */
1163
1164 /*
Max Krasnyanskye761b772008-07-15 04:43:49 -07001165 * Only consider CPUs that are usable for migration.
1166 * I guess we might want to change cpupri_find() to ignore those
1167 * in the first place.
1168 */
Rusty Russell96f874e22008-11-25 02:35:14 +10301169 cpumask_and(lowest_mask, lowest_mask, cpu_active_mask);
Max Krasnyanskye761b772008-07-15 04:43:49 -07001170
1171 /*
Gregory Haskins6e1254d2008-01-25 21:08:11 +01001172 * At this point we have built a mask of cpus representing the
1173 * lowest priority tasks in the system. Now we want to elect
1174 * the best one based on our affinity and topology.
1175 *
1176 * We prioritize the last cpu that the task executed on since
1177 * it is most likely cache-hot in that location.
1178 */
Rusty Russell96f874e22008-11-25 02:35:14 +10301179 if (cpumask_test_cpu(cpu, lowest_mask))
Gregory Haskins6e1254d2008-01-25 21:08:11 +01001180 return cpu;
1181
1182 /*
1183 * Otherwise, we consult the sched_domains span maps to figure
1184 * out which cpu is logically closest to our hot cache data.
1185 */
1186 if (this_cpu == cpu)
1187 this_cpu = -1; /* Skip this_cpu opt if the same */
1188
Mike Travisd38b2232009-01-10 21:58:11 -08001189 if (alloc_cpumask_var(&domain_mask, GFP_ATOMIC)) {
1190 for_each_domain(cpu, sd) {
1191 if (sd->flags & SD_WAKE_AFFINE) {
1192 int best_cpu;
Gregory Haskins6e1254d2008-01-25 21:08:11 +01001193
Mike Travisd38b2232009-01-10 21:58:11 -08001194 cpumask_and(domain_mask,
1195 sched_domain_span(sd),
1196 lowest_mask);
Gregory Haskins6e1254d2008-01-25 21:08:11 +01001197
Mike Travisd38b2232009-01-10 21:58:11 -08001198 best_cpu = pick_optimal_cpu(this_cpu,
1199 domain_mask);
1200
1201 if (best_cpu != -1) {
1202 free_cpumask_var(domain_mask);
1203 return best_cpu;
1204 }
1205 }
Gregory Haskins6e1254d2008-01-25 21:08:11 +01001206 }
Mike Travisd38b2232009-01-10 21:58:11 -08001207 free_cpumask_var(domain_mask);
Gregory Haskins6e1254d2008-01-25 21:08:11 +01001208 }
1209
1210 /*
1211 * And finally, if there were no matches within the domains
1212 * just give the caller *something* to work with from the compatible
1213 * locations.
1214 */
1215 return pick_optimal_cpu(this_cpu, lowest_mask);
Gregory Haskins07b40322008-01-25 21:08:10 +01001216}
1217
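/*
 * Compressed model of the preference order implemented by find_lowest_rq()
 * and pick_optimal_cpu() above (the sched-domain walk is elided): stay on
 * the task's previous CPU if it is already among the lowest-priority CPUs,
 * otherwise prefer the waking CPU, and only then fall back to the first
 * candidate. A plain bitmask stands in for the cpumask from cpupri_find().
 */
#include <stdio.h>

static int pick_cpu(unsigned long lowest_mask, int task_cpu, int this_cpu)
{
	if (lowest_mask & (1UL << task_cpu))
		return task_cpu;			/* cache-hot choice  */
	if (this_cpu != -1 && (lowest_mask & (1UL << this_cpu)))
		return this_cpu;			/* cheap to preempt  */
	if (lowest_mask)
		return __builtin_ctzl(lowest_mask);	/* first candidate   */
	return -1;					/* nowhere to go     */
}

int main(void)
{
	/* CPUs 1 and 3 currently run the lowest-priority work (mask 0xa). */
	printf("%d\n", pick_cpu(0xa, 3, 0));	/* 3: stay cache-hot         */
	printf("%d\n", pick_cpu(0xa, 2, 1));	/* 1: prefer this_cpu        */
	printf("%d\n", pick_cpu(0xa, 2, 0));	/* 1: first set bit          */
	return 0;
}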
Steven Rostedte8fa1362008-01-25 21:08:05 +01001218/* Will lock the rq it finds */
Ingo Molnar4df64c02008-01-25 21:08:15 +01001219static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
Steven Rostedte8fa1362008-01-25 21:08:05 +01001220{
1221 struct rq *lowest_rq = NULL;
Steven Rostedte8fa1362008-01-25 21:08:05 +01001222 int tries;
Ingo Molnar4df64c02008-01-25 21:08:15 +01001223 int cpu;
Steven Rostedte8fa1362008-01-25 21:08:05 +01001224
1225 for (tries = 0; tries < RT_MAX_TRIES; tries++) {
Gregory Haskins07b40322008-01-25 21:08:10 +01001226 cpu = find_lowest_rq(task);
Steven Rostedte8fa1362008-01-25 21:08:05 +01001227
Gregory Haskins2de0b462008-01-25 21:08:10 +01001228 if ((cpu == -1) || (cpu == rq->cpu))
Steven Rostedte8fa1362008-01-25 21:08:05 +01001229 break;
1230
Gregory Haskins07b40322008-01-25 21:08:10 +01001231 lowest_rq = cpu_rq(cpu);
1232
Steven Rostedte8fa1362008-01-25 21:08:05 +01001233 /* if the prio of this runqueue changed, try again */
Gregory Haskins07b40322008-01-25 21:08:10 +01001234 if (double_lock_balance(rq, lowest_rq)) {
			/*
			 * We had to unlock the run queue. In
			 * the meantime, the task could have
			 * migrated already or had its affinity changed.
			 * Also make sure that it wasn't scheduled on its rq.
			 */
Gregory Haskins07b40322008-01-25 21:08:10 +01001241 if (unlikely(task_rq(task) != rq ||
Rusty Russell96f874e22008-11-25 02:35:14 +10301242 !cpumask_test_cpu(lowest_rq->cpu,
1243 &task->cpus_allowed) ||
Gregory Haskins07b40322008-01-25 21:08:10 +01001244 task_running(rq, task) ||
Steven Rostedte8fa1362008-01-25 21:08:05 +01001245 !task->se.on_rq)) {
Ingo Molnar4df64c02008-01-25 21:08:15 +01001246
Steven Rostedte8fa1362008-01-25 21:08:05 +01001247 spin_unlock(&lowest_rq->lock);
1248 lowest_rq = NULL;
1249 break;
1250 }
1251 }
1252
1253 /* If this rq is still suitable use it. */
Gregory Haskinse864c492008-12-29 09:39:49 -05001254 if (lowest_rq->rt.highest_prio.curr > task->prio)
Steven Rostedte8fa1362008-01-25 21:08:05 +01001255 break;
1256
1257 /* try again */
Peter Zijlstra1b12bbc2008-08-11 09:30:22 +02001258 double_unlock_balance(rq, lowest_rq);
Steven Rostedte8fa1362008-01-25 21:08:05 +01001259 lowest_rq = NULL;
1260 }
1261
1262 return lowest_rq;
1263}
1264
Gregory Haskins917b6272008-12-29 09:39:53 -05001265static inline int has_pushable_tasks(struct rq *rq)
1266{
1267 return !plist_head_empty(&rq->rt.pushable_tasks);
1268}
1269
1270static struct task_struct *pick_next_pushable_task(struct rq *rq)
1271{
1272 struct task_struct *p;
1273
1274 if (!has_pushable_tasks(rq))
1275 return NULL;
1276
1277 p = plist_first_entry(&rq->rt.pushable_tasks,
1278 struct task_struct, pushable_tasks);
1279
1280 BUG_ON(rq->cpu != task_cpu(p));
1281 BUG_ON(task_current(rq, p));
1282 BUG_ON(p->rt.nr_cpus_allowed <= 1);
1283
1284 BUG_ON(!p->se.on_rq);
1285 BUG_ON(!rt_task(p));
1286
1287 return p;
1288}
1289
/*
 * If the current CPU has more than one RT task, see if the non-running
 * task can migrate over to a CPU that is running a task of lesser
 * priority.
 */
Gregory Haskins697f0a42008-01-25 21:08:09 +01001295static int push_rt_task(struct rq *rq)
Steven Rostedte8fa1362008-01-25 21:08:05 +01001296{
1297 struct task_struct *next_task;
1298 struct rq *lowest_rq;
Steven Rostedte8fa1362008-01-25 21:08:05 +01001299
Gregory Haskinsa22d7fc2008-01-25 21:08:12 +01001300 if (!rq->rt.overloaded)
1301 return 0;
1302
Gregory Haskins917b6272008-12-29 09:39:53 -05001303 next_task = pick_next_pushable_task(rq);
Steven Rostedte8fa1362008-01-25 21:08:05 +01001304 if (!next_task)
1305 return 0;
1306
1307 retry:
Gregory Haskins697f0a42008-01-25 21:08:09 +01001308 if (unlikely(next_task == rq->curr)) {
Steven Rostedtf65eda42008-01-25 21:08:07 +01001309 WARN_ON(1);
Steven Rostedte8fa1362008-01-25 21:08:05 +01001310 return 0;
Steven Rostedtf65eda42008-01-25 21:08:07 +01001311 }
Steven Rostedte8fa1362008-01-25 21:08:05 +01001312
	/*
	 * It's possible that the next_task slipped in with a
	 * higher priority than current. If that's the case,
	 * just reschedule current.
	 */
Gregory Haskins697f0a42008-01-25 21:08:09 +01001318 if (unlikely(next_task->prio < rq->curr->prio)) {
1319 resched_task(rq->curr);
Steven Rostedte8fa1362008-01-25 21:08:05 +01001320 return 0;
1321 }
1322
Gregory Haskins697f0a42008-01-25 21:08:09 +01001323 /* We might release rq lock */
Steven Rostedte8fa1362008-01-25 21:08:05 +01001324 get_task_struct(next_task);
1325
1326 /* find_lock_lowest_rq locks the rq if found */
Gregory Haskins697f0a42008-01-25 21:08:09 +01001327 lowest_rq = find_lock_lowest_rq(next_task, rq);
Steven Rostedte8fa1362008-01-25 21:08:05 +01001328 if (!lowest_rq) {
1329 struct task_struct *task;
		/*
		 * find_lock_lowest_rq() releases rq->lock,
		 * so it is possible that next_task has migrated.
		 *
		 * We need to make sure that the task is still on the same
		 * run-queue and is also still the next task eligible for
		 * pushing.
		 */
Gregory Haskins917b6272008-12-29 09:39:53 -05001338 task = pick_next_pushable_task(rq);
Gregory Haskins15635132008-12-29 09:39:53 -05001339 if (task_cpu(next_task) == rq->cpu && task == next_task) {
			/*
			 * If we get here, the task hasn't moved at all, but
			 * it has failed to push. We will not try again,
			 * since the other cpus will pull from us when they
			 * are ready.
			 */
1346 dequeue_pushable_task(rq, next_task);
1347 goto out;
Steven Rostedte8fa1362008-01-25 21:08:05 +01001348 }
Gregory Haskins917b6272008-12-29 09:39:53 -05001349
Gregory Haskins15635132008-12-29 09:39:53 -05001350 if (!task)
1351 /* No more tasks, just exit */
1352 goto out;
1353
Gregory Haskins917b6272008-12-29 09:39:53 -05001354 /*
Gregory Haskins15635132008-12-29 09:39:53 -05001355 * Something has shifted, try again.
Gregory Haskins917b6272008-12-29 09:39:53 -05001356 */
Gregory Haskins15635132008-12-29 09:39:53 -05001357 put_task_struct(next_task);
1358 next_task = task;
1359 goto retry;
Steven Rostedte8fa1362008-01-25 21:08:05 +01001360 }
1361
Gregory Haskins697f0a42008-01-25 21:08:09 +01001362 deactivate_task(rq, next_task, 0);
Steven Rostedte8fa1362008-01-25 21:08:05 +01001363 set_task_cpu(next_task, lowest_rq->cpu);
1364 activate_task(lowest_rq, next_task, 0);
1365
1366 resched_task(lowest_rq->curr);
1367
Peter Zijlstra1b12bbc2008-08-11 09:30:22 +02001368 double_unlock_balance(rq, lowest_rq);
Steven Rostedte8fa1362008-01-25 21:08:05 +01001369
Steven Rostedte8fa1362008-01-25 21:08:05 +01001370out:
1371 put_task_struct(next_task);
1372
Gregory Haskins917b6272008-12-29 09:39:53 -05001373 return 1;
Steven Rostedte8fa1362008-01-25 21:08:05 +01001374}
1375
Steven Rostedte8fa1362008-01-25 21:08:05 +01001376static void push_rt_tasks(struct rq *rq)
1377{
 1378	/* push_rt_task will return true if it moved an RT task */
1379 while (push_rt_task(rq))
1380 ;
1381}
1382
Steven Rostedtf65eda42008-01-25 21:08:07 +01001383static int pull_rt_task(struct rq *this_rq)
1384{
Ingo Molnar80bf3172008-01-25 21:08:17 +01001385 int this_cpu = this_rq->cpu, ret = 0, cpu;
Gregory Haskinsa8728942008-12-29 09:39:49 -05001386 struct task_struct *p;
Steven Rostedtf65eda42008-01-25 21:08:07 +01001387 struct rq *src_rq;
Steven Rostedtf65eda42008-01-25 21:08:07 +01001388
Gregory Haskins637f5082008-01-25 21:08:18 +01001389 if (likely(!rt_overloaded(this_rq)))
Steven Rostedtf65eda42008-01-25 21:08:07 +01001390 return 0;
1391
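	/*
	 * Walk every CPU marked RT-overloaded in our root domain and
	 * look for a task worth pulling onto this_rq.
	 */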
Rusty Russellc6c49272008-11-25 02:35:05 +10301392 for_each_cpu(cpu, this_rq->rd->rto_mask) {
Steven Rostedtf65eda42008-01-25 21:08:07 +01001393 if (this_cpu == cpu)
1394 continue;
1395
1396 src_rq = cpu_rq(cpu);
Gregory Haskins74ab8e42008-12-29 09:39:50 -05001397
1398 /*
1399 * Don't bother taking the src_rq->lock if the next highest
1400 * task is known to be lower-priority than our current task.
1401 * This may look racy, but if this value is about to go
1402 * logically higher, the src_rq will push this task away.
 1403	 * And if it's going logically lower, we do not care.
1404 */
1405 if (src_rq->rt.highest_prio.next >=
1406 this_rq->rt.highest_prio.curr)
1407 continue;
1408
Steven Rostedtf65eda42008-01-25 21:08:07 +01001409 /*
1410 * We can potentially drop this_rq's lock in
1411 * double_lock_balance, and another CPU could
Gregory Haskinsa8728942008-12-29 09:39:49 -05001412 * alter this_rq
Steven Rostedtf65eda42008-01-25 21:08:07 +01001413 */
Gregory Haskinsa8728942008-12-29 09:39:49 -05001414 double_lock_balance(this_rq, src_rq);
Steven Rostedtf65eda42008-01-25 21:08:07 +01001415
1416 /*
1417 * Are there still pullable RT tasks?
1418 */
Mike Galbraith614ee1f2008-01-25 21:08:30 +01001419 if (src_rq->rt.rt_nr_running <= 1)
1420 goto skip;
Steven Rostedtf65eda42008-01-25 21:08:07 +01001421
Steven Rostedtf65eda42008-01-25 21:08:07 +01001422 p = pick_next_highest_task_rt(src_rq, this_cpu);
1423
1424 /*
1425 * Do we have an RT task that preempts
1426 * the to-be-scheduled task?
1427 */
Gregory Haskinsa8728942008-12-29 09:39:49 -05001428 if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
Steven Rostedtf65eda42008-01-25 21:08:07 +01001429 WARN_ON(p == src_rq->curr);
1430 WARN_ON(!p->se.on_rq);
1431
1432 /*
1433 * There's a chance that p is higher in priority
1434 * than what's currently running on its cpu.
 1435	 * This is just that p is waking up and hasn't
1436 * had a chance to schedule. We only pull
1437 * p if it is lower in priority than the
Gregory Haskinsa8728942008-12-29 09:39:49 -05001438 * current task on the run queue
Steven Rostedtf65eda42008-01-25 21:08:07 +01001439 */
Gregory Haskinsa8728942008-12-29 09:39:49 -05001440 if (p->prio < src_rq->curr->prio)
Mike Galbraith614ee1f2008-01-25 21:08:30 +01001441 goto skip;
Steven Rostedtf65eda42008-01-25 21:08:07 +01001442
1443 ret = 1;
1444
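			/*
			 * Pull p over: dequeue it from src_rq and
			 * enqueue it here on this_rq.
			 */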
1445 deactivate_task(src_rq, p, 0);
1446 set_task_cpu(p, this_cpu);
1447 activate_task(this_rq, p, 0);
1448 /*
1449 * We continue with the search, just in
1450 * case there's an even higher prio task
 1451	 * in another runqueue. (low likelihood
1452 * but possible)
Steven Rostedtf65eda42008-01-25 21:08:07 +01001453 */
Steven Rostedtf65eda42008-01-25 21:08:07 +01001454 }
Mike Galbraith614ee1f2008-01-25 21:08:30 +01001455 skip:
Peter Zijlstra1b12bbc2008-08-11 09:30:22 +02001456 double_unlock_balance(this_rq, src_rq);
Steven Rostedtf65eda42008-01-25 21:08:07 +01001457 }
1458
1459 return ret;
1460}
1461
Steven Rostedt9a897c52008-01-25 21:08:22 +01001462static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
Steven Rostedtf65eda42008-01-25 21:08:07 +01001463{
1464 /* Try to pull RT tasks here if we lower this rq's prio */
Gregory Haskinse864c492008-12-29 09:39:49 -05001465 if (unlikely(rt_task(prev)) && rq->rt.highest_prio.curr > prev->prio)
Steven Rostedtf65eda42008-01-25 21:08:07 +01001466 pull_rt_task(rq);
1467}
1468
Gregory Haskins967fc042008-12-29 09:39:52 -05001469/*
1470 * assumes rq->lock is held
1471 */
1472static int needs_post_schedule_rt(struct rq *rq)
1473{
Gregory Haskins917b6272008-12-29 09:39:53 -05001474 return has_pushable_tasks(rq);
Gregory Haskins967fc042008-12-29 09:39:52 -05001475}
1476
Steven Rostedt9a897c52008-01-25 21:08:22 +01001477static void post_schedule_rt(struct rq *rq)
Steven Rostedte8fa1362008-01-25 21:08:05 +01001478{
1479 /*
Gregory Haskins967fc042008-12-29 09:39:52 -05001480 * This is only called if needs_post_schedule_rt() indicates that
1481 * we need to push tasks away
Steven Rostedte8fa1362008-01-25 21:08:05 +01001482 */
Gregory Haskins967fc042008-12-29 09:39:52 -05001483 spin_lock_irq(&rq->lock);
1484 push_rt_tasks(rq);
1485 spin_unlock_irq(&rq->lock);
Steven Rostedte8fa1362008-01-25 21:08:05 +01001486}
1487
Gregory Haskins8ae121a2008-04-23 07:13:29 -04001488/*
1489 * If we are not running and we are not going to reschedule soon, we should
1490 * try to push tasks away now
1491 */
Steven Rostedt9a897c52008-01-25 21:08:22 +01001492static void task_wake_up_rt(struct rq *rq, struct task_struct *p)
Steven Rostedt4642daf2008-01-25 21:08:07 +01001493{
Steven Rostedt9a897c52008-01-25 21:08:22 +01001494 if (!task_running(rq, p) &&
Gregory Haskins8ae121a2008-04-23 07:13:29 -04001495 !test_tsk_need_resched(rq->curr) &&
Gregory Haskins917b6272008-12-29 09:39:53 -05001496 has_pushable_tasks(rq) &&
Gregory Haskins777c2f32008-12-29 09:39:50 -05001497 p->rt.nr_cpus_allowed > 1)
Steven Rostedt4642daf2008-01-25 21:08:07 +01001498 push_rt_tasks(rq);
1499}
1500
Peter Williams43010652007-08-09 11:16:46 +02001501static unsigned long
Ingo Molnarbb44e5d2007-07-09 18:51:58 +02001502load_balance_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
Peter Williamse1d14842007-10-24 18:23:51 +02001503 unsigned long max_load_move,
1504 struct sched_domain *sd, enum cpu_idle_type idle,
1505 int *all_pinned, int *this_best_prio)
Ingo Molnarbb44e5d2007-07-09 18:51:58 +02001506{
Steven Rostedtc7a1e462008-01-25 21:08:07 +01001507 /* don't touch RT tasks */
1508 return 0;
Peter Williamse1d14842007-10-24 18:23:51 +02001509}
Ingo Molnarbb44e5d2007-07-09 18:51:58 +02001510
Peter Williamse1d14842007-10-24 18:23:51 +02001511static int
1512move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
1513 struct sched_domain *sd, enum cpu_idle_type idle)
1514{
Steven Rostedtc7a1e462008-01-25 21:08:07 +01001515 /* don't touch RT tasks */
1516 return 0;
Ingo Molnarbb44e5d2007-07-09 18:51:58 +02001517}
Ingo Molnardeeeccd2008-01-25 21:08:15 +01001518
Mike Traviscd8ba7c2008-03-26 14:23:49 -07001519static void set_cpus_allowed_rt(struct task_struct *p,
Rusty Russell96f874e22008-11-25 02:35:14 +10301520 const struct cpumask *new_mask)
Gregory Haskins73fe6aae2008-01-25 21:08:07 +01001521{
Rusty Russell96f874e22008-11-25 02:35:14 +10301522 int weight = cpumask_weight(new_mask);
Gregory Haskins73fe6aae2008-01-25 21:08:07 +01001523
1524 BUG_ON(!rt_task(p));
1525
1526 /*
1527 * Update the migration status of the RQ if we have an RT task
 1528	 * which is on a runqueue AND changing its weight value.
1529 */
Peter Zijlstra6f505b12008-01-25 21:08:30 +01001530 if (p->se.on_rq && (weight != p->rt.nr_cpus_allowed)) {
Gregory Haskins73fe6aae2008-01-25 21:08:07 +01001531 struct rq *rq = task_rq(p);
1532
Gregory Haskins917b6272008-12-29 09:39:53 -05001533 if (!task_current(rq, p)) {
1534 /*
1535 * Make sure we dequeue this task from the pushable list
 1536		 * before going further. It will either remain off
1537 * the list because we are no longer pushable, or it
1538 * will be requeued.
1539 */
1540 if (p->rt.nr_cpus_allowed > 1)
1541 dequeue_pushable_task(rq, p);
1542
1543 /*
1544 * Requeue if our weight is changing and still > 1
1545 */
1546 if (weight > 1)
1547 enqueue_pushable_task(rq, p);
1548
1549 }
1550
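		/*
		 * Adjust the rq's count of migratable RT tasks if p
		 * crosses the single-CPU boundary in either direction.
		 */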
Peter Zijlstra6f505b12008-01-25 21:08:30 +01001551 if ((p->rt.nr_cpus_allowed <= 1) && (weight > 1)) {
Gregory Haskins73fe6aae2008-01-25 21:08:07 +01001552 rq->rt.rt_nr_migratory++;
Peter Zijlstra6f505b12008-01-25 21:08:30 +01001553 } else if ((p->rt.nr_cpus_allowed > 1) && (weight <= 1)) {
Gregory Haskins73fe6aae2008-01-25 21:08:07 +01001554 BUG_ON(!rq->rt.rt_nr_migratory);
1555 rq->rt.rt_nr_migratory--;
1556 }
1557
Gregory Haskins398a1532009-01-14 09:10:04 -05001558 update_rt_migration(&rq->rt);
Gregory Haskins73fe6aae2008-01-25 21:08:07 +01001559 }
1560
Rusty Russell96f874e22008-11-25 02:35:14 +10301561 cpumask_copy(&p->cpus_allowed, new_mask);
Peter Zijlstra6f505b12008-01-25 21:08:30 +01001562 p->rt.nr_cpus_allowed = weight;
Gregory Haskins73fe6aae2008-01-25 21:08:07 +01001563}
Ingo Molnardeeeccd2008-01-25 21:08:15 +01001564
Ingo Molnarbdd7c812008-01-25 21:08:18 +01001565/* Assumes rq->lock is held */
Gregory Haskins1f11eb6a2008-06-04 15:04:05 -04001566static void rq_online_rt(struct rq *rq)
Ingo Molnarbdd7c812008-01-25 21:08:18 +01001567{
1568 if (rq->rt.overloaded)
1569 rt_set_overload(rq);
Gregory Haskins6e0534f2008-05-12 21:21:01 +02001570
Peter Zijlstra7def2be2008-06-05 14:49:58 +02001571 __enable_runtime(rq);
1572
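	/* Re-publish this rq's highest RT priority in the root domain's cpupri map */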
Gregory Haskinse864c492008-12-29 09:39:49 -05001573 cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
Ingo Molnarbdd7c812008-01-25 21:08:18 +01001574}
1575
1576/* Assumes rq->lock is held */
Gregory Haskins1f11eb6a2008-06-04 15:04:05 -04001577static void rq_offline_rt(struct rq *rq)
Ingo Molnarbdd7c812008-01-25 21:08:18 +01001578{
1579 if (rq->rt.overloaded)
1580 rt_clear_overload(rq);
Gregory Haskins6e0534f2008-05-12 21:21:01 +02001581
Peter Zijlstra7def2be2008-06-05 14:49:58 +02001582 __disable_runtime(rq);
1583
Gregory Haskins6e0534f2008-05-12 21:21:01 +02001584 cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
Ingo Molnarbdd7c812008-01-25 21:08:18 +01001585}
Steven Rostedtcb469842008-01-25 21:08:22 +01001586
1587/*
 1588 * When switching from the rt queue, we bring ourselves to a position
 1589 * where we might want to pull RT tasks from other runqueues.
1590 */
1591static void switched_from_rt(struct rq *rq, struct task_struct *p,
1592 int running)
1593{
1594 /*
1595 * If there are other RT tasks then we will reschedule
1596 * and the scheduling of the other RT tasks will handle
1597 * the balancing. But if we are the last RT task
1598 * we may need to handle the pulling of RT tasks
1599 * now.
1600 */
1601 if (!rq->rt.rt_nr_running)
1602 pull_rt_task(rq);
1603}
Rusty Russell3d8cbdf2008-11-25 09:58:41 +10301604
1605static inline void init_sched_rt_class(void)
1606{
1607 unsigned int i;
1608
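	/* Allocate each CPU's scratch cpumask on that CPU's local node */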
1609 for_each_possible_cpu(i)
Yinghai Lueaa95842009-06-06 14:51:36 -07001610 zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
Mike Travis6ca09df2008-12-31 18:08:45 -08001611 GFP_KERNEL, cpu_to_node(i));
Rusty Russell3d8cbdf2008-11-25 09:58:41 +10301612}
Steven Rostedte8fa1362008-01-25 21:08:05 +01001613#endif /* CONFIG_SMP */
Ingo Molnarbb44e5d2007-07-09 18:51:58 +02001614
Steven Rostedtcb469842008-01-25 21:08:22 +01001615/*
1616 * When switching a task to RT, we may overload the runqueue
1617 * with RT tasks. In this case we try to push them off to
1618 * other runqueues.
1619 */
1620static void switched_to_rt(struct rq *rq, struct task_struct *p,
1621 int running)
1622{
1623 int check_resched = 1;
1624
1625 /*
1626 * If we are already running, then there's nothing
1627 * that needs to be done. But if we are not running
1628 * we may need to preempt the current running task.
1629 * If that current running task is also an RT task
1630 * then see if we can move to another run queue.
1631 */
1632 if (!running) {
1633#ifdef CONFIG_SMP
1634 if (rq->rt.overloaded && push_rt_task(rq) &&
1635 /* Don't resched if we changed runqueues */
1636 rq != task_rq(p))
1637 check_resched = 0;
1638#endif /* CONFIG_SMP */
1639 if (check_resched && p->prio < rq->curr->prio)
1640 resched_task(rq->curr);
1641 }
1642}
1643
1644/*
1645 * Priority of the task has changed. This may cause
1646 * us to initiate a push or pull.
1647 */
1648static void prio_changed_rt(struct rq *rq, struct task_struct *p,
1649 int oldprio, int running)
1650{
1651 if (running) {
1652#ifdef CONFIG_SMP
1653 /*
1654 * If our priority decreases while running, we
1655 * may need to pull tasks to this runqueue.
1656 */
1657 if (oldprio < p->prio)
1658 pull_rt_task(rq);
1659 /*
1660 * If there's a higher priority task waiting to run
Steven Rostedt6fa46fa2008-03-05 10:00:12 -05001661 * then reschedule. Note, the above pull_rt_task
1662 * can release the rq lock and p could migrate.
1663 * Only reschedule if p is still on the same runqueue.
Steven Rostedtcb469842008-01-25 21:08:22 +01001664 */
Gregory Haskinse864c492008-12-29 09:39:49 -05001665 if (p->prio > rq->rt.highest_prio.curr && rq->curr == p)
Steven Rostedtcb469842008-01-25 21:08:22 +01001666 resched_task(p);
1667#else
1668 /* For UP simply resched on drop of prio */
1669 if (oldprio < p->prio)
1670 resched_task(p);
1671#endif /* CONFIG_SMP */
1672 } else {
1673 /*
 1674		 * This task is not running, but if its priority
 1675		 * is higher than that of the current running task,
 1676		 * then reschedule.
1677 */
1678 if (p->prio < rq->curr->prio)
1679 resched_task(rq->curr);
1680 }
1681}
1682
Peter Zijlstra78f2c7d2008-01-25 21:08:27 +01001683static void watchdog(struct rq *rq, struct task_struct *p)
1684{
1685 unsigned long soft, hard;
1686
1687 if (!p->signal)
1688 return;
1689
1690 soft = p->signal->rlim[RLIMIT_RTTIME].rlim_cur;
1691 hard = p->signal->rlim[RLIMIT_RTTIME].rlim_max;
1692
1693 if (soft != RLIM_INFINITY) {
1694 unsigned long next;
1695
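		/*
		 * rt.timeout counts ticks of RT runtime; compare it against
		 * the rlimit, converted from microseconds to ticks.
		 */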
1696 p->rt.timeout++;
1697 next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
Peter Zijlstra5a52dd52008-01-25 21:08:32 +01001698 if (p->rt.timeout > next)
Frank Mayharf06febc2008-09-12 09:54:39 -07001699 p->cputime_expires.sched_exp = p->se.sum_exec_runtime;
Peter Zijlstra78f2c7d2008-01-25 21:08:27 +01001700 }
1701}
Steven Rostedtcb469842008-01-25 21:08:22 +01001702
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01001703static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
Ingo Molnarbb44e5d2007-07-09 18:51:58 +02001704{
Peter Zijlstra67e2be02007-12-20 15:01:17 +01001705 update_curr_rt(rq);
1706
Peter Zijlstra78f2c7d2008-01-25 21:08:27 +01001707 watchdog(rq, p);
1708
Ingo Molnarbb44e5d2007-07-09 18:51:58 +02001709 /*
1710 * RR tasks need a special form of timeslice management.
1711 * FIFO tasks have no timeslices.
1712 */
1713 if (p->policy != SCHED_RR)
1714 return;
1715
Peter Zijlstrafa717062008-01-25 21:08:27 +01001716 if (--p->rt.time_slice)
Ingo Molnarbb44e5d2007-07-09 18:51:58 +02001717 return;
1718
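	/* Timeslice exhausted: refill it before requeueing */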
Peter Zijlstrafa717062008-01-25 21:08:27 +01001719 p->rt.time_slice = DEF_TIMESLICE;
Ingo Molnarbb44e5d2007-07-09 18:51:58 +02001720
Dmitry Adamushko98fbc792007-08-24 20:39:10 +02001721 /*
1722 * Requeue to the end of queue if we are not the only element
1723 * on the queue:
1724 */
Peter Zijlstrafa717062008-01-25 21:08:27 +01001725 if (p->rt.run_list.prev != p->rt.run_list.next) {
Dmitry Adamushko7ebefa82008-07-01 23:32:15 +02001726 requeue_task_rt(rq, p, 0);
Dmitry Adamushko98fbc792007-08-24 20:39:10 +02001727 set_tsk_need_resched(p);
1728 }
Ingo Molnarbb44e5d2007-07-09 18:51:58 +02001729}
1730
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02001731static void set_curr_task_rt(struct rq *rq)
1732{
1733 struct task_struct *p = rq->curr;
1734
1735 p->se.exec_start = rq->clock;
Gregory Haskins917b6272008-12-29 09:39:53 -05001736
1737 /* The running task is never eligible for pushing */
1738 dequeue_pushable_task(rq, p);
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02001739}
1740
Harvey Harrison2abdad02008-04-25 10:53:13 -07001741static const struct sched_class rt_sched_class = {
Ingo Molnar5522d5d2007-10-15 17:00:12 +02001742 .next = &fair_sched_class,
Ingo Molnarbb44e5d2007-07-09 18:51:58 +02001743 .enqueue_task = enqueue_task_rt,
1744 .dequeue_task = dequeue_task_rt,
1745 .yield_task = yield_task_rt,
1746
1747 .check_preempt_curr = check_preempt_curr_rt,
1748
1749 .pick_next_task = pick_next_task_rt,
1750 .put_prev_task = put_prev_task_rt,
1751
Peter Williams681f3e62007-10-24 18:23:51 +02001752#ifdef CONFIG_SMP
Li Zefan4ce72a22008-10-22 15:25:26 +08001753 .select_task_rq = select_task_rq_rt,
1754
Ingo Molnarbb44e5d2007-07-09 18:51:58 +02001755 .load_balance = load_balance_rt,
Peter Williamse1d14842007-10-24 18:23:51 +02001756 .move_one_task = move_one_task_rt,
Gregory Haskins73fe6aae2008-01-25 21:08:07 +01001757 .set_cpus_allowed = set_cpus_allowed_rt,
Gregory Haskins1f11eb6a2008-06-04 15:04:05 -04001758 .rq_online = rq_online_rt,
1759 .rq_offline = rq_offline_rt,
Steven Rostedt9a897c52008-01-25 21:08:22 +01001760 .pre_schedule = pre_schedule_rt,
Gregory Haskins967fc042008-12-29 09:39:52 -05001761 .needs_post_schedule = needs_post_schedule_rt,
Steven Rostedt9a897c52008-01-25 21:08:22 +01001762 .post_schedule = post_schedule_rt,
1763 .task_wake_up = task_wake_up_rt,
Steven Rostedtcb469842008-01-25 21:08:22 +01001764 .switched_from = switched_from_rt,
Peter Williams681f3e62007-10-24 18:23:51 +02001765#endif
Ingo Molnarbb44e5d2007-07-09 18:51:58 +02001766
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02001767 .set_curr_task = set_curr_task_rt,
Ingo Molnarbb44e5d2007-07-09 18:51:58 +02001768 .task_tick = task_tick_rt,
Steven Rostedtcb469842008-01-25 21:08:22 +01001769
1770 .prio_changed = prio_changed_rt,
1771 .switched_to = switched_to_rt,
Ingo Molnarbb44e5d2007-07-09 18:51:58 +02001772};
Peter Zijlstraada18de2008-06-19 14:22:24 +02001773
1774#ifdef CONFIG_SCHED_DEBUG
1775extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
1776
1777static void print_rt_stats(struct seq_file *m, int cpu)
1778{
1779 struct rt_rq *rt_rq;
1780
1781 rcu_read_lock();
1782 for_each_leaf_rt_rq(rt_rq, cpu_rq(cpu))
1783 print_rt_rq(m, cpu, rt_rq);
1784 rcu_read_unlock();
1785}
Dhaval Giani55e12e52008-06-24 23:39:43 +05301786#endif /* CONFIG_SCHED_DEBUG */
Rusty Russell0e3900e2008-11-25 02:35:13 +10301787