/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _KERNEL_SCHED_AUTOGROUP_H
#define _KERNEL_SCHED_AUTOGROUP_H

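/*
 * Automatic process grouping ("autogroup"): when enabled, every new
 * session gets its own task group, so CFS shares CPU between sessions
 * rather than between individual tasks. This keeps a heavily parallel
 * batch job (e.g. a kernel build) from starving an interactive
 * session. This header is meant to be included via the scheduler's
 * internal sched.h, which supplies the types used below.
 */
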
#ifdef CONFIG_SCHED_AUTOGROUP

struct autogroup {
	/*
	 * This reference count does not track how many threads are
	 * attached to this autogroup right now; it only stands for the
	 * number of tasks that could use this autogroup.
	 */
	struct kref		kref;
	struct task_group	*tg;
	struct rw_semaphore	lock;
	unsigned long		id;
	int			nice;
};

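/*
 * autogroup_init() is called once at boot to hook the init task up to
 * the root autogroup; autogroup_free() releases the autogroup state
 * hanging off a task group when that group is destroyed.
 */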
extern void autogroup_init(struct task_struct *init_task);
extern void autogroup_free(struct task_group *tg);

static inline bool task_group_is_autogroup(struct task_group *tg)
{
	return !!tg->autogroup;
}

extern bool task_wants_autogroup(struct task_struct *p, struct task_group *tg);

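/*
 * Pick the task group the scheduler should actually use for @p: if
 * autogroup is enabled at runtime and @p wants to be autogrouped
 * under @tg, route to the per-session autogroup's task group instead.
 */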
static inline struct task_group *
autogroup_task_group(struct task_struct *p, struct task_group *tg)
{
	extern unsigned int sysctl_sched_autogroup_enabled;
	int enabled = READ_ONCE(sysctl_sched_autogroup_enabled);

	if (enabled && task_wants_autogroup(p, tg))
		return p->signal->autogroup->tg;

	return tg;
}
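
/*
 * Usage sketch (modelled on sched_change_group() in
 * kernel/sched/core.c; exact call sites vary between kernel
 * versions): the cgroup-derived task group is filtered through
 * autogroup_task_group() so autogrouped tasks land in their
 * session's group.
 *
 *	tg = container_of(task_css_check(p, cpu_cgrp_id, true),
 *			  struct task_group, css);
 *	tg = autogroup_task_group(p, tg);
 *	p->sched_task_group = tg;
 */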

/* Format @tg's autogroup name into @buf; returns 0 if @tg is not an autogroup. */
extern int autogroup_path(struct task_group *tg, char *buf, int buflen);

#else /* !CONFIG_SCHED_AUTOGROUP */

static inline void autogroup_init(struct task_struct *init_task) { }
static inline void autogroup_free(struct task_group *tg) { }
static inline bool task_group_is_autogroup(struct task_group *tg)
{
	return false;
}

static inline struct task_group *
autogroup_task_group(struct task_struct *p, struct task_group *tg)
{
	return tg;
}

static inline int autogroup_path(struct task_group *tg, char *buf, int buflen)
{
	return 0;
}

#endif /* CONFIG_SCHED_AUTOGROUP */

#endif /* _KERNEL_SCHED_AUTOGROUP_H */