#include "sched.h"

#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/kallsyms.h>
#include <linux/utsname.h>
#include <linux/security.h>
#include <linux/export.h>

unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
static struct autogroup autogroup_default;
static atomic_t autogroup_seq_nr;

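/*
 * Hook the init task up to the default autogroup, which simply wraps
 * the root task group. The default group is static and never torn
 * down, so its kref only ever needs initializing here.
 */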
void __init autogroup_init(struct task_struct *init_task)
{
	autogroup_default.tg = &root_task_group;
	kref_init(&autogroup_default.kref);
	init_rwsem(&autogroup_default.lock);
	init_task->signal->autogroup = &autogroup_default;
}

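/*
 * Free the autogroup backing @tg. For ordinary (non-auto) task groups
 * tg->autogroup is NULL, which kfree() tolerates.
 */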
void autogroup_free(struct task_group *tg)
{
	kfree(tg->autogroup);
}

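/*
 * Final kref release: undo autogroup_create(). The RT fields were
 * borrowed from the root task group, so they must be cleared before
 * sched_destroy_group() frees the group's resources.
 */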
static inline void autogroup_destroy(struct kref *kref)
{
	struct autogroup *ag = container_of(kref, struct autogroup, kref);

#ifdef CONFIG_RT_GROUP_SCHED
	/* We've redirected RT tasks to the root task group... */
	ag->tg->rt_se = NULL;
	ag->tg->rt_rq = NULL;
#endif
	sched_offline_group(ag->tg);
	sched_destroy_group(ag->tg);
}

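/* Thin kref wrappers so callers never name the release function. */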
static inline void autogroup_kref_put(struct autogroup *ag)
{
	kref_put(&ag->kref, autogroup_destroy);
}

static inline struct autogroup *autogroup_kref_get(struct autogroup *ag)
{
	kref_get(&ag->kref);
	return ag;
}

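/*
 * Take a reference on @p's autogroup. If the sighand lock cannot be
 * taken, the task is exiting and its signal struct is going away, so
 * fall back to a reference on the default autogroup.
 */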
static inline struct autogroup *autogroup_task_get(struct task_struct *p)
{
	struct autogroup *ag;
	unsigned long flags;

	if (!lock_task_sighand(p, &flags))
		return autogroup_kref_get(&autogroup_default);

	ag = autogroup_kref_get(p->signal->autogroup);
	unlock_task_sighand(p, &flags);

	return ag;
}

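/*
 * Create a new autogroup wrapping a fresh task group parented to the
 * root task group. Any failure degrades to the default autogroup
 * rather than erroring out, so callers always get a usable group.
 */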
static inline struct autogroup *autogroup_create(void)
{
	struct autogroup *ag = kzalloc(sizeof(*ag), GFP_KERNEL);
	struct task_group *tg;

	if (!ag)
		goto out_fail;

	tg = sched_create_group(&root_task_group);

	if (IS_ERR(tg))
		goto out_free;

	kref_init(&ag->kref);
	init_rwsem(&ag->lock);
	ag->id = atomic_inc_return(&autogroup_seq_nr);
	ag->tg = tg;
#ifdef CONFIG_RT_GROUP_SCHED
	/*
	 * Autogroup RT tasks are redirected to the root task group
	 * so we don't have to move tasks around upon policy change,
	 * or flail around trying to allocate bandwidth on the fly.
	 * A bandwidth exception in __sched_setscheduler() allows
	 * the policy change to proceed.
	 */
	free_rt_sched_group(tg);
	tg->rt_se = root_task_group.rt_se;
	tg->rt_rq = root_task_group.rt_rq;
#endif
	tg->autogroup = ag;

	sched_online_group(tg, &root_task_group);
	return ag;

out_free:
	kfree(ag);
out_fail:
	if (printk_ratelimit()) {
		printk(KERN_WARNING "autogroup_create: %s failure.\n",
			ag ? "sched_create_group()" : "kzalloc()");
	}

	return autogroup_kref_get(&autogroup_default);
}

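/*
 * Only tasks that would otherwise run in the root task group are
 * eligible for autogroup placement; an explicit cgroup assignment
 * always takes precedence.
 */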
bool task_wants_autogroup(struct task_struct *p, struct task_group *tg)
{
	if (tg != &root_task_group)
		return false;

	/*
	 * We can only assume the task group can't go away on us if
	 * autogroup_move_group() can see us on ->thread_group list.
	 */
	if (p->flags & PF_EXITING)
		return false;

	return true;
}

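/*
 * Point @p's signal struct at @ag and migrate every thread in the
 * group. The sighand lock serializes concurrent movers; the old
 * group's reference is dropped only after the lock is released.
 */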
static void
autogroup_move_group(struct task_struct *p, struct autogroup *ag)
{
	struct autogroup *prev;
	struct task_struct *t;
	unsigned long flags;

	BUG_ON(!lock_task_sighand(p, &flags));

	prev = p->signal->autogroup;
	if (prev == ag) {
		unlock_task_sighand(p, &flags);
		return;
	}

	p->signal->autogroup = autogroup_kref_get(ag);

	if (!READ_ONCE(sysctl_sched_autogroup_enabled))
		goto out;

	for_each_thread(p, t)
		sched_move_task(t);
out:
	unlock_task_sighand(p, &flags);
	autogroup_kref_put(prev);
}

/* Allocates GFP_KERNEL, so this cannot be called under any spinlock. */
void sched_autogroup_create_attach(struct task_struct *p)
{
	struct autogroup *ag = autogroup_create();

	autogroup_move_group(p, ag);
	/* Drop the extra reference added by autogroup_create(). */
	autogroup_kref_put(ag);
}
EXPORT_SYMBOL(sched_autogroup_create_attach);

/* Cannot be called under siglock. Currently has no users. */
void sched_autogroup_detach(struct task_struct *p)
{
	autogroup_move_group(p, &autogroup_default);
}
EXPORT_SYMBOL(sched_autogroup_detach);

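/*
 * A new signal struct inherits the forking task's autogroup; the
 * reference is dropped again when the signal struct is freed.
 */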
void sched_autogroup_fork(struct signal_struct *sig)
{
	sig->autogroup = autogroup_task_get(current);
}

void sched_autogroup_exit(struct signal_struct *sig)
{
	autogroup_kref_put(sig->autogroup);
}

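/* "noautogroup" on the kernel command line disables the feature. */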
static int __init setup_autogroup(char *str)
{
	sysctl_sched_autogroup_enabled = 0;

	return 1;
}

__setup("noautogroup", setup_autogroup);

#ifdef CONFIG_PROC_FS

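/*
 * Back end for writing /proc/<pid>/autogroup: map the requested nice
 * level onto group shares. Unprivileged callers are rate limited to
 * roughly ten updates per second, since reweighting is a heavyweight,
 * globally locked operation.
 */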
int proc_sched_autogroup_set_nice(struct task_struct *p, int nice)
{
	static unsigned long next = INITIAL_JIFFIES;
	struct autogroup *ag;
	int err;

	if (nice < MIN_NICE || nice > MAX_NICE)
		return -EINVAL;

	err = security_task_setnice(current, nice);
	if (err)
		return err;

	if (nice < 0 && !can_nice(current, nice))
		return -EPERM;

	/* This is a heavy operation, taking global locks.. */
	if (!capable(CAP_SYS_ADMIN) && time_before(jiffies, next))
		return -EAGAIN;

	next = HZ / 10 + jiffies;
	ag = autogroup_task_get(p);

	down_write(&ag->lock);
	err = sched_group_set_shares(ag->tg, prio_to_weight[nice + 20]);
	if (!err)
		ag->nice = nice;
	up_write(&ag->lock);

	autogroup_kref_put(ag);

	return err;
}

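/*
 * Back end for reading /proc/<pid>/autogroup. Output looks like
 * (the id below is illustrative):
 *
 *	/autogroup-42 nice 0
 */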
void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m)
{
	struct autogroup *ag = autogroup_task_get(p);

	if (!task_group_is_autogroup(ag->tg))
		goto out;

	down_read(&ag->lock);
	seq_printf(m, "/autogroup-%ld nice %d\n", ag->id, ag->nice);
	up_read(&ag->lock);

out:
	autogroup_kref_put(ag);
}
#endif /* CONFIG_PROC_FS */

#ifdef CONFIG_SCHED_DEBUG
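/*
 * Emit the pseudo cgroup path ("/autogroup-<id>") that the scheduler
 * debug code prints for autogroup task groups.
 */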
int autogroup_path(struct task_group *tg, char *buf, int buflen)
{
	if (!task_group_is_autogroup(tg))
		return 0;

	return snprintf(buf, buflen, "%s-%ld", "/autogroup", tg->autogroup->id);
}
#endif /* CONFIG_SCHED_DEBUG */